-rw-r--r--  Android.mk | 2
-rw-r--r--  benchmark/Android.bp | 1
-rw-r--r--  build/Android.bp | 5
-rw-r--r--  build/Android.common.mk | 15
-rw-r--r--  build/Android.common_build.mk | 365
-rw-r--r--  build/Android.common_test.mk | 4
-rw-r--r--  build/Android.cpplint.mk | 2
-rw-r--r--  build/Android.executable.mk | 251
-rw-r--r--  build/Android.gtest.mk | 6
-rw-r--r--  build/art.go | 30
-rw-r--r--  compiler/Android.bp | 3
-rw-r--r--  compiler/common_compiler_test.cc | 1
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 71
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 40
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h | 11
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 140
-rw-r--r--  compiler/driver/compiler_driver.cc | 162
-rw-r--r--  compiler/driver/compiler_driver.h | 31
-rw-r--r--  compiler/image_test.cc | 471
-rw-r--r--  compiler/image_writer.cc | 338
-rw-r--r--  compiler/image_writer.h | 25
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 34
-rw-r--r--  compiler/oat_writer.cc | 76
-rw-r--r--  compiler/oat_writer.h | 14
-rw-r--r--  compiler/optimizing/code_generator.cc | 37
-rw-r--r--  compiler/optimizing/code_generator.h | 6
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 52
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 105
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 2145
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h | 405
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 40
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 40
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 57
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 58
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 2
-rw-r--r--  compiler/optimizing/codegen_test.cc | 32
-rw-r--r--  compiler/optimizing/common_arm.h | 127
-rw-r--r--  compiler/optimizing/common_arm64.h | 6
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.cc | 8
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_mips.cc | 7
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 2
-rw-r--r--  compiler/optimizing/inliner.cc | 47
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 93
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 14
-rw-r--r--  compiler/optimizing/intrinsics.cc | 4
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 2
-rw-r--r--  compiler/optimizing/locations.cc | 4
-rw-r--r--  compiler/optimizing/locations.h | 4
-rw-r--r--  compiler/optimizing/nodes.h | 87
-rw-r--r--  compiler/optimizing/sharpening.cc | 55
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 14
-rw-r--r--  dex2oat/Android.bp | 2
-rw-r--r--  dex2oat/dex2oat.cc | 79
-rw-r--r--  dexdump/Android.bp | 5
-rw-r--r--  dexlayout/Android.bp | 6
-rw-r--r--  dexlayout/dex_ir.cc | 487
-rw-r--r--  dexlayout/dex_ir.h | 576
-rw-r--r--  dexlayout/dex_ir_builder.cc | 457
-rw-r--r--  dexlayout/dexlayout.cc | 240
-rw-r--r--  dexlayout/dexlayout.h | 1
-rw-r--r--  dexlayout/dexlayout_main.cc | 8
-rw-r--r--  imgdiag/Android.bp | 5
-rw-r--r--  oatdump/Android.bp | 2
-rw-r--r--  patchoat/Android.bp | 3
-rw-r--r--  patchoat/patchoat.cc | 6
-rw-r--r--  profman/Android.bp | 4
-rw-r--r--  profman/profman.cc | 17
-rw-r--r--  runtime/Android.bp | 8
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 21
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 23
-rw-r--r--  runtime/arch/mips/asm_support_mips.S | 25
-rw-r--r--  runtime/arch/mips/fault_handler_mips.cc | 2
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 32
-rw-r--r--  runtime/arch/mips64/asm_support_mips64.S | 29
-rw-r--r--  runtime/arch/mips64/fault_handler_mips64.cc | 2
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 47
-rw-r--r--  runtime/arch/stub_test.cc | 1
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 25
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 22
-rw-r--r--  runtime/art_method-inl.h | 4
-rw-r--r--  runtime/art_method.cc | 8
-rw-r--r--  runtime/art_method.h | 172
-rw-r--r--  runtime/base/bit_utils.h | 7
-rw-r--r--  runtime/base/logging.cc | 216
-rw-r--r--  runtime/base/logging.h | 184
-rw-r--r--  runtime/base/mutex.cc | 33
-rw-r--r--  runtime/base/stl_util.h | 4
-rw-r--r--  runtime/base/unix_file/fd_file.cc | 22
-rw-r--r--  runtime/check_jni.cc | 10
-rw-r--r--  runtime/class_linker.cc | 11
-rw-r--r--  runtime/class_linker.h | 1
-rw-r--r--  runtime/class_linker_test.cc | 21
-rw-r--r--  runtime/class_table.cc | 4
-rw-r--r--  runtime/class_table.h | 4
-rw-r--r--  runtime/common_runtime_test.cc | 21
-rw-r--r--  runtime/common_runtime_test.h | 9
-rw-r--r--  runtime/dex_file.cc | 406
-rw-r--r--  runtime/dex_file.h | 88
-rw-r--r--  runtime/dex_file_annotations.cc | 60
-rw-r--r--  runtime/dex_file_annotations.h | 5
-rw-r--r--  runtime/dex_file_test.cc | 181
-rw-r--r--  runtime/dex_file_verifier_test.cc | 78
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 4
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 15
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 128
-rw-r--r--  runtime/fault_handler.cc | 2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 2
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc | 9
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 22
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 2
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 14
-rw-r--r--  runtime/gc/collector/semi_space.cc | 2
-rw-r--r--  runtime/gc/heap.cc | 14
-rw-r--r--  runtime/gc/reference_queue_test.cc | 16
-rw-r--r--  runtime/gc/space/image_space.cc | 81
-rw-r--r--  runtime/gc/space/image_space.h | 6
-rw-r--r--  runtime/gc/space/large_object_space.cc | 2
-rw-r--r--  runtime/gc/space/large_object_space_test.cc | 4
-rw-r--r--  runtime/gc/space/region_space.cc | 2
-rw-r--r--  runtime/image-inl.h | 1
-rw-r--r--  runtime/imt_conflict_table.h | 201
-rw-r--r--  runtime/imtable-inl.h | 37
-rw-r--r--  runtime/imtable.h | 17
-rw-r--r--  runtime/indirect_reference_table.cc | 2
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 4
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 4
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 2
-rw-r--r--  runtime/jdwp/jdwp_socket.cc | 10
-rw-r--r--  runtime/jit/jit.cc | 2
-rw-r--r--  runtime/jit/profile_saver.cc | 2
-rw-r--r--  runtime/jni_env_ext-inl.h | 7
-rw-r--r--  runtime/jni_internal.cc | 31
-rw-r--r--  runtime/mirror/abstract_method.h | 77
-rw-r--r--  runtime/mirror/class.cc | 4
-rw-r--r--  runtime/mirror/executable.cc (renamed from runtime/mirror/abstract_method.cc) | 23
-rw-r--r--  runtime/mirror/executable.h | 33
-rw-r--r--  runtime/mirror/method.cc | 4
-rw-r--r--  runtime/mirror/method.h | 6
-rw-r--r--  runtime/mirror/obj_ptr.h | 159
-rw-r--r--  runtime/mirror/object_test.cc | 58
-rw-r--r--  runtime/monitor.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMDebug.cc | 4
-rw-r--r--  runtime/native/java_lang_reflect_AbstractMethod.cc | 107
-rw-r--r--  runtime/native/java_lang_reflect_Executable.cc | 208
-rw-r--r--  runtime/native/java_lang_reflect_Executable.h (renamed from runtime/native/java_lang_reflect_AbstractMethod.h) | 8
-rw-r--r--  runtime/openjdkjvm/Android.bp | 5
-rw-r--r--  runtime/openjdkjvm/OpenjdkJvm.cc | 8
-rw-r--r--  runtime/openjdkjvmti/Android.bp | 8
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc | 75
-rw-r--r--  runtime/openjdkjvmti/transform.cc | 362
-rw-r--r--  runtime/openjdkjvmti/transform.h | 64
-rw-r--r--  runtime/quick_exception_handler.cc | 6
-rw-r--r--  runtime/reference_table.cc | 7
-rw-r--r--  runtime/reference_table_test.cc | 60
-rw-r--r--  runtime/reflection.cc | 19
-rw-r--r--  runtime/runtime.cc | 18
-rw-r--r--  runtime/runtime_android.cc | 11
-rw-r--r--  runtime/runtime_linux.cc | 19
-rw-r--r--  runtime/signal_catcher.cc | 2
-rw-r--r--  runtime/simulator/Android.bp | 1
-rw-r--r--  runtime/thread-inl.h | 1
-rw-r--r--  runtime/thread.cc | 65
-rw-r--r--  runtime/thread.h | 11
-rw-r--r--  runtime/thread_list.cc | 27
-rw-r--r--  runtime/utils.cc | 71
-rw-r--r--  runtime/utils.h | 2
-rw-r--r--  runtime/vdex_file.cc | 4
-rw-r--r--  runtime/vdex_file.h | 7
-rw-r--r--  runtime/verifier/method_verifier.cc | 9
-rw-r--r--  runtime/verifier/verifier_log_mode.h | 2
-rw-r--r--  runtime/well_known_classes.cc | 217
-rw-r--r--  runtime/well_known_classes.h | 48
-rw-r--r--  test/005-annotations/build | 3
-rw-r--r--  test/031-class-attributes/src/ClassAttrs.java | 4
-rwxr-xr-x  test/099-vmdebug/check | 2
-rwxr-xr-x  test/118-noimage-dex2oat/check | 2
-rwxr-xr-x  test/119-noimage-patchoat/check | 2
-rw-r--r--  test/137-cfi/cfi.cc | 2
-rwxr-xr-x  test/143-string-value/check | 2
-rw-r--r--  test/149-suspend-all-stress/suspend_all.cc | 8
-rw-r--r--  test/458-checker-instruct-simplification/src/Main.java | 24
-rw-r--r--  test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali | 65
-rw-r--r--  test/463-checker-boolean-simplifier/src/Main.java | 50
-rw-r--r--  test/555-checker-regression-x86const/build | 44
-rw-r--r--  test/555-checker-regression-x86const/expected.txt | 0
-rw-r--r--  test/555-checker-regression-x86const/info.txt | 2
-rw-r--r--  test/555-checker-regression-x86const/src/Main.java | 41
-rw-r--r--  test/565-checker-doublenegbitwise/src/Main.java | 39
-rw-r--r--  test/800-smali/expected.txt | 1
-rw-r--r--  test/800-smali/smali/b_31313170.smali | 22
-rw-r--r--  test/800-smali/src/Main.java | 3
-rwxr-xr-x[-rw-r--r--]  test/902-hello-transformation/build (renamed from test/555-checker-regression-x86const/run) | 7
-rw-r--r--  test/902-hello-transformation/expected.txt | 3
-rw-r--r--  test/902-hello-transformation/info.txt | 1
-rwxr-xr-x  test/902-hello-transformation/run | 43
-rw-r--r--  test/902-hello-transformation/src/Main.java | 31
-rw-r--r--  test/902-hello-transformation/src/Transform.java | 21
-rw-r--r--  test/902-hello-transformation/transform.cc | 154
-rw-r--r--  test/902-hello-transformation/transform.h | 30
-rwxr-xr-x  test/961-default-iface-resolution-gen/build (renamed from test/961-default-iface-resolut-generated/build) | 0
-rw-r--r--  test/961-default-iface-resolution-gen/expected.txt (renamed from test/961-default-iface-resolut-generated/expected.txt) | 0
-rw-r--r--  test/961-default-iface-resolution-gen/info.txt (renamed from test/961-default-iface-resolut-generated/info.txt) | 0
-rwxr-xr-x  test/961-default-iface-resolution-gen/util-src/generate_java.py (renamed from test/961-default-iface-resolut-generated/util-src/generate_java.py) | 0
-rwxr-xr-x  test/964-default-iface-init-gen/build (renamed from test/964-default-iface-init-generated/build) | 0
-rw-r--r--  test/964-default-iface-init-gen/expected.txt (renamed from test/964-default-iface-init-generated/expected.txt) | 0
-rw-r--r--  test/964-default-iface-init-gen/info.txt (renamed from test/964-default-iface-init-generated/info.txt) | 0
-rw-r--r--  test/964-default-iface-init-gen/src/Displayer.java (renamed from test/964-default-iface-init-generated/src/Displayer.java) | 0
-rwxr-xr-x  test/964-default-iface-init-gen/util-src/generate_java.py (renamed from test/964-default-iface-init-generated/util-src/generate_java.py) | 0
-rwxr-xr-x  test/968-default-partial-compile-gen/build (renamed from test/968-default-part-compile-generated/build) | 0
-rw-r--r--  test/968-default-partial-compile-gen/expected.txt (renamed from test/968-default-part-compile-generated/expected.txt) | 0
-rw-r--r--  test/968-default-partial-compile-gen/info.txt (renamed from test/968-default-part-compile-generated/info.txt) | 0
-rwxr-xr-x  test/968-default-partial-compile-gen/util-src/generate_java.py (renamed from test/968-default-part-compile-generated/util-src/generate_java.py) | 0
-rwxr-xr-x  test/968-default-partial-compile-gen/util-src/generate_smali.py (renamed from test/968-default-part-compile-generated/util-src/generate_smali.py) | 0
-rwxr-xr-x  test/970-iface-super-resolution-gen/build (renamed from test/970-iface-superresolution-generated/build) | 0
-rw-r--r--  test/970-iface-super-resolution-gen/expected.txt (renamed from test/970-iface-superresolution-generated/expected.txt) | 0
-rw-r--r--  test/970-iface-super-resolution-gen/info.txt (renamed from test/970-iface-superresolution-generated/info.txt) | 0
-rwxr-xr-x  test/970-iface-super-resolution-gen/util-src/generate_java.py (renamed from test/970-iface-superresolution-generated/util-src/generate_java.py) | 0
-rwxr-xr-x  test/970-iface-super-resolution-gen/util-src/generate_smali.py (renamed from test/970-iface-superresolution-generated/util-src/generate_smali.py) | 0
-rw-r--r--  test/Android.bp | 9
-rw-r--r--  test/Android.run-test.mk | 29
-rw-r--r--  test/ImageLayoutA/ImageLayoutA.java (renamed from test/555-checker-regression-x86const/src/Unresolved.java) | 5
-rw-r--r--  test/ImageLayoutB/ImageLayoutB.java | 25
-rwxr-xr-x  test/etc/run-test-jar | 3
-rwxr-xr-x  test/run-test | 5
-rw-r--r--  test/ti-agent/common_load.cc | 2
-rw-r--r--  tools/ahat/Android.mk | 2
-rw-r--r--  tools/ahat/README.txt | 2
-rw-r--r--  tools/ahat/src/HeapTable.java | 4
-rw-r--r--  tools/ahat/src/InstanceUtils.java | 94
-rw-r--r--  tools/ahat/src/ObjectHandler.java | 45
-rw-r--r--  tools/ahat/test-dump/Main.java | 17
-rw-r--r--  tools/ahat/test/InstanceUtilsTest.java | 56
-rw-r--r--  tools/bisection_search/README.md | 6
-rwxr-xr-x  tools/bisection_search/bisection_search.py | 56
-rw-r--r--  tools/common/__init__.py (renamed from tools/javafuzz/__init__.py) | 0
-rwxr-xr-x  tools/common/common.py (renamed from tools/bisection_search/common.py) | 197
-rw-r--r--  tools/jfuzz/Android.mk (renamed from tools/javafuzz/Android.mk) | 6
-rw-r--r--  tools/jfuzz/README.md (renamed from tools/javafuzz/README.md) | 72
-rw-r--r--  tools/jfuzz/__init__.py (renamed from build/Android.common_utils.mk) | 13
-rw-r--r--  tools/jfuzz/jfuzz.cc (renamed from tools/javafuzz/javafuzz.cc) | 147
-rwxr-xr-x  tools/jfuzz/run_dex_fuzz_test.py | 139
-rwxr-xr-x  tools/jfuzz/run_jfuzz_test.py (renamed from tools/javafuzz/run_java_fuzz_test.py) | 185
-rwxr-xr-x  tools/jfuzz/run_jfuzz_test_nightly.py | 65
-rw-r--r--  tools/libcore_failures_concurrent_collector.txt | 7
253 files changed, 8915 insertions, 4602 deletions
diff --git a/Android.mk b/Android.mk
index 0567b221a7..2647268d07 100644
--- a/Android.mk
+++ b/Android.mk
@@ -82,8 +82,6 @@ include $(art_path)/tools/ahat/Android.mk
include $(art_path)/tools/dexfuzz/Android.mk
include $(art_path)/libart_fake/Android.mk
-
-# ART_HOST_DEPENDENCIES depends on Android.executable.mk above for ART_HOST_EXECUTABLES
ART_HOST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(ART_HOST_DEX_DEPENDENCIES) \
diff --git a/benchmark/Android.bp b/benchmark/Android.bp
index 94ad0155ff..dd198889e9 100644
--- a/benchmark/Android.bp
+++ b/benchmark/Android.bp
@@ -28,6 +28,7 @@ art_cc_library {
shared_libs: [
"libart",
"libbacktrace",
+ "libbase",
"libnativehelper",
],
clang: true,
diff --git a/build/Android.bp b/build/Android.bp
index 4be43ec439..9156027dee 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -22,8 +22,6 @@ art_global_defaults {
name: "art_defaults",
clang: true,
cflags: [
- "-O3",
-
// Base set of cflags used by all things ART.
"-fno-rtti",
"-ggdb3",
@@ -149,10 +147,9 @@ art_global_defaults {
],
}
-cc_defaults {
+art_debug_defaults {
name: "art_debug_defaults",
cflags: [
- "-O2",
"-DDYNAMIC_ANNOTATIONS_ENABLED=1",
"-DVIXL_DEBUG",
"-UNDEBUG",
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 6befec5608..b0fa124e48 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -26,7 +26,6 @@ else
# Mac OS doesn't support low-4GB allocation in a 64-bit process. So we won't be able to create
# our heaps.
ART_HOST_SUPPORTED_ARCH := x86
- ART_MULTILIB_OVERRIDE_host := 32
endif
ART_COVERAGE := false
@@ -59,27 +58,19 @@ ifdef TARGET_2ND_ARCH
ifneq ($(filter %64,$(TARGET_ARCH)),)
ART_PHONY_TEST_TARGET_SUFFIX := 64
2ND_ART_PHONY_TEST_TARGET_SUFFIX := 32
- ART_TARGET_ARCH_32 := $(TARGET_2ND_ARCH)
- ART_TARGET_ARCH_64 := $(TARGET_ARCH)
else
# TODO: ???
$(warning Do not know what to do with this multi-target configuration!)
ART_PHONY_TEST_TARGET_SUFFIX := 32
2ND_ART_PHONY_TEST_TARGET_SUFFIX :=
- ART_TARGET_ARCH_32 := $(TARGET_ARCH)
- ART_TARGET_ARCH_64 :=
endif
else
ifneq ($(filter %64,$(TARGET_ARCH)),)
ART_PHONY_TEST_TARGET_SUFFIX := 64
2ND_ART_PHONY_TEST_TARGET_SUFFIX :=
- ART_TARGET_ARCH_32 :=
- ART_TARGET_ARCH_64 := $(TARGET_ARCH)
else
ART_PHONY_TEST_TARGET_SUFFIX := 32
2ND_ART_PHONY_TEST_TARGET_SUFFIX :=
- ART_TARGET_ARCH_32 := $(TARGET_ARCH)
- ART_TARGET_ARCH_64 :=
endif
endif
@@ -88,23 +79,17 @@ ART_HOST_SHLIB_EXTENSION ?= .so
ifeq ($(HOST_PREFER_32_BIT),true)
ART_PHONY_TEST_HOST_SUFFIX := 32
2ND_ART_PHONY_TEST_HOST_SUFFIX :=
- ART_HOST_ARCH_32 := x86
- ART_HOST_ARCH_64 :=
ART_HOST_ARCH := x86
2ND_ART_HOST_ARCH :=
2ND_HOST_ARCH :=
- ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH)
ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES)
2ND_ART_HOST_OUT_SHARED_LIBRARIES :=
else
ART_PHONY_TEST_HOST_SUFFIX := 64
2ND_ART_PHONY_TEST_HOST_SUFFIX := 32
- ART_HOST_ARCH_32 := x86
- ART_HOST_ARCH_64 := x86_64
ART_HOST_ARCH := x86_64
2ND_ART_HOST_ARCH := x86
2ND_HOST_ARCH := x86
- ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH)
ART_HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT_SHARED_LIBRARIES)
2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES)
endif
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 7edc1ccb37..4c82506516 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -18,7 +18,6 @@ ifndef ART_ANDROID_COMMON_BUILD_MK
ART_ANDROID_COMMON_BUILD_MK = true
include art/build/Android.common.mk
-include art/build/Android.common_utils.mk
# These can be overridden via the environment or by editing to
# enable/disable certain build configuration.
@@ -34,26 +33,6 @@ ART_BUILD_TARGET_DEBUG ?= true
ART_BUILD_HOST_NDEBUG ?= true
ART_BUILD_HOST_DEBUG ?= true
-# Set this to change what opt level ART is built at.
-ART_DEBUG_OPT_FLAG ?= -O2
-ART_NDEBUG_OPT_FLAG ?= -O3
-
-# Enable the static builds only for checkbuilds.
-ifneq (,$(filter checkbuild,$(MAKECMDGOALS)))
- ART_BUILD_HOST_STATIC ?= true
-else
- ART_BUILD_HOST_STATIC ?= false
-endif
-
-# Asan does not support static linkage
-ifdef SANITIZE_HOST
- ART_BUILD_HOST_STATIC := false
-endif
-
-ifneq ($(HOST_OS),linux)
- ART_BUILD_HOST_STATIC := false
-endif
-
ifeq ($(ART_BUILD_TARGET_NDEBUG),false)
$(info Disabling ART_BUILD_TARGET_NDEBUG)
endif
@@ -66,375 +45,31 @@ endif
ifeq ($(ART_BUILD_HOST_DEBUG),false)
$(info Disabling ART_BUILD_HOST_DEBUG)
endif
-ifeq ($(ART_BUILD_HOST_STATIC),true)
-$(info Enabling ART_BUILD_HOST_STATIC)
-endif
-
-ifeq ($(ART_TEST_DEBUG_GC),true)
- ART_DEFAULT_GC_TYPE := SS
- ART_USE_TLAB := true
-endif
-
-#
-# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
-#
-ART_DEFAULT_GC_TYPE ?= CMS
-art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
-
-ART_HOST_CLANG := true
-ART_TARGET_CLANG := true
ART_CPP_EXTENSION := .cc
-ART_C_INCLUDES := \
- external/icu/icu4c/source/common \
- external/lz4/lib \
- external/valgrind/include \
- external/valgrind \
- external/vixl/src \
- external/zlib \
-
-# We optimize Thread::Current() with a direct TLS access. This requires access to a private
-# Bionic header.
-# Note: technically we only need this on device, but this avoids the duplication of the includes.
-ART_C_INCLUDES += bionic/libc/private
-
-art_cflags :=
-
-# Warn about thread safety violations with clang.
-art_cflags += -Wthread-safety -Wthread-safety-negative
-
-# Warn if switch fallthroughs aren't annotated.
-art_cflags += -Wimplicit-fallthrough
-
-# Enable float equality warnings.
-art_cflags += -Wfloat-equal
-
-# Enable warning of converting ints to void*.
-art_cflags += -Wint-to-void-pointer-cast
-
-# Enable warning of wrong unused annotations.
-art_cflags += -Wused-but-marked-unused
-
-# Enable warning for deprecated language features.
-art_cflags += -Wdeprecated
-
-# Enable warning for unreachable break & return.
-art_cflags += -Wunreachable-code-break -Wunreachable-code-return
-
-# Bug: http://b/29823425 Disable -Wconstant-conversion and
-# -Wundefined-var-template for Clang update to r271374
-art_cflags += -Wno-constant-conversion -Wno-undefined-var-template
-
-# Enable missing-noreturn only on non-Mac. As lots of things are not implemented for Apple, it's
-# a pain.
-ifneq ($(HOST_OS),darwin)
- art_cflags += -Wmissing-noreturn
-endif
-
-# Base set of cflags used by all things ART.
-art_cflags += \
- -fno-rtti \
- -ggdb3 \
- -Wall \
- -Werror \
- -Wextra \
- -Wstrict-aliasing \
- -fstrict-aliasing \
- -Wunreachable-code \
- -Wredundant-decls \
- -Wshadow \
- -Wunused \
- -fvisibility=protected \
- $(art_default_gc_type_cflags)
-
-# The architectures the compiled tools are able to run on. Setting this to 'all' will cause all
-# architectures to be included.
-ART_TARGET_CODEGEN_ARCHS ?= svelte
-ART_HOST_CODEGEN_ARCHS ?= all
-
-ifeq ($(ART_TARGET_CODEGEN_ARCHS),all)
- ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH))
-else
- ifeq ($(ART_TARGET_CODEGEN_ARCHS),svelte)
- ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_ARCH_64) $(ART_TARGET_ARCH_32))
- endif
-endif
-ifeq ($(ART_HOST_CODEGEN_ARCHS),all)
- ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH))
-else
- ifeq ($(ART_HOST_CODEGEN_ARCHS),svelte)
- ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS) $(ART_HOST_ARCH_64) $(ART_HOST_ARCH_32))
- endif
-endif
-
-ifneq (,$(filter arm64,$(ART_TARGET_CODEGEN_ARCHS)))
- ART_TARGET_CODEGEN_ARCHS += arm
-endif
-ifneq (,$(filter mips64,$(ART_TARGET_CODEGEN_ARCHS)))
- ART_TARGET_CODEGEN_ARCHS += mips
-endif
-ifneq (,$(filter x86_64,$(ART_TARGET_CODEGEN_ARCHS)))
- ART_TARGET_CODEGEN_ARCHS += x86
-endif
-ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS))
-ifneq (,$(filter arm64,$(ART_HOST_CODEGEN_ARCHS)))
- ART_HOST_CODEGEN_ARCHS += arm
-endif
-ifneq (,$(filter mips64,$(ART_HOST_CODEGEN_ARCHS)))
- ART_HOST_CODEGEN_ARCHS += mips
-endif
-ifneq (,$(filter x86_64,$(ART_HOST_CODEGEN_ARCHS)))
- ART_HOST_CODEGEN_ARCHS += x86
-endif
-ART_HOST_CODEGEN_ARCHS := $(sort $(ART_HOST_CODEGEN_ARCHS))
-
-# Base set of cflags used by target build only
-art_target_cflags := \
- $(foreach target_arch,$(strip $(ART_TARGET_CODEGEN_ARCHS)), -DART_ENABLE_CODEGEN_$(target_arch))
-# Base set of cflags used by host build only
-art_host_cflags := \
- $(foreach host_arch,$(strip $(ART_HOST_CODEGEN_ARCHS)), -DART_ENABLE_CODEGEN_$(host_arch))
-
-# Base set of asflags used by all things ART.
-art_asflags :=
-
-# Missing declarations: too many at the moment, as we use "extern" quite a bit.
-# -Wmissing-declarations \
-
-
-
-ifdef ART_IMT_SIZE
- art_cflags += -DIMT_SIZE=$(ART_IMT_SIZE)
-else
- # Default is 43
- art_cflags += -DIMT_SIZE=43
-endif
-
-ifeq ($(ART_HEAP_POISONING),true)
- art_cflags += -DART_HEAP_POISONING=1
- art_asflags += -DART_HEAP_POISONING=1
-endif
-
-#
-# Used to change the read barrier type. Valid values are BAKER, BROOKS, TABLELOOKUP.
-# The default is BAKER.
-#
-ART_READ_BARRIER_TYPE ?= BAKER
-
-ifeq ($(ART_USE_READ_BARRIER),true)
- art_cflags += -DART_USE_READ_BARRIER=1
- art_cflags += -DART_READ_BARRIER_TYPE_IS_$(ART_READ_BARRIER_TYPE)=1
- art_asflags += -DART_USE_READ_BARRIER=1
- art_asflags += -DART_READ_BARRIER_TYPE_IS_$(ART_READ_BARRIER_TYPE)=1
-
- # Temporarily override -fstack-protector-strong with -fstack-protector to avoid a major
- # slowdown with the read barrier config. b/26744236.
- art_cflags += -fstack-protector
-endif
-
-ifeq ($(ART_USE_TLAB),true)
- art_cflags += -DART_USE_TLAB=1
-endif
-
-# Are additional statically-linked ART host binaries (dex2oats,
-# oatdumps, etc.) getting built?
-ifeq ($(ART_BUILD_HOST_STATIC),true)
- art_cflags += -DART_BUILD_HOST_STATIC=1
-endif
-
-# Temporary flag allowing to disable recent changes in oat file management.
-ifneq ($(ART_ENABLE_VDEX),false)
- art_cflags += -DART_ENABLE_VDEX
-endif
-
-# Cflags for non-debug ART and ART tools.
-art_non_debug_cflags := \
- $(ART_NDEBUG_OPT_FLAG)
-
-# Cflags for debug ART and ART tools.
-art_debug_cflags := \
- $(ART_DEBUG_OPT_FLAG) \
- -DDYNAMIC_ANNOTATIONS_ENABLED=1 \
- -DVIXL_DEBUG \
- -UNDEBUG
-
-# Assembler flags for non-debug ART and ART tools.
-art_non_debug_asflags :=
-
-# Assembler flags for debug ART and ART tools.
-art_debug_asflags := -UNDEBUG
-
-art_host_non_debug_cflags := $(art_non_debug_cflags)
-art_target_non_debug_cflags := $(art_non_debug_cflags)
-
-###
-# Frame size
-###
-
-# Size of the stack-overflow gap.
-ART_STACK_OVERFLOW_GAP_arm := 8192
-ART_STACK_OVERFLOW_GAP_arm64 := 8192
-ART_STACK_OVERFLOW_GAP_mips := 16384
-ART_STACK_OVERFLOW_GAP_mips64 := 16384
-ART_STACK_OVERFLOW_GAP_x86 := 8192
-ART_STACK_OVERFLOW_GAP_x86_64 := 8192
-ART_COMMON_STACK_OVERFLOW_DEFINES := \
- -DART_STACK_OVERFLOW_GAP_arm=$(ART_STACK_OVERFLOW_GAP_arm) \
- -DART_STACK_OVERFLOW_GAP_arm64=$(ART_STACK_OVERFLOW_GAP_arm64) \
- -DART_STACK_OVERFLOW_GAP_mips=$(ART_STACK_OVERFLOW_GAP_mips) \
- -DART_STACK_OVERFLOW_GAP_mips64=$(ART_STACK_OVERFLOW_GAP_mips64) \
- -DART_STACK_OVERFLOW_GAP_x86=$(ART_STACK_OVERFLOW_GAP_x86) \
- -DART_STACK_OVERFLOW_GAP_x86_64=$(ART_STACK_OVERFLOW_GAP_x86_64) \
-
-# Keep these as small as possible. We have separate values as we have some host vs target
-# specific code (and previously GCC vs Clang).
-ART_HOST_FRAME_SIZE_LIMIT := 1736
-ART_TARGET_FRAME_SIZE_LIMIT := 1736
-
-# Frame size adaptations for instrumented builds.
-ifdef SANITIZE_TARGET
- ART_TARGET_FRAME_SIZE_LIMIT := 6400
-endif
-
-# Add frame-size checks for non-debug builds.
-ifeq ($(HOST_OS),linux)
- ifneq ($(ART_COVERAGE),true)
- ifneq ($(NATIVE_COVERAGE),true)
- art_host_non_debug_cflags += -Wframe-larger-than=$(ART_HOST_FRAME_SIZE_LIMIT)
- art_target_non_debug_cflags += -Wframe-larger-than=$(ART_TARGET_FRAME_SIZE_LIMIT)
- endif
- endif
-endif
-
-
-ART_HOST_CFLAGS := $(art_cflags)
-ART_TARGET_CFLAGS := $(art_cflags)
-
-ART_HOST_ASFLAGS := $(art_asflags)
-ART_TARGET_ASFLAGS := $(art_asflags)
-
-# Bug: 15446488. We don't omit the frame pointer to work around
-# clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress.
-ART_HOST_CFLAGS += -fno-omit-frame-pointer
-
ifndef LIBART_IMG_HOST_BASE_ADDRESS
$(error LIBART_IMG_HOST_BASE_ADDRESS unset)
endif
-ART_HOST_CFLAGS += -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
-ART_HOST_CFLAGS += $(art_host_cflags)
-
-ART_HOST_CFLAGS += -DART_FRAME_SIZE_LIMIT=$(ART_HOST_FRAME_SIZE_LIMIT) \
- $(ART_COMMON_STACK_OVERFLOW_DEFINES)
-
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
-ART_TARGET_CFLAGS += -DART_TARGET \
- -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) \
-
-ART_TARGET_CFLAGS += -DART_FRAME_SIZE_LIMIT=$(ART_TARGET_FRAME_SIZE_LIMIT) \
- $(ART_COMMON_STACK_OVERFLOW_DEFINES)
-
-ifeq ($(ART_TARGET_LINUX),true)
-# Setting ART_TARGET_LINUX to true compiles art/ assuming that the target device
-# will be running linux rather than android.
-ART_TARGET_CFLAGS += -DART_TARGET_LINUX
-else
-# The ART_TARGET_ANDROID macro is passed to target builds, which check
-# against it instead of against __ANDROID__ (which is provided by target
-# toolchains).
-ART_TARGET_CFLAGS += -DART_TARGET_ANDROID
-endif
-
-ART_TARGET_CFLAGS += $(art_target_cflags)
-
-ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
-ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags)
-ART_HOST_DEBUG_CFLAGS := $(art_debug_cflags)
-ART_TARGET_DEBUG_CFLAGS := $(art_debug_cflags)
-
-ART_HOST_NON_DEBUG_ASFLAGS := $(art_non_debug_asflags)
-ART_TARGET_NON_DEBUG_ASFLAGS := $(art_non_debug_asflags)
-ART_HOST_DEBUG_ASFLAGS := $(art_debug_asflags)
-ART_TARGET_DEBUG_ASFLAGS := $(art_debug_asflags)
-
-ifndef LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA
- LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA=-0x1000000
-endif
-ifndef LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA
- LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA=0x1000000
-endif
-ART_HOST_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA)
-ART_HOST_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA)
-
-ifndef LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA
- LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA=-0x1000000
-endif
-ifndef LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA
- LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA=0x1000000
-endif
-ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA)
-ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
-
-# To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
-# ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
-
-# Clear locals now they've served their purpose.
-art_cflags :=
-art_asflags :=
-art_host_cflags :=
-art_target_cflags :=
-art_debug_cflags :=
-art_non_debug_cflags :=
-art_debug_asflags :=
-art_non_debug_asflags :=
-art_host_non_debug_cflags :=
-art_target_non_debug_cflags :=
-art_default_gc_type_cflags :=
-
-ART_TARGET_LDFLAGS :=
-
-# $(1): ndebug_or_debug
-define set-target-local-cflags-vars
- LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
- LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS)
- LOCAL_LDFLAGS += $(ART_TARGET_LDFLAGS)
- art_target_cflags_ndebug_or_debug := $(1)
- ifeq ($$(art_target_cflags_ndebug_or_debug),debug)
- LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_TARGET_DEBUG_ASFLAGS)
- else
- LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_TARGET_NON_DEBUG_ASFLAGS)
- endif
-
- # Clear locally used variables.
- art_target_cflags_ndebug_or_debug :=
-endef
-
# Support for disabling certain builds.
ART_BUILD_TARGET := false
ART_BUILD_HOST := false
-ART_BUILD_NDEBUG := false
-ART_BUILD_DEBUG := false
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
ART_BUILD_TARGET := true
- ART_BUILD_NDEBUG := true
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
ART_BUILD_TARGET := true
- ART_BUILD_DEBUG := true
endif
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
ART_BUILD_HOST := true
- ART_BUILD_NDEBUG := true
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
ART_BUILD_HOST := true
- ART_BUILD_DEBUG := true
endif
endif # ART_ANDROID_COMMON_BUILD_MK
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 8124ca311d..449502c771 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -26,10 +26,6 @@ else
ART_HOST_TEST_DIR := /tmp/$(USER)/test-art-$(shell echo $$PPID)
endif
-# We need to set a define for the nativetest dir so that common_runtime_test will know the right
-# path. (The problem is being a 32b test on 64b device, which is still located in nativetest64).
-ART_TARGET_CFLAGS += -DART_TARGET_NATIVETEST_DIR=${ART_TARGET_NATIVETEST_DIR}
-
# List of known broken tests that we won't attempt to execute. The test name must be the full
# rule name such as test-art-host-oat-optimizing-HelloWorld64.
ART_TEST_KNOWN_BROKEN :=
diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk
index 03791f3907..d09f2902db 100644
--- a/build/Android.cpplint.mk
+++ b/build/Android.cpplint.mk
@@ -18,7 +18,7 @@ include art/build/Android.common_build.mk
ART_CPPLINT := $(LOCAL_PATH)/tools/cpplint.py
ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf
-ART_CPPLINT_FLAGS := --quiet
+ART_CPPLINT_FLAGS := --quiet --root=$(ANDROID_BUILD_TOP)
ART_CPPLINT_INGORED := \
runtime/elf.h \
runtime/openjdkjvmti/jvmti.h
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
deleted file mode 100644
index f38a14d14c..0000000000
--- a/build/Android.executable.mk
+++ /dev/null
@@ -1,251 +0,0 @@
-#
-# Copyright (C) 2011 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-include art/build/Android.common_build.mk
-
-ART_EXECUTABLES_CFLAGS :=
-
-# $(1): executable ("d" will be appended for debug version, "s" will be appended for static version)
-# $(2): source
-# $(3): extra shared libraries
-# $(4): extra include directories
-# $(5): target or host
-# $(6): ndebug or debug
-# $(7): value for LOCAL_MULTILIB (empty means default)
-# $(8): static or shared (empty means shared, applies only for host)
-define build-art-executable
- ifneq ($(5),target)
- ifneq ($(5),host)
- $$(error expected target or host for argument 5, received $(5))
- endif
- endif
- ifneq ($(6),ndebug)
- ifneq ($(6),debug)
- $$(error expected ndebug or debug for argument 6, received $(6))
- endif
- endif
-
- art_executable := $(1)
- art_source := $(2)
- art_libraries := $(3)
- art_c_includes := $(4)
- art_target_or_host := $(5)
- art_ndebug_or_debug := $(6)
- art_multilib := $(7)
- art_static_or_shared := $(8)
- art_out_binary_name :=
-
- include $(CLEAR_VARS)
- LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
- LOCAL_MODULE_TAGS := optional
- LOCAL_SRC_FILES := $$(art_source)
- LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime art/cmdline $$(art_c_includes)
-
- ifeq ($$(art_static_or_shared),static)
- LOCAL_STATIC_LIBRARIES += $$(art_libraries)
- else
- LOCAL_SHARED_LIBRARIES += $$(art_libraries)
- endif
-
- ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := $$(art_executable)
- else #debug
- LOCAL_MODULE := $$(art_executable)d
- endif
-
- ifeq ($$(art_static_or_shared),static)
- LOCAL_MODULE := $$(LOCAL_MODULE)s
- endif
-
- LOCAL_CFLAGS := $(ART_EXECUTABLES_CFLAGS)
- # Mac OS linker doesn't understand --export-dynamic.
- ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host)
- LOCAL_LDFLAGS := -Wl,--export-dynamic
- endif
-
- ifeq ($$(art_target_or_host),target)
- LOCAL_CLANG := $(ART_TARGET_CLANG)
- $(call set-target-local-cflags-vars,$(6))
- LOCAL_SHARED_LIBRARIES += libdl
- else # host
- LOCAL_CLANG := $(ART_HOST_CLANG)
- LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
- ifeq ($$(art_ndebug_or_debug),debug)
- LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS)
- else
- LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
- LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS)
- endif
- LOCAL_LDLIBS += -lpthread -ldl
- ifeq ($$(art_static_or_shared),static)
- LOCAL_LDFLAGS += -static
- # We need this because GC stress mode makes use of _Unwind_GetIP and _Unwind_Backtrace and
- # the symbols are also defined in libgcc_eh.a(unwind-dw2.o)
- # TODO: Having this is not ideal as it might obscure errors. Try to get rid of it.
- LOCAL_LDFLAGS += -z muldefs
- ifeq ($$(HOST_OS),linux)
- LOCAL_LDLIBS += -lrt -lncurses -ltinfo
- endif
- ifeq ($$(HOST_OS),darwin)
- LOCAL_LDLIBS += -lncurses -ltinfo
- endif
- endif
-
- endif
-
- # If dynamically linked add libart by default. Statically linked executables
- # needs to specify it in art_libraries to ensure proper ordering.
- ifeq ($$(art_ndebug_or_debug),ndebug)
- ifneq ($$(art_static_or_shared),static)
- LOCAL_SHARED_LIBRARIES += libart
- endif
- else # debug
- ifneq ($$(art_static_or_shared),static)
- LOCAL_SHARED_LIBRARIES += libartd
- endif
- endif
-
- LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_utils.mk
- LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.executable.mk
-
- ifeq ($$(art_target_or_host),target)
- LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
- endif
-
- ifdef ART_MULTILIB_OVERRIDE_$$(art_target_or_host)
- art_multilib := $$(ART_MULTILIB_OVERRIDE_$$(art_target_or_host))
- endif
-
- LOCAL_MULTILIB := $$(art_multilib)
- art_out_binary_name := $$(LOCAL_MODULE)
-
- # If multilib=both (potentially building both 32-bit and 64-bit), need to provide stem.
- ifeq ($$(art_multilib),both)
- # Set up a 32-bit/64-bit stem if we are building both binaries.
- # In this case, the 32-bit binary has an additional 32-bit suffix.
- LOCAL_MODULE_STEM_32 := $$(LOCAL_MODULE)32
- LOCAL_MODULE_STEM_64 := $$(LOCAL_MODULE)
-
- # Remember the binary names so we can add them to the global art executables list later.
- art_out_binary_name := $$(LOCAL_MODULE_STEM_32) $$(LOCAL_MODULE_STEM_64)
-
- # For single-architecture targets, remove any binary name suffixes.
- ifeq ($$(art_target_or_host),target)
- ifeq (,$(TARGET_2ND_ARCH))
- LOCAL_MODULE_STEM_32 := $$(LOCAL_MODULE)
- art_out_binary_name := $$(LOCAL_MODULE)
- endif
- endif
-
- # For single-architecture hosts, remove any binary name suffixes.
- ifeq ($$(art_target_or_host),host)
- ifeq (,$(HOST_2ND_ARCH))
- LOCAL_MODULE_STEM_32 := $$(LOCAL_MODULE)
- art_out_binary_name := $$(LOCAL_MODULE)
- endif
- endif
- endif
-
- LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
-
- ifeq ($$(art_target_or_host),target)
- include $(BUILD_EXECUTABLE)
- else # host
- LOCAL_IS_HOST_MODULE := true
- include $(BUILD_HOST_EXECUTABLE)
- endif
-
- # Clear out local variables now that we're done with them.
- art_executable :=
- art_source :=
- art_libraries :=
- art_c_includes :=
- art_target_or_host :=
- art_ndebug_or_debug :=
- art_multilib :=
- art_static_or_shared :=
- art_out_binary_name :=
-
-endef
-
-#
-# Build many art executables from multiple variations (debug/ndebug, host/target, 32/64bit).
-# By default only either 32-bit or 64-bit is built (but not both -- see multilib arg).
-# All other variations are gated by ANDROID_BUILD_(TARGET|HOST)_[N]DEBUG.
-# The result must be eval-uated.
-#
-# $(1): executable name
-# $(2): source files
-# $(3): library dependencies (common); debug prefix is added on as necessary automatically.
-# $(4): library dependencies (target only)
-# $(5): library dependencies (host only)
-# $(6): extra include directories
-# $(7): multilib (default: empty), valid values: {,32,64,both})
-# $(8): host prefer 32-bit: {true, false} (default: false). If argument
-# `multilib` is explicitly set to 64, ignore the "host prefer 32-bit"
-# setting and only build a 64-bit executable on host.
-define build-art-multi-executable
- $(foreach debug_flavor,ndebug debug,
- $(foreach target_flavor,host target,
- art-multi-binary-name := $(1)
- art-multi-source-files := $(2)
- art-multi-lib-dependencies := $(3)
- art-multi-lib-dependencies-target := $(4)
- art-multi-lib-dependencies-host := $(5)
- art-multi-include-extra := $(6)
- art-multi-multilib := $(7)
- art-multi-host-prefer-32-bit := $(8)
-
- # Add either -host or -target specific lib dependencies to the lib dependencies.
- art-multi-lib-dependencies += $$(art-multi-lib-dependencies-$(target_flavor))
-
- # Replace libart- prefix with libartd- for debug flavor.
- ifeq ($(debug_flavor),debug)
- art-multi-lib-dependencies := $$(subst libart-,libartd-,$$(art-multi-lib-dependencies))
- endif
-
- # Build the env guard var name, e.g. ART_BUILD_HOST_NDEBUG.
- art-multi-env-guard := $$(call art-string-to-uppercase,ART_BUILD_$(target_flavor)_$(debug_flavor))
-
- ifeq ($(target_flavor),host)
- ifeq ($$(art-multi-host-prefer-32-bit),true)
- ifneq ($$(art-multi-multilib),64)
- art-multi-multilib := 32
- endif
- endif
- endif
-
- # Build the art executable only if the corresponding env guard was set.
- ifeq ($$($$(art-multi-env-guard)),true)
- $$(eval $$(call build-art-executable,$$(art-multi-binary-name),$$(art-multi-source-files),$$(art-multi-lib-dependencies),$$(art-multi-include-extra),$(target_flavor),$(debug_flavor),$$(art-multi-multilib)))
- endif
-
- # Clear locals now they've served their purpose.
- art-multi-binary-name :=
- art-multi-source-files :=
- art-multi-lib-dependencies :=
- art-multi-lib-dependencies-target :=
- art-multi-lib-dependencies-host :=
- art-multi-include-extra :=
- art-multi-multilib :=
- art-multi-host-prefer-32-bit :=
- art-multi-env-guard :=
- )
- )
-endef
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c70f005878..4f63099930 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -27,6 +27,8 @@ GTEST_DEX_DIRECTORIES := \
AllFields \
ExceptionHandle \
GetMethodSignature \
+ ImageLayoutA \
+ ImageLayoutB \
Instrumentation \
Interfaces \
Lookup \
@@ -84,6 +86,7 @@ ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
+ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
@@ -223,8 +226,6 @@ ifneq ($(HOST_PREFER_32_BIT),true)
$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_$(m)))
endif
-ART_TEST_CFLAGS :=
-
# Variables holding collections of gtest pre-requisits used to run a number of gtests.
ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
@@ -572,7 +573,6 @@ RUNTIME_GTEST_TARGET_SRC_FILES :=
RUNTIME_GTEST_HOST_SRC_FILES :=
COMPILER_GTEST_TARGET_SRC_FILES :=
COMPILER_GTEST_HOST_SRC_FILES :=
-ART_TEST_CFLAGS :=
ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST_RULES :=
diff --git a/build/art.go b/build/art.go
index ba5521a9ae..0ae6c8fa58 100644
--- a/build/art.go
+++ b/build/art.go
@@ -30,6 +30,9 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
var cflags []string
var asflags []string
+ opt := envDefault(ctx, "ART_NDEBUG_OPT_FLAG", "-O3")
+ cflags = append(cflags, opt)
+
tlab := false
gcType := envDefault(ctx, "ART_DEFAULT_GC_TYPE", "CMS")
@@ -75,6 +78,15 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
return cflags, asflags
}
+func debugFlags(ctx android.BaseContext) []string {
+ var cflags []string
+
+ opt := envDefault(ctx, "ART_DEBUG_OPT_FLAG", "-O2")
+ cflags = append(cflags, opt)
+
+ return cflags
+}
+
func deviceFlags(ctx android.BaseContext) []string {
var cflags []string
deviceFrameSizeLimit := 1736
@@ -143,6 +155,16 @@ func globalDefaults(ctx android.LoadHookContext) {
ctx.AppendProperties(p)
}
+func debugDefaults(ctx android.LoadHookContext) {
+ type props struct {
+ Cflags []string
+ }
+
+ p := &props{}
+ p.Cflags = debugFlags(ctx)
+ ctx.AppendProperties(p)
+}
+
func customLinker(ctx android.LoadHookContext) {
linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "")
if linker != "" {
@@ -206,6 +228,7 @@ func init() {
soong.RegisterModuleType("art_cc_test_library", artTestLibrary)
soong.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
soong.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
+ soong.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
}
func artGlobalDefaultsFactory() (blueprint.Module, []interface{}) {
@@ -215,6 +238,13 @@ func artGlobalDefaultsFactory() (blueprint.Module, []interface{}) {
return module, props
}
+func artDebugDefaultsFactory() (blueprint.Module, []interface{}) {
+ module, props := artDefaultsFactory()
+ android.AddLoadHook(module, debugDefaults)
+
+ return module, props
+}
+
func artDefaultsFactory() (blueprint.Module, []interface{}) {
c := &codegenProperties{}
module, props := cc.DefaultsFactory(c)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 8a2c94a90a..f264d3023b 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -100,6 +100,7 @@ art_cc_defaults {
"linker/arm/relative_patcher_arm_base.cc",
"linker/arm/relative_patcher_thumb2.cc",
"optimizing/code_generator_arm.cc",
+ "optimizing/code_generator_arm_vixl.cc",
"optimizing/dex_cache_array_fixups_arm.cc",
"optimizing/instruction_simplifier_arm.cc",
"optimizing/instruction_simplifier_shared.cc",
@@ -184,6 +185,7 @@ art_cc_defaults {
},
generated_sources: ["art_compiler_operator_srcs"],
shared_libs: [
+ "libbase",
"liblz4",
"liblzma",
],
@@ -287,6 +289,7 @@ art_cc_library {
shared_libs: [
"libartd-compiler",
"libart-runtime-gtest",
+ "libbase",
],
}
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 06a39b2537..2af4d72bb2 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -226,6 +226,7 @@ void CommonCompilerTest::TearDown() {
method_inliner_map_.reset();
verification_results_.reset();
compiler_options_.reset();
+ image_reservation_.reset();
CommonRuntimeTest::TearDown();
}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 3ce786e008..e0abf197d5 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -90,7 +90,7 @@ class DexCompiler {
// Compiles a virtual method invocation into a quick virtual method invocation.
// The method index is replaced by the vtable index where the corresponding
- // AbstractMethod can be found. Therefore, this does not involve any resolution
+ // Executable can be found. Therefore, this does not involve any resolution
// at runtime.
// Since the method index is encoded with 16 bits, we can replace it only if the
// vtable index can be encoded with 16 bits too.
@@ -277,39 +277,44 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
return;
}
uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- MethodReference target_method(&GetDexFile(), method_idx);
- InvokeType invoke_type = kVirtual;
- InvokeType original_invoke_type = invoke_type;
- int vtable_idx;
- uintptr_t direct_code;
- uintptr_t direct_method;
- // TODO: support devirtualization.
- const bool kEnableDevirtualization = false;
- bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc,
- false, kEnableDevirtualization,
- &invoke_type,
- &target_method, &vtable_idx,
- &direct_code, &direct_method);
- if (fast_path && original_invoke_type == invoke_type) {
- if (vtable_idx >= 0 && IsUint<16>(vtable_idx)) {
- VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
- << "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
- << " to " << Instruction::Name(new_opcode)
- << " by replacing method index " << method_idx
- << " by vtable index " << vtable_idx
- << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
- << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(new_opcode);
- // Replace method index by vtable index.
- if (is_range) {
- inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
- } else {
- inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
- }
- quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
- }
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(unit_.GetClassLoader())));
+
+ ClassLinker* class_linker = unit_.GetClassLinker();
+ ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::kForceICCECheck>(
+ GetDexFile(),
+ method_idx,
+ unit_.GetDexCache(),
+ class_loader,
+ /* referrer */ nullptr,
+ kVirtual);
+
+ if (UNLIKELY(resolved_method == nullptr)) {
+ // Clean up any exception left by type resolution.
+ soa.Self()->ClearException();
+ return;
+ }
+
+ uint32_t vtable_idx = resolved_method->GetMethodIndex();
+ DCHECK(IsUint<16>(vtable_idx));
+ VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
+ << "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
+ << " to " << Instruction::Name(new_opcode)
+ << " by replacing method index " << method_idx
+ << " by vtable index " << vtable_idx
+ << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+ << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace method index by vtable index.
+ if (is_range) {
+ inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
+ } else {
+ inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
}
+ quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
}
CompiledMethod* ArtCompileDEX(
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 8d53dbfe39..67505541c2 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -640,28 +640,6 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangLong, RotateLeft, JI_J, kIntrinsicRotateLeft, k64),
#undef INTRINSIC
-
-#define SPECIAL(c, n, p, o, d) \
- { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, kInlineSpecial, { d } } }
-
- SPECIAL(JavaLangString, Init, _V, kInlineStringInit, 0),
- SPECIAL(JavaLangString, Init, ByteArray_V, kInlineStringInit, 1),
- SPECIAL(JavaLangString, Init, ByteArrayI_V, kInlineStringInit, 2),
- SPECIAL(JavaLangString, Init, ByteArrayII_V, kInlineStringInit, 3),
- SPECIAL(JavaLangString, Init, ByteArrayIII_V, kInlineStringInit, 4),
- SPECIAL(JavaLangString, Init, ByteArrayIIString_V, kInlineStringInit, 5),
- SPECIAL(JavaLangString, Init, ByteArrayString_V, kInlineStringInit, 6),
- SPECIAL(JavaLangString, Init, ByteArrayIICharset_V, kInlineStringInit, 7),
- SPECIAL(JavaLangString, Init, ByteArrayCharset_V, kInlineStringInit, 8),
- SPECIAL(JavaLangString, Init, CharArray_V, kInlineStringInit, 9),
- SPECIAL(JavaLangString, Init, CharArrayII_V, kInlineStringInit, 10),
- SPECIAL(JavaLangString, Init, IICharArray_V, kInlineStringInit, 11),
- SPECIAL(JavaLangString, Init, IntArrayII_V, kInlineStringInit, 12),
- SPECIAL(JavaLangString, Init, String_V, kInlineStringInit, 13),
- SPECIAL(JavaLangString, Init, StringBuffer_V, kInlineStringInit, 14),
- SPECIAL(JavaLangString, Init, StringBuilder_V, kInlineStringInit, 15),
-
-#undef SPECIAL
};
DexFileMethodInliner::DexFileMethodInliner()
@@ -843,22 +821,4 @@ bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, const InlineMetho
}
}
-uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index,
- PointerSize pointer_size) {
- ReaderMutexLock mu(Thread::Current(), lock_);
- auto it = inline_methods_.find(method_index);
- if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
- uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
- OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
- return string_init_base_offset + it->second.d.data * static_cast<size_t>(pointer_size);
- }
- return 0;
-}
-
-bool DexFileMethodInliner::IsStringInitMethodIndex(uint32_t method_index) {
- ReaderMutexLock mu(Thread::Current(), lock_);
- auto it = inline_methods_.find(method_index);
- return (it != inline_methods_.end()) && (it->second.opcode == kInlineStringInit);
-}
-
} // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 43fc687957..f4ae5a548e 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -82,17 +82,6 @@ class DexFileMethodInliner {
bool IsSpecial(uint32_t method_index) REQUIRES(!lock_);
/**
- * Gets the thread pointer entrypoint offset for a string init method index and pointer size.
- */
- uint32_t GetOffsetForStringInit(uint32_t method_index, PointerSize pointer_size)
- REQUIRES(!lock_);
-
- /**
- * Check whether a particular method index is a string init.
- */
- bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_);
-
- /**
* To avoid multiple lookups of a class by its descriptor, we cache its
* type index in the IndexCache. These are the indexes into the IndexCache
* class_indexes array.
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 3a260f5a80..4b913f4255 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -293,146 +293,6 @@ inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex(
}
}
-inline int CompilerDriver::IsFastInvoke(
- ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
- MethodReference* target_method, const MethodReference* devirt_target,
- uintptr_t* direct_code, uintptr_t* direct_method) {
- // Don't try to fast-path if we don't understand the caller's class.
- // Referrer_class is the class that this invoke is contained in.
- if (UNLIKELY(referrer_class == nullptr)) {
- return 0;
- }
- StackHandleScope<2> hs(soa.Self());
- // Methods_class is the class refered to by the class_idx field of the methodId the method_idx is
- // pointing to.
- // For example in
- // .class LABC;
- // .super LDEF;
- // .method hi()V
- // ...
- // invoke-super {p0}, LDEF;->hi()V
- // ...
- // .end method
- // the referrer_class is 'ABC' and the methods_class is DEF. Note that the methods class is 'DEF'
- // even if 'DEF' inherits the method from it's superclass.
- Handle<mirror::Class> methods_class(hs.NewHandle(mUnit->GetClassLinker()->ResolveType(
- *target_method->dex_file,
- target_method->dex_file->GetMethodId(target_method->dex_method_index).class_idx_,
- dex_cache,
- class_loader)));
- DCHECK(methods_class.Get() != nullptr);
- mirror::Class* methods_declaring_class = resolved_method->GetDeclaringClass();
- if (UNLIKELY(!referrer_class->CanAccessResolvedMethod(methods_declaring_class, resolved_method,
- dex_cache.Get(),
- target_method->dex_method_index))) {
- return 0;
- }
- // Sharpen a virtual call into a direct call when the target is known not to have been
- // overridden (ie is final).
- const bool same_dex_file = target_method->dex_file == mUnit->GetDexFile();
- bool can_sharpen_virtual_based_on_type = same_dex_file &&
- (*invoke_type == kVirtual) && (resolved_method->IsFinal() ||
- methods_declaring_class->IsFinal());
- // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of
- // the super class.
- const PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
- // TODO We should be able to sharpen if we are going into the boot image as well.
- bool can_sharpen_super_based_on_type = same_dex_file &&
- (*invoke_type == kSuper) &&
- !methods_class->IsInterface() &&
- (referrer_class != methods_declaring_class) &&
- referrer_class->IsSubClass(methods_declaring_class) &&
- resolved_method->GetMethodIndex() < methods_declaring_class->GetVTableLength() &&
- (methods_declaring_class->GetVTableEntry(
- resolved_method->GetMethodIndex(), pointer_size) == resolved_method) &&
- resolved_method->IsInvokable();
- // TODO We should be able to sharpen if we are going into the boot image as well.
- bool can_sharpen_interface_super_based_on_type = same_dex_file &&
- (*invoke_type == kSuper) &&
- methods_class->IsInterface() &&
- methods_class->IsAssignableFrom(referrer_class) &&
- resolved_method->IsInvokable();
-
- if (can_sharpen_virtual_based_on_type ||
- can_sharpen_super_based_on_type ||
- can_sharpen_interface_super_based_on_type) {
- // Sharpen a virtual call into a direct call. The method_idx is into referrer's
- // dex cache, check that this resolved method is where we expect it.
- CHECK_EQ(target_method->dex_file, mUnit->GetDexFile());
- DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache(
- soa.Self(), *mUnit->GetDexFile(), false));
- CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod(
- target_method->dex_method_index, pointer_size),
- resolved_method) << PrettyMethod(resolved_method);
- int stats_flags = kFlagMethodResolved;
- GetCodeAndMethodForDirectCall(/*out*/invoke_type,
- kDirect, // Sharp type
- false, // The dex cache is guaranteed to be available
- referrer_class, resolved_method,
- /*out*/&stats_flags,
- target_method,
- /*out*/direct_code,
- /*out*/direct_method);
- DCHECK_NE(*invoke_type, kSuper) << PrettyMethod(resolved_method);
- if (*invoke_type == kDirect) {
- stats_flags |= kFlagsMethodResolvedVirtualMadeDirect;
- }
- return stats_flags;
- }
-
- if ((*invoke_type == kVirtual || *invoke_type == kInterface) && devirt_target != nullptr) {
- // Post-verification callback recorded a more precise invoke target based on its type info.
- ArtMethod* called_method;
- ClassLinker* class_linker = mUnit->GetClassLinker();
- if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) {
- called_method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
- *devirt_target->dex_file, devirt_target->dex_method_index, dex_cache, class_loader,
- nullptr, kVirtual);
- } else {
- auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file,
- class_loader.Get())));
- called_method = class_linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
- *devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
- class_loader, nullptr, kVirtual);
- }
- CHECK(called_method != nullptr);
- CHECK(called_method->IsInvokable());
- int stats_flags = kFlagMethodResolved;
- GetCodeAndMethodForDirectCall(/*out*/invoke_type,
- kDirect, // Sharp type
- true, // The dex cache may not be available
- referrer_class, called_method,
- /*out*/&stats_flags,
- target_method,
- /*out*/direct_code,
- /*out*/direct_method);
- DCHECK_NE(*invoke_type, kSuper);
- if (*invoke_type == kDirect) {
- stats_flags |= kFlagsMethodResolvedPreciseTypeDevirtualization;
- }
- return stats_flags;
- }
-
- if (UNLIKELY(*invoke_type == kSuper)) {
- // Unsharpened super calls are suspicious so go slow-path.
- return 0;
- }
-
- // Sharpening failed so generate a regular resolved method dispatch.
- int stats_flags = kFlagMethodResolved;
- GetCodeAndMethodForDirectCall(/*out*/invoke_type,
- *invoke_type, // Sharp type
- false, // The dex cache is guaranteed to be available
- referrer_class, resolved_method,
- /*out*/&stats_flags,
- target_method,
- /*out*/direct_code,
- /*out*/direct_method);
- return stats_flags;
-}
-
inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class,
ArtMethod* resolved_method) {
if (!resolved_method->IsStatic()) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a149c07beb..f1d3116acd 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -595,9 +595,14 @@ static void CompileMethod(Thread* self,
: optimizer::DexToDexCompilationLevel::kRequired);
}
} else if ((access_flags & kAccNative) != 0) {
- // Are we extracting only and have support for generic JNI down calls?
- if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
- InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
+ const InstructionSet instruction_set = driver->GetInstructionSet();
+ const bool use_generic_jni =
+      // Are we only extracting, and do we have support for generic JNI down calls?
+ (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
+ InstructionSetHasGenericJniStub(instruction_set)) ||
+ // Always punt to generic JNI for MIPS because of no support for @CriticalNative. b/31743474
+ (instruction_set == kMips || instruction_set == kMips64);
+ if (use_generic_jni) {
// Leaving this empty will trigger the generic JNI version
} else {
// Look-up the ArtMethod associated with this code_item (if any)
@@ -1654,12 +1659,8 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
}
}
-void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
- bool no_guarantee_of_dex_cache_entry,
- const mirror::Class* referrer_class,
+void CompilerDriver::GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
ArtMethod* method,
- int* stats_flags,
- MethodReference* target_method,
uintptr_t* direct_code,
uintptr_t* direct_method) {
// For direct and static methods compute possible direct_code and direct_method values, ie
@@ -1671,15 +1672,11 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
Runtime* const runtime = Runtime::Current();
gc::Heap* const heap = runtime->GetHeap();
auto* cl = runtime->GetClassLinker();
- const auto pointer_size = cl->GetImagePointerSize();
bool use_dex_cache = GetCompilerOptions().GetCompilePic(); // Off by default
const bool compiling_boot = heap->IsCompilingBoot();
// TODO This is somewhat hacky. We should refactor all of this invoke codepath.
const bool force_relocations = (compiling_boot ||
GetCompilerOptions().GetIncludePatchInformation());
- if (sharp_type != kStatic && sharp_type != kDirect) {
- return;
- }
// TODO: support patching on all architectures.
use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -1687,14 +1684,12 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
if (!use_dex_cache) {
if (!method_code_in_boot) {
use_dex_cache = true;
- } else {
- bool has_clinit_trampoline =
- method->IsStatic() && !declaring_class->IsInitialized();
- if (has_clinit_trampoline && declaring_class != referrer_class) {
- // Ensure we run the clinit trampoline unless we are invoking a static method in the same
- // class.
- use_dex_cache = true;
- }
+ } else if (method->IsStatic() &&
+ declaring_class != referrer_class &&
+ !declaring_class->IsInitialized()) {
+ // Ensure we run the clinit trampoline unless we are invoking a static method in the same
+ // class.
+ use_dex_cache = true;
}
}
if (runtime->UseJitCompilation()) {
@@ -1705,9 +1700,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
use_dex_cache = true;
}
}
- if (method_code_in_boot) {
- *stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
- }
+
if (!use_dex_cache && force_relocations) {
bool is_in_image;
if (IsBootImage()) {
@@ -1724,39 +1717,8 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
use_dex_cache = true;
}
}
- // The method is defined not within this dex file. We need a dex cache slot within the current
- // dex file or direct pointers.
- bool must_use_direct_pointers = false;
- mirror::DexCache* dex_cache = declaring_class->GetDexCache();
- if (target_method->dex_file == dex_cache->GetDexFile() &&
- !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod(
- method->GetDexMethodIndex(), pointer_size) == nullptr)) {
- target_method->dex_method_index = method->GetDexMethodIndex();
- } else {
- if (no_guarantee_of_dex_cache_entry) {
- // See if the method is also declared in this dex cache.
- uint32_t dex_method_idx = method->FindDexMethodIndexInOtherDexFile(
- *target_method->dex_file, target_method->dex_method_index);
- if (dex_method_idx != DexFile::kDexNoIndex) {
- target_method->dex_method_index = dex_method_idx;
- } else {
- if (force_relocations && !use_dex_cache) {
- target_method->dex_method_index = method->GetDexMethodIndex();
- target_method->dex_file = dex_cache->GetDexFile();
- }
- must_use_direct_pointers = true;
- }
- }
- }
- if (use_dex_cache) {
- if (must_use_direct_pointers) {
- // Fail. Test above showed the only safe dispatch was via the dex cache, however, the direct
- // pointers are required as the dex cache lacks an appropriate entry.
- VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
- } else {
- *type = sharp_type;
- }
- } else {
+
+ if (!use_dex_cache) {
bool method_in_image = false;
const std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
for (gc::space::ImageSpace* image_space : image_spaces) {
@@ -1772,85 +1734,13 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
// the method and its code are / will be. We don't sharpen to interpreter bridge since we
// check IsQuickToInterpreterBridge above.
CHECK(!method->IsAbstract());
- *type = sharp_type;
*direct_method = force_relocations ? -1 : reinterpret_cast<uintptr_t>(method);
*direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method);
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- target_method->dex_method_index = method->GetDexMethodIndex();
- } else if (!must_use_direct_pointers) {
- // Set the code and rely on the dex cache for the method.
- *type = sharp_type;
- if (force_relocations) {
- *direct_code = -1;
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- target_method->dex_method_index = method->GetDexMethodIndex();
- } else {
- *direct_code = compiler_->GetEntryPointOf(method);
- }
} else {
- // Direct pointers were required but none were available.
- VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
- }
- }
-}
-
-bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
- bool update_stats, bool enable_devirtualization,
- InvokeType* invoke_type, MethodReference* target_method,
- int* vtable_idx, uintptr_t* direct_code,
- uintptr_t* direct_method) {
- InvokeType orig_invoke_type = *invoke_type;
- int stats_flags = 0;
- ScopedObjectAccess soa(Thread::Current());
- // Try to resolve the method and compiling method's class.
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())));
- uint32_t method_idx = target_method->dex_method_index;
- ArtMethod* resolved_method = ResolveMethod(
- soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type);
- auto h_referrer_class = hs.NewHandle(resolved_method != nullptr ?
- ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr);
- bool result = false;
- if (resolved_method != nullptr) {
- *vtable_idx = GetResolvedMethodVTableIndex(resolved_method, orig_invoke_type);
-
- if (enable_devirtualization && mUnit->GetVerifiedMethod() != nullptr) {
- const MethodReference* devirt_target = mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc);
-
- stats_flags = IsFastInvoke(
- soa, dex_cache, class_loader, mUnit, h_referrer_class.Get(), resolved_method,
- invoke_type, target_method, devirt_target, direct_code, direct_method);
- result = stats_flags != 0;
- } else {
- // Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts.
- if (UNLIKELY(h_referrer_class.Get() == nullptr) ||
- UNLIKELY(!h_referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(),
- resolved_method, dex_cache.Get(),
- target_method->dex_method_index)) ||
- *invoke_type == kSuper) {
- // Slow path. (Without devirtualization, all super calls go slow path as well.)
- } else {
- // Sharpening failed so generate a regular resolved method dispatch.
- stats_flags = kFlagMethodResolved;
- GetCodeAndMethodForDirectCall(
- invoke_type, *invoke_type, false, h_referrer_class.Get(), resolved_method, &stats_flags,
- target_method, direct_code, direct_method);
- result = true;
- }
+ // Set the code and rely on the dex cache for the method.
+ *direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method);
}
}
- if (!result) {
- // Conservative defaults.
- *vtable_idx = -1;
- *direct_code = 0u;
- *direct_method = 0u;
- }
- if (update_stats) {
- ProcessedInvoke(orig_invoke_type, stats_flags);
- }
- return result;
}
const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
@@ -2904,18 +2794,6 @@ std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
return oss.str();
}
-bool CompilerDriver::IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file) {
- const char* type = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_index));
- return strcmp(type, "Ljava/lang/String;") == 0;
-}
-
-bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
- DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
- const PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
- *offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
- return inliner->IsStringInitMethodIndex(method_index);
-}
-
bool CompilerDriver::MayInlineInternal(const DexFile* inlined_from,
const DexFile* inlined_into) const {
// We're not allowed to inline across dex files if we're the no-inline-from dex file.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index ee21efa854..41f0d36c79 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -328,16 +328,6 @@ class CompilerDriver {
ArtMethod* resolved_method, InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
- // for ProcessedInvoke() and computes the necessary lowering info.
- int IsFastInvoke(
- ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
- mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
- MethodReference* target_method, const MethodReference* devirt_target,
- uintptr_t* direct_code, uintptr_t* direct_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Is method's class initialized for an invoke?
// For static invokes to determine whether we need to consider potential call to <clinit>().
// For non-static invokes, assuming a non-null reference, the class is always initialized.
@@ -371,14 +361,6 @@ class CompilerDriver {
REQUIRES_SHARED(Locks::mutator_lock_);
- // Can we fastpath a interface, super class or virtual method call? Computes method's vtable
- // index.
- bool ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc,
- bool update_stats, bool enable_devirtualization,
- InvokeType* type, MethodReference* target_method, int* vtable_idx,
- uintptr_t* direct_code, uintptr_t* direct_method)
- REQUIRES(!Locks::mutator_lock_);
-
const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
@@ -450,9 +432,6 @@ class CompilerDriver {
// Get memory usage during compilation.
std::string GetMemoryUsageString(bool extended) const;
- bool IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file);
- bool IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset);
-
void SetHadHardVerifierFailure() {
had_hard_verifier_failure_ = true;
}
@@ -538,14 +517,10 @@ class CompilerDriver {
public: // TODO make private or eliminate.
// Compute constant code and method pointers when possible.
- void GetCodeAndMethodForDirectCall(/*out*/InvokeType* type,
- InvokeType sharp_type,
- bool no_guarantee_of_dex_cache_entry,
- const mirror::Class* referrer_class,
+ void GetCodeAndMethodForDirectCall(const mirror::Class* referrer_class,
ArtMethod* method,
- /*out*/int* stats_flags,
- MethodReference* target_method,
- uintptr_t* direct_code, uintptr_t* direct_method)
+ /* out */ uintptr_t* direct_code,
+ /* out */ uintptr_t* direct_method)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index a18935f09a..ea4b7ee40e 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -39,161 +39,309 @@
namespace art {
+static const uintptr_t kRequestedImageBase = ART_BASE_ADDRESS;
+
+struct CompilationHelper {
+ std::vector<std::string> dex_file_locations;
+ std::vector<ScratchFile> image_locations;
+ std::vector<std::unique_ptr<const DexFile>> extra_dex_files;
+ std::vector<ScratchFile> image_files;
+ std::vector<ScratchFile> oat_files;
+ std::vector<ScratchFile> vdex_files;
+ std::string image_dir;
+
+ void Compile(CompilerDriver* driver,
+ ImageHeader::StorageMode storage_mode);
+
+ std::vector<size_t> GetImageObjectSectionSizes();
+
+ ~CompilationHelper();
+};
+
class ImageTest : public CommonCompilerTest {
protected:
virtual void SetUp() {
ReserveImageSpace();
CommonCompilerTest::SetUp();
}
+
void TestWriteRead(ImageHeader::StorageMode storage_mode);
+
+ void Compile(ImageHeader::StorageMode storage_mode,
+ CompilationHelper& out_helper,
+ const std::string& extra_dex = "",
+ const std::string& image_class = "");
+
+ std::unordered_set<std::string>* GetImageClasses() OVERRIDE {
+ return new std::unordered_set<std::string>(image_classes_);
+ }
+
+ private:
+ std::unordered_set<std::string> image_classes_;
};
-void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
- CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U);
+CompilationHelper::~CompilationHelper() {
+ for (ScratchFile& image_file : image_files) {
+ image_file.Unlink();
+ }
+ for (ScratchFile& oat_file : oat_files) {
+ oat_file.Unlink();
+ }
+ for (ScratchFile& vdex_file : vdex_files) {
+ vdex_file.Unlink();
+ }
+ const int rmdir_result = rmdir(image_dir.c_str());
+ CHECK_EQ(0, rmdir_result);
+}
- // Set inline filter values.
- compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit);
- compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+std::vector<size_t> CompilationHelper::GetImageObjectSectionSizes() {
+ std::vector<size_t> ret;
+ for (ScratchFile& image_file : image_files) {
+ std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
+ CHECK(file.get() != nullptr);
+ ImageHeader image_header;
+ CHECK_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
+ CHECK(image_header.IsValid());
+ ret.push_back(image_header.GetImageSize());
+ }
+ return ret;
+}
+void CompilationHelper::Compile(CompilerDriver* driver,
+ ImageHeader::StorageMode storage_mode) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ std::vector<const DexFile*> class_path = class_linker->GetBootClassPath();
+
+ for (const std::unique_ptr<const DexFile>& dex_file : extra_dex_files) {
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ // Inject in boot class path so that the compiler driver can see it.
+ class_linker->AppendToBootClassPath(soa.Self(), *dex_file.get());
+ }
+ class_path.push_back(dex_file.get());
+ }
+
// Enable write for dex2dex.
- for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
- dex_file->EnableWrite();
+ for (const DexFile* dex_file : class_path) {
+ dex_file_locations.push_back(dex_file->GetLocation());
+ if (dex_file->IsReadOnly()) {
+ dex_file->EnableWrite();
+ }
+ }
+ {
+ // Create a generic tmp file, to be the base of the .art and .oat temporary files.
+ ScratchFile location;
+ for (int i = 0; i < static_cast<int>(class_path.size()); ++i) {
+ std::string cur_location(StringPrintf("%s-%d.art", location.GetFilename().c_str(), i));
+ image_locations.push_back(ScratchFile(cur_location));
+ }
+ }
+ std::vector<std::string> image_filenames;
+ for (ScratchFile& file : image_locations) {
+ std::string image_filename(GetSystemImageFilename(file.GetFilename().c_str(), kRuntimeISA));
+ image_filenames.push_back(image_filename);
+ size_t pos = image_filename.rfind('/');
+ CHECK_NE(pos, std::string::npos) << image_filename;
+ if (image_dir.empty()) {
+ image_dir = image_filename.substr(0, pos);
+ int mkdir_result = mkdir(image_dir.c_str(), 0700);
+ CHECK_EQ(0, mkdir_result) << image_dir;
+ }
+ image_files.push_back(ScratchFile(OS::CreateEmptyFile(image_filename.c_str())));
}
- // Create a generic location tmp file, to be the base of the .art and .oat temporary files.
- ScratchFile location;
- ScratchFile image_location(location, ".art");
-
- std::string image_filename(GetSystemImageFilename(image_location.GetFilename().c_str(),
- kRuntimeISA));
- size_t pos = image_filename.rfind('/');
- CHECK_NE(pos, std::string::npos) << image_filename;
- std::string image_dir(image_filename, 0, pos);
- int mkdir_result = mkdir(image_dir.c_str(), 0700);
- CHECK_EQ(0, mkdir_result) << image_dir;
- ScratchFile image_file(OS::CreateEmptyFile(image_filename.c_str()));
-
- std::string oat_filename = ReplaceFileExtension(image_filename, "oat");
- ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
-
- std::string vdex_filename = ReplaceFileExtension(image_filename, "vdex");
- ScratchFile vdex_file(OS::CreateEmptyFile(vdex_filename.c_str()));
-
- const uintptr_t requested_image_base = ART_BASE_ADDRESS;
+
+ std::vector<std::string> oat_filenames;
+ std::vector<std::string> vdex_filenames;
+ for (const std::string& image_filename : image_filenames) {
+ std::string oat_filename = ReplaceFileExtension(image_filename, "oat");
+ oat_files.push_back(ScratchFile(OS::CreateEmptyFile(oat_filename.c_str())));
+ oat_filenames.push_back(oat_filename);
+ std::string vdex_filename = ReplaceFileExtension(image_filename, "vdex");
+ vdex_files.push_back(ScratchFile(OS::CreateEmptyFile(vdex_filename.c_str())));
+ vdex_filenames.push_back(vdex_filename);
+ }
+
std::unordered_map<const DexFile*, size_t> dex_file_to_oat_index_map;
- std::vector<const char*> oat_filename_vector(1, oat_filename.c_str());
- for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
- dex_file_to_oat_index_map.emplace(dex_file, 0);
+ std::vector<const char*> oat_filename_vector;
+ for (const std::string& file : oat_filenames) {
+ oat_filename_vector.push_back(file.c_str());
}
- std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_,
- requested_image_base,
+ std::vector<const char*> image_filename_vector;
+ for (const std::string& file : image_filenames) {
+ image_filename_vector.push_back(file.c_str());
+ }
+ size_t image_idx = 0;
+ for (const DexFile* dex_file : class_path) {
+ dex_file_to_oat_index_map.emplace(dex_file, image_idx);
+ ++image_idx;
+ }
+ // TODO: compile_pic should be a test argument.
+ std::unique_ptr<ImageWriter> writer(new ImageWriter(*driver,
+ kRequestedImageBase,
/*compile_pic*/false,
/*compile_app_image*/false,
storage_mode,
oat_filename_vector,
dex_file_to_oat_index_map));
- // TODO: compile_pic should be a test argument.
{
{
jobject class_loader = nullptr;
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
- compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
- compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
+ driver->SetDexFilesForOatFile(class_path);
+ driver->CompileAll(class_loader, class_path, &timings);
t.NewTiming("WriteElf");
SafeMap<std::string, std::string> key_value_store;
- const std::vector<const DexFile*>& dex_files = class_linker->GetBootClassPath();
- std::unique_ptr<ElfWriter> elf_writer = CreateElfWriterQuick(
- compiler_driver_->GetInstructionSet(),
- compiler_driver_->GetInstructionSetFeatures(),
- &compiler_driver_->GetCompilerOptions(),
- oat_file.GetFile());
- elf_writer->Start();
- OatWriter oat_writer(/*compiling_boot_image*/true, &timings);
- OutputStream* oat_rodata = elf_writer->StartRoData();
- for (const DexFile* dex_file : dex_files) {
+ std::vector<const char*> dex_filename_vector;
+ for (size_t i = 0; i < class_path.size(); ++i) {
+ dex_filename_vector.push_back("");
+ }
+ key_value_store.Put(OatHeader::kBootClassPathKey,
+ gc::space::ImageSpace::GetMultiImageBootClassPath(
+ dex_filename_vector,
+ oat_filename_vector,
+ image_filename_vector));
+
+ std::vector<std::unique_ptr<ElfWriter>> elf_writers;
+ std::vector<std::unique_ptr<OatWriter>> oat_writers;
+ for (ScratchFile& oat_file : oat_files) {
+ elf_writers.emplace_back(CreateElfWriterQuick(driver->GetInstructionSet(),
+ driver->GetInstructionSetFeatures(),
+ &driver->GetCompilerOptions(),
+ oat_file.GetFile()));
+ elf_writers.back()->Start();
+ oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true, &timings));
+ }
+
+ std::vector<OutputStream*> rodata;
+ std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
+ std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
+ // Now that we have finalized key_value_store_, start writing the oat file.
+ for (size_t i = 0, size = oat_writers.size(); i != size; ++i) {
+ const DexFile* dex_file = class_path[i];
+ rodata.push_back(elf_writers[i]->StartRoData());
ArrayRef<const uint8_t> raw_dex_file(
reinterpret_cast<const uint8_t*>(&dex_file->GetHeader()),
dex_file->GetHeader().file_size_);
- oat_writer.AddRawDexFileSource(raw_dex_file,
- dex_file->GetLocation().c_str(),
- dex_file->GetLocationChecksum());
- }
- std::unique_ptr<MemMap> opened_dex_files_map;
- std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
- {
- bool dex_files_ok = oat_writer.WriteAndOpenDexFiles(
- kIsVdexEnabled ? vdex_file.GetFile() : oat_file.GetFile(),
- oat_rodata,
- compiler_driver_->GetInstructionSet(),
- compiler_driver_->GetInstructionSetFeatures(),
+ oat_writers[i]->AddRawDexFileSource(raw_dex_file,
+ dex_file->GetLocation().c_str(),
+ dex_file->GetLocationChecksum());
+
+ std::unique_ptr<MemMap> cur_opened_dex_files_map;
+ std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
+ bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
+ kIsVdexEnabled ? vdex_files[i].GetFile() : oat_files[i].GetFile(),
+ rodata.back(),
+ driver->GetInstructionSet(),
+ driver->GetInstructionSetFeatures(),
&key_value_store,
/* verify */ false, // Dex files may be dex-to-dex-ed, don't verify.
- &opened_dex_files_map,
- &opened_dex_files);
+ &cur_opened_dex_files_map,
+ &cur_opened_dex_files);
ASSERT_TRUE(dex_files_ok);
- }
+ if (cur_opened_dex_files_map != nullptr) {
+ opened_dex_files_map.push_back(std::move(cur_opened_dex_files_map));
+ for (std::unique_ptr<const DexFile>& cur_dex_file : cur_opened_dex_files) {
+ // dex_file_oat_index_map_.emplace(dex_file.get(), i);
+ opened_dex_files.push_back(std::move(cur_dex_file));
+ }
+ } else {
+ ASSERT_TRUE(cur_opened_dex_files.empty());
+ }
+ }
bool image_space_ok = writer->PrepareImageAddressSpace();
ASSERT_TRUE(image_space_ok);
- linker::MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
- instruction_set_features_.get());
- oat_writer.PrepareLayout(compiler_driver_.get(), writer.get(), dex_files, &patcher);
- size_t rodata_size = oat_writer.GetOatHeader().GetExecutableOffset();
- size_t text_size = oat_writer.GetOatSize() - rodata_size;
- elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer.GetBssSize());
-
- writer->UpdateOatFileLayout(/* oat_index */ 0u,
- elf_writer->GetLoadedSize(),
- oat_writer.GetOatDataOffset(),
- oat_writer.GetOatSize());
-
- bool rodata_ok = oat_writer.WriteRodata(oat_rodata);
- ASSERT_TRUE(rodata_ok);
- elf_writer->EndRoData(oat_rodata);
-
- OutputStream* text = elf_writer->StartText();
- bool text_ok = oat_writer.WriteCode(text);
- ASSERT_TRUE(text_ok);
- elf_writer->EndText(text);
-
- bool header_ok = oat_writer.WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
- ASSERT_TRUE(header_ok);
-
- writer->UpdateOatFileHeader(/* oat_index */ 0u, oat_writer.GetOatHeader());
-
- elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
- elf_writer->WritePatchLocations(oat_writer.GetAbsolutePatchLocations());
-
- bool success = elf_writer->End();
- ASSERT_TRUE(success);
+ for (size_t i = 0, size = oat_files.size(); i != size; ++i) {
+ linker::MultiOatRelativePatcher patcher(driver->GetInstructionSet(),
+ driver->GetInstructionSetFeatures());
+ OatWriter* const oat_writer = oat_writers[i].get();
+ ElfWriter* const elf_writer = elf_writers[i].get();
+ std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
+ oat_writer->PrepareLayout(driver, writer.get(), cur_dex_files, &patcher);
+ size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset();
+ size_t text_size = oat_writer->GetOatSize() - rodata_size;
+ elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer->GetBssSize());
+
+ writer->UpdateOatFileLayout(i,
+ elf_writer->GetLoadedSize(),
+ oat_writer->GetOatDataOffset(),
+ oat_writer->GetOatSize());
+
+ bool rodata_ok = oat_writer->WriteRodata(rodata[i]);
+ ASSERT_TRUE(rodata_ok);
+ elf_writer->EndRoData(rodata[i]);
+
+ OutputStream* text = elf_writer->StartText();
+ bool text_ok = oat_writer->WriteCode(text);
+ ASSERT_TRUE(text_ok);
+ elf_writer->EndText(text);
+
+ bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
+ ASSERT_TRUE(header_ok);
+
+ writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
+
+ elf_writer->WriteDynamicSection();
+ elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
+
+ bool success = elf_writer->End();
+ ASSERT_TRUE(success);
+ }
}
- }
- // Workound bug that mcld::Linker::emit closes oat_file by reopening as dup_oat.
- std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
- ASSERT_TRUE(dup_oat.get() != nullptr);
- {
- std::vector<const char*> dup_oat_filename(1, dup_oat->GetPath().c_str());
- std::vector<const char*> dup_image_filename(1, image_file.GetFilename().c_str());
bool success_image = writer->Write(kInvalidFd,
- dup_image_filename,
- dup_oat_filename);
+ image_filename_vector,
+ oat_filename_vector);
ASSERT_TRUE(success_image);
- bool success_fixup = ElfWriter::Fixup(dup_oat.get(),
- writer->GetOatDataBegin(0));
- ASSERT_TRUE(success_fixup);
- ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
- << oat_file.GetFilename();
+ for (size_t i = 0, size = oat_filenames.size(); i != size; ++i) {
+ const char* oat_filename = oat_filenames[i].c_str();
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
+ ASSERT_TRUE(oat_file != nullptr);
+ bool success_fixup = ElfWriter::Fixup(oat_file.get(),
+ writer->GetOatDataBegin(i));
+ ASSERT_TRUE(success_fixup);
+ ASSERT_EQ(oat_file->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
+ << oat_filename;
+ }
}
+}
- uint64_t image_file_size;
- size_t image_size;
- {
+void ImageTest::Compile(ImageHeader::StorageMode storage_mode,
+ CompilationHelper& helper,
+ const std::string& extra_dex,
+ const std::string& image_class) {
+ if (!image_class.empty()) {
+ image_classes_.insert(image_class);
+ }
+ CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U);
+ // Set inline filter values.
+ compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit);
+ compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+ image_classes_.clear();
+ if (!extra_dex.empty()) {
+ helper.extra_dex_files = OpenTestDexFiles(extra_dex.c_str());
+ }
+ helper.Compile(compiler_driver_.get(), storage_mode);
+ if (!image_class.empty()) {
+ // Make sure the class got initialized.
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ mirror::Class* klass = class_linker->FindSystemClass(Thread::Current(), image_class.c_str());
+ EXPECT_TRUE(klass != nullptr);
+ EXPECT_TRUE(klass->IsInitialized());
+ }
+}
+
+void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
+ CompilationHelper helper;
+ Compile(storage_mode, /*out*/ helper);
+ std::vector<uint64_t> image_file_sizes;
+ for (ScratchFile& image_file : helper.image_files) {
std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
ASSERT_TRUE(file.get() != nullptr);
ImageHeader image_header;
@@ -209,9 +357,7 @@ void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
ASSERT_FALSE(space->IsImageSpace());
ASSERT_TRUE(space != nullptr);
ASSERT_TRUE(space->IsMallocSpace());
-
- image_file_size = file->GetLength();
- image_size = image_header.GetImageSize();
+ image_file_sizes.push_back(file->GetLength());
}
ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
@@ -225,17 +371,16 @@ void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
// Remove the reservation of the memory for use to load the image.
// Need to do this before we reset the runtime.
UnreserveImageSpace();
- writer.reset(nullptr);
+ helper.extra_dex_files.clear();
runtime_.reset();
java_lang_dex_file_ = nullptr;
MemMap::Init();
- std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileNames()[0].c_str()));
RuntimeOptions options;
std::string image("-Ximage:");
- image.append(image_location.GetFilename());
+ image.append(helper.image_locations[0].GetFilename());
options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
// By default the compiler this creates will not include patch information.
options.push_back(std::make_pair("-Xnorelocate", nullptr));
@@ -257,41 +402,45 @@ void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
// We loaded the runtime with an explicit image, so it must exist.
- gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[0];
- ASSERT_TRUE(image_space != nullptr);
- if (storage_mode == ImageHeader::kStorageModeUncompressed) {
- // Uncompressed, image should be smaller than file.
- ASSERT_LE(image_size, image_file_size);
- } else {
- // Compressed, file should be smaller than image.
- ASSERT_LE(image_file_size, image_size);
- }
+ ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size());
+ for (size_t i = 0; i < helper.dex_file_locations.size(); ++i) {
+ std::unique_ptr<const DexFile> dex(
+ LoadExpectSingleDexFile(helper.dex_file_locations[i].c_str()));
+ ASSERT_TRUE(dex != nullptr);
+ uint64_t image_file_size = image_file_sizes[i];
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[i];
+ ASSERT_TRUE(image_space != nullptr);
+ if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ // Uncompressed, image should be smaller than file.
+ ASSERT_LE(image_space->GetImageHeader().GetImageSize(), image_file_size);
+ } else if (image_file_size > 16 * KB) {
+ // Compressed, file should be smaller than image. Not really valid for small images.
+ ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize());
+ }
- image_space->VerifyImageAllocations();
- uint8_t* image_begin = image_space->Begin();
- uint8_t* image_end = image_space->End();
- CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
- for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex->GetClassDef(i);
- const char* descriptor = dex->GetClassDescriptor(class_def);
- mirror::Class* klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
- EXPECT_TRUE(klass != nullptr) << descriptor;
- if (image_classes.find(descriptor) != image_classes.end()) {
- // Image classes should be located inside the image.
- EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
- EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
- } else {
- EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
- reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
+ image_space->VerifyImageAllocations();
+ uint8_t* image_begin = image_space->Begin();
+ uint8_t* image_end = image_space->End();
+ if (i == 0) {
+ // This check is only valid for image 0.
+ CHECK_EQ(kRequestedImageBase, reinterpret_cast<uintptr_t>(image_begin));
+ }
+ for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
+ const DexFile::ClassDef& class_def = dex->GetClassDef(j);
+ const char* descriptor = dex->GetClassDescriptor(class_def);
+ mirror::Class* klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
+ EXPECT_TRUE(klass != nullptr) << descriptor;
+ if (image_classes.find(descriptor) == image_classes.end()) {
+ EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
+ reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
+ } else {
+ // Image classes should be located inside the image.
+ EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
+ EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
+ }
+ EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
}
- EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
}
-
- image_file.Unlink();
- oat_file.Unlink();
- vdex_file.Unlink();
- int rmdir_result = rmdir(image_dir.c_str());
- CHECK_EQ(0, rmdir_result);
}
TEST_F(ImageTest, WriteReadUncompressed) {
@@ -306,6 +455,34 @@ TEST_F(ImageTest, WriteReadLZ4HC) {
TestWriteRead(ImageHeader::kStorageModeLZ4HC);
}
+TEST_F(ImageTest, TestImageLayout) {
+ std::vector<size_t> image_sizes;
+ std::vector<size_t> image_sizes_extra;
+ // Compile multi-image with ImageLayoutA being the last image.
+ {
+ CompilationHelper helper;
+ Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutA", "LMyClass;");
+ image_sizes = helper.GetImageObjectSectionSizes();
+ }
+ TearDown();
+ runtime_.reset();
+ SetUp();
+ // Compile multi-image with ImageLayoutB being the last image.
+ {
+ CompilationHelper helper;
+ Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutB", "LMyClass;");
+ image_sizes_extra = helper.GetImageObjectSectionSizes();
+ }
+ // Make sure that the new stuff in the clinit in ImageLayoutB is in the last image and not in the
+ // first two images.
+  ASSERT_EQ(image_sizes.size(), image_sizes_extra.size());
+  // Sizes of all but the last image should be the same. These sizes are for the whole image, unrounded.
+ for (size_t i = 0; i < image_sizes.size() - 1; ++i) {
+ EXPECT_EQ(image_sizes[i], image_sizes_extra[i]);
+ }
+ // Last image should be larger since it has a hash map and a string.
+ EXPECT_LT(image_sizes.back(), image_sizes_extra.back());
+}
TEST_F(ImageTest, ImageHeaderIsValid) {
uint32_t image_begin = ART_BASE_ADDRESS;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6d86f7d9d8..6b5758bc8f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -45,15 +45,16 @@
#include "gc/space/space-inl.h"
#include "globals.h"
#include "image.h"
+#include "imt_conflict_table.h"
#include "intern_table.h"
#include "linear_alloc.h"
#include "lock_word.h"
-#include "mirror/abstract_method.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/executable.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -389,7 +390,6 @@ void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
DCHECK(!IsImageBinSlotAssigned(object));
// Before we stomp over the lock word, save the hash code for later.
- Monitor::Deflate(Thread::Current(), object);;
LockWord lw(object->GetLockWord(false));
switch (lw.GetState()) {
case LockWord::kFatLocked: {
@@ -490,7 +490,7 @@ void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) {
pointer_arrays_.emplace(arr, kBinArtMethodClean);
}
-void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
+void ImageWriter::AssignImageBinSlot(mirror::Object* object, size_t oat_index) {
DCHECK(object != nullptr);
size_t object_size = object->SizeOf();
@@ -593,7 +593,10 @@ void ImageWriter::AssignImageBinSlot(mirror::Object* object) {
// else bin = kBinRegular
}
- size_t oat_index = GetOatIndex(object);
+ // Assign the oat index too.
+ DCHECK(oat_index_map_.find(object) == oat_index_map_.end());
+ oat_index_map_.emplace(object, oat_index);
+
ImageInfo& image_info = GetImageInfo(oat_index);
size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment
@@ -974,39 +977,6 @@ mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
return nullptr;
}
-void ImageWriter::CalculateObjectBinSlots(Object* obj) {
- DCHECK(obj != nullptr);
- // if it is a string, we want to intern it if its not interned.
- if (obj->GetClass()->IsStringClass()) {
- size_t oat_index = GetOatIndex(obj);
- ImageInfo& image_info = GetImageInfo(oat_index);
-
- // we must be an interned string that was forward referenced and already assigned
- if (IsImageBinSlotAssigned(obj)) {
- DCHECK_EQ(obj, FindInternedString(obj->AsString()));
- return;
- }
- // Need to check if the string is already interned in another image info so that we don't have
- // the intern tables of two different images contain the same string.
- mirror::String* interned = FindInternedString(obj->AsString());
- if (interned == nullptr) {
- // Not in another image space, insert to our table.
- interned = image_info.intern_table_->InternStrongImageString(obj->AsString());
- }
- if (obj != interned) {
- if (!IsImageBinSlotAssigned(interned)) {
- // interned obj is after us, allocate its location early
- AssignImageBinSlot(interned);
- }
- // point those looking for this object to the interned version.
- SetImageBinSlot(obj, GetImageBinSlot(interned));
- return;
- }
- // else (obj == interned), nothing to do but fall through to the normal case
- }
-
- AssignImageBinSlot(obj);
-}
ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
Runtime* runtime = Runtime::Current();
@@ -1092,61 +1062,33 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
return image_roots.Get();
}
-// Walk instance fields of the given Class. Separate function to allow recursion on the super
-// class.
-void ImageWriter::WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) {
- // Visit fields of parent classes first.
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> h_class(hs.NewHandle(klass));
- mirror::Class* super = h_class->GetSuperClass();
- if (super != nullptr) {
- WalkInstanceFields(obj, super);
- }
- //
- size_t num_reference_fields = h_class->NumReferenceInstanceFields();
- MemberOffset field_offset = h_class->GetFirstReferenceInstanceFieldOffset();
- for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::Object* value = obj->GetFieldObject<mirror::Object>(field_offset);
- if (value != nullptr) {
- WalkFieldsInOrder(value);
- }
- field_offset = MemberOffset(field_offset.Uint32Value() +
- sizeof(mirror::HeapReference<mirror::Object>));
- }
-}
-
-// For an unvisited object, visit it then all its children found via fields.
-void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
- if (IsInBootImage(obj)) {
- // Object is in the image, don't need to fix it up.
- return;
+mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
+ mirror::Object* obj,
+ size_t oat_index) {
+ if (obj == nullptr || IsInBootImage(obj)) {
+ // Object is null or already in the image, there is no work to do.
+ return obj;
}
- // Use our own visitor routine (instead of GC visitor) to get better locality between
- // an object and its fields
if (!IsImageBinSlotAssigned(obj)) {
- // Walk instance fields of all objects
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::Object> h_obj(hs.NewHandle(obj));
- Handle<mirror::Class> klass(hs.NewHandle(obj->GetClass()));
- // visit the object itself.
- CalculateObjectBinSlots(h_obj.Get());
- WalkInstanceFields(h_obj.Get(), klass.Get());
- // Walk static fields of a Class.
- if (h_obj->IsClass()) {
- size_t num_reference_static_fields = klass->NumReferenceStaticFields();
- MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(target_ptr_size_);
- for (size_t i = 0; i < num_reference_static_fields; ++i) {
- mirror::Object* value = h_obj->GetFieldObject<mirror::Object>(field_offset);
- if (value != nullptr) {
- WalkFieldsInOrder(value);
- }
- field_offset = MemberOffset(field_offset.Uint32Value() +
- sizeof(mirror::HeapReference<mirror::Object>));
+    // We want to intern all strings but also assign offsets for the source string. Since the
+    // pruning phase has already happened, if we intern a string to one already in the image we
+    // still end up copying an unreachable string.
+ if (obj->IsString()) {
+      // Need to check if the string is already interned in another image info so that the intern
+      // tables of two different images don't contain the same string.
+ mirror::String* interned = FindInternedString(obj->AsString());
+ if (interned == nullptr) {
+ // Not in another image space, insert to our table.
+ interned = GetImageInfo(oat_index).intern_table_->InternStrongImageString(obj->AsString());
+ DCHECK_EQ(interned, obj);
}
+ } else if (obj->IsDexCache()) {
+ oat_index = GetOatIndexForDexCache(obj->AsDexCache());
+ } else if (obj->IsClass()) {
// Visit and assign offsets for fields and field arrays.
- auto* as_klass = h_obj->AsClass();
+ mirror::Class* as_klass = obj->AsClass();
mirror::DexCache* dex_cache = as_klass->GetDexCache();
- DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError);
+ DCHECK_NE(as_klass->GetStatus(), mirror::Class::kStatusError);
if (compile_app_image_) {
// Extra sanity, no boot loader classes should be left!
CHECK(!IsBootClassLoaderClass(as_klass)) << PrettyClass(as_klass);
@@ -1154,14 +1096,14 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
LengthPrefixedArray<ArtField>* fields[] = {
as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
};
- size_t oat_index = GetOatIndexForDexCache(dex_cache);
+      // Overwrite the oat index value since the class's dex cache gives a more accurate indication
+      // of where it belongs.
+ oat_index = GetOatIndexForDexCache(dex_cache);
ImageInfo& image_info = GetImageInfo(oat_index);
{
- // Note: This table is only accessed from the image writer, so the lock is technically
- // unnecessary.
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- // Insert in the class table for this iamge.
- image_info.class_table_->Insert(as_klass);
+ // Note: This table is only accessed from the image writer, avoid locking to prevent lock
+ // order violations from root visiting.
+ image_info.class_table_->InsertWithoutLocks(as_klass);
}
for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
// Total array length including header.
@@ -1251,26 +1193,26 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) {
ImTable* imt = as_klass->GetImt(target_ptr_size_);
TryAssignImTableOffset(imt, oat_index);
}
- } else if (h_obj->IsObjectArray()) {
- // Walk elements of an object array.
- int32_t length = h_obj->AsObjectArray<mirror::Object>()->GetLength();
- for (int32_t i = 0; i < length; i++) {
- mirror::ObjectArray<mirror::Object>* obj_array = h_obj->AsObjectArray<mirror::Object>();
- mirror::Object* value = obj_array->Get(i);
- if (value != nullptr) {
- WalkFieldsInOrder(value);
- }
- }
- } else if (h_obj->IsClassLoader()) {
+ } else if (obj->IsClassLoader()) {
// Register the class loader if it has a class table.
// The fake boot class loader should not get registered and we should end up with only one
// class loader.
- mirror::ClassLoader* class_loader = h_obj->AsClassLoader();
+ mirror::ClassLoader* class_loader = obj->AsClassLoader();
if (class_loader->GetClassTable() != nullptr) {
class_loaders_.insert(class_loader);
}
}
+ AssignImageBinSlot(obj, oat_index);
+ work_stack.emplace(obj, oat_index);
}
+ if (obj->IsString()) {
+ // Always return the interned string if there exists one.
+ mirror::String* interned = FindInternedString(obj->AsString());
+ if (interned != nullptr) {
+ return interned;
+ }
+ }
+ return obj;
}
bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
@@ -1327,10 +1269,16 @@ void ImageWriter::AssignMethodOffset(ArtMethod* method,
offset += ArtMethod::Size(target_ptr_size_);
}
-void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) {
+void ImageWriter::EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg) {
ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
DCHECK(writer != nullptr);
- writer->WalkFieldsInOrder(obj);
+ if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+ CHECK(writer->IsImageBinSlotAssigned(obj)) << PrettyTypeOf(obj) << " " << obj;
+ }
+}
+
+void ImageWriter::DeflateMonitorCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) {
+ Monitor::Deflate(Thread::Current(), obj);
}
void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
@@ -1354,6 +1302,88 @@ void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
AssignImageOffset(obj, bin_slot);
}
+class ImageWriter::VisitReferencesVisitor {
+ public:
+ VisitReferencesVisitor(ImageWriter* image_writer, WorkStack* work_stack, size_t oat_index)
+ : image_writer_(image_writer), work_stack_(work_stack), oat_index_(oat_index) {}
+
+ // Fix up separately since we also need to fix up method entrypoints.
+ ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ root->Assign(VisitReference(root->AsMirrorPtr()));
+ }
+
+ ALWAYS_INLINE void operator() (mirror::Object* obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* ref =
+ obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+ obj->SetFieldObject</*kTransactionActive*/false>(offset, VisitReference(ref));
+ }
+
+ ALWAYS_INLINE void operator() (mirror::Class* klass ATTRIBUTE_UNUSED,
+ mirror::Reference* ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ref->SetReferent</*kTransactionActive*/false>(
+ VisitReference(ref->GetReferent<kWithoutReadBarrier>()));
+ }
+
+ private:
+ mirror::Object* VisitReference(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return image_writer_->TryAssignBinSlot(*work_stack_, ref, oat_index_);
+ }
+
+ ImageWriter* const image_writer_;
+ WorkStack* const work_stack_;
+ const size_t oat_index_;
+};
+
+class ImageWriter::GetRootsVisitor : public RootVisitor {
+ public:
+ explicit GetRootsVisitor(std::vector<mirror::Object*>* roots) : roots_(roots) {}
+
+ void VisitRoots(mirror::Object*** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ roots_->push_back(*roots[i]);
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ roots_->push_back(roots[i]->AsMirrorPtr());
+ }
+ }
+
+ private:
+ std::vector<mirror::Object*>* const roots_;
+};
+
+void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {
+ while (!work_stack->empty()) {
+ std::pair<mirror::Object*, size_t> pair(work_stack->top());
+ work_stack->pop();
+ VisitReferencesVisitor visitor(this, work_stack, /*oat_index*/ pair.second);
+ // Walk references and assign bin slots for them.
+ pair.first->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ visitor,
+ visitor);
+ }
+}
+
void ImageWriter::CalculateNewObjectOffsets() {
Thread* const self = Thread::Current();
StackHandleScopeCollection handles(self);
@@ -1362,8 +1392,8 @@ void ImageWriter::CalculateNewObjectOffsets() {
image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
}
- auto* runtime = Runtime::Current();
- auto* heap = runtime->GetHeap();
+ Runtime* const runtime = Runtime::Current();
+ gc::Heap* const heap = runtime->GetHeap();
// Leave space for the header, but do not write it yet, we need to
// know where image_roots is going to end up
@@ -1392,8 +1422,64 @@ void ImageWriter::CalculateNewObjectOffsets() {
}
}
- // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
- heap->VisitObjects(WalkFieldsCallback, this);
+ // Deflate monitors before we visit roots since deflating acquires the monitor lock. Acquiring
+ // this lock while holding other locks may cause lock order violations.
+ heap->VisitObjects(DeflateMonitorCallback, this);
+
+ // Work list of <object, oat_index> for objects. Everything on the stack must already be
+ // assigned a bin slot.
+ WorkStack work_stack;
+
+ // Special case interned strings to put them in the image they are likely to be resolved from.
+ for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
+ auto it = dex_file_oat_index_map_.find(dex_file);
+ DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
+ const size_t oat_index = it->second;
+ InternTable* const intern_table = runtime->GetInternTable();
+ for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
+ uint32_t utf16_length;
+ const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(i, &utf16_length);
+ mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data);
+ TryAssignBinSlot(work_stack, string, oat_index);
+ }
+ }
+
+ // Get the GC roots and then visit them separately to avoid lock violations since the root visitor
+ // visits roots while holding various locks.
+ {
+ std::vector<mirror::Object*> roots;
+ GetRootsVisitor root_visitor(&roots);
+ runtime->VisitRoots(&root_visitor);
+ for (mirror::Object* obj : roots) {
+ TryAssignBinSlot(work_stack, obj, GetDefaultOatIndex());
+ }
+ }
+ ProcessWorkStack(&work_stack);
+
+  // For app images, there may be objects that are held live only by the boot image. One
+ // example is finalizer references. Forward these objects so that EnsureBinSlotAssignedCallback
+ // does not fail any checks. TODO: We should probably avoid copying these objects.
+ if (compile_app_image_) {
+ for (gc::space::ImageSpace* space : heap->GetBootImageSpaces()) {
+ DCHECK(space->IsImageSpace());
+ gc::accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->Limit()),
+ [this, &work_stack](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ VisitReferencesVisitor visitor(this, &work_stack, GetDefaultOatIndex());
+ // Visit all references and try to assign bin slots for them (calls TryAssignBinSlot).
+ obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
+ visitor,
+ visitor);
+ });
+ }
+ // Process the work stack in case anything was added by TryAssignBinSlot.
+ ProcessWorkStack(&work_stack);
+ }
+
+ // Verify that all objects have assigned image bin slots.
+ heap->VisitObjects(EnsureBinSlotAssignedCallback, this);
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
@@ -1989,14 +2075,10 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
} else {
if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) {
// Need to go update the ArtMethod.
- auto* dest = down_cast<mirror::AbstractMethod*>(copy);
- auto* src = down_cast<mirror::AbstractMethod*>(orig);
+ auto* dest = down_cast<mirror::Executable*>(copy);
+ auto* src = down_cast<mirror::Executable*>(orig);
ArtMethod* src_method = src->GetArtMethod();
- auto it = native_object_relocations_.find(src_method);
- CHECK(it != native_object_relocations_.end())
- << "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
- dest->SetArtMethod(
- reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset));
+ dest->SetArtMethod(GetImageMethodAddress(src_method));
} else if (!klass->IsArrayClass()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) {
@@ -2285,25 +2367,21 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat
}
size_t ImageWriter::GetOatIndex(mirror::Object* obj) const {
- if (compile_app_image_) {
+ if (!IsMultiImage()) {
return GetDefaultOatIndex();
- } else {
- mirror::DexCache* dex_cache =
- obj->IsDexCache() ? obj->AsDexCache()
- : obj->IsClass() ? obj->AsClass()->GetDexCache()
- : obj->GetClass()->GetDexCache();
- return GetOatIndexForDexCache(dex_cache);
}
+ auto it = oat_index_map_.find(obj);
+ DCHECK(it != oat_index_map_.end());
+ return it->second;
}
size_t ImageWriter::GetOatIndexForDexFile(const DexFile* dex_file) const {
- if (compile_app_image_) {
+ if (!IsMultiImage()) {
return GetDefaultOatIndex();
- } else {
- auto it = dex_file_oat_index_map_.find(dex_file);
- DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
- return it->second;
}
+ auto it = dex_file_oat_index_map_.find(dex_file);
+ DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
+ return it->second;
}
size_t ImageWriter::GetOatIndexForDexCache(mirror::DexCache* dex_cache) const {
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 76749cf7a1..acd16813cb 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -23,6 +23,7 @@
#include <cstddef>
#include <memory>
#include <set>
+#include <stack>
#include <string>
#include <ostream>
@@ -144,6 +145,8 @@ class ImageWriter FINAL {
void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
private:
+ using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;
+
bool AllocMemory();
// Mark the objects defined in this space in the given live bitmap.
@@ -321,7 +324,10 @@ class ImageWriter FINAL {
REQUIRES_SHARED(Locks::mutator_lock_);
void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
- void AssignImageBinSlot(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsImageBinSlotAssigned(mirror::Object* object) const
@@ -378,6 +384,8 @@ class ImageWriter FINAL {
// Lays out where the image objects will be at runtime.
void CalculateNewObjectOffsets()
REQUIRES_SHARED(Locks::mutator_lock_);
+ void ProcessWorkStack(WorkStack* work_stack)
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateHeader(size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
@@ -387,11 +395,9 @@ class ImageWriter FINAL {
void UnbinObjectsIntoOffset(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
- void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
+ static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_);
- void WalkFieldsInOrder(mirror::Object* obj)
- REQUIRES_SHARED(Locks::mutator_lock_);
- static void WalkFieldsCallback(mirror::Object* obj, void* arg)
+ static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_);
static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -461,6 +467,10 @@ class ImageWriter FINAL {
std::unordered_set<mirror::Class*>* visited)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsMultiImage() const {
+ return image_infos_.size() > 1;
+ }
+
static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -519,6 +529,9 @@ class ImageWriter FINAL {
// forwarding addresses as well as copying over hash codes.
std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
+ // Oat index map for objects.
+ std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;
+
// Boolean flags.
const bool compile_pic_;
const bool compile_app_image_;
@@ -573,8 +586,10 @@ class ImageWriter FINAL {
friend class FixupClassVisitor;
friend class FixupRootVisitor;
friend class FixupVisitor;
+ class GetRootsVisitor;
friend class NativeLocationVisitor;
friend class NonImageClassesVisitor;
+ class VisitReferencesVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
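The new WorkStack type together with TryAssignBinSlot and ProcessWorkStack replaces the recursive field walk with an explicit stack of (object, oat index) pairs, which also lets the oat index travel with each reachable object. A self-contained sketch of that iterative traversal, with placeholder types rather than the real mirror::Object API:

#include <cstddef>
#include <stack>
#include <utility>
#include <vector>

struct Obj {
  std::vector<Obj*> references;    // Objects reachable from this one.
  bool bin_slot_assigned = false;  // Stands in for IsImageBinSlotAssigned().
};

using WorkStack = std::stack<std::pair<Obj*, size_t>>;

// Pop objects until the stack is empty, assigning a bin slot to each unvisited
// object and pushing its references with the same oat index. Being iterative,
// deep reference chains cannot overflow the native stack.
void ProcessWorkStack(WorkStack* work_stack) {
  while (!work_stack->empty()) {
    std::pair<Obj*, size_t> pair = work_stack->top();
    work_stack->pop();
    Obj* obj = pair.first;
    const size_t oat_index = pair.second;
    if (obj == nullptr || obj->bin_slot_assigned) {
      continue;  // Null reference or already visited.
    }
    obj->bin_slot_assigned = true;  // Stands in for AssignImageBinSlot(obj, oat_index).
    for (Obj* ref : obj->references) {
      work_stack->emplace(ref, oat_index);
    }
  }
}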
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index cdd4c68470..b692c6d9ad 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -391,12 +391,12 @@ jobject JniCompilerTest::class_loader_;
// 3) synchronized keyword
// -- TODO: We can support (1) if we remove the mutator lock assert during stub lookup.
# define JNI_TEST_NORMAL_ONLY(TestName) \
- TEST_F(JniCompilerTest, TestName ## Default) { \
+ TEST_F(JniCompilerTest, TestName ## NormalCompiler) { \
SCOPED_TRACE("Normal JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
TestName ## Impl(); \
} \
- TEST_F(JniCompilerTest, TestName ## Generic) { \
+ TEST_F(JniCompilerTest, TestName ## NormalGeneric) { \
SCOPED_TRACE("Normal JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
TEST_DISABLED_FOR_MIPS(); \
@@ -404,46 +404,41 @@ jobject JniCompilerTest::class_loader_;
TestName ## Impl(); \
}
-// Test normal compiler, @FastNative compiler, and normal/@FastNative generic for normal natives.
+// Test (normal, @FastNative) x (compiler, generic).
#define JNI_TEST(TestName) \
JNI_TEST_NORMAL_ONLY(TestName) \
- TEST_F(JniCompilerTest, TestName ## Fast) { \
+ TEST_F(JniCompilerTest, TestName ## FastCompiler) { \
SCOPED_TRACE("@FastNative JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
TestName ## Impl(); \
} \
- \
-
-// TODO: maybe. @FastNative generic JNI support?
-#if 0
+ \
TEST_F(JniCompilerTest, TestName ## FastGeneric) { \
+ SCOPED_TRACE("@FastNative JNI with generic"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
TEST_DISABLED_FOR_MIPS(); \
SetCheckGenericJni(true); \
TestName ## Impl(); \
}
-#endif
+// Test (@CriticalNative) x (compiler, generic) only.
#define JNI_TEST_CRITICAL_ONLY(TestName) \
- TEST_F(JniCompilerTest, TestName ## DefaultCritical) { \
+ TEST_F(JniCompilerTest, TestName ## CriticalCompiler) { \
SCOPED_TRACE("@CriticalNative JNI with compiler"); \
gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
TestName ## Impl(); \
+ } \
+ TEST_F(JniCompilerTest, TestName ## CriticalGeneric) { \
+ SCOPED_TRACE("@CriticalNative JNI with generic"); \
+ gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
+ TestName ## Impl(); \
}
-// Test everything above and also the @CriticalNative compiler, and @CriticalNative generic JNI.
+// Test everything: (normal, @FastNative, @CriticalNative) x (compiler, generic).
#define JNI_TEST_CRITICAL(TestName) \
JNI_TEST(TestName) \
JNI_TEST_CRITICAL_ONLY(TestName) \
-// TODO: maybe, more likely since calling convention changed. @Criticalnative generic JNI support?
-#if 0
- TEST_F(JniCompilerTest, TestName ## GenericCritical) { \
- gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
- TestName ## Impl(); \
- }
-#endif
-
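With the #if 0 blocks gone, JNI_TEST(Foo) produces four enabled tests (FooNormalCompiler, FooNormalGeneric, FooFastCompiler, FooFastGeneric) and JNI_TEST_CRITICAL(Foo) adds FooCriticalCompiler and FooCriticalGeneric on top. As an illustration (the name Foo is a placeholder), the newly enabled FastGeneric case expands to roughly:

TEST_F(JniCompilerTest, FooFastGeneric) {
  SCOPED_TRACE("@FastNative JNI with generic");
  gCurrentJni = static_cast<uint32_t>(JniKind::kFast);
  TEST_DISABLED_FOR_MIPS();
  SetCheckGenericJni(true);
  FooImpl();
}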
static void expectValidThreadState() {
// Normal JNI always transitions to "Native". Other JNIs stay in the "Runnable" state.
if (IsCurrentJniNormal()) {
@@ -506,6 +501,7 @@ static void expectValidJniEnvAndClass(JNIEnv* env, jclass kls) {
// Temporarily disable the EXPECT_NUM_STACK_REFERENCES check (for a single test).
struct ScopedDisableCheckNumStackReferences {
ScopedDisableCheckNumStackReferences() {
+ CHECK(sCheckNumStackReferences); // No nested support.
sCheckNumStackReferences = false;
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index d629c0c887..c840a9e64a 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -55,6 +55,7 @@
#include "utils/dex_cache_arrays_layout-inl.h"
#include "vdex_file.h"
#include "verifier/method_verifier.h"
+#include "verifier/verifier_deps.h"
#include "zip_archive.h"
namespace art {
@@ -297,6 +298,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings)
dex_files_(nullptr),
vdex_size_(0u),
vdex_dex_files_offset_(0u),
+ vdex_verifier_deps_offset_(0u),
oat_size_(0u),
bss_size_(0u),
oat_data_offset_(0u),
@@ -307,6 +309,8 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings)
size_oat_header_(0),
size_oat_header_key_value_store_(0),
size_dex_file_(0),
+ size_verifier_deps_(0),
+ size_verifier_deps_alignment_(0),
size_interpreter_to_interpreter_bridge_(0),
size_interpreter_to_compiled_code_bridge_(0),
size_jni_dlsym_lookup_(0),
@@ -476,11 +480,6 @@ bool OatWriter::WriteAndOpenDexFiles(
!OpenDexFiles(vdex_file, verify, &dex_files_map, &dex_files)) {
return false;
}
-
- // VDEX is finalized. Seek to the beginning of the file and write the header.
- if (!WriteVdexHeader(vdex_out.get())) {
- return false;
- }
} else {
// Write DEX files into OAT, mmap and open them.
if (!WriteDexFiles(oat_rodata, vdex_file) ||
@@ -967,7 +966,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
nullptr,
invoke_type);
if (method == nullptr) {
- LOG(INTERNAL_FATAL) << "Unexpected failure to resolve a method: "
+ LOG(FATAL_WITHOUT_ABORT) << "Unexpected failure to resolve a method: "
<< PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
soa.Self()->AssertPendingException();
mirror::Throwable* exc = soa.Self()->GetException();
@@ -1595,6 +1594,52 @@ bool OatWriter::WriteRodata(OutputStream* out) {
return true;
}
+bool OatWriter::WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps) {
+ if (!kIsVdexEnabled) {
+ return true;
+ }
+
+ if (verifier_deps == nullptr) {
+ // Nothing to write. Record the offset, but no need
+ // for alignment.
+ vdex_verifier_deps_offset_ = vdex_size_;
+ return true;
+ }
+
+ size_t initial_offset = vdex_size_;
+ size_t start_offset = RoundUp(initial_offset, 4u);
+
+ vdex_size_ = start_offset;
+ vdex_verifier_deps_offset_ = vdex_size_;
+ size_verifier_deps_alignment_ = start_offset - initial_offset;
+
+ off_t actual_offset = vdex_out->Seek(start_offset, kSeekSet);
+ if (actual_offset != static_cast<off_t>(start_offset)) {
+ PLOG(ERROR) << "Failed to seek to verifier deps section. Actual: " << actual_offset
+ << " Expected: " << start_offset
+ << " Output: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ std::vector<uint8_t> buffer;
+ verifier_deps->Encode(&buffer);
+
+ if (!vdex_out->WriteFully(buffer.data(), buffer.size())) {
+ PLOG(ERROR) << "Failed to write verifier deps."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing verifier deps."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ size_verifier_deps_ = buffer.size();
+ vdex_size_ += size_verifier_deps_;
+ return true;
+}
+
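WriteVerifierDeps tracks both the payload size and the bytes of padding added to reach 4-byte alignment, so the per-section statistics still add up to the final vdex size. A standalone sketch of that offset bookkeeping (helper and struct names are illustrative):

#include <cstddef>

// Round `offset` up to the next multiple of `alignment` (a power of two).
constexpr size_t RoundUpTo(size_t offset, size_t alignment) {
  return (offset + alignment - 1u) & ~(alignment - 1u);
}

struct SectionLayout {
  size_t start;    // Aligned offset where the section begins.
  size_t padding;  // Bytes inserted before the section for alignment.
  size_t size;     // Payload size.
};

// Append a `payload_size`-byte section at the current file size, aligning its
// start to 4 bytes, and advance `file_size` past it.
SectionLayout AppendAlignedSection(size_t* file_size, size_t payload_size) {
  SectionLayout layout;
  layout.start = RoundUpTo(*file_size, 4u);
  layout.padding = layout.start - *file_size;
  layout.size = payload_size;
  *file_size = layout.start + payload_size;
  return layout;
}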
bool OatWriter::WriteCode(OutputStream* out) {
CHECK(write_state_ == WriteState::kWriteText);
@@ -1638,6 +1683,8 @@ bool OatWriter::WriteCode(OutputStream* out) {
DO_STAT(size_oat_header_);
DO_STAT(size_oat_header_key_value_store_);
DO_STAT(size_dex_file_);
+ DO_STAT(size_verifier_deps_);
+ DO_STAT(size_verifier_deps_alignment_);
DO_STAT(size_interpreter_to_interpreter_bridge_);
DO_STAT(size_interpreter_to_compiled_code_bridge_);
DO_STAT(size_jni_dlsym_lookup_);
@@ -2341,6 +2388,9 @@ bool OatWriter::WriteTypeLookupTables(
}
bool OatWriter::WriteVdexHeader(OutputStream* vdex_out) {
+ if (!kIsVdexEnabled) {
+ return true;
+ }
off_t actual_offset = vdex_out->Seek(0, kSeekSet);
if (actual_offset != 0) {
PLOG(ERROR) << "Failed to seek to the beginning of vdex file. Actual: " << actual_offset
@@ -2348,12 +2398,24 @@ bool OatWriter::WriteVdexHeader(OutputStream* vdex_out) {
return false;
}
- VdexFile::Header vdex_header;
+ DCHECK_NE(vdex_dex_files_offset_, 0u);
+ DCHECK_NE(vdex_verifier_deps_offset_, 0u);
+
+ size_t dex_section_size = vdex_verifier_deps_offset_ - vdex_dex_files_offset_;
+ size_t verifier_deps_section_size = vdex_size_ - vdex_verifier_deps_offset_;
+
+ VdexFile::Header vdex_header(dex_section_size, verifier_deps_section_size);
if (!vdex_out->WriteFully(&vdex_header, sizeof(VdexFile::Header))) {
PLOG(ERROR) << "Failed to write vdex header. File: " << vdex_out->GetLocation();
return false;
}
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing to vdex file."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+
return true;
}
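Since the header is now written only after every section is laid out, its two size fields fall out of the recorded offsets: the dex section spans from vdex_dex_files_offset_ to vdex_verifier_deps_offset_, and the VerifierDeps section spans from there to the end of the payload. A small sketch of that arithmetic with simplified field names:

#include <cstddef>

struct VdexLayout {
  size_t dex_files_offset;      // Start of the dex files section.
  size_t verifier_deps_offset;  // Start of the VerifierDeps section.
  size_t total_size;            // Size of the vdex payload written so far.
};

size_t DexSectionSize(const VdexLayout& v) {
  return v.verifier_deps_offset - v.dex_files_offset;
}

size_t VerifierDepsSectionSize(const VdexLayout& v) {
  return v.total_size - v.verifier_deps_offset;
}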
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index dd7d699eee..670accbbaf 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -50,6 +50,10 @@ namespace linker {
class MultiOatRelativePatcher;
} // namespace linker
+namespace verifier {
+ class VerifierDeps;
+} // namespace verifier
+
// OatHeader variable length with count of D OatDexFiles
//
// OatDexFile[0] one variable sized OatDexFile with offsets to Dex and OatClasses
@@ -149,6 +153,9 @@ class OatWriter {
bool verify,
/*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
+ bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
+ bool WriteVdexHeader(OutputStream* vdex_out);
+
// Prepare layout of remaining data.
void PrepareLayout(const CompilerDriver* compiler,
ImageWriter* image_writer,
@@ -232,8 +239,6 @@ class OatWriter {
// with a given DexMethodVisitor.
bool VisitDexMethods(DexMethodVisitor* visitor);
- bool WriteVdexHeader(OutputStream* vdex_out);
-
bool WriteDexFiles(OutputStream* out, File* file);
bool WriteDexFile(OutputStream* out, File* file, OatDexFile* oat_dex_file);
bool SeekToDexFile(OutputStream* out, File* file, OatDexFile* oat_dex_file);
@@ -311,6 +316,9 @@ class OatWriter {
// Offset of section holding Dex files inside Vdex.
size_t vdex_dex_files_offset_;
+ // Offset of section holding VerifierDeps inside Vdex.
+ size_t vdex_verifier_deps_offset_;
+
// Size required for Oat data structures.
size_t oat_size_;
@@ -341,6 +349,8 @@ class OatWriter {
uint32_t size_oat_header_;
uint32_t size_oat_header_key_value_store_;
uint32_t size_dex_file_;
+ uint32_t size_verifier_deps_;
+ uint32_t size_verifier_deps_alignment_;
uint32_t size_interpreter_to_interpreter_bridge_;
uint32_t size_interpreter_to_compiled_code_bridge_;
uint32_t size_jni_dlsym_lookup_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 51ba187c1b..0f8cdbb19b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -18,6 +18,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm.h"
+#include "code_generator_arm_vixl.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -345,7 +346,7 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
 // Initialize to anything to silence compiler warnings.
QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
- switch (invoke->GetOriginalInvokeType()) {
+ switch (invoke->GetInvokeType()) {
case kStatic:
entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
break;
@@ -575,11 +576,19 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
- return std::unique_ptr<CodeGenerator>(
- new (arena) arm::CodeGeneratorARM(graph,
- *isa_features.AsArmInstructionSetFeatures(),
- compiler_options,
- stats));
+ if (kArmUseVIXL32) {
+ return std::unique_ptr<CodeGenerator>(
+ new (arena) arm::CodeGeneratorARMVIXL(graph,
+ *isa_features.AsArmInstructionSetFeatures(),
+ compiler_options,
+ stats));
+ } else {
+ return std::unique_ptr<CodeGenerator>(
+ new (arena) arm::CodeGeneratorARM(graph,
+ *isa_features.AsArmInstructionSetFeatures(),
+ compiler_options,
+ stats));
+ }
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -1117,7 +1126,8 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
}
}
-LocationSummary* CodeGenerator::CreateNullCheckLocations(HNullCheck* null_check) {
+LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
+ RegisterSet caller_saves) {
// Note: Using kNoCall allows the method to be treated as leaf (and eliminate the
// HSuspendCheck from entry block). However, it will still get a valid stack frame
// because the HNullCheck needs an environment.
@@ -1125,16 +1135,15 @@ LocationSummary* CodeGenerator::CreateNullCheckLocations(HNullCheck* null_check)
// When throwing from a try block, we may need to retrieve dalvik registers from
// physical registers and we also need to set up stack mask for GC. This is
// implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
- bool can_throw_into_catch_block = null_check->CanThrowIntoCatchBlock();
+ bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
if (can_throw_into_catch_block) {
call_kind = LocationSummary::kCallOnSlowPath;
}
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(null_check, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
}
- locations->SetInAt(0, Location::RequiresRegister());
- DCHECK(!null_check->HasUses());
+ DCHECK(!instruction->HasUses());
return locations;
}
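Each backend now states only what varies per instruction: the inputs, and, for checks whose runtime entrypoint takes arguments, the registers that call clobbers. A hedged sketch of the resulting call sites, shaped after the ARM changes further below but with placeholder argument-register codes:

// Null check: one input, no entrypoint arguments, so the default empty
// caller-save set is enough.
void BuildNullCheckLocations(CodeGenerator* codegen, HNullCheck* check) {
  LocationSummary* locations = codegen->CreateThrowingSlowPathLocations(check);
  locations->SetInAt(0, Location::RequiresRegister());
}

// Bounds check: the ThrowArrayBounds entrypoint consumes two arguments, so
// those argument registers are declared as custom caller saves for the slow
// path (register codes 0 and 1 are placeholders for the calling convention's
// first two argument registers).
void BuildBoundsCheckLocations(CodeGenerator* codegen, HBoundsCheck* check) {
  RegisterSet caller_saves = RegisterSet::Empty();
  caller_saves.Add(Location::RegisterLocation(0));
  caller_saves.Add(Location::RegisterLocation(1));
  LocationSummary* locations =
      codegen->CreateThrowingSlowPathLocations(check, caller_saves);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}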
@@ -1273,7 +1282,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
saved_fpu_stack_offsets_[i] = stack_offset;
@@ -1292,7 +1301,7 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
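The spill loops now visit only the registers whose bit is actually set, from least to most significant, rather than scanning every register index and testing membership. A self-contained sketch of that iteration (not the real LowToHighBits helper), assuming a GCC/Clang toolchain for __builtin_ctz:

#include <cstdint>
#include <vector>

// Return the indices of the set bits in `mask`, lowest first.
std::vector<uint32_t> SetBitsLowToHigh(uint32_t mask) {
  std::vector<uint32_t> bits;
  while (mask != 0u) {
    bits.push_back(static_cast<uint32_t>(__builtin_ctz(mask)));  // Lowest set bit.
    mask &= mask - 1u;  // Clear it.
  }
  return bits;
}

// A spill mask of 0b00010110 yields {1, 2, 4}: the registers to save, in the
// same order the slow path assigns their stack offsets.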
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 22b5c9cff4..85002045a3 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -313,7 +313,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
- LocationSummary* CreateNullCheckLocations(HNullCheck* null_check);
+ LocationSummary* CreateThrowingSlowPathLocations(
+ HInstruction* instruction, RegisterSet caller_saves = RegisterSet::Empty());
void GenerateNullCheck(HNullCheck* null_check);
virtual void GenerateImplicitNullCheck(HNullCheck* null_check) = 0;
virtual void GenerateExplicitNullCheck(HNullCheck* null_check) = 0;
@@ -514,7 +515,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// otherwise return a fall-back info that should be used instead.
virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) = 0;
+ HInvokeStaticOrDirect* invoke) = 0;
// Generate a call to a static or direct method.
virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
@@ -579,6 +580,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
core_spill_mask_(0),
fpu_spill_mask_(0),
first_register_slot_in_slow_path_(0),
+ allocated_registers_(RegisterSet::Empty()),
blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
kArenaAllocCodeGenerator)),
blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3b2758bc42..55e122150e 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -191,7 +191,7 @@ void SlowPathCodeARM::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary*
uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
orig_offset = stack_offset;
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
saved_fpu_stack_offsets_[i] = stack_offset;
stack_offset += kArmWordSize;
@@ -275,10 +275,6 @@ class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -1681,7 +1677,7 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3264,14 +3260,8 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
}
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -4110,7 +4100,7 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
@@ -4430,7 +4420,8 @@ void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
- codegen_->CreateNullCheckLocations(instruction);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
}
void CodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4582,7 +4573,7 @@ void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -5067,15 +5058,13 @@ void InstructionCodeGeneratorARM::VisitIntermediateAddress(HIntermediateAddress*
}
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -5119,7 +5108,7 @@ void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction)
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5441,7 +5430,7 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -5745,7 +5734,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -6683,7 +6672,7 @@ void CodeGeneratorARM::GenerateReadBarrierForRootSlow(HInstruction* instruction,
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) {
+ HInvokeStaticOrDirect* invoke) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
// We disable pc-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
@@ -6697,7 +6686,7 @@ HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOr
if (dispatch_info.code_ptr_location == HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
const DexFile& outer_dex_file = GetGraph()->GetDexFile();
- if (&outer_dex_file != target_method.dex_file) {
+ if (&outer_dex_file != invoke->GetTargetMethod().dex_file) {
// Calls across dex files are more likely to exceed the available BL range,
// so use absolute patch with fixup if available and kCallArtMethod otherwise.
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
@@ -6759,10 +6748,13 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
- case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
+ uint32_t offset =
+ GetThreadOffset<kArmPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
// temp = thread->string_init_entrypoint
- __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, invoke->GetStringInitOffset());
+ __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, offset);
break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 424a1a1455..6416d40f7f 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -454,7 +454,7 @@ class CodeGeneratorARM : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7f542da047..a2a2e426b6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -139,18 +139,18 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type retur
// Calculate memory accessing operand for save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
- RegisterSet* register_set,
+ LocationSummary* locations,
int64_t spill_offset,
bool is_save) {
- DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
codegen->GetNumberOfCoreRegisters(),
- register_set->GetFloatingPointRegisters(),
+ fp_spills,
codegen->GetNumberOfFloatingPointRegisters()));
- CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
- register_set->GetCoreRegisters() & (~callee_saved_core_registers.GetList()));
- CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
- register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.GetList()));
+ CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills);
+ CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize, fp_spills);
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
@@ -184,38 +184,35 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
}
void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* register_set = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
- if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
- // If the register holds an object, update the stack mask.
- if (locations->RegisterContainsObject(i)) {
- locations->SetStackBit(stack_offset / kVRegSize);
- }
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- saved_core_stack_offsets_[i] = stack_offset;
- stack_offset += kXRegSizeInBytes;
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
}
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
+ stack_offset += kXRegSizeInBytes;
}
- for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
- if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
- register_set->ContainsFloatingPointRegister(i)) {
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- saved_fpu_stack_offsets_[i] = stack_offset;
- stack_offset += kDRegSizeInBytes;
- }
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ for (uint32_t i : LowToHighBits(fp_spills)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
+ stack_offset += kDRegSizeInBytes;
}
- SaveRestoreLiveRegistersHelper(codegen, register_set,
+ SaveRestoreLiveRegistersHelper(codegen,
+ locations,
codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
}
void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* register_set = locations->GetLiveRegisters();
- SaveRestoreLiveRegistersHelper(codegen, register_set,
+ SaveRestoreLiveRegistersHelper(codegen,
+ locations,
codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
}
@@ -261,10 +258,6 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -1608,7 +1601,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -2036,7 +2029,7 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -2306,15 +2299,13 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1).GetCode()));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -2685,14 +2676,8 @@ void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
}
void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -2924,7 +2909,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3077,7 +3062,7 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -3559,7 +3544,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method ATTRIBUTE_UNUSED) {
+ HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
// On ARM64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -3585,10 +3570,13 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
// Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
- case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
+ uint32_t offset =
+ GetThreadOffset<kArm64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
// temp = thread->string_init_entrypoint
- __ Ldr(XRegisterFrom(temp), MemOperand(tr, invoke->GetStringInitOffset()));
+ __ Ldr(XRegisterFrom(temp), MemOperand(tr, offset));
break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
@@ -3603,7 +3591,7 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
// Add ADRP with its PC-relative DexCache access patch.
- const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
+ const DexFile& dex_file = invoke->GetDexFile();
uint32_t element_offset = invoke->GetDexCacheArrayOffset();
vixl::aarch64::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
{
@@ -3944,7 +3932,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -4384,7 +4372,8 @@ void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
- codegen_->CreateNullCheckLocations(instruction);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
}
void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4670,7 +4659,7 @@ void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f1dc7eecb5..a15224578d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -522,7 +522,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
new file mode 100644
index 0000000000..226f109bec
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -0,0 +1,2145 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm_vixl.h"
+
+#include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
+#include "code_generator_utils.h"
+#include "common_arm.h"
+#include "compiled_method.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "thread.h"
+#include "utils/arm/assembler_arm_vixl.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace arm {
+
+namespace vixl32 = vixl::aarch32;
+using namespace vixl32; // NOLINT(build/namespaces)
+
+using helpers::DWARFReg;
+using helpers::FromLowSToD;
+using helpers::OutputRegister;
+using helpers::InputRegisterAt;
+using helpers::InputOperandAt;
+using helpers::OutputSRegister;
+using helpers::InputSRegisterAt;
+
+using RegisterList = vixl32::RegisterList;
+
+static bool ExpectedPairLayout(Location location) {
+ // We expect this for both core and fpu register pairs.
+ return ((location.low() & 1) == 0) && (location.low() + 1 == location.high());
+}
+
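ExpectedPairLayout captures the constraint that a core or FPU register pair starts on an even register and occupies two consecutive registers. A few illustrative cases of the same predicate on raw register codes (a standalone restatement, not the real Location API):

#include <cassert>

constexpr bool IsValidPairLayout(int low, int high) {
  return ((low & 1) == 0) && (low + 1 == high);
}

static_assert(IsValidPairLayout(0, 1), "r0/r1 is a valid pair");
static_assert(IsValidPairLayout(2, 3), "r2/r3 is a valid pair");
static_assert(!IsValidPairLayout(1, 2), "a pair may not start on an odd register");
static_assert(!IsValidPairLayout(0, 2), "a pair must use consecutive registers");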
+static constexpr size_t kArmInstrMaxSizeInBytes = 4u;
+
+#ifdef __
+#error "ARM Codegen VIXL macro-assembler macro already defined."
+#endif
+
+// TODO: Remove with later pop when codegen complete.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+
+// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
+#define __ down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler()-> // NOLINT
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
+
+// Marker that code is yet to be, and must be, implemented.
+#define TODO_VIXL32(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
+
+class DivZeroCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+ explicit DivZeroCheckSlowPathARMVIXL(HDivZeroCheck* instruction)
+ : SlowPathCodeARMVIXL(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARMVIXL* armvixl_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ armvixl_codegen->InvokeRuntime(kQuickThrowDivZero,
+ instruction_,
+ instruction_->GetDexPc(),
+ this);
+ CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARMVIXL"; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARMVIXL);
+};
+
+inline vixl32::Condition ARMCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ case kCondLT: return lt;
+ case kCondLE: return le;
+ case kCondGT: return gt;
+ case kCondGE: return ge;
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+// Maps signed condition to unsigned condition.
+inline vixl32::Condition ARMUnsignedCondition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ // Signed to unsigned.
+ case kCondLT: return lo;
+ case kCondLE: return ls;
+ case kCondGT: return hi;
+ case kCondGE: return hs;
+    // Unsigned conditions remain unchanged.
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+inline vixl32::Condition ARMFPCondition(IfCondition cond, bool gt_bias) {
+  // The ARM condition codes can express all the necessary branches; see the
+  // "Meaning (floating-point)" column in table A8-1 of the ARMv7 reference manual.
+ // There is no dex instruction or HIR that would need the missing conditions
+ // "equal or unordered" or "not equal".
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne /* unordered */;
+ case kCondLT: return gt_bias ? cc : lt /* unordered */;
+ case kCondLE: return gt_bias ? ls : le /* unordered */;
+ case kCondGT: return gt_bias ? hi /* unordered */ : gt;
+ case kCondGE: return gt_bias ? cs /* unordered */ : ge;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
+void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen ATTRIBUTE_UNUSED,
+ LocationSummary* locations ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen ATTRIBUTE_UNUSED,
+ LocationSummary* locations ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << vixl32::Register(reg);
+}
+
+void CodeGeneratorARMVIXL::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << vixl32::SRegister(reg);
+}
+
+static uint32_t ComputeSRegisterMask(const SRegisterList& regs) {
+ uint32_t mask = 0;
+ for (uint32_t i = regs.GetFirstSRegister().GetCode();
+ i <= regs.GetLastSRegister().GetCode();
+ ++i) {
+ mask |= (1 << i);
+ }
+ return mask;
+}
+
+#undef __
+
+CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : CodeGenerator(graph,
+ kNumberOfCoreRegisters,
+ kNumberOfSRegisters,
+ kNumberOfRegisterPairs,
+ kCoreCalleeSaves.GetList(),
+ ComputeSRegisterMask(kFpuCalleeSaves),
+ compiler_options,
+ stats),
+ block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this),
+ assembler_(graph->GetArena()),
+ isa_features_(isa_features) {
+ // Always save the LR register to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(LR));
+}
+
+#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()->
+
+void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
+ GetAssembler()->FinalizeCode();
+ CodeGenerator::Finalize(allocator);
+}
+
+void CodeGeneratorARMVIXL::SetupBlockedRegisters() const {
+ // Don't allocate the dalvik style register pair passing.
+ blocked_register_pairs_[R1_R2] = true;
+
+ // Stack register, LR and PC are always reserved.
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[LR] = true;
+ blocked_core_registers_[PC] = true;
+
+ // Reserve thread register.
+ blocked_core_registers_[TR] = true;
+
+ // Reserve temp register.
+ blocked_core_registers_[IP] = true;
+
+ if (GetGraph()->IsDebuggable()) {
+ // Stubs do not save callee-save floating point registers. If the graph
+ // is debuggable, we need to deal with these registers differently. For
+ // now, just block them.
+ for (uint32_t i = kFpuCalleeSaves.GetFirstSRegister().GetCode();
+ i <= kFpuCalleeSaves.GetLastSRegister().GetCode();
+ ++i) {
+ blocked_fpu_registers_[i] = true;
+ }
+ }
+
+ UpdateBlockedPairRegisters();
+}
+
+// Blocks all register pairs containing blocked core registers.
+void CodeGeneratorARMVIXL::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ ArmManagedRegister current =
+ ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ TODO_VIXL32(FATAL);
+}
+
+InstructionCodeGeneratorARMVIXL::InstructionCodeGeneratorARMVIXL(HGraph* graph,
+ CodeGeneratorARMVIXL* codegen)
+ : InstructionCodeGenerator(graph, codegen),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+void CodeGeneratorARMVIXL::ComputeSpillMask() {
+ core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
+ DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
+ // There is no easy instruction to restore just the PC on thumb2. We spill and
+ // restore another arbitrary register.
+ core_spill_mask_ |= (1 << kCoreAlwaysSpillRegister.GetCode());
+ fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
+ // We use vpush and vpop for saving and restoring floating point registers, which take
+  // an SRegister and the number of registers to save/restore after that SRegister. We
+ // therefore update the `fpu_spill_mask_` to also contain those registers not allocated,
+ // but in the range.
+ if (fpu_spill_mask_ != 0) {
+ uint32_t least_significant_bit = LeastSignificantBit(fpu_spill_mask_);
+ uint32_t most_significant_bit = MostSignificantBit(fpu_spill_mask_);
+ for (uint32_t i = least_significant_bit + 1 ; i < most_significant_bit; ++i) {
+ fpu_spill_mask_ |= (1 << i);
+ }
+ }
+}
+
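Because vpush/vpop operate on one contiguous run of S registers, ComputeSpillMask widens the FPU spill mask to include every register between the lowest and highest allocated callee-save, even ones the allocator never used. A standalone sketch of that widening, assuming a GCC/Clang toolchain for the bit-scan builtins:

#include <cstdint>

// Fill every bit between the lowest and highest set bit of `mask`.
// E.g. with s16 and s20 allocated (bits 16 and 20 set), bits 16..20 all end
// up set, so a single vpush/vpop of s16-s20 covers the whole range.
uint32_t MakeContiguous(uint32_t mask) {
  if (mask == 0u) {
    return 0u;
  }
  const uint32_t lowest = static_cast<uint32_t>(__builtin_ctz(mask));
  const uint32_t highest = 31u - static_cast<uint32_t>(__builtin_clz(mask));
  for (uint32_t i = lowest + 1u; i < highest; ++i) {
    mask |= (1u << i);
  }
  return mask;
}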
+void CodeGeneratorARMVIXL::GenerateFrameEntry() {
+ bool skip_overflow_check =
+ IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
+ DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ __ Bind(&frame_entry_label_);
+
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ if (!skip_overflow_check) {
+ __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
+ // The load must immediately precede RecordPcInfo.
+ {
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ kArmInstrMaxSizeInBytes,
+ CodeBufferCheckScope::kMaximumSize);
+ __ ldr(temp, MemOperand(temp));
+ RecordPcInfo(nullptr, 0);
+ }
+ }
+
+ __ Push(RegisterList(core_spill_mask_));
+ GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
+ GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister),
+ 0,
+ core_spill_mask_,
+ kArmWordSize);
+ if (fpu_spill_mask_ != 0) {
+ uint32_t first = LeastSignificantBit(fpu_spill_mask_);
+
+ // Check that list is contiguous.
+ DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
+
+ __ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
+ GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
+ GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0),
+ 0,
+ fpu_spill_mask_,
+ kArmWordSize);
+ }
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ Sub(sp, sp, adjust);
+ GetAssembler()->cfi().AdjustCFAOffset(adjust);
+ GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
+}
+
+void CodeGeneratorARMVIXL::GenerateFrameExit() {
+ if (HasEmptyFrame()) {
+ __ Bx(lr);
+ return;
+ }
+ GetAssembler()->cfi().RememberState();
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ Add(sp, sp, adjust);
+ GetAssembler()->cfi().AdjustCFAOffset(-adjust);
+ if (fpu_spill_mask_ != 0) {
+ uint32_t first = LeastSignificantBit(fpu_spill_mask_);
+
+ // Check that list is contiguous.
+ DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
+
+ __ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
+ GetAssembler()->cfi().AdjustCFAOffset(
+ -static_cast<int>(kArmWordSize) * POPCOUNT(fpu_spill_mask_));
+ GetAssembler()->cfi().RestoreMany(DWARFReg(vixl32::SRegister(0)),
+ fpu_spill_mask_);
+ }
+ // Pop LR into PC to return.
+ DCHECK_NE(core_spill_mask_ & (1 << kLrCode), 0U);
+ uint32_t pop_mask = (core_spill_mask_ & (~(1 << kLrCode))) | 1 << kPcCode;
+ __ Pop(RegisterList(pop_mask));
+ GetAssembler()->cfi().RestoreState();
+ GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
+}
+
+void CodeGeneratorARMVIXL::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorARMVIXL::MoveConstant(Location destination, int32_t value) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ TODO_VIXL32(FATAL);
+}
+
+uintptr_t CodeGeneratorARMVIXL::GetAddressOf(HBasicBlock* block) {
+ TODO_VIXL32(FATAL);
+ return 0;
+}
+
+void CodeGeneratorARMVIXL::GenerateImplicitNullCheck(HNullCheck* null_check) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::GenerateExplicitNullCheck(HNullCheck* null_check) {
+ TODO_VIXL32(FATAL);
+}
+
+void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
+ GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
+ if (EntrypointRequiresStackMap(entrypoint)) {
+ RecordPcInfo(instruction, dex_pc, slow_path);
+ }
+}
+
+void CodeGeneratorARMVIXL::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
+ ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
+ GenerateInvokeRuntime(entry_point_offset);
+}
+
+void CodeGeneratorARMVIXL::GenerateInvokeRuntime(int32_t entry_point_offset) {
+ GetAssembler()->LoadFromOffset(kLoadWord, lr, tr, entry_point_offset);
+ __ Blx(lr);
+}
+
+// Check if the desired_string_load_kind is supported. If it is, return it,
+// otherwise return a fall-back kind that should be used instead.
+HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) {
+ TODO_VIXL32(FATAL);
+ return desired_string_load_kind;
+}
+
+// Check if the desired_class_load_kind is supported. If it is, return it,
+// otherwise return a fall-back kind that should be used instead.
+HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) {
+ TODO_VIXL32(FATAL);
+ return desired_class_load_kind;
+}
+
+// Check if the desired_dispatch_info is supported. If it is, return it,
+// otherwise return a fall-back info that should be used instead.
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ HInvokeStaticOrDirect* invoke) {
+ TODO_VIXL32(FATAL);
+ return desired_dispatch_info;
+}
+
+// Generate a call to a static or direct method.
+void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
+ Location temp) {
+ TODO_VIXL32(FATAL);
+}
+
+// Generate a call to a virtual method.
+void CodeGeneratorARMVIXL::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) {
+ TODO_VIXL32(FATAL);
+}
+
+// Copy the result of a call into the given target.
+void CodeGeneratorARMVIXL::MoveFromReturnRegister(Location trg, Primitive::Type type) {
+ TODO_VIXL32(FATAL);
+}
+
+void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) {
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitGoto(HGoto* got) {
+ HandleGoto(got, got->GetSuccessor());
+}
+
+void LocationsBuilderARMVIXL::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateVcmp(HInstruction* instruction) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ Location lhs_loc = instruction->GetLocations()->InAt(0);
+ Location rhs_loc = instruction->GetLocations()->InAt(1);
+ if (rhs_loc.IsConstant()) {
+ // 0.0 is the only immediate that can be encoded directly in
+ // a VCMP instruction.
+ //
+ // Both the JLS (section 15.20.1) and the JVMS (section 6.5)
+ // specify that in a floating-point comparison, positive zero
+ // and negative zero are considered equal, so we can use the
+ // literal 0.0 for both cases here.
+ //
+ // Note however that some methods (Float.equals, Float.compare,
+ // Float.compareTo, Double.equals, Double.compare,
+ // Double.compareTo, Math.max, Math.min, StrictMath.max,
+ // StrictMath.min) consider 0.0 to be (strictly) greater than
+ // -0.0. So if we ever translate calls to these methods into a
+ // HCompare instruction, we must handle the -0.0 case with
+ // care here.
+ DCHECK(rhs_loc.GetConstant()->IsArithmeticZero());
+ if (type == Primitive::kPrimFloat) {
+ __ Vcmp(F32, InputSRegisterAt(instruction, 0), 0.0);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ Vcmp(F64, FromLowSToD(lhs_loc.AsFpuRegisterPairLow<vixl32::SRegister>()), 0.0);
+ }
+ } else {
+ if (type == Primitive::kPrimFloat) {
+ __ Vcmp(F32, InputSRegisterAt(instruction, 0), InputSRegisterAt(instruction, 1));
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ Vcmp(F64,
+ FromLowSToD(lhs_loc.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(rhs_loc.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateFPJumps(HCondition* cond,
+ vixl32::Label* true_label,
+ vixl32::Label* false_label ATTRIBUTE_UNUSED) {
+ // To branch on the result of the FP compare we transfer FPSCR to APSR (encoded as PC in VMRS).
+ __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
+ __ B(ARMFPCondition(cond->GetCondition(), cond->IsGtBias()), true_label);
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateLongComparesAndJumps(HCondition* cond,
+ vixl32::Label* true_label,
+ vixl32::Label* false_label) {
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ IfCondition if_cond = cond->GetCondition();
+
+ vixl32::Register left_high = left.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register left_low = left.AsRegisterPairLow<vixl32::Register>();
+ IfCondition true_high_cond = if_cond;
+ IfCondition false_high_cond = cond->GetOppositeCondition();
+ vixl32::Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
+
+ // Set the conditions for the test, remembering that == needs to be
+ // decided using the low words.
+ // TODO: consider avoiding jumps with temporary and CMP low+SBC high
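+ // For example, for kCondLT (signed a < b):
+ //   high(a) < high(b) (signed)  => a < b is true,
+ //   high(a) > high(b) (signed)  => a < b is false,
+ //   high words equal            => decided by an unsigned compare of the lows.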
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ // Nothing to do.
+ break;
+ case kCondLT:
+ false_high_cond = kCondGT;
+ break;
+ case kCondLE:
+ true_high_cond = kCondLT;
+ break;
+ case kCondGT:
+ false_high_cond = kCondLT;
+ break;
+ case kCondGE:
+ true_high_cond = kCondGT;
+ break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
+ }
+ if (right.IsConstant()) {
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int32_t val_low = Low32Bits(value);
+ int32_t val_high = High32Bits(value);
+
+ __ Cmp(left_high, val_high);
+ if (if_cond == kCondNE) {
+ __ B(ARMCondition(true_high_cond), true_label);
+ } else if (if_cond == kCondEQ) {
+ __ B(ARMCondition(false_high_cond), false_label);
+ } else {
+ __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(false_high_cond), false_label);
+ }
+ // Must be equal high, so compare the lows.
+ __ Cmp(left_low, val_low);
+ } else {
+ vixl32::Register right_high = right.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register right_low = right.AsRegisterPairLow<vixl32::Register>();
+
+ __ Cmp(left_high, right_high);
+ if (if_cond == kCondNE) {
+ __ B(ARMCondition(true_high_cond), true_label);
+ } else if (if_cond == kCondEQ) {
+ __ B(ARMCondition(false_high_cond), false_label);
+ } else {
+ __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(false_high_cond), false_label);
+ }
+ // Must be equal high, so compare the lows.
+ __ Cmp(left_low, right_low);
+ }
+ // The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
+ __ B(final_condition, true_label);
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition,
+ vixl32::Label* true_target_in,
+ vixl32::Label* false_target_in) {
+ // Generated branching requires both targets to be explicit. If either of the
+ // targets is nullptr (i.e. a fall-through), use the local `fallthrough` label
+ // instead and bind it at the end.
+ vixl32::Label fallthrough;
+ vixl32::Label* true_target = (true_target_in == nullptr) ? &fallthrough : true_target_in;
+ vixl32::Label* false_target = (false_target_in == nullptr) ? &fallthrough : false_target_in;
+
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(condition, true_target, false_target);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ GenerateVcmp(condition);
+ GenerateFPJumps(condition, true_target, false_target);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected compare type " << type;
+ }
+
+ if (false_target != &fallthrough) {
+ __ B(false_target);
+ }
+
+ if (true_target_in == nullptr || false_target_in == nullptr) {
+ __ Bind(&fallthrough);
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateTestAndBranch(HInstruction* instruction,
+ size_t condition_input_index,
+ vixl32::Label* true_target,
+ vixl32::Label* false_target) {
+ HInstruction* cond = instruction->InputAt(condition_input_index);
+
+ if (true_target == nullptr && false_target == nullptr) {
+ // Nothing to do. The code always falls through.
+ return;
+ } else if (cond->IsIntConstant()) {
+ // Constant condition, statically compared against "true" (integer value 1).
+ if (cond->AsIntConstant()->IsTrue()) {
+ if (true_target != nullptr) {
+ __ B(true_target);
+ }
+ } else {
+ DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
+ if (false_target != nullptr) {
+ __ B(false_target);
+ }
+ }
+ return;
+ }
+
+ // The following code generates these patterns:
+ // (1) true_target == nullptr && false_target != nullptr
+ // - opposite condition true => branch to false_target
+ // (2) true_target != nullptr && false_target == nullptr
+ // - condition true => branch to true_target
+ // (3) true_target != nullptr && false_target != nullptr
+ // - condition true => branch to true_target
+ // - branch to false_target
+ if (IsBooleanValueOrMaterializedCondition(cond)) {
+ // Condition has been materialized, compare the output to 0.
+ if (kIsDebugBuild) {
+ Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
+ DCHECK(cond_val.IsRegister());
+ }
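+ // CBZ/CBNZ compare a register against zero and branch in a single
+ // instruction, but only encode short forward branches; the macro-assembler is
+ // expected to fall back to CMP + B<cond> when the target is out of range.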
+ if (true_target == nullptr) {
+ __ Cbz(InputRegisterAt(instruction, condition_input_index), false_target);
+ } else {
+ __ Cbnz(InputRegisterAt(instruction, condition_input_index), true_target);
+ }
+ } else {
+ // Condition has not been materialized. Use its inputs as the comparison and
+ // its condition as the branch condition.
+ HCondition* condition = cond->AsCondition();
+
+ // If this is a long or FP comparison that has been folded into
+ // the HCondition, generate the comparison directly.
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
+ GenerateCompareTestAndBranch(condition, true_target, false_target);
+ return;
+ }
+
+ LocationSummary* locations = cond->GetLocations();
+ DCHECK(locations->InAt(0).IsRegister());
+ vixl32::Register left = InputRegisterAt(cond, 0);
+ Location right = locations->InAt(1);
+ if (right.IsRegister()) {
+ __ Cmp(left, InputRegisterAt(cond, 1));
+ } else {
+ DCHECK(right.IsConstant());
+ __ Cmp(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
+ if (true_target == nullptr) {
+ __ B(ARMCondition(condition->GetOppositeCondition()), false_target);
+ } else {
+ __ B(ARMCondition(condition->GetCondition()), true_target);
+ }
+ }
+
+ // If neither branch falls through (case 3), the conditional branch to `true_target`
+ // was already emitted (case 2) and we need to emit a jump to `false_target`.
+ if (true_target != nullptr && false_target != nullptr) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) {
+ HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
+ HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
+ vixl32::Label* true_target =
+ codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
+ nullptr : codegen_->GetLabelOf(true_successor);
+ vixl32::Label* false_target =
+ codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
+ nullptr : codegen_->GetLabelOf(false_successor);
+ GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+}
+
+void CodeGeneratorARMVIXL::GenerateNop() {
+ __ Nop();
+}
+
+void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
+ // Handle the long/FP comparisons made in instruction simplification.
+ switch (cond->InputAt(0)->GetType()) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ }
+ break;
+
+ // TODO: https://android-review.googlesource.com/#/c/252265/
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+
+ default:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::HandleCondition(HCondition* cond) {
+ if (cond->IsEmittedAtUseSite()) {
+ return;
+ }
+
+ LocationSummary* locations = cond->GetLocations();
+ Location right = locations->InAt(1);
+ vixl32::Register out = OutputRegister(cond);
+ vixl32::Label true_label, false_label;
+
+ switch (cond->InputAt(0)->GetType()) {
+ default: {
+ // Integer case.
+ if (right.IsRegister()) {
+ __ Cmp(InputRegisterAt(cond, 0), InputRegisterAt(cond, 1));
+ } else {
+ DCHECK(right.IsConstant());
+ __ Cmp(InputRegisterAt(cond, 0), CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+ }
+ {
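+ // The assembler scope bounds the size of the code emitted below and keeps the
+ // macro-assembler from inserting anything (such as a literal pool) between
+ // the IT instruction and the two conditional moves it covers.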
+ AssemblerAccurateScope aas(GetVIXLAssembler(),
+ kArmInstrMaxSizeInBytes * 3u,
+ CodeBufferCheckScope::kMaximumSize);
+ __ ite(ARMCondition(cond->GetCondition()));
+ __ mov(ARMCondition(cond->GetCondition()), OutputRegister(cond), 1);
+ __ mov(ARMCondition(cond->GetOppositeCondition()), OutputRegister(cond), 0);
+ }
+ return;
+ }
+ case Primitive::kPrimLong:
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ GenerateVcmp(cond);
+ GenerateFPJumps(cond, &true_label, &false_label);
+ break;
+ }
+
+ // Convert the jumps into the result.
+ vixl32::Label done_label;
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ Mov(out, 0);
+ __ B(&done_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ Mov(out, 1);
+ __ Bind(&done_label);
+}
+
+void LocationsBuilderARMVIXL::VisitEqual(HEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitEqual(HEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitNotEqual(HNotEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNotEqual(HNotEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitLessThan(HLessThan* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLessThan(HLessThan* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitGreaterThan(HGreaterThan* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitGreaterThan(HGreaterThan* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitBelow(HBelow* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBelow(HBelow* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitAbove(HAbove* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAbove(HAbove* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ HandleCondition(comp);
+}
+
+void LocationsBuilderARMVIXL::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARMVIXL::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderARMVIXL::VisitReturnVoid(HReturnVoid* ret) {
+ ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderARMVIXL::VisitReturn(HReturn* ret) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(ret, LocationSummary::kNoCall);
+ locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
+
+ // The float-to-long, double-to-long and long-to-float type conversions
+ // rely on a call to the runtime.
+ LocationSummary::CallKind call_kind =
+ (((input_type == Primitive::kPrimFloat || input_type == Primitive::kPrimDouble)
+ && result_type == Primitive::kPrimLong)
+ || (input_type == Primitive::kPrimLong && result_type == Primitive::kPrimFloat))
+ ? LocationSummary::kCallOnMainOnly
+ : LocationSummary::kNoCall;
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+ // The Java language does not allow treating boolean as an integral type, but
+ // our bit representation makes it safe.
+
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-int' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-int' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-long' instruction.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(
+ calling_convention.GetFpuRegisterAt(0)));
+ locations->SetOut(Location::RegisterPairLocation(R0, R1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ // Processing a Dex `double-to-long' instruction.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterPairLocation(
+ calling_convention.GetFpuRegisterAt(0),
+ calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(Location::RegisterPairLocation(R0, R1));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-float' instruction.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ break;
+ }
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-float' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ locations->AddTemp(Location::RequiresFpuRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-double' instruction.
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ DCHECK_NE(result_type, input_type);
+ switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
+ __ Sbfx(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>(), 0, 8);
+ break;
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimShort:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
+ __ Sbfx(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>(), 0, 16);
+ break;
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-short' instruction.
+ __ Sbfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ DCHECK(out.IsRegister());
+ if (in.IsRegisterPair()) {
+ __ Mov(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ GetAssembler()->LoadFromOffset(kLoadWord,
+ OutputRegister(conversion),
+ sp,
+ in.GetStackIndex());
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ Mov(OutputRegister(conversion), static_cast<int32_t>(value));
+ }
+ break;
+
+ case Primitive::kPrimFloat: {
+ // Processing a Dex `float-to-int' instruction.
+ vixl32::SRegister temp = locations->GetTemp(0).AsFpuRegisterPairLow<vixl32::SRegister>();
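+ // VCVT to a signed integer rounds toward zero and saturates on overflow (a
+ // NaN input becomes 0), which matches Java's float-to-int narrowing rules.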
+ __ Vcvt(I32, F32, temp, InputSRegisterAt(conversion, 0));
+ __ Vmov(OutputRegister(conversion), temp);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ // Processing a Dex `double-to-int' instruction.
+ vixl32::SRegister temp_s =
+ locations->GetTemp(0).AsFpuRegisterPairLow<vixl32::SRegister>();
+ __ Vcvt(I32, F64, temp_s, FromLowSToD(in.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ __ Vmov(OutputRegister(conversion), temp_s);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimLong:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-long' instruction.
+ DCHECK(out.IsRegisterPair());
+ DCHECK(in.IsRegister());
+ __ Mov(out.AsRegisterPairLow<vixl32::Register>(), InputRegisterAt(conversion, 0));
+ // Sign extension.
+ __ Asr(out.AsRegisterPairHigh<vixl32::Register>(),
+ out.AsRegisterPairLow<vixl32::Register>(),
+ 31);
+ break;
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-long' instruction.
+ codegen_->InvokeRuntime(kQuickF2l, conversion, conversion->GetDexPc());
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ break;
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-long' instruction.
+ codegen_->InvokeRuntime(kQuickD2l, conversion, conversion->GetDexPc());
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
+ __ Ubfx(OutputRegister(conversion), in.AsRegisterPairLow<vixl32::Register>(), 0, 16);
+ break;
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ // Processing a Dex `int-to-char' instruction.
+ __ Ubfx(OutputRegister(conversion), InputRegisterAt(conversion, 0), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar: {
+ // Processing a Dex `int-to-float' instruction.
+ __ Vmov(OutputSRegister(conversion), InputRegisterAt(conversion, 0));
+ __ Vcvt(F32, I32, OutputSRegister(conversion), OutputSRegister(conversion));
+ break;
+ }
+
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-float' instruction.
+ codegen_->InvokeRuntime(kQuickL2f, conversion, conversion->GetDexPc());
+ CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ break;
+
+ case Primitive::kPrimDouble:
+ // Processing a Dex `double-to-float' instruction.
+ __ Vcvt(F32,
+ F64,
+ OutputSRegister(conversion),
+ FromLowSToD(in.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimDouble:
+ switch (input_type) {
+ case Primitive::kPrimBoolean:
+ // Boolean input is a result of code transformations.
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar: {
+ // Processing a Dex `int-to-double' instruction.
+ __ Vmov(out.AsFpuRegisterPairLow<vixl32::SRegister>(), InputRegisterAt(conversion, 0));
+ __ Vcvt(F64,
+ I32,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ out.AsFpuRegisterPairLow<vixl32::SRegister>());
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // Processing a Dex `long-to-double' instruction.
+ vixl32::Register low = in.AsRegisterPairLow<vixl32::Register>();
+ vixl32::Register high = in.AsRegisterPairHigh<vixl32::Register>();
+
+ vixl32::SRegister out_s = out.AsFpuRegisterPairLow<vixl32::SRegister>();
+ vixl32::DRegister out_d = FromLowSToD(out_s);
+
+ vixl32::SRegister temp_s =
+ locations->GetTemp(0).AsFpuRegisterPairLow<vixl32::SRegister>();
+ vixl32::DRegister temp_d = FromLowSToD(temp_s);
+
+ vixl32::SRegister constant_s =
+ locations->GetTemp(1).AsFpuRegisterPairLow<vixl32::SRegister>();
+ vixl32::DRegister constant_d = FromLowSToD(constant_s);
+
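+ // There is no single instruction converting a 64-bit integer pair to a
+ // double, so the result is assembled as double(high) * 2^32 + double(unsigned low).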
+ // temp_d = int-to-double(high)
+ __ Vmov(temp_s, high);
+ __ Vcvt(F64, I32, temp_d, temp_s);
+ // constant_d = k2Pow32EncodingForDouble
+ __ Vmov(F64,
+ constant_d,
+ vixl32::DOperand(bit_cast<double, int64_t>(k2Pow32EncodingForDouble)));
+ // out_d = unsigned-to-double(low)
+ __ Vmov(out_s, low);
+ __ Vcvt(F64, U32, out_d, out_s);
+ // out_d += temp_d * constant_d
+ __ Vmla(F64, out_d, temp_d, constant_d);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ // Processing a Dex `float-to-double' instruction.
+ __ Vcvt(F64,
+ F32,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ InputSRegisterAt(conversion, 0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitAdd(HAdd* add) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(add, LocationSummary::kNoCall);
+ switch (add->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(add->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) {
+ LocationSummary* locations = add->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (add->GetResultType()) {
+ case Primitive::kPrimInt: {
+ __ Add(OutputRegister(add), InputRegisterAt(add, 0), InputOperandAt(add, 1));
+ }
+ break;
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ DCHECK(second.IsRegisterPair());
+ __ Adds(out.AsRegisterPairLow<vixl32::Register>(),
+ first.AsRegisterPairLow<vixl32::Register>(),
+ Operand(second.AsRegisterPairLow<vixl32::Register>()));
+ __ Adc(out.AsRegisterPairHigh<vixl32::Register>(),
+ first.AsRegisterPairHigh<vixl32::Register>(),
+ second.AsRegisterPairHigh<vixl32::Register>());
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vadd(F32, OutputSRegister(add), InputSRegisterAt(add, 0), InputSRegisterAt(add, 1));
+ }
+ break;
+
+ case Primitive::kPrimDouble:
+ __ Vadd(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected add type " << add->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitSub(HSub* sub) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(sub, LocationSummary::kNoCall);
+ switch (sub->GetResultType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(sub->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitSub(HSub* sub) {
+ LocationSummary* locations = sub->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ switch (sub->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (second.IsRegister()) {
+ __ Sub(OutputRegister(sub), InputRegisterAt(sub, 0), InputRegisterAt(sub, 1));
+ } else {
+ __ Sub(OutputRegister(sub),
+ InputRegisterAt(sub, 0),
+ second.GetConstant()->AsIntConstant()->GetValue());
+ }
+ break;
+ }
+
+ // TODO: https://android-review.googlesource.com/#/c/254144/
+ case Primitive::kPrimLong: {
+ DCHECK(second.IsRegisterPair());
+ __ Subs(out.AsRegisterPairLow<vixl32::Register>(),
+ first.AsRegisterPairLow<vixl32::Register>(),
+ Operand(second.AsRegisterPairLow<vixl32::Register>()));
+ __ Sbc(out.AsRegisterPairHigh<vixl32::Register>(),
+ first.AsRegisterPairHigh<vixl32::Register>(),
+ Operand(second.AsRegisterPairHigh<vixl32::Register>()));
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vsub(F32, OutputSRegister(sub), InputSRegisterAt(sub, 0), InputSRegisterAt(sub, 1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ Vsub(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMul(HMul* mul) {
+ LocationSummary* locations = mul->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt: {
+ __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
+ break;
+ }
+ case Primitive::kPrimLong: {
+ vixl32::Register out_hi = out.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register out_lo = out.AsRegisterPairLow<vixl32::Register>();
+ vixl32::Register in1_hi = first.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register in1_lo = first.AsRegisterPairLow<vixl32::Register>();
+ vixl32::Register in2_hi = second.AsRegisterPairHigh<vixl32::Register>();
+ vixl32::Register in2_lo = second.AsRegisterPairLow<vixl32::Register>();
+
+ // Extra checks are needed because of the existence of the overlapping
+ // register pair R1_R2.
+ // The algorithm is wrong if out.hi is either in1.lo or in2.lo
+ // (e.g. in1=r0_r1, in2=r2_r3 and out=r1_r2).
+ DCHECK_NE(out_hi.GetCode(), in1_lo.GetCode());
+ DCHECK_NE(out_hi.GetCode(), in2_lo.GetCode());
+
+ // input: in1 - 64 bits, in2 - 64 bits
+ // output: out
+ // formula: out.hi : out.lo = (in1.lo * in2.hi + in1.hi * in2.lo)* 2^32 + in1.lo * in2.lo
+ // parts: out.hi = in1.lo * in2.hi + in1.hi * in2.lo + (in1.lo * in2.lo)[63:32]
+ // parts: out.lo = (in1.lo * in2.lo)[31:0]
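+ // For example, with in1 = 0x0000000100000002 and in2 = 0x0000000300000004:
+ //   out.lo = (2 * 4)[31:0]                  = 8
+ //   out.hi = 2 * 3 + 1 * 4 + (2 * 4)[63:32] = 10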
+
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ // temp <- in1.lo * in2.hi
+ __ Mul(temp, in1_lo, in2_hi);
+ // out.hi <- in1.lo * in2.hi + in1.hi * in2.lo
+ __ Mla(out_hi, in1_hi, in2_lo, temp);
+ // out.lo <- (in1.lo * in2.lo)[31:0];
+ __ Umull(out_lo, temp, in1_lo, in2_lo);
+ // out.hi <- in2.hi * in1.lo + in2.lo * in1.hi + (in1.lo * in2.lo)[63:32]
+ __ Add(out_hi, out_hi, Operand(temp));
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vmul(F32, OutputSRegister(mul), InputSRegisterAt(mul, 0), InputSRegisterAt(mul, 1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ Vmul(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitNot(HNot* not_) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (not_->GetResultType()) {
+ case Primitive::kPrimInt:
+ __ Mvn(OutputRegister(not_), InputRegisterAt(not_, 0));
+ break;
+
+ case Primitive::kPrimLong:
+ __ Mvn(out.AsRegisterPairLow<vixl32::Register>(),
+ Operand(in.AsRegisterPairLow<vixl32::Register>()));
+ __ Mvn(out.AsRegisterPairHigh<vixl32::Register>(),
+ Operand(in.AsRegisterPairHigh<vixl32::Register>()));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
+}
+
+void CodeGeneratorARMVIXL::GenerateMemoryBarrier(MemBarrierKind kind) {
+ // TODO (ported from quick): revisit ARM barrier kinds.
+ DmbOptions flavor = DmbOptions::ISH; // Quiet C++ warnings.
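+ // DMB ISH orders all memory accesses against all others within the inner
+ // shareable domain; ISHST is weaker and only orders stores against stores.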
+ switch (kind) {
+ case MemBarrierKind::kAnyStore:
+ case MemBarrierKind::kLoadAny:
+ case MemBarrierKind::kAnyAny: {
+ flavor = DmbOptions::ISH;
+ break;
+ }
+ case MemBarrierKind::kStoreStore: {
+ flavor = DmbOptions::ISHST;
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected memory barrier " << kind;
+ }
+ __ Dmb(flavor);
+}
+
+void InstructionCodeGeneratorARMVIXL::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ DCHECK(imm == 1 || imm == -1);
+
+ if (instruction->IsRem()) {
+ __ Mov(out, 0);
+ } else {
+ if (imm == 1) {
+ __ Mov(out, dividend);
+ } else {
+ __ Rsb(out, dividend, 0);
+ }
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ vixl32::Register temp = locations->GetTemp(0).AsRegister<vixl32::Register>();
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+ int ctz_imm = CTZ(abs_imm);
+
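+ // Signed division by 2^n must round toward zero, so a bias of (2^n - 1) is
+ // added to negative dividends before the arithmetic shift. The bias is built
+ // below from the dividend's sign bits, logically shifted right by (32 - n).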
+ if (ctz_imm == 1) {
+ __ Lsr(temp, dividend, 32 - ctz_imm);
+ } else {
+ __ Asr(temp, dividend, 31);
+ __ Lsr(temp, temp, 32 - ctz_imm);
+ }
+ __ Add(out, temp, Operand(dividend));
+
+ if (instruction->IsDiv()) {
+ __ Asr(out, out, ctz_imm);
+ if (imm < 0) {
+ __ Rsb(out, out, Operand(0));
+ }
+ } else {
+ __ Ubfx(out, out, 0, ctz_imm);
+ __ Sub(out, out, Operand(temp));
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ vixl32::Register out = OutputRegister(instruction);
+ vixl32::Register dividend = InputRegisterAt(instruction, 0);
+ vixl32::Register temp1 = locations->GetTemp(0).AsRegister<vixl32::Register>();
+ vixl32::Register temp2 = locations->GetTemp(1).AsRegister<vixl32::Register>();
+ int64_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+
+ int64_t magic;
+ int shift;
+ CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
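+ // The division by a constant is strength-reduced to a widening multiply by
+ // the precomputed "magic" reciprocal, followed by shifts and a sign
+ // correction (the classic technique from Hacker's Delight, chapter 10).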
+
+ __ Mov(temp1, magic);
+ __ Smull(temp2, temp1, dividend, temp1);
+
+ if (imm > 0 && magic < 0) {
+ __ Add(temp1, temp1, Operand(dividend));
+ } else if (imm < 0 && magic > 0) {
+ __ Sub(temp1, temp1, Operand(dividend));
+ }
+
+ if (shift != 0) {
+ __ Asr(temp1, temp1, shift);
+ }
+
+ if (instruction->IsDiv()) {
+ __ Sub(out, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
+ } else {
+ __ Sub(temp1, temp1, Operand(temp1, vixl32::Shift(ASR), 31));
+ // TODO: Strength reduction for mls.
+ __ Mov(temp2, imm);
+ __ Mls(out, temp1, temp2, dividend);
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateDivRemConstantIntegral(
+ HBinaryOperation* instruction) {
+ DCHECK(instruction->IsDiv() || instruction->IsRem());
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ DCHECK(second.IsConstant());
+
+ int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
+ if (imm == 0) {
+ // Do not generate anything. DivZeroCheck would prevent any code from being executed.
+ } else if (imm == 1 || imm == -1) {
+ DivRemOneOrMinusOne(instruction);
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ DivRemByPowerOfTwo(instruction);
+ } else {
+ DCHECK(imm <= -2 || imm >= 2);
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitDiv(HDiv* div) {
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if (div->GetResultType() == Primitive::kPrimLong) {
+ // pLdiv runtime call.
+ call_kind = LocationSummary::kCallOnMainOnly;
+ } else if (div->GetResultType() == Primitive::kPrimInt && div->InputAt(1)->IsConstant()) {
+ // sdiv will be replaced by another instruction sequence.
+ } else if (div->GetResultType() == Primitive::kPrimInt &&
+ !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ // pIdivmod runtime call.
+ call_kind = LocationSummary::kCallOnMainOnly;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (div->InputAt(1)->IsConstant()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ int32_t value = div->InputAt(1)->AsIntConstant()->GetValue();
+ if (value == 1 || value == 0 || value == -1) {
+ // No temp register required.
+ } else {
+ locations->AddTemp(Location::RequiresRegister());
+ if (!IsPowerOfTwo(AbsOrMin(value))) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ }
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ TODO_VIXL32(FATAL);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDiv(HDiv* div) {
+ LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+
+ switch (div->GetResultType()) {
+ case Primitive::kPrimInt: {
+ if (second.IsConstant()) {
+ GenerateDivRemConstantIntegral(div);
+ } else if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ __ Sdiv(OutputRegister(div), InputRegisterAt(div, 0), InputRegisterAt(div, 1));
+ } else {
+ TODO_VIXL32(FATAL);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ TODO_VIXL32(FATAL);
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ __ Vdiv(F32, OutputSRegister(div), InputSRegisterAt(div, 0), InputSRegisterAt(div, 1));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ Vdiv(F64,
+ FromLowSToD(out.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(first.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ FromLowSToD(second.AsFpuRegisterPairLow<vixl32::SRegister>()));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << div->GetResultType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ DivZeroCheckSlowPathARMVIXL* slow_path =
+ new (GetGraph()->GetArena()) DivZeroCheckSlowPathARMVIXL(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location value = locations->InAt(0);
+
+ switch (instruction->GetType()) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
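+ // OR the two halves together; the Z flag is set only if the full 64-bit
+ // value is zero.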
+ __ Orrs(temp,
+ value.AsRegisterPairLow<vixl32::Register>(),
+ Operand(value.AsRegisterPairHigh<vixl32::Register>()));
+ __ B(eq, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
+}
+
+void LocationsBuilderARMVIXL::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+ArmVIXLAssembler* ParallelMoveResolverARMVIXL::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverARMVIXL::EmitMove(size_t index) {
+ MoveOperands* move = moves_[index];
+ Location source = move->GetSource();
+ Location destination = move->GetDestination();
+
+ if (source.IsRegister()) {
+ if (destination.IsRegister()) {
+ __ Mov(destination.AsRegister<vixl32::Register>(), source.AsRegister<vixl32::Register>());
+ } else if (destination.IsFpuRegister()) {
+ __ Vmov(destination.AsFpuRegister<vixl32::SRegister>(),
+ source.AsRegister<vixl32::Register>());
+ } else {
+ DCHECK(destination.IsStackSlot());
+ GetAssembler()->StoreToOffset(kStoreWord,
+ source.AsRegister<vixl32::Register>(),
+ sp,
+ destination.GetStackIndex());
+ }
+ } else if (source.IsStackSlot()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsFpuRegister()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsDoubleStackSlot()) {
+ TODO_VIXL32(FATAL);
+ } else if (source.IsRegisterPair()) {
+ if (destination.IsRegisterPair()) {
+ __ Mov(destination.AsRegisterPairLow<vixl32::Register>(),
+ source.AsRegisterPairLow<vixl32::Register>());
+ __ Mov(destination.AsRegisterPairHigh<vixl32::Register>(),
+ source.AsRegisterPairHigh<vixl32::Register>());
+ } else if (destination.IsFpuRegisterPair()) {
+ __ Vmov(FromLowSToD(destination.AsFpuRegisterPairLow<vixl32::SRegister>()),
+ source.AsRegisterPairLow<vixl32::Register>(),
+ source.AsRegisterPairHigh<vixl32::Register>());
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ DCHECK(ExpectedPairLayout(source));
+ GetAssembler()->StoreToOffset(kStoreWordPair,
+ source.AsRegisterPairLow<vixl32::Register>(),
+ sp,
+ destination.GetStackIndex());
+ }
+ } else if (source.IsFpuRegisterPair()) {
+ TODO_VIXL32(FATAL);
+ } else {
+ DCHECK(source.IsConstant()) << source;
+ HConstant* constant = source.GetConstant();
+ if (constant->IsIntConstant() || constant->IsNullConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(constant);
+ if (destination.IsRegister()) {
+ __ Mov(destination.AsRegister<vixl32::Register>(), value);
+ } else {
+ DCHECK(destination.IsStackSlot());
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Mov(temp, value);
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ }
+ } else if (constant->IsLongConstant()) {
+ int64_t value = constant->AsLongConstant()->GetValue();
+ if (destination.IsRegisterPair()) {
+ __ Mov(destination.AsRegisterPairLow<vixl32::Register>(), Low32Bits(value));
+ __ Mov(destination.AsRegisterPairHigh<vixl32::Register>(), High32Bits(value));
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Mov(temp, Low32Bits(value));
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ __ Mov(temp, High32Bits(value));
+ GetAssembler()->StoreToOffset(kStoreWord,
+ temp,
+ sp,
+ destination.GetHighStackIndex(kArmWordSize));
+ }
+ } else if (constant->IsDoubleConstant()) {
+ double value = constant->AsDoubleConstant()->GetValue();
+ if (destination.IsFpuRegisterPair()) {
+ __ Vmov(F64, FromLowSToD(destination.AsFpuRegisterPairLow<vixl32::SRegister>()), value);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ uint64_t int_value = bit_cast<uint64_t, double>(value);
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ GetAssembler()->LoadImmediate(temp, Low32Bits(int_value));
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ GetAssembler()->LoadImmediate(temp, High32Bits(int_value));
+ GetAssembler()->StoreToOffset(kStoreWord,
+ temp,
+ sp,
+ destination.GetHighStackIndex(kArmWordSize));
+ }
+ } else {
+ DCHECK(constant->IsFloatConstant()) << constant->DebugName();
+ float value = constant->AsFloatConstant()->GetValue();
+ if (destination.IsFpuRegister()) {
+ __ Vmov(F32, destination.AsFpuRegister<vixl32::SRegister>(), value);
+ } else {
+ DCHECK(destination.IsStackSlot());
+ UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ GetAssembler()->LoadImmediate(temp, bit_cast<int32_t, float>(value));
+ GetAssembler()->StoreToOffset(kStoreWord, temp, sp, destination.GetStackIndex());
+ }
+ }
+ }
+}
+
+void ParallelMoveResolverARMVIXL::Exchange(Register reg, int mem) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::SpillScratch(int reg ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+void ParallelMoveResolverARMVIXL::RestoreScratch(int reg ATTRIBUTE_UNUSED) {
+ TODO_VIXL32(FATAL);
+}
+
+
+// TODO: Remove when codegen complete.
+#pragma GCC diagnostic pop
+
+#undef __
+#undef QUICK_ENTRY_POINT
+#undef TODO_VIXL32
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
new file mode 100644
index 0000000000..7b7118cb3e
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
+
+#include "code_generator_arm.h"
+#include "utils/arm/assembler_arm_vixl.h"
+
+// TODO(VIXL): make vixl clean wrt -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/constants-aarch32.h"
+#include "aarch32/instructions-aarch32.h"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
+// True if VIXL32 should be used for codegen on ARM.
+#ifdef USE_VIXL_ARM_BACKEND
+static constexpr bool kArmUseVIXL32 = true;
+#else
+static constexpr bool kArmUseVIXL32 = false;
+#endif
+
+namespace art {
+namespace arm {
+
+static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;
+static const vixl::aarch32::Register kCoreAlwaysSpillRegister = vixl::aarch32::r5;
+static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList(
+ (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) | (1 << R10) | (1 << R11) | (1 << LR));
+// Callee-saved registers s16 to s31, inclusive.
+static const vixl::aarch32::SRegisterList kFpuCalleeSaves =
+ vixl::aarch32::SRegisterList(vixl::aarch32::s16, 16);
+
+#define FOR_EACH_IMPLEMENTED_INSTRUCTION(M) \
+ M(Above) \
+ M(AboveOrEqual) \
+ M(Add) \
+ M(Below) \
+ M(BelowOrEqual) \
+ M(Div) \
+ M(DivZeroCheck) \
+ M(Equal) \
+ M(Exit) \
+ M(Goto) \
+ M(GreaterThan) \
+ M(GreaterThanOrEqual) \
+ M(If) \
+ M(IntConstant) \
+ M(LessThan) \
+ M(LessThanOrEqual) \
+ M(LongConstant) \
+ M(MemoryBarrier) \
+ M(Mul) \
+ M(Not) \
+ M(NotEqual) \
+ M(ParallelMove) \
+ M(Return) \
+ M(ReturnVoid) \
+ M(Sub) \
+ M(TypeConversion) \
+
+// TODO: Remove once the VIXL32 backend is implemented completely.
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(And) \
+ M(ArrayGet) \
+ M(ArrayLength) \
+ M(ArraySet) \
+ M(BooleanNot) \
+ M(BoundsCheck) \
+ M(BoundType) \
+ M(CheckCast) \
+ M(ClassTableGet) \
+ M(ClearException) \
+ M(ClinitCheck) \
+ M(Compare) \
+ M(CurrentMethod) \
+ M(Deoptimize) \
+ M(DoubleConstant) \
+ M(FloatConstant) \
+ M(InstanceFieldGet) \
+ M(InstanceFieldSet) \
+ M(InstanceOf) \
+ M(InvokeInterface) \
+ M(InvokeStaticOrDirect) \
+ M(InvokeUnresolved) \
+ M(InvokeVirtual) \
+ M(LoadClass) \
+ M(LoadException) \
+ M(LoadString) \
+ M(MonitorOperation) \
+ M(NativeDebugInfo) \
+ M(Neg) \
+ M(NewArray) \
+ M(NewInstance) \
+ M(NullCheck) \
+ M(NullConstant) \
+ M(Or) \
+ M(PackedSwitch) \
+ M(ParameterValue) \
+ M(Phi) \
+ M(Rem) \
+ M(Ror) \
+ M(Select) \
+ M(Shl) \
+ M(Shr) \
+ M(StaticFieldGet) \
+ M(StaticFieldSet) \
+ M(SuspendCheck) \
+ M(Throw) \
+ M(TryBoundary) \
+ M(UnresolvedInstanceFieldGet) \
+ M(UnresolvedInstanceFieldSet) \
+ M(UnresolvedStaticFieldGet) \
+ M(UnresolvedStaticFieldSet) \
+ M(UShr) \
+ M(Xor) \
+
+class CodeGeneratorARMVIXL;
+
+class SlowPathCodeARMVIXL : public SlowPathCode {
+ public:
+ explicit SlowPathCodeARMVIXL(HInstruction* instruction)
+ : SlowPathCode(instruction), entry_label_(), exit_label_() {}
+
+ vixl::aarch32::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::aarch32::Label* GetExitLabel() { return &exit_label_; }
+
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+
+ private:
+ vixl::aarch32::Label entry_label_;
+ vixl::aarch32::Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARMVIXL);
+};
+
+class ParallelMoveResolverARMVIXL : public ParallelMoveResolverWithSwap {
+ public:
+ ParallelMoveResolverARMVIXL(ArenaAllocator* allocator, CodeGeneratorARMVIXL* codegen)
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+
+ ArmVIXLAssembler* GetAssembler() const;
+
+ private:
+ void Exchange(Register reg, int mem);
+ void Exchange(int mem1, int mem2);
+
+ CodeGeneratorARMVIXL* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARMVIXL);
+};
+
+#define DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR(Name) \
+ void Visit##Name(H##Name*) OVERRIDE;
+
+#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR(Name) \
+ void Visit##Name(H##Name* instr) OVERRIDE { \
+    VisitUnimplementedInstruction(instr); }
+
+class LocationsBuilderARMVIXL : public HGraphVisitor {
+ public:
+ LocationsBuilderARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+ FOR_EACH_IMPLEMENTED_INSTRUCTION(DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR)
+
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR)
+
+ private:
+  void VisitUnimplementedInstruction(HInstruction* instruction) {
+ LOG(FATAL) << "Unimplemented Instruction: " << instruction->DebugName();
+ }
+
+ void HandleCondition(HCondition* condition);
+
+ CodeGeneratorARMVIXL* const codegen_;
+ InvokeDexCallingConventionVisitorARM parameter_visitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARMVIXL);
+};
+
+class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
+ public:
+ InstructionCodeGeneratorARMVIXL(HGraph* graph, CodeGeneratorARMVIXL* codegen);
+
+ FOR_EACH_IMPLEMENTED_INSTRUCTION(DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR)
+
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR)
+
+ ArmVIXLAssembler* GetAssembler() const { return assembler_; }
+ vixl::aarch32::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
+
+ private:
+  void VisitUnimplementedInstruction(HInstruction* instruction) {
+ LOG(FATAL) << "Unimplemented Instruction: " << instruction->DebugName();
+ }
+
+ void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
+ void HandleGoto(HInstruction* got, HBasicBlock* successor);
+ void HandleCondition(HCondition* condition);
+ void GenerateTestAndBranch(HInstruction* instruction,
+ size_t condition_input_index,
+ vixl::aarch32::Label* true_target,
+ vixl::aarch32::Label* false_target);
+ void GenerateCompareTestAndBranch(HCondition* condition,
+ vixl::aarch32::Label* true_target,
+ vixl::aarch32::Label* false_target);
+ void GenerateVcmp(HInstruction* instruction);
+ void GenerateFPJumps(HCondition* cond,
+ vixl::aarch32::Label* true_label,
+ vixl::aarch32::Label* false_label);
+ void GenerateLongComparesAndJumps(HCondition* cond,
+ vixl::aarch32::Label* true_label,
+ vixl::aarch32::Label* false_label);
+ void DivRemOneOrMinusOne(HBinaryOperation* instruction);
+ void DivRemByPowerOfTwo(HBinaryOperation* instruction);
+ void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
+ void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
+
+ ArmVIXLAssembler* const assembler_;
+ CodeGeneratorARMVIXL* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARMVIXL);
+};
+
+class CodeGeneratorARMVIXL : public CodeGenerator {
+ public:
+ CodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+
+ virtual ~CodeGeneratorARMVIXL() {}
+
+ void Initialize() OVERRIDE {
+ block_labels_.resize(GetGraph()->GetBlocks().size());
+ }
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+ void Bind(HBasicBlock* block) OVERRIDE;
+ void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+ ArmVIXLAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+
+ const ArmVIXLAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+
+ vixl::aarch32::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
+
+ size_t GetWordSize() const OVERRIDE { return kArmWordSize; }
+
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return vixl::aarch32::kRegSizeInBytes; }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE;
+
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
+
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+
+ const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
+
+ vixl::aarch32::Label* GetFrameEntryLabel() { return &frame_entry_label_; }
+
+  // Saves the register to the stack. Returns the size taken on the stack.
+ size_t SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
+ return 0;
+ }
+
+  // Restores the register from the stack. Returns the size taken on the stack.
+ size_t RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
+ return 0;
+ }
+
+ size_t SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveFloatingPointRegister";
+ return 0;
+ }
+
+ size_t RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+ uint32_t reg_id ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreFloatingPointRegister";
+ return 0;
+ }
+
+ bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
+ return type == Primitive::kPrimDouble || type == Primitive::kPrimLong;
+ }
+
+ void ComputeSpillMask() OVERRIDE;
+
+ void GenerateImplicitNullCheck(HNullCheck* null_check) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* null_check) OVERRIDE;
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ return &move_resolver_;
+ }
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path = nullptr) OVERRIDE;
+
+ // Generate code to invoke a runtime entry point, but do not record
+ // PC-related information in a stack map.
+ void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
+
+ void GenerateInvokeRuntime(int32_t entry_point_offset);
+
+ // Check if the desired_string_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadString::LoadKind GetSupportedLoadStringKind(
+ HLoadString::LoadKind desired_string_load_kind) OVERRIDE;
+
+ // Check if the desired_class_load_kind is supported. If it is, return it,
+ // otherwise return a fall-back kind that should be used instead.
+ HLoadClass::LoadKind GetSupportedLoadClassKind(
+ HLoadClass::LoadKind desired_class_load_kind) OVERRIDE;
+
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
+
+ void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
+
+ void GenerateNop() OVERRIDE;
+
+ vixl::aarch32::Label* GetLabelOf(HBasicBlock* block) {
+ block = FirstNonEmptyBlock(block);
+ return &(block_labels_[block->GetBlockId()]);
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ // We use a deque so that the `vixl::aarch32::Label` objects do not move in memory.
+ ArenaDeque<vixl::aarch32::Label> block_labels_; // Indexed by block id.
+ vixl::aarch32::Label frame_entry_label_;
+
+ LocationsBuilderARMVIXL location_builder_;
+ InstructionCodeGeneratorARMVIXL instruction_visitor_;
+ ParallelMoveResolverARMVIXL move_resolver_;
+
+ ArmVIXLAssembler assembler_;
+ const ArmInstructionSetFeatures& isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
+};
+
+#undef FOR_EACH_IMPLEMENTED_INSTRUCTION
+#undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
+#undef DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR
+#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR
+
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM_VIXL_H_
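
For orientation, a sketch of what the visitor macros above expand to inside LocationsBuilderARMVIXL and InstructionCodeGeneratorARMVIXL (illustrative only): each name in FOR_EACH_IMPLEMENTED_INSTRUCTION becomes a plain declaration whose body lives in code_generator_arm_vixl.cc, while each name in FOR_EACH_UNIMPLEMENTED_INSTRUCTION gets an inline stub that aborts with the instruction's name.

    // DEFINE_IMPLEMENTED_INSTRUCTION_VISITOR(Add) expands to:
    void VisitAdd(HAdd*) OVERRIDE;

    // DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITOR(And) expands to (reformatted):
    void VisitAnd(HAnd* instr) OVERRIDE {
      VisitUnimplementedInstruction(instr);  // LOG(FATAL) naming the instruction
    }
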
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b2e75952a0..5c0ca85c78 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -194,10 +194,6 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -2141,15 +2137,13 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -2664,14 +2658,8 @@ void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
}
void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -3725,7 +3713,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -4339,7 +4327,7 @@ Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticO
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method ATTRIBUTE_UNUSED) {
+ HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
// We disable PC-relative load when there is an irreducible loop, as the optimization
// is incompatible with it.
@@ -4408,13 +4396,16 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke
}
switch (method_load_kind) {
- case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
// temp = thread->string_init_entrypoint
+ uint32_t offset =
+ GetThreadOffset<kMipsPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
__ LoadFromOffset(kLoadWord,
temp.AsRegister<Register>(),
TR,
- invoke->GetStringInitOffset());
+ offset);
break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
@@ -5112,7 +5103,8 @@ void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
- codegen_->CreateNullCheckLocations(instruction);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
}
void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -5406,7 +5398,7 @@ void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 553a7e6674..f943978b3b 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -395,7 +395,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 557e5da916..02576bda67 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -150,10 +150,6 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -1558,15 +1554,13 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -2110,14 +2104,8 @@ void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
}
void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -2630,7 +2618,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -2984,7 +2972,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method ATTRIBUTE_UNUSED) {
+ HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
switch (desired_dispatch_info.method_load_kind) {
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
@@ -3018,13 +3006,16 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
- case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
// temp = thread->string_init_entrypoint
+ uint32_t offset =
+ GetThreadOffset<kMips64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
__ LoadFromOffset(kLoadDoubleword,
temp.AsRegister<GpuRegister>(),
TR,
- invoke->GetStringInitOffset());
+ offset);
break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
@@ -3461,7 +3452,8 @@ void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
- codegen_->CreateNullCheckLocations(instruction);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
}
void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -3741,7 +3733,7 @@ void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 2dd409a224..690eccb7d8 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -343,7 +343,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 172ce4ab12..c3000805d1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -84,10 +84,6 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -1458,7 +1454,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -3548,10 +3544,7 @@ void InstructionCodeGeneratorX86::VisitRem(HRem* rem) {
}
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -3571,9 +3564,6 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
default:
LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -4227,7 +4217,7 @@ void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method ATTRIBUTE_UNUSED) {
+ HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = desired_dispatch_info;
// We disable pc-relative load when there is an irreducible loop, as the optimization
@@ -4286,10 +4276,13 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
- case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
// temp = thread->string_init_entrypoint
- __ fs()->movl(temp.AsRegister<Register>(), Address::Absolute(invoke->GetStringInitOffset()));
+ uint32_t offset =
+ GetThreadOffset<kX86PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ __ fs()->movl(temp.AsRegister<Register>(), Address::Absolute(offset));
break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
@@ -4307,7 +4300,7 @@ Location CodeGeneratorX86::GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticO
__ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(*invoke->GetTargetMethod().dex_file, offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -4517,7 +4510,7 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
@@ -4927,11 +4920,11 @@ void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations = codegen_->CreateNullCheckLocations(instruction);
- if (!codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
- // Explicit null checks can use any location.
- locations->SetInAt(0, Location::Any());
- }
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
+ ? Location::RequiresRegister()
+ : Location::Any();
+ locations->SetInAt(0, loc);
}
void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4978,7 +4971,7 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -5369,18 +5362,16 @@ void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
}
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
HInstruction* length = instruction->InputAt(1);
if (!length->IsEmittedAtUseSite()) {
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
}
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -5444,7 +5435,7 @@ void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction)
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5802,7 +5793,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -6099,7 +6090,7 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 5866e65d88..1ae9af3b94 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -402,7 +402,7 @@ class CodeGeneratorX86 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
// Generate a call to a static or direct method.
Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1943ddc6f7..f9a3e429d7 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -88,10 +88,6 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -737,7 +733,7 @@ inline Condition X86_64FPCondition(IfCondition cond) {
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method ATTRIBUTE_UNUSED) {
+ HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
switch (desired_dispatch_info.code_ptr_location) {
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
@@ -758,11 +754,13 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
// All registers are assumed to be correctly set up.
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
switch (invoke->GetMethodLoadKind()) {
- case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
// temp = thread->string_init_entrypoint
- __ gs()->movq(temp.AsRegister<CpuRegister>(),
- Address::Absolute(invoke->GetStringInitOffset(), /* no_rip */ true));
+ uint32_t offset =
+ GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
+ __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true));
break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
break;
@@ -779,7 +777,7 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat
Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
// Bind a new fixup label at the end of the "movl" insn.
uint32_t offset = invoke->GetDexCacheArrayOffset();
- __ Bind(NewPcRelativeDexCacheArrayPatch(*invoke->GetTargetMethod().dex_file, offset));
+ __ Bind(NewPcRelativeDexCacheArrayPatch(invoke->GetDexFile(), offset));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
@@ -1498,7 +1496,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -3618,14 +3616,8 @@ void InstructionCodeGeneratorX86_64::VisitRem(HRem* rem) {
}
void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::Any());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -4021,7 +4013,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -4396,11 +4388,11 @@ void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary* locations = codegen_->CreateNullCheckLocations(instruction);
- if (!codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
- // Explicit null checks can use any location.
- locations->SetInAt(0, Location::Any());
- }
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
+ ? Location::RequiresRegister()
+ : Location::Any();
+ locations->SetInAt(0, loc);
}
void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4447,7 +4439,7 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -4818,18 +4810,16 @@ void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction)
}
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
HInstruction* length = instruction->InputAt(1);
if (!length->IsEmittedAtUseSite()) {
locations->SetInAt(1, Location::RegisterOrConstant(length));
}
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -4912,7 +4902,7 @@ void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instructio
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5213,7 +5203,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -5488,7 +5478,7 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 7108676b8e..594f05157b 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -399,7 +399,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- MethodReference target_method) OVERRIDE;
+ HInvokeStaticOrDirect* invoke) OVERRIDE;
Location GenerateCalleeMethodStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 070cbb3894..f19faa324c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -41,6 +41,7 @@
#include "register_allocator_linear_scan.h"
#include "ssa_liveness_analysis.h"
#include "utils.h"
+#include "utils/arm/assembler_arm_vixl.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
@@ -48,6 +49,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm.h"
+#include "code_generator_arm_vixl.h"
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -117,6 +119,28 @@ class TestCodeGeneratorARM : public arm::CodeGeneratorARM {
blocked_register_pairs_[arm::R6_R7] = false;
}
};
+
+// A way to test the VIXL32-based code generator on ARM. This will replace
+// TestCodeGeneratorARM when the VIXL32-based backend replaces the existing one.
+class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
+ public:
+ TestCodeGeneratorARMVIXL(HGraph* graph,
+ const ArmInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options)
+ : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) {
+ AddAllocatedRegister(Location::RegisterLocation(arm::R6));
+ AddAllocatedRegister(Location::RegisterLocation(arm::R7));
+ }
+
+ void SetupBlockedRegisters() const OVERRIDE {
+ arm::CodeGeneratorARMVIXL::SetupBlockedRegisters();
+ blocked_core_registers_[arm::R4] = true;
+ blocked_core_registers_[arm::R6] = false;
+ blocked_core_registers_[arm::R7] = false;
+ // Makes pair R6-R7 available.
+ blocked_register_pairs_[arm::R6_R7] = false;
+ }
+};
#endif
#ifdef ART_ENABLE_CODEGEN_x86
@@ -296,6 +320,13 @@ CodeGenerator* create_codegen_arm(HGraph* graph, const CompilerOptions& compiler
*features_arm.get(),
compiler_options);
}
+
+CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
+ std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
+ ArmInstructionSetFeatures::FromCppDefines());
+ return new (graph->GetArena())
+ TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
+}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
@@ -351,6 +382,7 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
#ifdef ART_ENABLE_CODEGEN_arm
CodegenTargetConfig(kArm, create_codegen_arm),
CodegenTargetConfig(kThumb2, create_codegen_arm),
+ CodegenTargetConfig(kArm, create_codegen_arm_vixl32),
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
CodegenTargetConfig(kArm64, create_codegen_arm64),
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
new file mode 100644
index 0000000000..853541754d
--- /dev/null
+++ b/compiler/optimizing/common_arm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
+#define ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
+
+// TODO(VIXL): Make VIXL compile with -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#include "aarch32/macro-assembler-aarch32.h"
+#pragma GCC diagnostic pop
+
+namespace art {
+namespace arm {
+namespace helpers {
+
+static_assert(vixl::aarch32::kSpCode == SP, "vixl::aarch32::kSpCode must equal ART's SP");
+
+inline dwarf::Reg DWARFReg(vixl::aarch32::Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
+}
+
+inline dwarf::Reg DWARFReg(vixl::aarch32::SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
+}
+
+inline vixl::aarch32::DRegister FromLowSToD(vixl::aarch32::SRegister reg) {
+ DCHECK_EQ(reg.GetCode() % 2, 0u) << reg;
+ return vixl::aarch32::DRegister(reg.GetCode() / 2);
+}
+
+inline vixl::aarch32::Register RegisterFrom(Location location) {
+ DCHECK(location.IsRegister()) << location;
+ return vixl::aarch32::Register(location.reg());
+}
+
+inline vixl::aarch32::Register RegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type;
+ return RegisterFrom(location);
+}
+
+inline vixl::aarch32::DRegister DRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister()) << location;
+ return vixl::aarch32::DRegister(location.reg());
+}
+
+inline vixl::aarch32::SRegister SRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister()) << location;
+ return vixl::aarch32::SRegister(location.reg());
+}
+
+inline vixl::aarch32::SRegister OutputSRegister(HInstruction* instr) {
+ Primitive::Type type = instr->GetType();
+ DCHECK_EQ(type, Primitive::kPrimFloat) << type;
+ return SRegisterFrom(instr->GetLocations()->Out());
+}
+
+inline vixl::aarch32::DRegister OutputDRegister(HInstruction* instr) {
+ Primitive::Type type = instr->GetType();
+ DCHECK_EQ(type, Primitive::kPrimDouble) << type;
+ return DRegisterFrom(instr->GetLocations()->Out());
+}
+
+inline vixl::aarch32::SRegister InputSRegisterAt(HInstruction* instr, int input_index) {
+ Primitive::Type type = instr->InputAt(input_index)->GetType();
+ DCHECK_EQ(type, Primitive::kPrimFloat) << type;
+ return SRegisterFrom(instr->GetLocations()->InAt(input_index));
+}
+
+inline vixl::aarch32::DRegister InputDRegisterAt(HInstruction* instr, int input_index) {
+ Primitive::Type type = instr->InputAt(input_index)->GetType();
+ DCHECK_EQ(type, Primitive::kPrimDouble) << type;
+ return DRegisterFrom(instr->GetLocations()->InAt(input_index));
+}
+
+inline vixl::aarch32::Register OutputRegister(HInstruction* instr) {
+ return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+inline vixl::aarch32::Register InputRegisterAt(HInstruction* instr, int input_index) {
+ return RegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+inline int64_t Int64ConstantFrom(Location location) {
+ HConstant* instr = location.GetConstant();
+ if (instr->IsIntConstant()) {
+ return instr->AsIntConstant()->GetValue();
+ } else if (instr->IsNullConstant()) {
+ return 0;
+ } else {
+ DCHECK(instr->IsLongConstant()) << instr->DebugName();
+ return instr->AsLongConstant()->GetValue();
+ }
+}
+
+inline vixl::aarch32::Operand OperandFrom(Location location, Primitive::Type type) {
+ if (location.IsRegister()) {
+ return vixl::aarch32::Operand(RegisterFrom(location, type));
+ } else {
+ return vixl::aarch32::Operand(Int64ConstantFrom(location));
+ }
+}
+
+inline vixl::aarch32::Operand InputOperandAt(HInstruction* instr, int input_index) {
+ return OperandFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+} // namespace helpers
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
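
A hedged sketch of how these helpers are meant to be consumed by the VIXL32 visitors (illustrative only; the real HAdd handling in code_generator_arm_vixl.cc also covers long and floating-point types, and `__` abbreviates the VIXL macro assembler as in that file):

    using helpers::InputOperandAt;
    using helpers::InputRegisterAt;
    using helpers::OutputRegister;

    void InstructionCodeGeneratorARMVIXL::VisitAdd(HAdd* add) {
      // kPrimInt case only: out = in0 + in1, where in1 may be a register
      // or an encodable immediate, courtesy of OperandFrom().
      __ Add(OutputRegister(add), InputRegisterAt(add, 0), InputOperandAt(add, 1));
    }
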
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index eda0971ecc..776a483d43 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -273,9 +273,9 @@ inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
// only SP/WSP and XZR/WZR codes are different between art and vixl.
// Note: This function is only used for debug checks.
inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
- size_t num_core,
- uint32_t art_fpu_registers,
- size_t num_fpu) {
+ size_t num_core,
+ uint32_t art_fpu_registers,
+ size_t num_fpu) {
// The register masks won't work if the number of register is larger than 32.
DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
index 6ad9b07f1a..7010171c80 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -82,12 +82,10 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
// we need to add the dex cache arrays base as the special input.
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderARM>(invoke, codegen_)) {
- // Initialize base for target method dex file if needed.
- MethodReference target_method = invoke->GetTargetMethod();
- HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(*target_method.dex_file);
+ HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
// Update the element offset in base.
- DexCacheArraysLayout layout(kArmPointerSize, target_method.dex_file);
- base->UpdateElementOffset(layout.MethodOffset(target_method.dex_method_index));
+ DexCacheArraysLayout layout(kArmPointerSize, &invoke->GetDexFile());
+ base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
invoke->AddSpecialInput(base);
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.cc b/compiler/optimizing/dex_cache_array_fixups_mips.cc
index 300284d04c..4456b49e87 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.cc
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.cc
@@ -89,11 +89,10 @@ class DexCacheArrayFixupsVisitor : public HGraphVisitor {
if (invoke->HasPcRelativeDexCache() &&
!IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
// Initialize base for target method dex file if needed.
- MethodReference target_method = invoke->GetTargetMethod();
- HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(*target_method.dex_file);
+ HMipsDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke->GetDexFile());
// Update the element offset in base.
- DexCacheArraysLayout layout(kMipsPointerSize, target_method.dex_file);
- base->UpdateElementOffset(layout.MethodOffset(target_method.dex_method_index));
+ DexCacheArraysLayout layout(kMipsPointerSize, &invoke->GetDexFile());
+ base->UpdateElementOffset(layout.MethodOffset(invoke->GetDexMethodIndex()));
// Add the special argument base to the method.
DCHECK(!invoke->HasCurrentMethodInput());
invoke->AddSpecialInput(base);
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index b3d5341de0..912ee29cdb 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -447,7 +447,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
VisitInvoke(invoke);
- StartAttributeStream("invoke_type") << invoke->GetOriginalInvokeType();
+ StartAttributeStream("invoke_type") << invoke->GetInvokeType();
}
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index ce53134235..f21dc0e7e4 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -263,42 +263,24 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
return false; // Don't bother to move further if we know the method is unresolved.
}
- uint32_t method_index = invoke_instruction->GetDexMethodIndex();
ScopedObjectAccess soa(Thread::Current());
+ uint32_t method_index = invoke_instruction->GetDexMethodIndex();
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
VLOG(compiler) << "Try inlining " << PrettyMethod(method_index, caller_dex_file);
- ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
// We can query the dex cache directly. The verifier has populated it already.
- ArtMethod* resolved_method;
+ ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
ArtMethod* actual_method = nullptr;
- if (invoke_instruction->IsInvokeStaticOrDirect()) {
- if (invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit()) {
- VLOG(compiler) << "Not inlining a String.<init> method";
- return false;
- }
- MethodReference ref = invoke_instruction->AsInvokeStaticOrDirect()->GetTargetMethod();
- mirror::DexCache* const dex_cache = IsSameDexFile(caller_dex_file, *ref.dex_file)
- ? caller_compilation_unit_.GetDexCache().Get()
- : class_linker->FindDexCache(soa.Self(), *ref.dex_file);
- resolved_method = dex_cache->GetResolvedMethod(
- ref.dex_method_index, class_linker->GetImagePointerSize());
- // actual_method == resolved_method for direct or static calls.
- actual_method = resolved_method;
- } else {
- resolved_method = caller_compilation_unit_.GetDexCache().Get()->GetResolvedMethod(
- method_index, class_linker->GetImagePointerSize());
- if (resolved_method != nullptr) {
- // Check if we can statically find the method.
- actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
- }
- }
-
if (resolved_method == nullptr) {
- // TODO: Can this still happen?
- // Method cannot be resolved if it is in another dex file we do not have access to.
- VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file);
+ DCHECK(invoke_instruction->IsInvokeStaticOrDirect());
+ DCHECK(invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit());
+ VLOG(compiler) << "Not inlining a String.<init> method";
return false;
+ } else if (invoke_instruction->IsInvokeStaticOrDirect()) {
+ actual_method = resolved_method;
+ } else {
+ // Check if we can statically find the method.
+ actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
}
if (actual_method != nullptr) {
@@ -763,9 +745,9 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* metho
// 2) We will not go to the conflict trampoline with an invoke-virtual.
// TODO: Consider sharpening once it is not dependent on the compiler driver.
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
- uint32_t method_index = FindMethodIndexIn(
+ uint32_t dex_method_index = FindMethodIndexIn(
method, caller_dex_file, invoke_instruction->GetDexMethodIndex());
- if (method_index == DexFile::kDexNoIndex) {
+ if (dex_method_index == DexFile::kDexNoIndex) {
return false;
}
HInvokeVirtual* new_invoke = new (graph_->GetArena()) HInvokeVirtual(
@@ -773,7 +755,8 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* metho
invoke_instruction->GetNumberOfArguments(),
invoke_instruction->GetType(),
invoke_instruction->GetDexPc(),
- method_index,
+ dex_method_index,
+ method,
method->GetMethodIndex());
HInputsRef inputs = invoke_instruction->GetInputs();
for (size_t index = 0; index != inputs.size(); ++index) {
@@ -1122,7 +1105,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
}
- InvokeType invoke_type = invoke_instruction->GetOriginalInvokeType();
+ InvokeType invoke_type = invoke_instruction->GetInvokeType();
if (invoke_type == kInterface) {
// We have statically resolved the dispatch. To please the class linker
// at runtime, we change this call as if it was a virtual call.
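
With the resolved ArtMethod now cached on the HInvoke, the inliner no longer re-resolves through the dex cache; a null resolved method can only mean a String.<init> call that the builder intentionally left unresolved. A condensed standalone C++ sketch of the new control flow (Method, Invoke, and the helper are stand-in types, not the ART classes):

    #include <iostream>

    struct Method {};
    struct Invoke {
      Method* resolved_method;     // cached at graph-building time
      bool is_static_or_direct;
    };

    Method* FindVirtualOrInterfaceTarget(const Invoke&) { return nullptr; }  // placeholder

    bool TryInline(const Invoke& invoke) {
      Method* resolved = invoke.resolved_method;
      if (resolved == nullptr) {
        // Only String.<init> reaches here now, since the builder leaves it unresolved.
        std::cout << "Not inlining a String.<init> method\n";
        return false;
      }
      Method* actual = invoke.is_static_or_direct
          ? resolved                                // direct/static: target is known
          : FindVirtualOrInterfaceTarget(invoke);   // virtual/interface: try to devirtualize
      return actual != nullptr;
    }

    int main() {
      Invoke string_init_call{nullptr, true};
      return TryInline(string_init_call) ? 1 : 0;
    }
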
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index d7e4c53df0..3b08d9f989 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -21,6 +21,7 @@
#include "class_linker.h"
#include "dex_instruction-inl.h"
#include "driver/compiler_options.h"
+#include "imtable-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
@@ -767,6 +768,11 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
return resolved_method;
}
+static bool IsStringConstructor(ArtMethod* method) {
+ ScopedObjectAccess soa(Thread::Current());
+ return method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
+}
+
bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
uint32_t dex_pc,
uint32_t method_idx,
@@ -785,31 +791,46 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
number_of_arguments++;
}
- MethodReference target_method(dex_file_, method_idx);
+ ArtMethod* resolved_method = ResolveMethod(method_idx, invoke_type);
+
+ if (UNLIKELY(resolved_method == nullptr)) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedMethod);
+ HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_,
+ number_of_arguments,
+ return_type,
+ dex_pc,
+ method_idx,
+ invoke_type);
+ return HandleInvoke(invoke,
+ number_of_vreg_arguments,
+ args,
+ register_index,
+ is_range,
+ descriptor,
+ nullptr, /* clinit_check */
+ true /* is_unresolved */);
+ }
- // Special handling for string init.
- int32_t string_init_offset = 0;
- bool is_string_init = compiler_driver_->IsStringInit(method_idx,
- dex_file_,
- &string_init_offset);
// Replace calls to String.<init> with StringFactory.
- if (is_string_init) {
+ if (IsStringConstructor(resolved_method)) {
+ uint32_t string_init_entry_point = WellKnownClasses::StringInitToEntryPoint(resolved_method);
HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
HInvokeStaticOrDirect::MethodLoadKind::kStringInit,
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- dchecked_integral_cast<uint64_t>(string_init_offset),
+ dchecked_integral_cast<uint64_t>(string_init_entry_point),
0U
};
+ MethodReference target_method(dex_file_, method_idx);
HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
arena_,
number_of_arguments - 1,
Primitive::kPrimNot /*return_type */,
dex_pc,
method_idx,
- target_method,
+ nullptr,
dispatch_info,
invoke_type,
- kStatic /* optimized_invoke_type */,
+ target_method,
HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
return HandleStringInit(invoke,
number_of_vreg_arguments,
@@ -819,26 +840,6 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
descriptor);
}
- ArtMethod* resolved_method = ResolveMethod(method_idx, invoke_type);
-
- if (UNLIKELY(resolved_method == nullptr)) {
- MaybeRecordStat(MethodCompilationStat::kUnresolvedMethod);
- HInvoke* invoke = new (arena_) HInvokeUnresolved(arena_,
- number_of_arguments,
- return_type,
- dex_pc,
- method_idx,
- invoke_type);
- return HandleInvoke(invoke,
- number_of_vreg_arguments,
- args,
- register_index,
- is_range,
- descriptor,
- nullptr, /* clinit_check */
- true /* is_unresolved */);
- }
-
// Potential class initialization check, in the case of a static method call.
HClinitCheck* clinit_check = nullptr;
HInvoke* invoke = nullptr;
@@ -853,10 +854,9 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
dex_pc, resolved_method, method_idx, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
- // Update the target method to the one resolved. Note that this may be a no-op if
+ // Update the method index to the one resolved. Note that this may be a no-op if
// we resolved to the method referenced by the instruction.
method_idx = resolved_method->GetDexMethodIndex();
- target_method = MethodReference(dex_file_, method_idx);
}
}
@@ -866,15 +866,17 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
0u,
0U
};
+ MethodReference target_method(resolved_method->GetDexFile(),
+ resolved_method->GetDexMethodIndex());
invoke = new (arena_) HInvokeStaticOrDirect(arena_,
number_of_arguments,
return_type,
dex_pc,
method_idx,
- target_method,
+ resolved_method,
dispatch_info,
invoke_type,
- invoke_type,
+ target_method,
clinit_check_requirement);
} else if (invoke_type == kVirtual) {
ScopedObjectAccess soa(Thread::Current()); // Needed for the method index
@@ -883,16 +885,18 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
return_type,
dex_pc,
method_idx,
+ resolved_method,
resolved_method->GetMethodIndex());
} else {
DCHECK_EQ(invoke_type, kInterface);
- ScopedObjectAccess soa(Thread::Current()); // Needed for the method index
+ ScopedObjectAccess soa(Thread::Current()); // Needed for the IMT index.
invoke = new (arena_) HInvokeInterface(arena_,
number_of_arguments,
return_type,
dex_pc,
method_idx,
- resolved_method->GetImtIndex());
+ resolved_method,
+ ImTable::GetImtIndex(resolved_method));
}
return HandleInvoke(invoke,
@@ -1103,7 +1107,7 @@ bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
size_t start_index = 0;
size_t argument_index = 0;
- if (invoke->GetOriginalInvokeType() != InvokeType::kStatic) { // Instance call.
+ if (invoke->GetInvokeType() != InvokeType::kStatic) { // Instance call.
uint32_t obj_reg = is_range ? register_index : args[0];
HInstruction* arg = is_unresolved
? LoadLocal(obj_reg, Primitive::kPrimNot)
@@ -1813,7 +1817,20 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::MOVE_OBJECT:
case Instruction::MOVE_OBJECT_16:
case Instruction::MOVE_OBJECT_FROM16: {
- HInstruction* value = LoadLocal(instruction.VRegB(), Primitive::kPrimNot);
+ // The verifier has no notion of a null type, so a move-object of constant 0
+ // will lead to the same constant 0 in the destination register. To mimic
+ // this behavior, we just pretend we haven't seen a type change (int to reference)
+ // for the 0 constant and phis. We rely on our type propagation to eventually get the
+ // types correct.
+ uint32_t reg_number = instruction.VRegB();
+ HInstruction* value = (*current_locals_)[reg_number];
+ if (value->IsIntConstant()) {
+ DCHECK_EQ(value->AsIntConstant()->GetValue(), 0);
+ } else if (value->IsPhi()) {
+ DCHECK(value->GetType() == Primitive::kPrimInt || value->GetType() == Primitive::kPrimNot);
+ } else {
+ value = LoadLocal(reg_number, Primitive::kPrimNot);
+ }
UpdateLocal(instruction.VRegA(), value);
break;
}
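
The comment in the hunk above carries the reasoning: the verifier treats the integer constant 0 as a valid null reference, so the builder keeps the 0 constant (and int-or-reference phis) untouched and lets type propagation settle the types later. A condensed standalone C++ sketch of that decision (the enum and helper are illustrative stand-ins, not the HIntConstant/HPhi checks):

    // Which move-object sources are left alone versus reloaded as a reference.
    enum class SourceKind { kIntConstantZero, kAmbiguousPhi, kOther };

    bool NeedsReferenceReload(SourceKind kind) {
      switch (kind) {
        case SourceKind::kIntConstantZero:  // dex null literal: keep the constant 0
        case SourceKind::kAmbiguousPhi:     // int-or-reference phi: defer to type propagation
          return false;
        case SourceKind::kOther:
          return true;                      // genuine reference value: load as kPrimNot
      }
      return true;
    }

    int main() {
      return NeedsReferenceReload(SourceKind::kIntConstantZero) ? 1 : 0;
    }
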
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 4ca0600dba..ff829af4c2 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1577,6 +1577,18 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) {
return;
}
+ if ((input_cst != nullptr) && input_cst->IsOne()
+ && input_other->GetType() == Primitive::kPrimBoolean) {
+ // Replace code looking like
+ // XOR dst, src, 1
+ // with
+ // BOOLEAN_NOT dst, src
+ HBooleanNot* boolean_not = new (GetGraph()->GetArena()) HBooleanNot(input_other);
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, boolean_not);
+ RecordSimplification();
+ return;
+ }
+
if ((input_cst != nullptr) && AreAllBitsSet(input_cst)) {
// Replace code looking like
// XOR dst, src, 0xFFF...FF
@@ -1645,7 +1657,7 @@ void InstructionSimplifierVisitor::SimplifyRotate(HInvoke* invoke,
bool is_left,
Primitive::Type type) {
DCHECK(invoke->IsInvokeStaticOrDirect());
- DCHECK_EQ(invoke->GetOriginalInvokeType(), InvokeType::kStatic);
+ DCHECK_EQ(invoke->GetInvokeType(), InvokeType::kStatic);
HInstruction* value = invoke->InputAt(0);
HInstruction* distance = invoke->InputAt(1);
// Replace the invoke with an HRor.
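
The new XOR rule in the hunk above relies on a small identity: for a boolean value b in {0, 1}, b ^ 1 equals !b, so an HXor against the constant 1 can be replaced by an HBooleanNot. A standalone check of that identity:

    #include <cassert>

    int main() {
      for (int b : {0, 1}) {
        assert((b ^ 1) == static_cast<int>(!b));  // boolean XOR-with-1 is negation
      }
      return 0;
    }
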
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 418d59c6cb..4d4bbcf616 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -532,9 +532,7 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke, const DexFile
// inline. If the precise type is known, however, the instruction will be sharpened to an
// InvokeStaticOrDirect.
InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
- InvokeType invoke_type = invoke->IsInvokeStaticOrDirect() ?
- invoke->AsInvokeStaticOrDirect()->GetOptimizedInvokeType() :
- invoke->IsInvokeVirtual() ? kVirtual : kSuper;
+ InvokeType invoke_type = invoke->GetInvokeType();
switch (intrinsic_type) {
case kStatic:
return (invoke_type == kStatic);
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 67640a1ebf..fd2da1004b 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -657,7 +657,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 082076d79b..ce58657bcd 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -895,7 +895,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index d17f85ec8d..e61aba05b4 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1977,7 +1977,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index f8f30d9015..0f31fabbfb 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2110,7 +2110,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 1b1b3a79ab..d157509758 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -33,8 +33,8 @@ LocationSummary::LocationSummary(HInstruction* instruction,
output_overlaps_(Location::kOutputOverlap),
stack_mask_(nullptr),
register_mask_(0),
- live_registers_(),
- custom_slow_path_caller_saves_() {
+ live_registers_(RegisterSet::Empty()),
+ custom_slow_path_caller_saves_(RegisterSet::Empty()) {
instruction->SetLocations(this);
if (NeedsSafepoint()) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index c97c4a6c5b..da27928ef2 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -420,7 +420,7 @@ std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
- RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
+ static RegisterSet Empty() { return RegisterSet(); }
void Add(Location loc) {
if (loc.IsRegister()) {
@@ -465,6 +465,8 @@ class RegisterSet : public ValueObject {
}
private:
+ RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
+
uint32_t core_registers_;
uint32_t floating_point_registers_;
};
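
The RegisterSet change is the named-constructor idiom: the default constructor becomes private, so every caller has to spell out RegisterSet::Empty(), which is why the intrinsics call sites above now read SetCustomSlowPathCallerSaves(RegisterSet::Empty()). A minimal standalone C++ sketch of the pattern (class and method names here are illustrative, not the ART class):

    #include <cstdint>

    class RegisterSetSketch {
     public:
      static RegisterSetSketch Empty() { return RegisterSetSketch(); }
      void Add(uint32_t reg) { core_registers_ |= (1u << reg); }
      uint32_t Bits() const { return core_registers_; }

     private:
      RegisterSetSketch() : core_registers_(0u) {}  // hidden: forces Empty() at call sites
      uint32_t core_registers_;
    };

    int main() {
      // RegisterSetSketch implicit;                       // would not compile
      RegisterSetSketch saves = RegisterSetSketch::Empty(); // intent is explicit
      saves.Add(3);
      return saves.Bits() == (1u << 3) ? 0 : 1;
    }
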
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6d207765e3..4dc4c20003 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3742,8 +3742,8 @@ class HInvoke : public HInstruction {
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
- InvokeType GetOriginalInvokeType() const {
- return GetPackedField<OriginalInvokeTypeField>();
+ InvokeType GetInvokeType() const {
+ return GetPackedField<InvokeTypeField>();
}
Intrinsics GetIntrinsic() const {
@@ -3777,21 +3777,22 @@ class HInvoke : public HInstruction {
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
+ ArtMethod* GetResolvedMethod() const { return resolved_method_; }
+
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
protected:
- static constexpr size_t kFieldOriginalInvokeType = kNumberOfGenericPackedBits;
- static constexpr size_t kFieldOriginalInvokeTypeSize =
+ static constexpr size_t kFieldInvokeType = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldInvokeTypeSize =
MinimumBitsToStore(static_cast<size_t>(kMaxInvokeType));
static constexpr size_t kFieldReturnType =
- kFieldOriginalInvokeType + kFieldOriginalInvokeTypeSize;
+ kFieldInvokeType + kFieldInvokeTypeSize;
static constexpr size_t kFieldReturnTypeSize =
MinimumBitsToStore(static_cast<size_t>(Primitive::kPrimLast));
static constexpr size_t kFlagCanThrow = kFieldReturnType + kFieldReturnTypeSize;
static constexpr size_t kNumberOfInvokePackedBits = kFlagCanThrow + 1;
static_assert(kNumberOfInvokePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
- using OriginalInvokeTypeField =
- BitField<InvokeType, kFieldOriginalInvokeType, kFieldOriginalInvokeTypeSize>;
+ using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
using ReturnTypeField = BitField<Primitive::Type, kFieldReturnType, kFieldReturnTypeSize>;
HInvoke(ArenaAllocator* arena,
@@ -3800,23 +3801,26 @@ class HInvoke : public HInstruction {
Primitive::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
- InvokeType original_invoke_type)
+ ArtMethod* resolved_method,
+ InvokeType invoke_type)
: HInstruction(
SideEffects::AllExceptGCDependency(), dex_pc), // Assume write/read on all fields/arrays.
number_of_arguments_(number_of_arguments),
+ resolved_method_(resolved_method),
inputs_(number_of_arguments + number_of_other_inputs,
arena->Adapter(kArenaAllocInvokeInputs)),
dex_method_index_(dex_method_index),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
SetPackedField<ReturnTypeField>(return_type);
- SetPackedField<OriginalInvokeTypeField>(original_invoke_type);
+ SetPackedField<InvokeTypeField>(invoke_type);
SetPackedFlag<kFlagCanThrow>(true);
}
void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); }
uint32_t number_of_arguments_;
+ ArtMethod* const resolved_method_;
ArenaVector<HUserRecord<HInstruction*>> inputs_;
const uint32_t dex_method_index_;
Intrinsics intrinsic_;
@@ -3842,6 +3846,7 @@ class HInvokeUnresolved FINAL : public HInvoke {
return_type,
dex_pc,
dex_method_index,
+ nullptr,
invoke_type) {
}
@@ -3935,10 +3940,10 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
Primitive::Type return_type,
uint32_t dex_pc,
uint32_t method_index,
- MethodReference target_method,
+ ArtMethod* resolved_method,
DispatchInfo dispatch_info,
- InvokeType original_invoke_type,
- InvokeType optimized_invoke_type,
+ InvokeType invoke_type,
+ MethodReference target_method,
ClinitCheckRequirement clinit_check_requirement)
: HInvoke(arena,
number_of_arguments,
@@ -3950,10 +3955,10 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
return_type,
dex_pc,
method_index,
- original_invoke_type),
+ resolved_method,
+ invoke_type),
target_method_(target_method),
dispatch_info_(dispatch_info) {
- SetPackedField<OptimizedInvokeTypeField>(optimized_invoke_type);
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
@@ -4017,14 +4022,6 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
uint32_t GetSpecialInputIndex() const { return GetNumberOfArguments(); }
bool HasSpecialInput() const { return GetNumberOfArguments() != InputCount(); }
- InvokeType GetOptimizedInvokeType() const {
- return GetPackedField<OptimizedInvokeTypeField>();
- }
-
- void SetOptimizedInvokeType(InvokeType invoke_type) {
- SetPackedField<OptimizedInvokeTypeField>(invoke_type);
- }
-
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
@@ -4046,12 +4043,10 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
}
}
bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; }
- MethodReference GetTargetMethod() const { return target_method_; }
- void SetTargetMethod(MethodReference method) { target_method_ = method; }
- int32_t GetStringInitOffset() const {
+ QuickEntrypointEnum GetStringInitEntryPoint() const {
DCHECK(IsStringInit());
- return dispatch_info_.method_load_data;
+ return static_cast<QuickEntrypointEnum>(dispatch_info_.method_load_data);
}
uint64_t GetMethodAddress() const {
@@ -4075,7 +4070,11 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// Is this instruction a call to a static method?
bool IsStatic() const {
- return GetOriginalInvokeType() == kStatic;
+ return GetInvokeType() == kStatic;
+ }
+
+ MethodReference GetTargetMethod() const {
+ return target_method_;
}
// Remove the HClinitCheck or the replacement HLoadClass (set as last input by
@@ -4117,26 +4116,18 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
void RemoveInputAt(size_t index);
private:
- static constexpr size_t kFieldOptimizedInvokeType = kNumberOfInvokePackedBits;
- static constexpr size_t kFieldOptimizedInvokeTypeSize =
- MinimumBitsToStore(static_cast<size_t>(kMaxInvokeType));
- static constexpr size_t kFieldClinitCheckRequirement =
- kFieldOptimizedInvokeType + kFieldOptimizedInvokeTypeSize;
+ static constexpr size_t kFieldClinitCheckRequirement = kNumberOfInvokePackedBits;
static constexpr size_t kFieldClinitCheckRequirementSize =
MinimumBitsToStore(static_cast<size_t>(ClinitCheckRequirement::kLast));
static constexpr size_t kNumberOfInvokeStaticOrDirectPackedBits =
kFieldClinitCheckRequirement + kFieldClinitCheckRequirementSize;
static_assert(kNumberOfInvokeStaticOrDirectPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
- using OptimizedInvokeTypeField =
- BitField<InvokeType, kFieldOptimizedInvokeType, kFieldOptimizedInvokeTypeSize>;
using ClinitCheckRequirementField = BitField<ClinitCheckRequirement,
kFieldClinitCheckRequirement,
kFieldClinitCheckRequirementSize>;
- // The target method may refer to different dex file or method index than the original
- // invoke. This happens for sharpened calls and for calls where a method was redeclared
- // in derived class to increase visibility.
+ // Cached values of the resolved method, to avoid needing the mutator lock.
MethodReference target_method_;
DispatchInfo dispatch_info_;
@@ -4152,8 +4143,16 @@ class HInvokeVirtual FINAL : public HInvoke {
Primitive::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
+ ArtMethod* resolved_method,
uint32_t vtable_index)
- : HInvoke(arena, number_of_arguments, 0u, return_type, dex_pc, dex_method_index, kVirtual),
+ : HInvoke(arena,
+ number_of_arguments,
+ 0u,
+ return_type,
+ dex_pc,
+ dex_method_index,
+ resolved_method,
+ kVirtual),
vtable_index_(vtable_index) {}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
@@ -4166,6 +4165,7 @@ class HInvokeVirtual FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeVirtual);
private:
+ // Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t vtable_index_;
DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
@@ -4178,8 +4178,16 @@ class HInvokeInterface FINAL : public HInvoke {
Primitive::Type return_type,
uint32_t dex_pc,
uint32_t dex_method_index,
+ ArtMethod* resolved_method,
uint32_t imt_index)
- : HInvoke(arena, number_of_arguments, 0u, return_type, dex_pc, dex_method_index, kInterface),
+ : HInvoke(arena,
+ number_of_arguments,
+ 0u,
+ return_type,
+ dex_pc,
+ dex_method_index,
+ resolved_method,
+ kInterface),
imt_index_(imt_index) {}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
@@ -4193,6 +4201,7 @@ class HInvokeInterface FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeInterface);
private:
+ // Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t imt_index_;
DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
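
The nodes.h hunks collapse the original/optimized invoke-type pair into a single InvokeTypeField and keep stacking packed fields end to end: invoke type, then return type, then the can-throw flag, with subclasses continuing from kNumberOfInvokePackedBits. A minimal standalone C++ sketch of that bit-packing scheme (widths and enum values below are illustrative, not ART's):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for the BitField helper: each field has a position
    // and width, and the next field starts where the previous one ended.
    template <typename T, size_t kPosition, size_t kSize>
    struct BitFieldSketch {
      static constexpr uint32_t Mask() { return (1u << kSize) - 1u; }
      static uint32_t Encode(uint32_t bits, T value) {
        return (bits & ~(Mask() << kPosition)) |
               ((static_cast<uint32_t>(value) & Mask()) << kPosition);
      }
      static T Decode(uint32_t bits) {
        return static_cast<T>((bits >> kPosition) & Mask());
      }
    };

    enum InvokeTypeSketch : uint32_t { kStaticS = 0, kDirectS, kVirtualS, kSuperS, kInterfaceS };

    constexpr size_t kFieldInvokeType = 0;
    constexpr size_t kFieldInvokeTypeSize = 3;
    constexpr size_t kFieldReturnType = kFieldInvokeType + kFieldInvokeTypeSize;
    constexpr size_t kFieldReturnTypeSize = 4;
    constexpr size_t kFlagCanThrow = kFieldReturnType + kFieldReturnTypeSize;

    using InvokeTypeField = BitFieldSketch<InvokeTypeSketch, kFieldInvokeType, kFieldInvokeTypeSize>;

    int main() {
      uint32_t packed = 0u;
      packed = InvokeTypeField::Encode(packed, kVirtualS);
      packed |= (1u << kFlagCanThrow);                    // set the can-throw flag
      assert(InvokeTypeField::Decode(packed) == kVirtualS);
      return 0;
    }
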
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index e64c005410..abec55f25c 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -61,44 +61,28 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
return;
}
- // TODO: Avoid CompilerDriver.
- InvokeType original_invoke_type = invoke->GetOriginalInvokeType();
- InvokeType optimized_invoke_type = original_invoke_type;
- MethodReference target_method(&graph_->GetDexFile(), invoke->GetDexMethodIndex());
- int vtable_idx;
- uintptr_t direct_code, direct_method;
- bool success = compiler_driver_->ComputeInvokeInfo(
- &compilation_unit_,
- invoke->GetDexPc(),
- false /* update_stats: already updated in builder */,
- true /* enable_devirtualization */,
- &optimized_invoke_type,
- &target_method,
- &vtable_idx,
- &direct_code,
- &direct_method);
- if (!success) {
- // TODO: try using kDexCachePcRelative. It's always a valid method load
- // kind as long as it's supported by the codegen
- return;
- }
- invoke->SetOptimizedInvokeType(optimized_invoke_type);
- invoke->SetTargetMethod(target_method);
+ HGraph* outer_graph = codegen_->GetGraph();
+ ArtMethod* compiling_method = graph_->GetArtMethod();
HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
uint64_t method_load_data = 0u;
uint64_t direct_code_ptr = 0u;
- HGraph* outer_graph = codegen_->GetGraph();
- if (target_method.dex_file == &outer_graph->GetDexFile() &&
- target_method.dex_method_index == outer_graph->GetMethodIdx()) {
+ if (invoke->GetResolvedMethod() == outer_graph->GetArtMethod()) {
+ DCHECK(outer_graph->GetArtMethod() != nullptr);
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
} else {
- bool use_pc_relative_instructions =
- ((direct_method == 0u || direct_code == static_cast<uintptr_t>(-1))) &&
- ContainsElement(compiler_driver_->GetDexFilesForOatFile(), target_method.dex_file);
+ uintptr_t direct_code, direct_method;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ compiler_driver_->GetCodeAndMethodForDirectCall(
+ (compiling_method == nullptr) ? nullptr : compiling_method->GetDeclaringClass(),
+ invoke->GetResolvedMethod(),
+ &direct_code,
+ &direct_method);
+ }
if (direct_method != 0u) { // Should we use a direct pointer to the method?
// Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while
// kDirectAddress would be fine for image methods, we don't support it at the moment.
@@ -110,13 +94,12 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
}
} else { // Use dex cache.
- DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
- if (use_pc_relative_instructions) { // Can we use PC-relative access to the dex cache arrays?
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ if (!Runtime::Current()->UseJitCompilation()) {
+ // Use PC-relative access to the dex cache arrays.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()),
&graph_->GetDexFile());
- method_load_data = layout.MethodOffset(target_method.dex_method_index);
+ method_load_data = layout.MethodOffset(invoke->GetDexMethodIndex());
} else { // We must go through the ArtMethod's pointer to resolved methods.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
}
@@ -125,10 +108,11 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
// Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and
// while kCallDirect would be fine for image methods, we don't support it at the moment.
DCHECK(!Runtime::Current()->UseJitCompilation());
+ const DexFile* dex_file_of_callee = invoke->GetTargetMethod().dex_file;
if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
direct_code_ptr = direct_code;
- } else if (use_pc_relative_instructions) {
+ } else if (ContainsElement(compiler_driver_->GetDexFilesForOatFile(), dex_file_of_callee)) {
// Use PC-relative calls for invokes within a multi-dex oat file.
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
} else { // The direct pointer will be known at link time.
@@ -151,8 +135,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
method_load_kind, code_ptr_location, method_load_data, direct_code_ptr
};
HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info,
- invoke->GetTargetMethod());
+ codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
invoke->SetDispatchInfo(dispatch_info);
}
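
After this change, sharpening keys the method-load decision off the cached resolved method: a recursive call uses kRecursive/kCallSelf, a known direct method pointer uses a direct address (with a fixup if the address is not known yet), and otherwise the dex cache is used, PC-relative for AOT and via the ArtMethod pointer for JIT. A condensed standalone C++ sketch of that decision (stand-in enum; the real code also filters through the code generator's supported dispatch kinds):

    #include <iostream>

    enum class MethodLoad { kRecursive, kDirectAddress, kDirectAddressWithFixup,
                            kDexCachePcRelative, kDexCacheViaMethod };

    MethodLoad ChooseMethodLoadKind(bool is_recursive,
                                    bool has_direct_method_ptr,
                                    bool ptr_known_now,
                                    bool jit_compilation) {
      if (is_recursive) return MethodLoad::kRecursive;
      if (has_direct_method_ptr) {
        return ptr_known_now ? MethodLoad::kDirectAddress
                             : MethodLoad::kDirectAddressWithFixup;
      }
      // AOT can use PC-relative access to the dex cache arrays; JIT cannot.
      return jit_compilation ? MethodLoad::kDexCacheViaMethod
                             : MethodLoad::kDexCachePcRelative;
    }

    int main() {
      std::cout << static_cast<int>(ChooseMethodLoadKind(false, false, false, false)) << "\n";
      return 0;
    }
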
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index f7dc112d00..03807ba1ee 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -163,18 +163,12 @@ static bool TypePhiFromInputs(HPhi* phi) {
// Replace inputs of `phi` to match its type. Return false if conflict is identified.
bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
Primitive::Type common_type = phi->GetType();
- if (common_type == Primitive::kPrimVoid || Primitive::IsIntegralType(common_type)) {
- // Phi either contains only other untyped phis (common_type == kPrimVoid),
- // or `common_type` is integral and we do not need to retype ambiguous inputs
- // because they are always constructed with the integral type candidate.
+ if (Primitive::IsIntegralType(common_type)) {
+ // We do not need to retype ambiguous inputs because they are always constructed
+ // with the integral type candidate.
if (kIsDebugBuild) {
for (HInstruction* input : phi->GetInputs()) {
- if (common_type == Primitive::kPrimVoid) {
- DCHECK(input->IsPhi() && input->GetType() == Primitive::kPrimVoid);
- } else {
- DCHECK((input->IsPhi() && input->GetType() == Primitive::kPrimVoid) ||
- HPhi::ToPhiType(input->GetType()) == common_type);
- }
+ DCHECK(HPhi::ToPhiType(input->GetType()) == common_type);
}
}
// Inputs did not need to be replaced, hence no conflict. Report success.
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 11c18b0924..931bbd368b 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -49,6 +49,7 @@ art_cc_binary {
shared_libs: [
"libart",
"libart-compiler",
+ "libbase",
"libsigchain",
],
}
@@ -62,6 +63,7 @@ art_cc_binary {
shared_libs: [
"libartd",
"libartd-compiler",
+ "libbase",
"libsigchain",
],
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index d99d2d615e..3b2715d56b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -64,6 +64,8 @@
#include "interpreter/unstarted_runtime.h"
#include "jit/offline_profiling_info.h"
#include "leb128.h"
+#include "linker/buffered_output_stream.h"
+#include "linker/file_output_stream.h"
#include "linker/multi_oat_relative_patcher.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -448,7 +450,11 @@ class WatchDog {
// it's rather easy to hang in unwinding.
// LogLine also avoids ART logging lock issues, as it's really only a wrapper around
// logcat logging or stderr output.
- LogMessage::LogLine(__FILE__, __LINE__, LogSeverity::FATAL, message.c_str());
+ android::base::LogMessage::LogLine(__FILE__,
+ __LINE__,
+ android::base::LogId::DEFAULT,
+ LogSeverity::FATAL,
+ message.c_str());
exit(1);
}
@@ -1384,7 +1390,10 @@ class Dex2Oat FINAL {
if (IsBootImage() && image_filenames_.size() > 1) {
// If we're compiling the boot image, store the boot classpath into the Key-Value store.
// We need this for the multi-image case.
- key_value_store_->Put(OatHeader::kBootClassPathKey, GetMultiImageBootClassPath());
+ key_value_store_->Put(OatHeader::kBootClassPathKey,
+ gc::space::ImageSpace::GetMultiImageBootClassPath(dex_locations_,
+ oat_filenames_,
+ image_filenames_));
}
if (!IsBootImage()) {
@@ -1753,6 +1762,28 @@ class Dex2Oat FINAL {
}
}
+ {
+ TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
+ DCHECK(IsBootImage() || oat_files_.size() == 1u);
+ DCHECK_EQ(IsBootImage(), verifier_deps_ == nullptr);
+ for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
+ File* vdex_file = vdex_files_[i].get();
+ std::unique_ptr<BufferedOutputStream> vdex_out(
+ MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(vdex_file)));
+
+ if (!oat_writers_[i]->WriteVerifierDeps(vdex_out.get(), verifier_deps_.get())) {
+ LOG(ERROR) << "Failed to write verifier dependencies into VDEX " << vdex_file->GetPath();
+ return false;
+ }
+
+ // VDEX finalized, seek back to the beginning and write the header.
+ if (!oat_writers_[i]->WriteVdexHeader(vdex_out.get())) {
+ LOG(ERROR) << "Failed to write vdex header into VDEX " << vdex_file->GetPath();
+ return false;
+ }
+ }
+ }
+
linker::MultiOatRelativePatcher patcher(instruction_set_, instruction_set_features_.get());
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
@@ -2028,49 +2059,6 @@ class Dex2Oat FINAL {
return result;
}
- std::string GetMultiImageBootClassPath() {
- DCHECK(IsBootImage());
- DCHECK_GT(oat_filenames_.size(), 1u);
- // If the image filename was adapted (e.g., for our tests), we need to change this here,
- // too, but need to strip all path components (they will be re-established when loading).
- std::ostringstream bootcp_oss;
- bool first_bootcp = true;
- for (size_t i = 0; i < dex_locations_.size(); ++i) {
- if (!first_bootcp) {
- bootcp_oss << ":";
- }
-
- std::string dex_loc = dex_locations_[i];
- std::string image_filename = image_filenames_[i];
-
- // Use the dex_loc path, but the image_filename name (without path elements).
- size_t dex_last_slash = dex_loc.rfind('/');
-
- // npos is max(size_t). That makes this a bit ugly.
- size_t image_last_slash = image_filename.rfind('/');
- size_t image_last_at = image_filename.rfind('@');
- size_t image_last_sep = (image_last_slash == std::string::npos)
- ? image_last_at
- : (image_last_at == std::string::npos)
- ? std::string::npos
- : std::max(image_last_slash, image_last_at);
- // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
-
- if (dex_last_slash == std::string::npos) {
- dex_loc = image_filename.substr(image_last_sep + 1);
- } else {
- dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
- image_filename.substr(image_last_sep + 1);
- }
-
- // Image filenames already end with .art, no need to replace.
-
- bootcp_oss << dex_loc;
- first_bootcp = false;
- }
- return bootcp_oss.str();
- }
-
std::vector<std::string> GetClassPathLocations(const std::string& class_path) {
// This function is used only for apps and for an app we have exactly one oat file.
DCHECK(!IsBootImage());
@@ -2604,6 +2592,7 @@ class Dex2Oat FINAL {
std::vector<std::unique_ptr<ElfWriter>> elf_writers_;
std::vector<std::unique_ptr<OatWriter>> oat_writers_;
std::vector<OutputStream*> rodata_;
+ std::vector<std::unique_ptr<OutputStream>> vdex_out_;
std::unique_ptr<ImageWriter> image_writer_;
std::unique_ptr<CompilerDriver> driver_;
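
The new VDEX block in dex2oat.cc writes the verifier dependencies first and only then seeks back to emit the finalized header at the start of the file. A minimal standalone C++ sketch of that write-body-then-backpatch-header pattern (plain stdio with an invented header, not the ART OutputStream or VDEX format):

    #include <cstdio>
    #include <cstring>

    int main() {
      std::FILE* f = std::fopen("example.vdex", "wb");
      if (f == nullptr) return 1;

      const char placeholder[8] = {0};
      std::fwrite(placeholder, 1, sizeof(placeholder), f);   // reserve header space

      const char deps[] = "verifier-deps-bytes";             // body written first
      std::fwrite(deps, 1, std::strlen(deps), f);

      // Body finalized: sizes/checksums are known, so seek back and write the header.
      std::fseek(f, 0, SEEK_SET);
      const char header[8] = {'v', 'd', 'e', 'x', '0', '0', '1', '\0'};
      std::fwrite(header, 1, sizeof(header), f);
      std::fclose(f);
      return 0;
    }
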
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 64f2299fa5..3e589f7c5e 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -22,7 +22,10 @@ art_cc_binary {
"dexdump.cc",
],
cflags: ["-Wall"],
- shared_libs: ["libart"],
+ shared_libs: [
+ "libart",
+ "libbase",
+ ],
}
art_cc_test {
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 163cb01120..296cdb6140 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -18,10 +18,14 @@ art_cc_binary {
srcs: [
"dexlayout_main.cc",
"dexlayout.cc",
+ "dex_ir.cc",
"dex_ir_builder.cc",
],
cflags: ["-Wall"],
- shared_libs: ["libart"],
+ shared_libs: [
+ "libart",
+ "libbase",
+ ],
}
art_cc_test {
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
new file mode 100644
index 0000000000..aff03cd6ea
--- /dev/null
+++ b/dexlayout/dex_ir.cc
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#include "dex_ir.h"
+#include "dex_ir_builder.h"
+
+namespace art {
+namespace dex_ir {
+
+static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) {
+ uint64_t value = 0;
+ for (uint32_t i = 0; i <= length; i++) {
+ value |= static_cast<uint64_t>(*(*data)++) << (i * 8);
+ }
+ if (sign_extend) {
+ int shift = (7 - length) * 8;
+ return (static_cast<int64_t>(value) << shift) >> shift;
+ }
+ return value;
+}
+
+static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) {
+ DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
+ PositionInfoVector& positions = debug_info->GetPositionInfo();
+ positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_)));
+ return false;
+}
+
+static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) {
+ DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
+ LocalInfoVector& locals = debug_info->GetLocalInfo();
+ const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
+ const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
+ locals.push_back(std::unique_ptr<LocalInfo>(
+ new LocalInfo(name, entry.descriptor_, signature, entry.start_address_,
+ entry.end_address_, entry.reg_)));
+}
+
+EncodedValue* Collections::ReadEncodedValue(const uint8_t** data) {
+ const uint8_t encoded_value = *(*data)++;
+ const uint8_t type = encoded_value & 0x1f;
+ EncodedValue* item = new EncodedValue(type);
+ ReadEncodedValue(data, type, encoded_value >> 5, item);
+ return item;
+}
+
+EncodedValue* Collections::ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length) {
+ EncodedValue* item = new EncodedValue(type);
+ ReadEncodedValue(data, type, length, item);
+ return item;
+}
+
+void Collections::ReadEncodedValue(
+ const uint8_t** data, uint8_t type, uint8_t length, EncodedValue* item) {
+ switch (type) {
+ case DexFile::kDexAnnotationByte:
+ item->SetByte(static_cast<int8_t>(ReadVarWidth(data, length, false)));
+ break;
+ case DexFile::kDexAnnotationShort:
+ item->SetShort(static_cast<int16_t>(ReadVarWidth(data, length, true)));
+ break;
+ case DexFile::kDexAnnotationChar:
+ item->SetChar(static_cast<uint16_t>(ReadVarWidth(data, length, false)));
+ break;
+ case DexFile::kDexAnnotationInt:
+ item->SetInt(static_cast<int32_t>(ReadVarWidth(data, length, true)));
+ break;
+ case DexFile::kDexAnnotationLong:
+ item->SetLong(static_cast<int64_t>(ReadVarWidth(data, length, true)));
+ break;
+ case DexFile::kDexAnnotationFloat: {
+ // Fill on right.
+ union {
+ float f;
+ uint32_t data;
+ } conv;
+ conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8;
+ item->SetFloat(conv.f);
+ break;
+ }
+ case DexFile::kDexAnnotationDouble: {
+ // Fill on right.
+ union {
+ double d;
+ uint64_t data;
+ } conv;
+ conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8;
+ item->SetDouble(conv.d);
+ break;
+ }
+ case DexFile::kDexAnnotationString: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetStringId(GetStringId(string_index));
+ break;
+ }
+ case DexFile::kDexAnnotationType: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetTypeId(GetTypeId(string_index));
+ break;
+ }
+ case DexFile::kDexAnnotationField:
+ case DexFile::kDexAnnotationEnum: {
+ const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetFieldId(GetFieldId(field_index));
+ break;
+ }
+ case DexFile::kDexAnnotationMethod: {
+ const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetMethodId(GetMethodId(method_index));
+ break;
+ }
+ case DexFile::kDexAnnotationArray: {
+ EncodedValueVector* values = new EncodedValueVector();
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ // Decode all elements.
+ for (uint32_t i = 0; i < size; i++) {
+ values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(data)));
+ }
+ item->SetEncodedArray(new EncodedArrayItem(values));
+ break;
+ }
+ case DexFile::kDexAnnotationAnnotation: {
+ AnnotationElementVector* elements = new AnnotationElementVector();
+ const uint32_t type_idx = DecodeUnsignedLeb128(data);
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ // Decode all name=value pairs.
+ for (uint32_t i = 0; i < size; i++) {
+ const uint32_t name_index = DecodeUnsignedLeb128(data);
+ elements->push_back(std::unique_ptr<AnnotationElement>(
+ new AnnotationElement(GetStringId(name_index), ReadEncodedValue(data))));
+ }
+ item->SetEncodedAnnotation(new EncodedAnnotation(GetTypeId(type_idx), elements));
+ break;
+ }
+ case DexFile::kDexAnnotationNull:
+ break;
+ case DexFile::kDexAnnotationBoolean:
+ item->SetBoolean(length != 0);
+ break;
+ default:
+ break;
+ }
+}
+
+void Collections::CreateStringId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::StringId& disk_string_id = dex_file.GetStringId(i);
+ StringData* string_data = new StringData(dex_file.GetStringData(disk_string_id));
+ string_datas_.AddItem(string_data, disk_string_id.string_data_off_);
+
+ StringId* string_id = new StringId(string_data);
+ string_ids_.AddIndexedItem(string_id, StringIdsOffset() + i * StringId::ItemSize(), i);
+}
+
+void Collections::CreateTypeId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(i);
+ TypeId* type_id = new TypeId(GetStringId(disk_type_id.descriptor_idx_));
+ type_ids_.AddIndexedItem(type_id, TypeIdsOffset() + i * TypeId::ItemSize(), i);
+}
+
+void Collections::CreateProtoId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(i);
+ const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
+ TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_, true);
+
+ ProtoId* proto_id = new ProtoId(GetStringId(disk_proto_id.shorty_idx_),
+ GetTypeId(disk_proto_id.return_type_idx_),
+ parameter_type_list);
+ proto_ids_.AddIndexedItem(proto_id, ProtoIdsOffset() + i * ProtoId::ItemSize(), i);
+}
+
+void Collections::CreateFieldId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
+ FieldId* field_id = new FieldId(GetTypeId(disk_field_id.class_idx_),
+ GetTypeId(disk_field_id.type_idx_),
+ GetStringId(disk_field_id.name_idx_));
+ field_ids_.AddIndexedItem(field_id, FieldIdsOffset() + i * FieldId::ItemSize(), i);
+}
+
+void Collections::CreateMethodId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
+ MethodId* method_id = new MethodId(GetTypeId(disk_method_id.class_idx_),
+ GetProtoId(disk_method_id.proto_idx_),
+ GetStringId(disk_method_id.name_idx_));
+ method_ids_.AddIndexedItem(method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i);
+}
+
+void Collections::CreateClassDef(const DexFile& dex_file, uint32_t i) {
+ const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
+ const TypeId* class_type = GetTypeId(disk_class_def.class_idx_);
+ uint32_t access_flags = disk_class_def.access_flags_;
+ const TypeId* superclass = GetTypeIdOrNullPtr(disk_class_def.superclass_idx_);
+
+ const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
+ TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_, false);
+
+ const StringId* source_file = GetStringIdOrNullPtr(disk_class_def.source_file_idx_);
+ // Annotations.
+ AnnotationsDirectoryItem* annotations = nullptr;
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
+ dex_file.GetAnnotationsDirectory(disk_class_def);
+ if (disk_annotations_directory_item != nullptr) {
+ annotations = CreateAnnotationsDirectoryItem(
+ dex_file, disk_annotations_directory_item, disk_class_def.annotations_off_);
+ }
+ // Static field initializers.
+ const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
+ EncodedArrayItem* static_values =
+ CreateEncodedArrayItem(static_data, disk_class_def.static_values_off_);
+ ClassData* class_data = CreateClassData(
+ dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_);
+ ClassDef* class_def = new ClassDef(class_type, access_flags, superclass, interfaces_type_list,
+ source_file, annotations, static_values, class_data);
+ class_defs_.AddIndexedItem(class_def, ClassDefsOffset() + i * ClassDef::ItemSize(), i);
+}
+
+TypeList* Collections::CreateTypeList(
+ const DexFile::TypeList* dex_type_list, uint32_t offset, bool allow_empty) {
+ if (dex_type_list == nullptr && !allow_empty) {
+ return nullptr;
+ }
+ // TODO: Create more efficient lookup for existing type lists.
+ for (std::unique_ptr<TypeList>& type_list : TypeLists()) {
+ if (type_list->GetOffset() == offset) {
+ return type_list.get();
+ }
+ }
+ TypeIdVector* type_vector = new TypeIdVector();
+ uint32_t size = dex_type_list == nullptr ? 0 : dex_type_list->Size();
+ for (uint32_t index = 0; index < size; ++index) {
+ type_vector->push_back(GetTypeId(dex_type_list->GetTypeItem(index).type_idx_));
+ }
+ TypeList* new_type_list = new TypeList(type_vector);
+ type_lists_.AddItem(new_type_list, offset);
+ return new_type_list;
+}
+
+EncodedArrayItem* Collections::CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset) {
+ if (static_data == nullptr) {
+ return nullptr;
+ }
+ uint32_t size = DecodeUnsignedLeb128(&static_data);
+ EncodedValueVector* values = new EncodedValueVector();
+ for (uint32_t i = 0; i < size; ++i) {
+ values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(&static_data)));
+ }
+ // TODO: Calculate the size of the encoded array.
+ EncodedArrayItem* encoded_array_item = new EncodedArrayItem(values);
+ encoded_array_items_.AddItem(encoded_array_item, offset);
+ return encoded_array_item;
+}
+
+AnnotationItem* Collections::CreateAnnotationItem(const DexFile::AnnotationItem* annotation,
+ uint32_t offset) {
+ uint8_t visibility = annotation->visibility_;
+ const uint8_t* annotation_data = annotation->annotation_;
+ EncodedValue* encoded_value =
+ ReadEncodedValue(&annotation_data, DexFile::kDexAnnotationAnnotation, 0);
+ // TODO: Calculate the size of the annotation.
+ AnnotationItem* annotation_item =
+ new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation());
+ annotation_items_.AddItem(annotation_item, offset);
+ return annotation_item;
+}
+
+
+AnnotationSetItem* Collections::CreateAnnotationSetItem(const DexFile& dex_file,
+ const DexFile::AnnotationSetItem& disk_annotations_item, uint32_t offset) {
+ if (disk_annotations_item.size_ == 0) {
+ return nullptr;
+ }
+ std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
+ for (uint32_t i = 0; i < disk_annotations_item.size_; ++i) {
+ const DexFile::AnnotationItem* annotation =
+ dex_file.GetAnnotationItem(&disk_annotations_item, i);
+ if (annotation == nullptr) {
+ continue;
+ }
+ AnnotationItem* annotation_item =
+ CreateAnnotationItem(annotation, disk_annotations_item.entries_[i]);
+ items->push_back(annotation_item);
+ }
+ AnnotationSetItem* annotation_set_item = new AnnotationSetItem(items);
+ annotation_set_items_.AddItem(annotation_set_item, offset);
+ return annotation_set_item;
+}
+
+AnnotationsDirectoryItem* Collections::CreateAnnotationsDirectoryItem(const DexFile& dex_file,
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
+ const DexFile::AnnotationSetItem* class_set_item =
+ dex_file.GetClassAnnotationSet(disk_annotations_item);
+ AnnotationSetItem* class_annotation = nullptr;
+ if (class_set_item != nullptr) {
+ uint32_t offset = disk_annotations_item->class_annotations_off_;
+ class_annotation = CreateAnnotationSetItem(dex_file, *class_set_item, offset);
+ }
+ const DexFile::FieldAnnotationsItem* fields =
+ dex_file.GetFieldAnnotations(disk_annotations_item);
+ FieldAnnotationVector* field_annotations = nullptr;
+ if (fields != nullptr) {
+ field_annotations = new FieldAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
+ FieldId* field_id = GetFieldId(fields[i].field_idx_);
+ const DexFile::AnnotationSetItem* field_set_item =
+ dex_file.GetFieldAnnotationSetItem(fields[i]);
+ uint32_t annotation_set_offset = fields[i].annotations_off_;
+ AnnotationSetItem* annotation_set_item =
+ CreateAnnotationSetItem(dex_file, *field_set_item, annotation_set_offset);
+ field_annotations->push_back(std::unique_ptr<FieldAnnotation>(
+ new FieldAnnotation(field_id, annotation_set_item)));
+ }
+ }
+ const DexFile::MethodAnnotationsItem* methods =
+ dex_file.GetMethodAnnotations(disk_annotations_item);
+ MethodAnnotationVector* method_annotations = nullptr;
+ if (methods != nullptr) {
+ method_annotations = new MethodAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
+ MethodId* method_id = GetMethodId(methods[i].method_idx_);
+ const DexFile::AnnotationSetItem* method_set_item =
+ dex_file.GetMethodAnnotationSetItem(methods[i]);
+ uint32_t annotation_set_offset = methods[i].annotations_off_;
+ AnnotationSetItem* annotation_set_item =
+ CreateAnnotationSetItem(dex_file, *method_set_item, annotation_set_offset);
+ method_annotations->push_back(std::unique_ptr<MethodAnnotation>(
+ new MethodAnnotation(method_id, annotation_set_item)));
+ }
+ }
+ const DexFile::ParameterAnnotationsItem* parameters =
+ dex_file.GetParameterAnnotations(disk_annotations_item);
+ ParameterAnnotationVector* parameter_annotations = nullptr;
+ if (parameters != nullptr) {
+ parameter_annotations = new ParameterAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
+ MethodId* method_id = GetMethodId(parameters[i].method_idx_);
+ const DexFile::AnnotationSetRefList* list =
+ dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
+ parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
+ GenerateParameterAnnotation(dex_file, method_id, list, parameters[i].annotations_off_)));
+ }
+ }
+ // TODO: Calculate the size of the annotations directory.
+ AnnotationsDirectoryItem* annotations_directory_item = new AnnotationsDirectoryItem(
+ class_annotation, field_annotations, method_annotations, parameter_annotations);
+ annotations_directory_items_.AddItem(annotations_directory_item, offset);
+ return annotations_directory_item;
+}
+
+ParameterAnnotation* Collections::GenerateParameterAnnotation(
+ const DexFile& dex_file, MethodId* method_id,
+ const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset) {
+ std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>();
+ for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
+ const DexFile::AnnotationSetItem* annotation_set_item =
+ dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
+ uint32_t set_offset = annotation_set_ref_list->list_[i].annotations_off_;
+ annotations->push_back(CreateAnnotationSetItem(dex_file, *annotation_set_item, set_offset));
+ }
+ AnnotationSetRefList* new_ref_list = new AnnotationSetRefList(annotations);
+ annotation_set_ref_lists_.AddItem(new_ref_list, offset);
+ return new ParameterAnnotation(method_id, new_ref_list);
+}
+
+CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem& disk_code_item, uint32_t offset) {
+ uint16_t registers_size = disk_code_item.registers_size_;
+ uint16_t ins_size = disk_code_item.ins_size_;
+ uint16_t outs_size = disk_code_item.outs_size_;
+ uint32_t tries_size = disk_code_item.tries_size_;
+
+ // TODO: Calculate the size of the debug info.
+ const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(&disk_code_item);
+ DebugInfoItem* debug_info = nullptr;
+ if (debug_info_stream != nullptr) {
+ debug_info = new DebugInfoItem();
+ debug_info_items_.AddItem(debug_info, disk_code_item.debug_info_off_);
+ }
+
+ uint32_t insns_size = disk_code_item.insns_size_in_code_units_;
+ uint16_t* insns = new uint16_t[insns_size];
+ memcpy(insns, disk_code_item.insns_, insns_size * sizeof(uint16_t));
+
+ TryItemVector* tries = nullptr;
+ if (tries_size > 0) {
+ tries = new TryItemVector();
+ for (uint32_t i = 0; i < tries_size; ++i) {
+ const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i);
+ uint32_t start_addr = disk_try_item->start_addr_;
+ uint16_t insn_count = disk_try_item->insn_count_;
+ CatchHandlerVector* handlers = new CatchHandlerVector();
+ for (CatchHandlerIterator it(disk_code_item, *disk_try_item); it.HasNext(); it.Next()) {
+ const uint16_t type_index = it.GetHandlerTypeIndex();
+ const TypeId* type_id = GetTypeIdOrNullPtr(type_index);
+ handlers->push_back(std::unique_ptr<const CatchHandler>(
+ new CatchHandler(type_id, it.GetHandlerAddress())));
+ }
+ TryItem* try_item = new TryItem(start_addr, insn_count, handlers);
+ tries->push_back(std::unique_ptr<const TryItem>(try_item));
+ }
+ }
+ // TODO: Calculate the size of the code item.
+ CodeItem* code_item =
+ new CodeItem(registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries);
+ code_items_.AddItem(code_item, offset);
+ return code_item;
+}
+
+MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) {
+ MethodId* method_item = GetMethodId(cdii.GetMemberIndex());
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
+ CodeItem* code_item = nullptr;
+ DebugInfoItem* debug_info = nullptr;
+ if (disk_code_item != nullptr) {
+ code_item = CreateCodeItem(dex_file, *disk_code_item, cdii.GetMethodCodeItemOffset());
+ debug_info = code_item->DebugInfo();
+ }
+ if (debug_info != nullptr) {
+ bool is_static = (access_flags & kAccStatic) != 0;
+ dex_file.DecodeDebugLocalInfo(
+ disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info);
+ dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info);
+ }
+ return new MethodItem(access_flags, method_item, code_item);
+}
+
+ClassData* Collections::CreateClassData(
+ const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) {
+ // Read the fields and methods defined by the class, resolving the circular reference from those
+ // to classes by setting class at the same time.
+ ClassData* class_data = nullptr;
+ if (encoded_data != nullptr) {
+ ClassDataItemIterator cdii(dex_file, encoded_data);
+ // Static fields.
+ FieldItemVector* static_fields = new FieldItemVector();
+ for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) {
+ FieldId* field_item = GetFieldId(cdii.GetMemberIndex());
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
+ }
+ // Instance fields.
+ FieldItemVector* instance_fields = new FieldItemVector();
+ for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) {
+ FieldId* field_item = GetFieldId(cdii.GetMemberIndex());
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ instance_fields->push_back(
+ std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
+ }
+ // Direct methods.
+ MethodItemVector* direct_methods = new MethodItemVector();
+ for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) {
+ direct_methods->push_back(
+ std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
+ }
+ // Virtual methods.
+ MethodItemVector* virtual_methods = new MethodItemVector();
+ for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) {
+ virtual_methods->push_back(
+ std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
+ }
+ // TODO: Calculate the size of the class data.
+ class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods);
+ class_datas_.AddItem(class_data, offset);
+ }
+ return class_data;
+}
+
+} // namespace dex_ir
+} // namespace art
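
ReadVarWidth in the new dex_ir.cc assembles a little-endian value from length + 1 bytes and, for signed encodings, shifts it to the top of a 64-bit word and arithmetic-shifts it back to extend the sign bit. A small standalone check of that trick (the helper reproduces the effect of the shift pair; it is not ART code):

    #include <cassert>
    #include <cstdint>

    int64_t SignExtend(uint64_t value, uint8_t length /* bytes - 1 */) {
      const int shift = (7 - length) * 8;
      // Shift the payload to the top 64 bits, then arithmetic-shift back down.
      return static_cast<int64_t>(value << shift) >> shift;
    }

    int main() {
      // One encoded byte 0xFF (length == 0) is -1 as a signed byte.
      assert(SignExtend(0xFFu, 0) == -1);
      // Two encoded bytes 0x80FF (length == 1) match the signed 16-bit value.
      assert(SignExtend(0x80FFu, 1) == static_cast<int16_t>(0x80FF));
      return 0;
    }
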
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index cbb4404234..6ae9f1c938 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -23,18 +23,23 @@
#include <stdint.h>
#include "dex_file-inl.h"
+#include "leb128.h"
namespace art {
namespace dex_ir {
// Forward declarations for classes used in containers or pointed to.
+class AnnotationItem;
class AnnotationsDirectoryItem;
class AnnotationSetItem;
-class ArrayItem;
+class AnnotationSetRefList;
class ClassData;
class ClassDef;
class CodeItem;
class DebugInfoItem;
+class EncodedAnnotation;
+class EncodedArrayItem;
+class EncodedValue;
class FieldId;
class FieldItem;
class Header;
@@ -42,10 +47,22 @@ class MapList;
class MapItem;
class MethodId;
class MethodItem;
+class ParameterAnnotation;
class ProtoId;
+class StringData;
class StringId;
class TryItem;
class TypeId;
+class TypeList;
+
+// Item size constants.
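+// These are the fixed sizes, in bytes, that the dex file format defines for the corresponding
+// items (e.g. a header_item is always 0x70 bytes).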
+static constexpr size_t kHeaderItemSize = 112;
+static constexpr size_t kStringIdItemSize = 4;
+static constexpr size_t kTypeIdItemSize = 4;
+static constexpr size_t kProtoIdItemSize = 12;
+static constexpr size_t kFieldIdItemSize = 8;
+static constexpr size_t kMethodIdItemSize = 8;
+static constexpr size_t kClassDefItemSize = 32;
// Visitor support
class AbstractDispatcher {
@@ -54,6 +71,7 @@ class AbstractDispatcher {
virtual ~AbstractDispatcher() { }
virtual void Dispatch(Header* header) = 0;
+ virtual void Dispatch(const StringData* string_data) = 0;
virtual void Dispatch(const StringId* string_id) = 0;
virtual void Dispatch(const TypeId* type_id) = 0;
virtual void Dispatch(const ProtoId* proto_id) = 0;
@@ -63,11 +81,13 @@ class AbstractDispatcher {
virtual void Dispatch(ClassDef* class_def) = 0;
virtual void Dispatch(FieldItem* field_item) = 0;
virtual void Dispatch(MethodItem* method_item) = 0;
- virtual void Dispatch(ArrayItem* array_item) = 0;
+ virtual void Dispatch(EncodedArrayItem* array_item) = 0;
virtual void Dispatch(CodeItem* code_item) = 0;
virtual void Dispatch(TryItem* try_item) = 0;
virtual void Dispatch(DebugInfoItem* debug_info_item) = 0;
+ virtual void Dispatch(AnnotationItem* annotation_item) = 0;
virtual void Dispatch(AnnotationSetItem* annotation_set_item) = 0;
+ virtual void Dispatch(AnnotationSetRefList* annotation_set_ref_list) = 0;
virtual void Dispatch(AnnotationsDirectoryItem* annotations_directory_item) = 0;
virtual void Dispatch(MapList* map_list) = 0;
virtual void Dispatch(MapItem* map_item) = 0;
@@ -82,9 +102,14 @@ template<class T> class CollectionWithOffset {
CollectionWithOffset() = default;
std::vector<std::unique_ptr<T>>& Collection() { return collection_; }
// Read-time support methods
- void AddWithPosition(uint32_t position, T* object) {
+ void AddItem(T* object, uint32_t offset) {
+ object->SetOffset(offset);
+ collection_.push_back(std::unique_ptr<T>(object));
+ }
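+ // As AddItem, but also records the item's index within its ID section.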
+ void AddIndexedItem(T* object, uint32_t offset, uint32_t index) {
+ object->SetOffset(offset);
+ object->SetIndex(index);
collection_.push_back(std::unique_ptr<T>(object));
- collection_.back()->SetOffset(position);
}
// Ordinary object insertion into collection.
void Insert(T object ATTRIBUTE_UNUSED) {
@@ -98,18 +123,160 @@ template<class T> class CollectionWithOffset {
private:
std::vector<std::unique_ptr<T>> collection_;
uint32_t offset_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(CollectionWithOffset);
};
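+// Owning container for the IR items of each dex section, along with each section's file offset.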
+class Collections {
+ public:
+ Collections() = default;
+
+ std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); }
+ std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); }
+ std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); }
+ std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); }
+ std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); }
+ std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); }
+
+ std::vector<std::unique_ptr<TypeList>>& TypeLists() { return type_lists_.Collection(); }
+ std::vector<std::unique_ptr<EncodedArrayItem>>& EncodedArrayItems()
+ { return encoded_array_items_.Collection(); }
+
+ void CreateStringId(const DexFile& dex_file, uint32_t i);
+ void CreateTypeId(const DexFile& dex_file, uint32_t i);
+ void CreateProtoId(const DexFile& dex_file, uint32_t i);
+ void CreateFieldId(const DexFile& dex_file, uint32_t i);
+ void CreateMethodId(const DexFile& dex_file, uint32_t i);
+ void CreateClassDef(const DexFile& dex_file, uint32_t i);
+
+ TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset, bool allow_empty);
+ EncodedArrayItem* CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset);
+ AnnotationItem* CreateAnnotationItem(const DexFile::AnnotationItem* annotation, uint32_t offset);
+ AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
+ const DexFile::AnnotationSetItem& disk_annotations_item, uint32_t offset);
+ AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
+ CodeItem* CreateCodeItem(
+ const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset);
+ ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
+
+ StringId* GetStringId(uint32_t index) { return StringIds()[index].get(); }
+ TypeId* GetTypeId(uint32_t index) { return TypeIds()[index].get(); }
+ ProtoId* GetProtoId(uint32_t index) { return ProtoIds()[index].get(); }
+ FieldId* GetFieldId(uint32_t index) { return FieldIds()[index].get(); }
+ MethodId* GetMethodId(uint32_t index) { return MethodIds()[index].get(); }
+ ClassDef* GetClassDef(uint32_t index) { return ClassDefs()[index].get(); }
+
+ StringId* GetStringIdOrNullPtr(uint32_t index) {
+ return index == DexFile::kDexNoIndex ? nullptr : GetStringId(index);
+ }
+ TypeId* GetTypeIdOrNullPtr(uint16_t index) {
+ return index == DexFile::kDexNoIndex16 ? nullptr : GetTypeId(index);
+ }
+
+ uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); }
+ uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); }
+ uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); }
+ uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); }
+ uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); }
+ uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); }
+ uint32_t StringDatasOffset() const { return string_datas_.GetOffset(); }
+ uint32_t TypeListsOffset() const { return type_lists_.GetOffset(); }
+ uint32_t EncodedArrayOffset() const { return encoded_array_items_.GetOffset(); }
+ uint32_t AnnotationOffset() const { return annotation_items_.GetOffset(); }
+ uint32_t AnnotationSetOffset() const { return annotation_set_items_.GetOffset(); }
+ uint32_t AnnotationSetRefListsOffset() const { return annotation_set_ref_lists_.GetOffset(); }
+ uint32_t AnnotationsDirectoryOffset() const { return annotations_directory_items_.GetOffset(); }
+ uint32_t DebugInfoOffset() const { return debug_info_items_.GetOffset(); }
+ uint32_t CodeItemsOffset() const { return code_items_.GetOffset(); }
+ uint32_t ClassDatasOffset() const { return class_datas_.GetOffset(); }
+
+ void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); }
+ void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); }
+ void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); }
+ void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); }
+ void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); }
+ void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); }
+ void SetStringDatasOffset(uint32_t new_offset) { string_datas_.SetOffset(new_offset); }
+ void SetTypeListsOffset(uint32_t new_offset) { type_lists_.SetOffset(new_offset); }
+ void SetEncodedArrayOffset(uint32_t new_offset) { encoded_array_items_.SetOffset(new_offset); }
+ void SetAnnotationOffset(uint32_t new_offset) { annotation_items_.SetOffset(new_offset); }
+ void SetAnnotationSetOffset(uint32_t new_offset) { annotation_set_items_.SetOffset(new_offset); }
+ void SetAnnotationSetRefListsOffset(uint32_t new_offset)
+ { annotation_set_ref_lists_.SetOffset(new_offset); }
+ void SetAnnotationsDirectoryOffset(uint32_t new_offset)
+ { annotations_directory_items_.SetOffset(new_offset); }
+ void SetDebugInfoOffset(uint32_t new_offset) { debug_info_items_.SetOffset(new_offset); }
+ void SetCodeItemsOffset(uint32_t new_offset) { code_items_.SetOffset(new_offset); }
+ void SetClassDatasOffset(uint32_t new_offset) { class_datas_.SetOffset(new_offset); }
+
+ uint32_t StringIdsSize() const { return string_ids_.Size(); }
+ uint32_t TypeIdsSize() const { return type_ids_.Size(); }
+ uint32_t ProtoIdsSize() const { return proto_ids_.Size(); }
+ uint32_t FieldIdsSize() const { return field_ids_.Size(); }
+ uint32_t MethodIdsSize() const { return method_ids_.Size(); }
+ uint32_t ClassDefsSize() const { return class_defs_.Size(); }
+
+ private:
+ EncodedValue* ReadEncodedValue(const uint8_t** data);
+ EncodedValue* ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length);
+ void ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length, EncodedValue* item);
+
+ ParameterAnnotation* GenerateParameterAnnotation(const DexFile& dex_file, MethodId* method_id,
+ const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset);
+ MethodItem* GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii);
+
+ CollectionWithOffset<StringId> string_ids_;
+ CollectionWithOffset<TypeId> type_ids_;
+ CollectionWithOffset<ProtoId> proto_ids_;
+ CollectionWithOffset<FieldId> field_ids_;
+ CollectionWithOffset<MethodId> method_ids_;
+ CollectionWithOffset<ClassDef> class_defs_;
+
+ CollectionWithOffset<StringData> string_datas_;
+ CollectionWithOffset<TypeList> type_lists_;
+ CollectionWithOffset<EncodedArrayItem> encoded_array_items_;
+ CollectionWithOffset<AnnotationItem> annotation_items_;
+ CollectionWithOffset<AnnotationSetItem> annotation_set_items_;
+ CollectionWithOffset<AnnotationSetRefList> annotation_set_ref_lists_;
+ CollectionWithOffset<AnnotationsDirectoryItem> annotations_directory_items_;
+ CollectionWithOffset<DebugInfoItem> debug_info_items_;
+ CollectionWithOffset<CodeItem> code_items_;
+ CollectionWithOffset<ClassData> class_datas_;
+
+ DISALLOW_COPY_AND_ASSIGN(Collections);
+};
+
class Item {
public:
+ Item() { }
virtual ~Item() { }
uint32_t GetOffset() const { return offset_; }
+ uint32_t GetSize() const { return size_; }
void SetOffset(uint32_t offset) { offset_ = offset; }
+ void SetSize(uint32_t size) { size_ = size; }
protected:
+ Item(uint32_t offset, uint32_t size) : offset_(offset), size_(size) { }
+
uint32_t offset_ = 0;
+ uint32_t size_ = 0;
+};
+
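+// An Item that additionally knows its index in the ID list it belongs to (string_ids_,
+// type_ids_, and so on).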
+class IndexedItem : public Item {
+ public:
+ IndexedItem() { }
+ virtual ~IndexedItem() { }
+
+ uint32_t GetIndex() const { return index_; }
+ void SetIndex(uint32_t index) { index_ = index; }
+
+ protected:
+ IndexedItem(uint32_t offset, uint32_t size, uint32_t index)
+ : Item(offset, size), index_(index) { }
+
+ uint32_t index_ = 0;
};
class Header : public Item {
@@ -124,7 +291,8 @@ class Header : public Item {
uint32_t link_offset,
uint32_t data_size,
uint32_t data_offset)
- : checksum_(checksum),
+ : Item(0, kHeaderItemSize),
+ checksum_(checksum),
endian_tag_(endian_tag),
file_size_(file_size),
header_size_(header_size),
@@ -137,6 +305,8 @@ class Header : public Item {
}
~Header() OVERRIDE { }
+ static size_t ItemSize() { return kHeaderItemSize; }
+
const uint8_t* Magic() const { return magic_; }
uint32_t Checksum() const { return checksum_; }
const uint8_t* Signature() const { return signature_; }
@@ -159,39 +329,7 @@ class Header : public Item {
void SetDataSize(uint32_t new_data_size) { data_size_ = new_data_size; }
void SetDataOffset(uint32_t new_data_offset) { data_offset_ = new_data_offset; }
- // Collections.
- std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); }
- std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); }
- std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); }
- std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); }
- std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); }
- std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); }
- uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); }
- uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); }
- uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); }
- uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); }
- uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); }
- uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); }
- void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); }
- void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); }
- void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); }
- void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); }
- void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); }
- void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); }
- uint32_t StringIdsSize() const { return string_ids_.Size(); }
- uint32_t TypeIdsSize() const { return type_ids_.Size(); }
- uint32_t ProtoIdsSize() const { return proto_ids_.Size(); }
- uint32_t FieldIdsSize() const { return field_ids_.Size(); }
- uint32_t MethodIdsSize() const { return method_ids_.Size(); }
- uint32_t ClassDefsSize() const { return class_defs_.Size(); }
-
- TypeId* GetTypeIdOrNullPtr(uint16_t index) {
- return index == DexFile::kDexNoIndex16 ? nullptr : TypeIds()[index].get();
- }
-
- StringId* GetStringIdOrNullPtr(uint32_t index) {
- return index == DexFile::kDexNoIndex ? nullptr : StringIds()[index].get();
- }
+ Collections& GetCollections() { return collections_; }
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
@@ -207,19 +345,16 @@ class Header : public Item {
uint32_t data_size_;
uint32_t data_offset_;
- CollectionWithOffset<StringId> string_ids_;
- CollectionWithOffset<TypeId> type_ids_;
- CollectionWithOffset<ProtoId> proto_ids_;
- CollectionWithOffset<FieldId> field_ids_;
- CollectionWithOffset<MethodId> method_ids_;
- CollectionWithOffset<ClassDef> class_defs_;
+ Collections collections_;
+
DISALLOW_COPY_AND_ASSIGN(Header);
};
-class StringId : public Item {
+class StringData : public Item {
public:
- explicit StringId(const char* data) : data_(strdup(data)) { }
- ~StringId() OVERRIDE { }
+ explicit StringData(const char* data) : data_(strdup(data)) {
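+ // Approximate item size: the ULEB128-encoded length prefix plus the raw string bytes.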
+ size_ = UnsignedLeb128Size(strlen(data)) + strlen(data);
+ }
const char* Data() const { return data_.get(); }
@@ -227,50 +362,95 @@ class StringId : public Item {
private:
std::unique_ptr<const char> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringData);
+};
+
+class StringId : public IndexedItem {
+ public:
+ explicit StringId(StringData* string_data) : string_data_(string_data) {
+ size_ = kStringIdItemSize;
+ }
+ ~StringId() OVERRIDE { }
+
+ static size_t ItemSize() { return kStringIdItemSize; }
+
+ const char* Data() const { return string_data_->Data(); }
+ StringData* DataItem() const { return string_data_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ StringData* string_data_;
+
DISALLOW_COPY_AND_ASSIGN(StringId);
};
-class TypeId : public Item {
+class TypeId : public IndexedItem {
public:
- explicit TypeId(StringId* string_id) : string_id_(string_id) { }
+ explicit TypeId(StringId* string_id) : string_id_(string_id) { size_ = kTypeIdItemSize; }
~TypeId() OVERRIDE { }
+ static size_t ItemSize() { return kTypeIdItemSize; }
+
StringId* GetStringId() const { return string_id_; }
void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
private:
StringId* string_id_;
+
DISALLOW_COPY_AND_ASSIGN(TypeId);
};
using TypeIdVector = std::vector<const TypeId*>;
-class ProtoId : public Item {
+class TypeList : public Item {
+ public:
+ explicit TypeList(TypeIdVector* type_list) : type_list_(type_list) {
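+ // A type_list is a 4 byte count followed by one 2 byte type index per entry.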
+ size_ = sizeof(uint32_t) + (type_list->size() * sizeof(uint16_t));
+ }
+ ~TypeList() OVERRIDE { }
+
+ const TypeIdVector* GetTypeList() const { return type_list_.get(); }
+
+ private:
+ std::unique_ptr<TypeIdVector> type_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeList);
+};
+
+class ProtoId : public IndexedItem {
public:
- ProtoId(const StringId* shorty, const TypeId* return_type, TypeIdVector* parameters)
- : shorty_(shorty), return_type_(return_type), parameters_(parameters) { }
+ ProtoId(const StringId* shorty, const TypeId* return_type, TypeList* parameters)
+ : shorty_(shorty), return_type_(return_type), parameters_(parameters)
+ { size_ = kProtoIdItemSize; }
~ProtoId() OVERRIDE { }
+ static size_t ItemSize() { return kProtoIdItemSize; }
+
const StringId* Shorty() const { return shorty_; }
const TypeId* ReturnType() const { return return_type_; }
- const std::vector<const TypeId*>& Parameters() const { return *parameters_; }
+ const TypeIdVector& Parameters() const { return *parameters_->GetTypeList(); }
void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
private:
const StringId* shorty_;
const TypeId* return_type_;
- std::unique_ptr<TypeIdVector> parameters_;
+ TypeList* parameters_;
+
DISALLOW_COPY_AND_ASSIGN(ProtoId);
};
-class FieldId : public Item {
+class FieldId : public IndexedItem {
public:
FieldId(const TypeId* klass, const TypeId* type, const StringId* name)
- : class_(klass), type_(type), name_(name) { }
+ : class_(klass), type_(type), name_(name) { size_ = kFieldIdItemSize; }
~FieldId() OVERRIDE { }
+ static size_t ItemSize() { return kFieldIdItemSize; }
+
const TypeId* Class() const { return class_; }
const TypeId* Type() const { return type_; }
const StringId* Name() const { return name_; }
@@ -281,15 +461,18 @@ class FieldId : public Item {
const TypeId* class_;
const TypeId* type_;
const StringId* name_;
+
DISALLOW_COPY_AND_ASSIGN(FieldId);
};
-class MethodId : public Item {
+class MethodId : public IndexedItem {
public:
MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name)
- : class_(klass), proto_(proto), name_(name) { }
+ : class_(klass), proto_(proto), name_(name) { size_ = kMethodIdItemSize; }
~MethodId() OVERRIDE { }
+ static size_t ItemSize() { return kMethodIdItemSize; }
+
const TypeId* Class() const { return class_; }
const ProtoId* Proto() const { return proto_; }
const StringId* Name() const { return name_; }
@@ -300,6 +483,7 @@ class MethodId : public Item {
const TypeId* class_;
const ProtoId* proto_;
const StringId* name_;
+
DISALLOW_COPY_AND_ASSIGN(MethodId);
};
@@ -317,6 +501,7 @@ class FieldItem : public Item {
private:
uint32_t access_flags_;
const FieldId* field_id_;
+
DISALLOW_COPY_AND_ASSIGN(FieldItem);
};
@@ -330,93 +515,126 @@ class MethodItem : public Item {
uint32_t GetAccessFlags() const { return access_flags_; }
const MethodId* GetMethodId() const { return method_id_; }
- const CodeItem* GetCodeItem() const { return code_.get(); }
+ const CodeItem* GetCodeItem() const { return code_; }
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
private:
uint32_t access_flags_;
const MethodId* method_id_;
- std::unique_ptr<const CodeItem> code_;
+ const CodeItem* code_;
+
DISALLOW_COPY_AND_ASSIGN(MethodItem);
};
using MethodItemVector = std::vector<std::unique_ptr<MethodItem>>;
-class ArrayItem : public Item {
- public:
- class NameValuePair {
- public:
- NameValuePair(StringId* name, ArrayItem* value)
- : name_(name), value_(value) { }
-
- StringId* Name() const { return name_; }
- ArrayItem* Value() const { return value_.get(); }
-
- private:
- StringId* name_;
- std::unique_ptr<ArrayItem> value_;
- DISALLOW_COPY_AND_ASSIGN(NameValuePair);
- };
-
- struct ArrayItemVariant {
- public:
- union {
- bool bool_val_;
- int8_t byte_val_;
- int16_t short_val_;
- uint16_t char_val_;
- int32_t int_val_;
- int64_t long_val_;
- float float_val_;
- double double_val_;
- StringId* string_val_;
- FieldId* field_val_;
- MethodId* method_val_;
- } u_;
- std::unique_ptr<std::vector<std::unique_ptr<ArrayItem>>> annotation_array_val_;
- struct {
- StringId* string_;
- std::unique_ptr<std::vector<std::unique_ptr<NameValuePair>>> array_;
- } annotation_annotation_val_;
- };
-
- explicit ArrayItem(uint8_t type) : type_(type) { }
- ~ArrayItem() OVERRIDE { }
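+// A single encoded_value: primitive values live in the union; array and annotation values are
+// owned through the unique_ptr members.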
+class EncodedValue {
+ public:
+ explicit EncodedValue(uint8_t type) : type_(type) { }
int8_t Type() const { return type_; }
- bool GetBoolean() const { return item_.u_.bool_val_; }
- int8_t GetByte() const { return item_.u_.byte_val_; }
- int16_t GetShort() const { return item_.u_.short_val_; }
- uint16_t GetChar() const { return item_.u_.char_val_; }
- int32_t GetInt() const { return item_.u_.int_val_; }
- int64_t GetLong() const { return item_.u_.long_val_; }
- float GetFloat() const { return item_.u_.float_val_; }
- double GetDouble() const { return item_.u_.double_val_; }
- StringId* GetStringId() const { return item_.u_.string_val_; }
- FieldId* GetFieldId() const { return item_.u_.field_val_; }
- MethodId* GetMethodId() const { return item_.u_.method_val_; }
- std::vector<std::unique_ptr<ArrayItem>>* GetAnnotationArray() const {
- return item_.annotation_array_val_.get();
- }
- StringId* GetAnnotationAnnotationString() const {
- return item_.annotation_annotation_val_.string_;
- }
- std::vector<std::unique_ptr<NameValuePair>>* GetAnnotationAnnotationNameValuePairArray() const {
- return item_.annotation_annotation_val_.array_.get();
- }
- // Used to construct the item union. Ugly, but necessary.
- ArrayItemVariant* GetArrayItemVariant() { return &item_; }
- void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+ void SetBoolean(bool z) { u_.bool_val_ = z; }
+ void SetByte(int8_t b) { u_.byte_val_ = b; }
+ void SetShort(int16_t s) { u_.short_val_ = s; }
+ void SetChar(uint16_t c) { u_.char_val_ = c; }
+ void SetInt(int32_t i) { u_.int_val_ = i; }
+ void SetLong(int64_t l) { u_.long_val_ = l; }
+ void SetFloat(float f) { u_.float_val_ = f; }
+ void SetDouble(double d) { u_.double_val_ = d; }
+ void SetStringId(StringId* string_id) { u_.string_val_ = string_id; }
+ void SetTypeId(TypeId* type_id) { u_.type_val_ = type_id; }
+ void SetFieldId(FieldId* field_id) { u_.field_val_ = field_id; }
+ void SetMethodId(MethodId* method_id) { u_.method_val_ = method_id; }
+ void SetEncodedArray(EncodedArrayItem* encoded_array) { encoded_array_.reset(encoded_array); }
+ void SetEncodedAnnotation(EncodedAnnotation* encoded_annotation)
+ { encoded_annotation_.reset(encoded_annotation); }
+
+ bool GetBoolean() const { return u_.bool_val_; }
+ int8_t GetByte() const { return u_.byte_val_; }
+ int16_t GetShort() const { return u_.short_val_; }
+ uint16_t GetChar() const { return u_.char_val_; }
+ int32_t GetInt() const { return u_.int_val_; }
+ int64_t GetLong() const { return u_.long_val_; }
+ float GetFloat() const { return u_.float_val_; }
+ double GetDouble() const { return u_.double_val_; }
+ StringId* GetStringId() const { return u_.string_val_; }
+ TypeId* GetTypeId() const { return u_.type_val_; }
+ FieldId* GetFieldId() const { return u_.field_val_; }
+ MethodId* GetMethodId() const { return u_.method_val_; }
+ EncodedArrayItem* GetEncodedArray() const { return encoded_array_.get(); }
+ EncodedAnnotation* GetEncodedAnnotation() const { return encoded_annotation_.get(); }
+
+ EncodedAnnotation* ReleaseEncodedAnnotation() { return encoded_annotation_.release(); }
private:
uint8_t type_;
- ArrayItemVariant item_;
- DISALLOW_COPY_AND_ASSIGN(ArrayItem);
+ union {
+ bool bool_val_;
+ int8_t byte_val_;
+ int16_t short_val_;
+ uint16_t char_val_;
+ int32_t int_val_;
+ int64_t long_val_;
+ float float_val_;
+ double double_val_;
+ StringId* string_val_;
+ TypeId* type_val_;
+ FieldId* field_val_;
+ MethodId* method_val_;
+ } u_;
+ std::unique_ptr<EncodedArrayItem> encoded_array_;
+ std::unique_ptr<EncodedAnnotation> encoded_annotation_;
+
+ DISALLOW_COPY_AND_ASSIGN(EncodedValue);
+};
+
+using EncodedValueVector = std::vector<std::unique_ptr<EncodedValue>>;
+
+class AnnotationElement {
+ public:
+ AnnotationElement(StringId* name, EncodedValue* value) : name_(name), value_(value) { }
+
+ StringId* GetName() const { return name_; }
+ EncodedValue* GetValue() const { return value_.get(); }
+
+ private:
+ StringId* name_;
+ std::unique_ptr<EncodedValue> value_;
+
+ DISALLOW_COPY_AND_ASSIGN(AnnotationElement);
};
-using ArrayItemVector = std::vector<std::unique_ptr<ArrayItem>>;
+using AnnotationElementVector = std::vector<std::unique_ptr<AnnotationElement>>;
+
+class EncodedAnnotation {
+ public:
+ EncodedAnnotation(TypeId* type, AnnotationElementVector* elements)
+ : type_(type), elements_(elements) { }
+
+ TypeId* GetType() const { return type_; }
+ AnnotationElementVector* GetAnnotationElements() const { return elements_.get(); }
+
+ private:
+ TypeId* type_;
+ std::unique_ptr<AnnotationElementVector> elements_;
+
+ DISALLOW_COPY_AND_ASSIGN(EncodedAnnotation);
+};
+
+class EncodedArrayItem : public Item {
+ public:
+ explicit EncodedArrayItem(EncodedValueVector* encoded_values)
+ : encoded_values_(encoded_values) { }
+
+ EncodedValueVector* GetEncodedValues() const { return encoded_values_.get(); }
+
+ private:
+ std::unique_ptr<EncodedValueVector> encoded_values_;
+
+ DISALLOW_COPY_AND_ASSIGN(EncodedArrayItem);
+};
class ClassData : public Item {
public:
@@ -442,42 +660,43 @@ class ClassData : public Item {
std::unique_ptr<FieldItemVector> instance_fields_;
std::unique_ptr<MethodItemVector> direct_methods_;
std::unique_ptr<MethodItemVector> virtual_methods_;
+
DISALLOW_COPY_AND_ASSIGN(ClassData);
};
-class ClassDef : public Item {
+class ClassDef : public IndexedItem {
public:
ClassDef(const TypeId* class_type,
uint32_t access_flags,
const TypeId* superclass,
- TypeIdVector* interfaces,
- uint32_t interfaces_offset,
+ TypeList* interfaces,
const StringId* source_file,
AnnotationsDirectoryItem* annotations,
- ArrayItemVector* static_values,
+ EncodedArrayItem* static_values,
ClassData* class_data)
: class_type_(class_type),
access_flags_(access_flags),
superclass_(superclass),
interfaces_(interfaces),
- interfaces_offset_(interfaces_offset),
source_file_(source_file),
annotations_(annotations),
static_values_(static_values),
- class_data_(class_data) { }
+ class_data_(class_data) { size_ = kClassDefItemSize; }
~ClassDef() OVERRIDE { }
+ static size_t ItemSize() { return kClassDefItemSize; }
+
const TypeId* ClassType() const { return class_type_; }
uint32_t GetAccessFlags() const { return access_flags_; }
const TypeId* Superclass() const { return superclass_; }
- TypeIdVector* Interfaces() { return interfaces_.get(); }
- uint32_t InterfacesOffset() const { return interfaces_offset_; }
- void SetInterfacesOffset(uint32_t new_offset) { interfaces_offset_ = new_offset; }
+ const TypeIdVector* Interfaces()
+ { return interfaces_ == nullptr ? nullptr : interfaces_->GetTypeList(); }
+ uint32_t InterfacesOffset() { return interfaces_ == nullptr ? 0 : interfaces_->GetOffset(); }
const StringId* SourceFile() const { return source_file_; }
- AnnotationsDirectoryItem* Annotations() const { return annotations_.get(); }
- ArrayItemVector* StaticValues() { return static_values_.get(); }
- ClassData* GetClassData() { return class_data_.get(); }
+ AnnotationsDirectoryItem* Annotations() const { return annotations_; }
+ EncodedArrayItem* StaticValues() { return static_values_; }
+ ClassData* GetClassData() { return class_data_; }
MethodItem* GenerateMethodItem(Header& header, ClassDataItemIterator& cdii);
@@ -487,12 +706,12 @@ class ClassDef : public Item {
const TypeId* class_type_;
uint32_t access_flags_;
const TypeId* superclass_;
- std::unique_ptr<TypeIdVector> interfaces_;
- uint32_t interfaces_offset_;
+ TypeList* interfaces_;
const StringId* source_file_;
- std::unique_ptr<AnnotationsDirectoryItem> annotations_;
- std::unique_ptr<ArrayItemVector> static_values_;
- std::unique_ptr<ClassData> class_data_;
+ AnnotationsDirectoryItem* annotations_;
+ EncodedArrayItem* static_values_;
+ ClassData* class_data_;
+
DISALLOW_COPY_AND_ASSIGN(ClassDef);
};
@@ -506,6 +725,7 @@ class CatchHandler {
private:
const TypeId* type_id_;
uint32_t address_;
+
DISALLOW_COPY_AND_ASSIGN(CatchHandler);
};
@@ -527,6 +747,7 @@ class TryItem : public Item {
uint32_t start_addr_;
uint16_t insn_count_;
std::unique_ptr<CatchHandlerVector> handlers_;
+
DISALLOW_COPY_AND_ASSIGN(TryItem);
};
@@ -555,7 +776,7 @@ class CodeItem : public Item {
uint16_t InsSize() const { return ins_size_; }
uint16_t OutsSize() const { return outs_size_; }
uint16_t TriesSize() const { return tries_ == nullptr ? 0 : tries_->size(); }
- DebugInfoItem* DebugInfo() const { return debug_info_.get(); }
+ DebugInfoItem* DebugInfo() const { return debug_info_; }
uint32_t InsnsSize() const { return insns_size_; }
uint16_t* Insns() const { return insns_.get(); }
TryItemVector* Tries() const { return tries_.get(); }
@@ -566,14 +787,14 @@ class CodeItem : public Item {
uint16_t registers_size_;
uint16_t ins_size_;
uint16_t outs_size_;
- std::unique_ptr<DebugInfoItem> debug_info_;
+ DebugInfoItem* debug_info_;
uint32_t insns_size_;
std::unique_ptr<uint16_t[]> insns_;
std::unique_ptr<TryItemVector> tries_;
+
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
-
struct PositionInfo {
PositionInfo(uint32_t address, uint32_t line) : address_(address), line_(line) { }
@@ -617,39 +838,60 @@ class DebugInfoItem : public Item {
private:
PositionInfoVector positions_;
LocalInfoVector locals_;
+
DISALLOW_COPY_AND_ASSIGN(DebugInfoItem);
};
-class AnnotationItem {
+class AnnotationItem : public Item {
public:
- AnnotationItem(uint8_t visibility, ArrayItem* item) : visibility_(visibility), item_(item) { }
+ AnnotationItem(uint8_t visibility, EncodedAnnotation* annotation)
+ : visibility_(visibility), annotation_(annotation) { }
uint8_t GetVisibility() const { return visibility_; }
- ArrayItem* GetItem() const { return item_.get(); }
+ EncodedAnnotation* GetAnnotation() const { return annotation_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
private:
uint8_t visibility_;
- std::unique_ptr<ArrayItem> item_;
+ std::unique_ptr<EncodedAnnotation> annotation_;
+
DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
};
-using AnnotationItemVector = std::vector<std::unique_ptr<AnnotationItem>>;
-
class AnnotationSetItem : public Item {
public:
- explicit AnnotationSetItem(AnnotationItemVector* items) : items_(items) { }
+ explicit AnnotationSetItem(std::vector<AnnotationItem*>* items) : items_(items) {
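+ // An annotation_set_item is a 4 byte size followed by one 4 byte entry per annotation.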
+ size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
+ }
~AnnotationSetItem() OVERRIDE { }
- AnnotationItemVector* GetItems() { return items_.get(); }
+ std::vector<AnnotationItem*>* GetItems() { return items_.get(); }
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
private:
- std::unique_ptr<AnnotationItemVector> items_;
+ std::unique_ptr<std::vector<AnnotationItem*>> items_;
+
DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
};
-using AnnotationSetItemVector = std::vector<std::unique_ptr<AnnotationSetItem>>;
+class AnnotationSetRefList : public Item {
+ public:
+ explicit AnnotationSetRefList(std::vector<AnnotationSetItem*>* items) : items_(items) {
+ size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t);
+ }
+ ~AnnotationSetRefList() OVERRIDE { }
+
+ std::vector<AnnotationSetItem*>* GetItems() { return items_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<std::vector<AnnotationSetItem*>> items_;
+
+ DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefList);
+};
class FieldAnnotation {
public:
@@ -657,11 +899,12 @@ class FieldAnnotation {
: field_id_(field_id), annotation_set_item_(annotation_set_item) { }
FieldId* GetFieldId() const { return field_id_; }
- AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); }
+ AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_; }
private:
FieldId* field_id_;
- std::unique_ptr<AnnotationSetItem> annotation_set_item_;
+ AnnotationSetItem* annotation_set_item_;
+
DISALLOW_COPY_AND_ASSIGN(FieldAnnotation);
};
@@ -673,11 +916,12 @@ class MethodAnnotation {
: method_id_(method_id), annotation_set_item_(annotation_set_item) { }
MethodId* GetMethodId() const { return method_id_; }
- AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); }
+ AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_; }
private:
MethodId* method_id_;
- std::unique_ptr<AnnotationSetItem> annotation_set_item_;
+ AnnotationSetItem* annotation_set_item_;
+
DISALLOW_COPY_AND_ASSIGN(MethodAnnotation);
};
@@ -685,15 +929,16 @@ using MethodAnnotationVector = std::vector<std::unique_ptr<MethodAnnotation>>;
class ParameterAnnotation {
public:
- ParameterAnnotation(MethodId* method_id, AnnotationSetItemVector* annotations)
+ ParameterAnnotation(MethodId* method_id, AnnotationSetRefList* annotations)
: method_id_(method_id), annotations_(annotations) { }
MethodId* GetMethodId() const { return method_id_; }
- AnnotationSetItemVector* GetAnnotations() { return annotations_.get(); }
+ AnnotationSetRefList* GetAnnotations() { return annotations_; }
private:
MethodId* method_id_;
- std::unique_ptr<AnnotationSetItemVector> annotations_;
+ AnnotationSetRefList* annotations_;
+
DISALLOW_COPY_AND_ASSIGN(ParameterAnnotation);
};
@@ -710,7 +955,7 @@ class AnnotationsDirectoryItem : public Item {
method_annotations_(method_annotations),
parameter_annotations_(parameter_annotations) { }
- AnnotationSetItem* GetClassAnnotation() const { return class_annotation_.get(); }
+ AnnotationSetItem* GetClassAnnotation() const { return class_annotation_; }
FieldAnnotationVector* GetFieldAnnotations() { return field_annotations_.get(); }
MethodAnnotationVector* GetMethodAnnotations() { return method_annotations_.get(); }
ParameterAnnotationVector* GetParameterAnnotations() { return parameter_annotations_.get(); }
@@ -718,10 +963,11 @@ class AnnotationsDirectoryItem : public Item {
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
private:
- std::unique_ptr<AnnotationSetItem> class_annotation_;
+ AnnotationSetItem* class_annotation_;
std::unique_ptr<FieldAnnotationVector> field_annotations_;
std::unique_ptr<MethodAnnotationVector> method_annotations_;
std::unique_ptr<ParameterAnnotationVector> parameter_annotations_;
+
DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
};
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index 30f57d95a4..e6868d74bc 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -24,401 +24,6 @@
namespace art {
namespace dex_ir {
-namespace {
-
-static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) {
- uint64_t value = 0;
- for (uint32_t i = 0; i <= length; i++) {
- value |= static_cast<uint64_t>(*(*data)++) << (i * 8);
- }
- if (sign_extend) {
- int shift = (7 - length) * 8;
- return (static_cast<int64_t>(value) << shift) >> shift;
- }
- return value;
-}
-
-// Prototype to break cyclic dependency.
-void ReadArrayItemVariant(Header& header,
- const uint8_t** data,
- uint8_t type,
- uint8_t length,
- ArrayItem::ArrayItemVariant* item);
-
-ArrayItem* ReadArrayItem(Header& header, const uint8_t** data, uint8_t type, uint8_t length) {
- ArrayItem* item = new ArrayItem(type);
- ReadArrayItemVariant(header, data, type, length, item->GetArrayItemVariant());
- return item;
-}
-
-ArrayItem* ReadArrayItem(Header& header, const uint8_t** data) {
- const uint8_t encoded_value = *(*data)++;
- const uint8_t type = encoded_value & 0x1f;
- ArrayItem* item = new ArrayItem(type);
- ReadArrayItemVariant(header, data, type, encoded_value >> 5, item->GetArrayItemVariant());
- return item;
-}
-
-void ReadArrayItemVariant(Header& header,
- const uint8_t** data,
- uint8_t type,
- uint8_t length,
- ArrayItem::ArrayItemVariant* item) {
- switch (type) {
- case DexFile::kDexAnnotationByte:
- item->u_.byte_val_ = static_cast<int8_t>(ReadVarWidth(data, length, false));
- break;
- case DexFile::kDexAnnotationShort:
- item->u_.short_val_ = static_cast<int16_t>(ReadVarWidth(data, length, true));
- break;
- case DexFile::kDexAnnotationChar:
- item->u_.char_val_ = static_cast<uint16_t>(ReadVarWidth(data, length, false));
- break;
- case DexFile::kDexAnnotationInt:
- item->u_.int_val_ = static_cast<int32_t>(ReadVarWidth(data, length, true));
- break;
- case DexFile::kDexAnnotationLong:
- item->u_.long_val_ = static_cast<int64_t>(ReadVarWidth(data, length, true));
- break;
- case DexFile::kDexAnnotationFloat: {
- // Fill on right.
- union {
- float f;
- uint32_t data;
- } conv;
- conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8;
- item->u_.float_val_ = conv.f;
- break;
- }
- case DexFile::kDexAnnotationDouble: {
- // Fill on right.
- union {
- double d;
- uint64_t data;
- } conv;
- conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8;
- item->u_.double_val_ = conv.d;
- break;
- }
- case DexFile::kDexAnnotationString: {
- const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->u_.string_val_ = header.StringIds()[string_index].get();
- break;
- }
- case DexFile::kDexAnnotationType: {
- const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->u_.string_val_ = header.TypeIds()[string_index]->GetStringId();
- break;
- }
- case DexFile::kDexAnnotationField:
- case DexFile::kDexAnnotationEnum: {
- const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->u_.field_val_ = header.FieldIds()[field_index].get();
- break;
- }
- case DexFile::kDexAnnotationMethod: {
- const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->u_.method_val_ = header.MethodIds()[method_index].get();
- break;
- }
- case DexFile::kDexAnnotationArray: {
- item->annotation_array_val_.reset(new ArrayItemVector());
- // Decode all elements.
- const uint32_t size = DecodeUnsignedLeb128(data);
- for (uint32_t i = 0; i < size; i++) {
- item->annotation_array_val_->push_back(
- std::unique_ptr<ArrayItem>(ReadArrayItem(header, data)));
- }
- break;
- }
- case DexFile::kDexAnnotationAnnotation: {
- const uint32_t type_idx = DecodeUnsignedLeb128(data);
- item->annotation_annotation_val_.string_ = header.TypeIds()[type_idx]->GetStringId();
- item->annotation_annotation_val_.array_.reset(
- new std::vector<std::unique_ptr<ArrayItem::NameValuePair>>());
- // Decode all name=value pairs.
- const uint32_t size = DecodeUnsignedLeb128(data);
- for (uint32_t i = 0; i < size; i++) {
- const uint32_t name_index = DecodeUnsignedLeb128(data);
- item->annotation_annotation_val_.array_->push_back(
- std::unique_ptr<ArrayItem::NameValuePair>(
- new ArrayItem::NameValuePair(header.StringIds()[name_index].get(),
- ReadArrayItem(header, data))));
- }
- break;
- }
- case DexFile::kDexAnnotationNull:
- break;
- case DexFile::kDexAnnotationBoolean:
- item->u_.bool_val_ = (length != 0);
- break;
- default:
- break;
- }
-}
-
-static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) {
- DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
- PositionInfoVector& positions = debug_info->GetPositionInfo();
- positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_)));
- return false;
-}
-
-static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) {
- DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
- LocalInfoVector& locals = debug_info->GetLocalInfo();
- const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
- const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
- locals.push_back(std::unique_ptr<LocalInfo>(
- new LocalInfo(name, entry.descriptor_, signature, entry.start_address_,
- entry.end_address_, entry.reg_)));
-}
-
-CodeItem* ReadCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem& disk_code_item,
- Header& header) {
- uint16_t registers_size = disk_code_item.registers_size_;
- uint16_t ins_size = disk_code_item.ins_size_;
- uint16_t outs_size = disk_code_item.outs_size_;
- uint32_t tries_size = disk_code_item.tries_size_;
-
- const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(&disk_code_item);
- DebugInfoItem* debug_info = nullptr;
- if (debug_info_stream != nullptr) {
- debug_info = new DebugInfoItem();
- }
-
- uint32_t insns_size = disk_code_item.insns_size_in_code_units_;
- uint16_t* insns = new uint16_t[insns_size];
- memcpy(insns, disk_code_item.insns_, insns_size * sizeof(uint16_t));
-
- TryItemVector* tries = nullptr;
- if (tries_size > 0) {
- tries = new TryItemVector();
- for (uint32_t i = 0; i < tries_size; ++i) {
- const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i);
- uint32_t start_addr = disk_try_item->start_addr_;
- uint16_t insn_count = disk_try_item->insn_count_;
- CatchHandlerVector* handlers = new CatchHandlerVector();
- for (CatchHandlerIterator it(disk_code_item, *disk_try_item); it.HasNext(); it.Next()) {
- const uint16_t type_index = it.GetHandlerTypeIndex();
- const TypeId* type_id = header.GetTypeIdOrNullPtr(type_index);
- handlers->push_back(std::unique_ptr<const CatchHandler>(
- new CatchHandler(type_id, it.GetHandlerAddress())));
- }
- TryItem* try_item = new TryItem(start_addr, insn_count, handlers);
- tries->push_back(std::unique_ptr<const TryItem>(try_item));
- }
- }
- return new CodeItem(registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries);
-}
-
-MethodItem* GenerateMethodItem(const DexFile& dex_file,
- dex_ir::Header& header,
- ClassDataItemIterator& cdii) {
- MethodId* method_item = header.MethodIds()[cdii.GetMemberIndex()].get();
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- CodeItem* code_item = nullptr;
- DebugInfoItem* debug_info = nullptr;
- if (disk_code_item != nullptr) {
- code_item = ReadCodeItem(dex_file, *disk_code_item, header);
- code_item->SetOffset(cdii.GetMethodCodeItemOffset());
- debug_info = code_item->DebugInfo();
- }
- if (debug_info != nullptr) {
- bool is_static = (access_flags & kAccStatic) != 0;
- dex_file.DecodeDebugLocalInfo(
- disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info);
- dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info);
- }
- return new MethodItem(access_flags, method_item, code_item);
-}
-
-AnnotationSetItem* ReadAnnotationSetItem(const DexFile& dex_file,
- const DexFile::AnnotationSetItem& disk_annotations_item,
- Header& header) {
- if (disk_annotations_item.size_ == 0) {
- return nullptr;
- }
- AnnotationItemVector* items = new AnnotationItemVector();
- for (uint32_t i = 0; i < disk_annotations_item.size_; ++i) {
- const DexFile::AnnotationItem* annotation =
- dex_file.GetAnnotationItem(&disk_annotations_item, i);
- if (annotation == nullptr) {
- continue;
- }
- uint8_t visibility = annotation->visibility_;
- const uint8_t* annotation_data = annotation->annotation_;
- ArrayItem* array_item =
- ReadArrayItem(header, &annotation_data, DexFile::kDexAnnotationAnnotation, 0);
- items->push_back(std::unique_ptr<AnnotationItem>(new AnnotationItem(visibility, array_item)));
- }
- return new AnnotationSetItem(items);
-}
-
-ParameterAnnotation* ReadParameterAnnotation(
- const DexFile& dex_file,
- MethodId* method_id,
- const DexFile::AnnotationSetRefList* annotation_set_ref_list,
- Header& header) {
- AnnotationSetItemVector* annotations = new AnnotationSetItemVector();
- for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
- const DexFile::AnnotationSetItem* annotation_set_item =
- dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
- annotations->push_back(std::unique_ptr<AnnotationSetItem>(
- ReadAnnotationSetItem(dex_file, *annotation_set_item, header)));
- }
- return new ParameterAnnotation(method_id, annotations);
-}
-
-AnnotationsDirectoryItem* ReadAnnotationsDirectoryItem(
- const DexFile& dex_file,
- const DexFile::AnnotationsDirectoryItem* disk_annotations_item,
- Header& header) {
- const DexFile::AnnotationSetItem* class_set_item =
- dex_file.GetClassAnnotationSet(disk_annotations_item);
- AnnotationSetItem* class_annotation = nullptr;
- if (class_set_item != nullptr) {
- class_annotation = ReadAnnotationSetItem(dex_file, *class_set_item, header);
- }
- const DexFile::FieldAnnotationsItem* fields =
- dex_file.GetFieldAnnotations(disk_annotations_item);
- FieldAnnotationVector* field_annotations = nullptr;
- if (fields != nullptr) {
- field_annotations = new FieldAnnotationVector();
- for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
- FieldId* field_id = header.FieldIds()[fields[i].field_idx_].get();
- const DexFile::AnnotationSetItem* field_set_item =
- dex_file.GetFieldAnnotationSetItem(fields[i]);
- AnnotationSetItem* annotation_set_item =
- ReadAnnotationSetItem(dex_file, *field_set_item, header);
- field_annotations->push_back(std::unique_ptr<FieldAnnotation>(
- new FieldAnnotation(field_id, annotation_set_item)));
- }
- }
- const DexFile::MethodAnnotationsItem* methods =
- dex_file.GetMethodAnnotations(disk_annotations_item);
- MethodAnnotationVector* method_annotations = nullptr;
- if (methods != nullptr) {
- method_annotations = new MethodAnnotationVector();
- for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
- MethodId* method_id = header.MethodIds()[methods[i].method_idx_].get();
- const DexFile::AnnotationSetItem* method_set_item =
- dex_file.GetMethodAnnotationSetItem(methods[i]);
- AnnotationSetItem* annotation_set_item =
- ReadAnnotationSetItem(dex_file, *method_set_item, header);
- method_annotations->push_back(std::unique_ptr<MethodAnnotation>(
- new MethodAnnotation(method_id, annotation_set_item)));
- }
- }
- const DexFile::ParameterAnnotationsItem* parameters =
- dex_file.GetParameterAnnotations(disk_annotations_item);
- ParameterAnnotationVector* parameter_annotations = nullptr;
- if (parameters != nullptr) {
- parameter_annotations = new ParameterAnnotationVector();
- for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
- MethodId* method_id = header.MethodIds()[parameters[i].method_idx_].get();
- const DexFile::AnnotationSetRefList* list =
- dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
- parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
- ReadParameterAnnotation(dex_file, method_id, list, header)));
- }
- }
-
- return new AnnotationsDirectoryItem(class_annotation,
- field_annotations,
- method_annotations,
- parameter_annotations);
-}
-
-ClassDef* ReadClassDef(const DexFile& dex_file,
- const DexFile::ClassDef& disk_class_def,
- Header& header) {
- const TypeId* class_type = header.TypeIds()[disk_class_def.class_idx_].get();
- uint32_t access_flags = disk_class_def.access_flags_;
- const TypeId* superclass = header.GetTypeIdOrNullPtr(disk_class_def.superclass_idx_);
-
- TypeIdVector* interfaces = nullptr;
- const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
- uint32_t interfaces_offset = disk_class_def.interfaces_off_;
- if (type_list != nullptr) {
- interfaces = new TypeIdVector();
- for (uint32_t index = 0; index < type_list->Size(); ++index) {
- interfaces->push_back(header.TypeIds()[type_list->GetTypeItem(index).type_idx_].get());
- }
- }
- const StringId* source_file = header.GetStringIdOrNullPtr(disk_class_def.source_file_idx_);
- // Annotations.
- AnnotationsDirectoryItem* annotations = nullptr;
- const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
- dex_file.GetAnnotationsDirectory(disk_class_def);
- if (disk_annotations_directory_item != nullptr) {
- annotations = ReadAnnotationsDirectoryItem(dex_file, disk_annotations_directory_item, header);
- annotations->SetOffset(disk_class_def.annotations_off_);
- }
- // Static field initializers.
- ArrayItemVector* static_values = nullptr;
- const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
- if (static_data != nullptr) {
- uint32_t static_value_count = static_data == nullptr ? 0 : DecodeUnsignedLeb128(&static_data);
- if (static_value_count > 0) {
- static_values = new ArrayItemVector();
- for (uint32_t i = 0; i < static_value_count; ++i) {
- static_values->push_back(std::unique_ptr<ArrayItem>(ReadArrayItem(header, &static_data)));
- }
- }
- }
- // Read the fields and methods defined by the class, resolving the circular reference from those
- // to classes by setting class at the same time.
- const uint8_t* encoded_data = dex_file.GetClassData(disk_class_def);
- ClassData* class_data = nullptr;
- if (encoded_data != nullptr) {
- uint32_t offset = disk_class_def.class_data_off_;
- ClassDataItemIterator cdii(dex_file, encoded_data);
- // Static fields.
- FieldItemVector* static_fields = new FieldItemVector();
- for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) {
- FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get();
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
- }
- // Instance fields.
- FieldItemVector* instance_fields = new FieldItemVector();
- for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) {
- FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get();
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- instance_fields->push_back(
- std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
- }
- // Direct methods.
- MethodItemVector* direct_methods = new MethodItemVector();
- for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) {
- direct_methods->push_back(
- std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, header, cdii)));
- }
- // Virtual methods.
- MethodItemVector* virtual_methods = new MethodItemVector();
- for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) {
- virtual_methods->push_back(
- std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, header, cdii)));
- }
- class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods);
- class_data->SetOffset(offset);
- }
- return new ClassDef(class_type,
- access_flags,
- superclass,
- interfaces,
- interfaces_offset,
- source_file,
- annotations,
- static_values,
- class_data);
-}
-
-} // namespace
-
Header* DexIrBuilder(const DexFile& dex_file) {
const DexFile::Header& disk_header = dex_file.GetHeader();
Header* header = new Header(disk_header.magic_,
@@ -431,73 +36,37 @@ Header* DexIrBuilder(const DexFile& dex_file) {
disk_header.link_off_,
disk_header.data_size_,
disk_header.data_off_);
+ Collections& collections = header->GetCollections();
// Walk the rest of the header fields.
// StringId table.
- std::vector<std::unique_ptr<StringId>>& string_ids = header->StringIds();
- header->SetStringIdsOffset(disk_header.string_ids_off_);
+ collections.SetStringIdsOffset(disk_header.string_ids_off_);
for (uint32_t i = 0; i < dex_file.NumStringIds(); ++i) {
- const DexFile::StringId& disk_string_id = dex_file.GetStringId(i);
- StringId* string_id = new StringId(dex_file.GetStringData(disk_string_id));
- string_id->SetOffset(i);
- string_ids.push_back(std::unique_ptr<StringId>(string_id));
+ collections.CreateStringId(dex_file, i);
}
// TypeId table.
- std::vector<std::unique_ptr<TypeId>>& type_ids = header->TypeIds();
- header->SetTypeIdsOffset(disk_header.type_ids_off_);
+ collections.SetTypeIdsOffset(disk_header.type_ids_off_);
for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
- const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(i);
- TypeId* type_id = new TypeId(header->StringIds()[disk_type_id.descriptor_idx_].get());
- type_id->SetOffset(i);
- type_ids.push_back(std::unique_ptr<TypeId>(type_id));
+ collections.CreateTypeId(dex_file, i);
}
// ProtoId table.
- std::vector<std::unique_ptr<ProtoId>>& proto_ids = header->ProtoIds();
- header->SetProtoIdsOffset(disk_header.proto_ids_off_);
+ collections.SetProtoIdsOffset(disk_header.proto_ids_off_);
for (uint32_t i = 0; i < dex_file.NumProtoIds(); ++i) {
- const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(i);
- // Build the parameter type vector.
- TypeIdVector* parameters = new TypeIdVector();
- DexFileParameterIterator dfpi(dex_file, disk_proto_id);
- while (dfpi.HasNext()) {
- parameters->push_back(header->TypeIds()[dfpi.GetTypeIdx()].get());
- dfpi.Next();
- }
- ProtoId* proto_id = new ProtoId(header->StringIds()[disk_proto_id.shorty_idx_].get(),
- header->TypeIds()[disk_proto_id.return_type_idx_].get(),
- parameters);
- proto_id->SetOffset(i);
- proto_ids.push_back(std::unique_ptr<ProtoId>(proto_id));
+ collections.CreateProtoId(dex_file, i);
}
// FieldId table.
- std::vector<std::unique_ptr<FieldId>>& field_ids = header->FieldIds();
- header->SetFieldIdsOffset(disk_header.field_ids_off_);
+ collections.SetFieldIdsOffset(disk_header.field_ids_off_);
for (uint32_t i = 0; i < dex_file.NumFieldIds(); ++i) {
- const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
- FieldId* field_id = new FieldId(header->TypeIds()[disk_field_id.class_idx_].get(),
- header->TypeIds()[disk_field_id.type_idx_].get(),
- header->StringIds()[disk_field_id.name_idx_].get());
- field_id->SetOffset(i);
- field_ids.push_back(std::unique_ptr<FieldId>(field_id));
+ collections.CreateFieldId(dex_file, i);
}
// MethodId table.
- std::vector<std::unique_ptr<MethodId>>& method_ids = header->MethodIds();
- header->SetMethodIdsOffset(disk_header.method_ids_off_);
+ collections.SetMethodIdsOffset(disk_header.method_ids_off_);
for (uint32_t i = 0; i < dex_file.NumMethodIds(); ++i) {
- const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
- MethodId* method_id = new MethodId(header->TypeIds()[disk_method_id.class_idx_].get(),
- header->ProtoIds()[disk_method_id.proto_idx_].get(),
- header->StringIds()[disk_method_id.name_idx_].get());
- method_id->SetOffset(i);
- method_ids.push_back(std::unique_ptr<MethodId>(method_id));
+ collections.CreateMethodId(dex_file, i);
}
// ClassDef table.
- std::vector<std::unique_ptr<ClassDef>>& class_defs = header->ClassDefs();
- header->SetClassDefsOffset(disk_header.class_defs_off_);
+ collections.SetClassDefsOffset(disk_header.class_defs_off_);
for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
- const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
- ClassDef* class_def = ReadClassDef(dex_file, disk_class_def, *header);
- class_def->SetOffset(i);
- class_defs.push_back(std::unique_ptr<ClassDef>(class_def));
+ collections.CreateClassDef(dex_file, i);
}
return header;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 3a3f417825..6f34a33ed7 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -30,9 +30,11 @@
#include <sstream>
#include <vector>
+#include "base/unix_file/fd_file.h"
#include "dex_ir_builder.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
+#include "os.h"
#include "utils.h"
namespace art {
@@ -348,10 +350,26 @@ static void DumpXmlAttribute(const char* p) {
} // for
}
+// Forward declare to resolve circular dependence.
+static void DumpEncodedValue(const dex_ir::EncodedValue* data);
+
+/*
+ * Dumps encoded annotation.
+ */
+static void DumpEncodedAnnotation(dex_ir::EncodedAnnotation* annotation) {
+ fputs(annotation->GetType()->GetStringId()->Data(), out_file_);
+ // Display all name=value pairs.
+ for (auto& subannotation : *annotation->GetAnnotationElements()) {
+ fputc(' ', out_file_);
+ fputs(subannotation->GetName()->Data(), out_file_);
+ fputc('=', out_file_);
+ DumpEncodedValue(subannotation->GetValue());
+ }
+}
/*
* Dumps encoded value.
*/
-static void DumpEncodedValue(const dex_ir::ArrayItem* data) {
+static void DumpEncodedValue(const dex_ir::EncodedValue* data) {
switch (data->Type()) {
case DexFile::kDexAnnotationByte:
fprintf(out_file_, "%" PRId8, data->GetByte());
@@ -386,8 +404,8 @@ static void DumpEncodedValue(const dex_ir::ArrayItem* data) {
break;
}
case DexFile::kDexAnnotationType: {
- dex_ir::StringId* string_id = data->GetStringId();
- fputs(string_id->Data(), out_file_);
+ dex_ir::TypeId* type_id = data->GetTypeId();
+ fputs(type_id->GetStringId()->Data(), out_file_);
break;
}
case DexFile::kDexAnnotationField:
@@ -404,22 +422,15 @@ static void DumpEncodedValue(const dex_ir::ArrayItem* data) {
case DexFile::kDexAnnotationArray: {
fputc('{', out_file_);
// Display all elements.
- for (auto& array : *data->GetAnnotationArray()) {
+ for (auto& value : *data->GetEncodedArray()->GetEncodedValues()) {
fputc(' ', out_file_);
- DumpEncodedValue(array.get());
+ DumpEncodedValue(value.get());
}
fputs(" }", out_file_);
break;
}
case DexFile::kDexAnnotationAnnotation: {
- fputs(data->GetAnnotationAnnotationString()->Data(), out_file_);
- // Display all name=value pairs.
- for (auto& subannotation : *data->GetAnnotationAnnotationNameValuePairArray()) {
- fputc(' ', out_file_);
- fputs(subannotation->Name()->Data(), out_file_);
- fputc('=', out_file_);
- DumpEncodedValue(subannotation->Value());
- }
+ DumpEncodedAnnotation(data->GetEncodedAnnotation());
break;
}
case DexFile::kDexAnnotationNull:
@@ -437,8 +448,9 @@ static void DumpEncodedValue(const dex_ir::ArrayItem* data) {
/*
* Dumps the file header.
*/
-static void DumpFileHeader(const dex_ir::Header* header) {
+static void DumpFileHeader(dex_ir::Header* header) {
char sanitized[8 * 2 + 1];
+ dex_ir::Collections& collections = header->GetCollections();
fprintf(out_file_, "DEX file header:\n");
Asciify(sanitized, header->Magic(), 8);
fprintf(out_file_, "magic : '%s'\n", sanitized);
@@ -452,24 +464,24 @@ static void DumpFileHeader(const dex_ir::Header* header) {
fprintf(out_file_, "link_size : %d\n", header->LinkSize());
fprintf(out_file_, "link_off : %d (0x%06x)\n",
header->LinkOffset(), header->LinkOffset());
- fprintf(out_file_, "string_ids_size : %d\n", header->StringIdsSize());
+ fprintf(out_file_, "string_ids_size : %d\n", collections.StringIdsSize());
fprintf(out_file_, "string_ids_off : %d (0x%06x)\n",
- header->StringIdsOffset(), header->StringIdsOffset());
- fprintf(out_file_, "type_ids_size : %d\n", header->TypeIdsSize());
+ collections.StringIdsOffset(), collections.StringIdsOffset());
+ fprintf(out_file_, "type_ids_size : %d\n", collections.TypeIdsSize());
fprintf(out_file_, "type_ids_off : %d (0x%06x)\n",
- header->TypeIdsOffset(), header->TypeIdsOffset());
- fprintf(out_file_, "proto_ids_size : %d\n", header->ProtoIdsSize());
+ collections.TypeIdsOffset(), collections.TypeIdsOffset());
+ fprintf(out_file_, "proto_ids_size : %d\n", collections.ProtoIdsSize());
fprintf(out_file_, "proto_ids_off : %d (0x%06x)\n",
- header->ProtoIdsOffset(), header->ProtoIdsOffset());
- fprintf(out_file_, "field_ids_size : %d\n", header->FieldIdsSize());
+ collections.ProtoIdsOffset(), collections.ProtoIdsOffset());
+ fprintf(out_file_, "field_ids_size : %d\n", collections.FieldIdsSize());
fprintf(out_file_, "field_ids_off : %d (0x%06x)\n",
- header->FieldIdsOffset(), header->FieldIdsOffset());
- fprintf(out_file_, "method_ids_size : %d\n", header->MethodIdsSize());
+ collections.FieldIdsOffset(), collections.FieldIdsOffset());
+ fprintf(out_file_, "method_ids_size : %d\n", collections.MethodIdsSize());
fprintf(out_file_, "method_ids_off : %d (0x%06x)\n",
- header->MethodIdsOffset(), header->MethodIdsOffset());
- fprintf(out_file_, "class_defs_size : %d\n", header->ClassDefsSize());
+ collections.MethodIdsOffset(), collections.MethodIdsOffset());
+ fprintf(out_file_, "class_defs_size : %d\n", collections.ClassDefsSize());
fprintf(out_file_, "class_defs_off : %d (0x%06x)\n",
- header->ClassDefsOffset(), header->ClassDefsOffset());
+ collections.ClassDefsOffset(), collections.ClassDefsOffset());
fprintf(out_file_, "data_size : %d\n", header->DataSize());
fprintf(out_file_, "data_off : %d (0x%06x)\n\n",
header->DataOffset(), header->DataOffset());
@@ -480,19 +492,19 @@ static void DumpFileHeader(const dex_ir::Header* header) {
*/
static void DumpClassDef(dex_ir::Header* header, int idx) {
// General class information.
- dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx);
fprintf(out_file_, "Class #%d header:\n", idx);
- fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetOffset());
+ fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetIndex());
fprintf(out_file_, "access_flags : %d (0x%04x)\n",
class_def->GetAccessFlags(), class_def->GetAccessFlags());
uint32_t superclass_idx = class_def->Superclass() == nullptr ?
- DexFile::kDexNoIndex16 : class_def->Superclass()->GetOffset();
+ DexFile::kDexNoIndex16 : class_def->Superclass()->GetIndex();
fprintf(out_file_, "superclass_idx : %d\n", superclass_idx);
fprintf(out_file_, "interfaces_off : %d (0x%06x)\n",
class_def->InterfacesOffset(), class_def->InterfacesOffset());
uint32_t source_file_offset = 0xffffffffU;
if (class_def->SourceFile() != nullptr) {
- source_file_offset = class_def->SourceFile()->GetOffset();
+ source_file_offset = class_def->SourceFile()->GetIndex();
}
fprintf(out_file_, "source_file_idx : %d\n", source_file_offset);
uint32_t annotations_offset = 0;
@@ -541,7 +553,7 @@ static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) {
fputs(" empty-annotation-set\n", out_file_);
return;
}
- for (std::unique_ptr<dex_ir::AnnotationItem>& annotation : *set_item->GetItems()) {
+ for (dex_ir::AnnotationItem* annotation : *set_item->GetItems()) {
if (annotation == nullptr) {
continue;
}
@@ -552,10 +564,7 @@ static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) {
case DexFile::kDexVisibilitySystem: fputs("VISIBILITY_SYSTEM ", out_file_); break;
default: fputs("VISIBILITY_UNKNOWN ", out_file_); break;
} // switch
- // Decode raw bytes in annotation.
- // const uint8_t* rData = annotation->annotation_;
- dex_ir::ArrayItem* data = annotation->GetItem();
- DumpEncodedValue(data);
+ DumpEncodedAnnotation(annotation->GetAnnotation());
fputc('\n', out_file_);
}
}
@@ -564,7 +573,7 @@ static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) {
* Dumps class annotations.
*/
static void DumpClassAnnotations(dex_ir::Header* header, int idx) {
- dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx);
dex_ir::AnnotationsDirectoryItem* annotations_directory = class_def->Annotations();
if (annotations_directory == nullptr) {
return; // none
@@ -587,7 +596,7 @@ static void DumpClassAnnotations(dex_ir::Header* header, int idx) {
if (fields != nullptr) {
for (auto& field : *fields) {
const dex_ir::FieldId* field_id = field->GetFieldId();
- const uint32_t field_idx = field_id->GetOffset();
+ const uint32_t field_idx = field_id->GetIndex();
const char* field_name = field_id->Name()->Data();
fprintf(out_file_, "Annotations on field #%u '%s'\n", field_idx, field_name);
DumpAnnotationSetItem(field->GetAnnotationSetItem());
@@ -598,7 +607,7 @@ static void DumpClassAnnotations(dex_ir::Header* header, int idx) {
if (methods != nullptr) {
for (auto& method : *methods) {
const dex_ir::MethodId* method_id = method->GetMethodId();
- const uint32_t method_idx = method_id->GetOffset();
+ const uint32_t method_idx = method_id->GetIndex();
const char* method_name = method_id->Name()->Data();
fprintf(out_file_, "Annotations on method #%u '%s'\n", method_idx, method_name);
DumpAnnotationSetItem(method->GetAnnotationSetItem());
@@ -609,13 +618,13 @@ static void DumpClassAnnotations(dex_ir::Header* header, int idx) {
if (parameters != nullptr) {
for (auto& parameter : *parameters) {
const dex_ir::MethodId* method_id = parameter->GetMethodId();
- const uint32_t method_idx = method_id->GetOffset();
+ const uint32_t method_idx = method_id->GetIndex();
const char* method_name = method_id->Name()->Data();
fprintf(out_file_, "Annotations on method #%u '%s' parameters\n", method_idx, method_name);
uint32_t j = 0;
- for (auto& annotation : *parameter->GetAnnotations()) {
+ for (dex_ir::AnnotationSetItem* annotation : *parameter->GetAnnotations()->GetItems()) {
fprintf(out_file_, "#%u\n", j);
- DumpAnnotationSetItem(annotation.get());
+ DumpAnnotationSetItem(annotation);
++j;
}
}
@@ -748,24 +757,24 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
outSize = snprintf(buf.get(), buf_size, "<no-index>");
break;
case Instruction::kIndexTypeRef:
- if (index < header->TypeIdsSize()) {
- const char* tp = header->TypeIds()[index]->GetStringId()->Data();
+ if (index < header->GetCollections().TypeIdsSize()) {
+ const char* tp = header->GetCollections().GetTypeId(index)->GetStringId()->Data();
outSize = snprintf(buf.get(), buf_size, "%s // type@%0*x", tp, width, index);
} else {
outSize = snprintf(buf.get(), buf_size, "<type?> // type@%0*x", width, index);
}
break;
case Instruction::kIndexStringRef:
- if (index < header->StringIdsSize()) {
- const char* st = header->StringIds()[index]->Data();
+ if (index < header->GetCollections().StringIdsSize()) {
+ const char* st = header->GetCollections().GetStringId(index)->Data();
outSize = snprintf(buf.get(), buf_size, "\"%s\" // string@%0*x", st, width, index);
} else {
outSize = snprintf(buf.get(), buf_size, "<string?> // string@%0*x", width, index);
}
break;
case Instruction::kIndexMethodRef:
- if (index < header->MethodIdsSize()) {
- dex_ir::MethodId* method_id = header->MethodIds()[index].get();
+ if (index < header->GetCollections().MethodIdsSize()) {
+ dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(index);
const char* name = method_id->Name()->Data();
std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
@@ -776,8 +785,8 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
}
break;
case Instruction::kIndexFieldRef:
- if (index < header->FieldIdsSize()) {
- dex_ir::FieldId* field_id = header->FieldIds()[index].get();
+ if (index < header->GetCollections().FieldIdsSize()) {
+ dex_ir::FieldId* field_id = header->GetCollections().GetFieldId(index);
const char* name = field_id->Name()->Data();
const char* type_descriptor = field_id->Type()->GetStringId()->Data();
const char* back_descriptor = field_id->Class()->GetStringId()->Data();
@@ -1028,7 +1037,7 @@ static void DumpInstruction(dex_ir::Header* header, const dex_ir::CodeItem* code
*/
static void DumpBytecodes(dex_ir::Header* header, uint32_t idx,
const dex_ir::CodeItem* code, uint32_t code_offset) {
- dex_ir::MethodId* method_id = header->MethodIds()[idx].get();
+ dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(idx);
const char* name = method_id->Name()->Data();
std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
@@ -1088,7 +1097,7 @@ static void DumpMethod(dex_ir::Header* header, uint32_t idx, uint32_t flags,
return;
}
- dex_ir::MethodId* method_id = header->MethodIds()[idx].get();
+ dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(idx);
const char* name = method_id->Name()->Data();
char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
@@ -1187,13 +1196,13 @@ static void DumpMethod(dex_ir::Header* header, uint32_t idx, uint32_t flags,
* Dumps a static (class) field.
*/
static void DumpSField(dex_ir::Header* header, uint32_t idx, uint32_t flags,
- int i, dex_ir::ArrayItem* init) {
+ int i, dex_ir::EncodedValue* init) {
// Bail for anything private if export only requested.
if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
return;
}
- dex_ir::FieldId* field_id = header->FieldIds()[idx].get();
+ dex_ir::FieldId* field_id = header->GetCollections().GetFieldId(idx);
const char* name = field_id->Name()->Data();
const char* type_descriptor = field_id->Type()->GetStringId()->Data();
const char* back_descriptor = field_id->Class()->GetStringId()->Data();
@@ -1293,7 +1302,7 @@ static void DumpClass(const DexFile* dex_file,
dex_ir::Header* header,
int idx,
char** last_package) {
- dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx);
// Omitting non-public class.
if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) {
return;
@@ -1316,7 +1325,8 @@ static void DumpClass(const DexFile* dex_file,
// up the classes, sort them, and dump them alphabetically so the
// package name wouldn't jump around, but that's not a great plan
// for something that needs to run on the device.
- const char* class_descriptor = header->ClassDefs()[idx]->ClassType()->GetStringId()->Data();
+ const char* class_descriptor =
+ header->GetCollections().GetClassDef(idx)->ClassType()->GetStringId()->Data();
if (!(class_descriptor[0] == 'L' &&
class_descriptor[strlen(class_descriptor)-1] == ';')) {
// Arrays and primitives should not be defined explicitly. Keep going?
@@ -1386,7 +1396,7 @@ static void DumpClass(const DexFile* dex_file,
}
// Interfaces.
- dex_ir::TypeIdVector* interfaces = class_def->Interfaces();
+ const dex_ir::TypeIdVector* interfaces = class_def->Interfaces();
if (interfaces != nullptr) {
for (uint32_t i = 0; i < interfaces->size(); i++) {
DumpInterface((*interfaces)[i], i);
@@ -1396,8 +1406,10 @@ static void DumpClass(const DexFile* dex_file,
// Fields and methods.
dex_ir::ClassData* class_data = class_def->GetClassData();
// Prepare data for static fields.
- std::vector<std::unique_ptr<dex_ir::ArrayItem>>* static_values = class_def->StaticValues();
- const uint32_t static_values_size = (static_values == nullptr) ? 0 : static_values->size();
+ dex_ir::EncodedArrayItem* static_values = class_def->StaticValues();
+ dex_ir::EncodedValueVector* encoded_values =
+ static_values == nullptr ? nullptr : static_values->GetEncodedValues();
+ const uint32_t encoded_values_size = (encoded_values == nullptr) ? 0 : encoded_values->size();
// Static fields.
if (options_.output_format_ == kOutputPlain) {
@@ -1408,10 +1420,10 @@ static void DumpClass(const DexFile* dex_file,
if (static_fields != nullptr) {
for (uint32_t i = 0; i < static_fields->size(); i++) {
DumpSField(header,
- (*static_fields)[i]->GetFieldId()->GetOffset(),
+ (*static_fields)[i]->GetFieldId()->GetIndex(),
(*static_fields)[i]->GetAccessFlags(),
i,
- i < static_values_size ? (*static_values)[i].get() : nullptr);
+ i < encoded_values_size ? (*encoded_values)[i].get() : nullptr);
} // for
}
}
@@ -1425,7 +1437,7 @@ static void DumpClass(const DexFile* dex_file,
if (instance_fields != nullptr) {
for (uint32_t i = 0; i < instance_fields->size(); i++) {
DumpIField(header,
- (*instance_fields)[i]->GetFieldId()->GetOffset(),
+ (*instance_fields)[i]->GetFieldId()->GetIndex(),
(*instance_fields)[i]->GetAccessFlags(),
i);
} // for
@@ -1441,7 +1453,7 @@ static void DumpClass(const DexFile* dex_file,
if (direct_methods != nullptr) {
for (uint32_t i = 0; i < direct_methods->size(); i++) {
DumpMethod(header,
- (*direct_methods)[i]->GetMethodId()->GetOffset(),
+ (*direct_methods)[i]->GetMethodId()->GetIndex(),
(*direct_methods)[i]->GetAccessFlags(),
(*direct_methods)[i]->GetCodeItem(),
i);
@@ -1458,7 +1470,7 @@ static void DumpClass(const DexFile* dex_file,
if (virtual_methods != nullptr) {
for (uint32_t i = 0; i < virtual_methods->size(); i++) {
DumpMethod(header,
- (*virtual_methods)[i]->GetMethodId()->GetOffset(),
+ (*virtual_methods)[i]->GetMethodId()->GetIndex(),
(*virtual_methods)[i]->GetAccessFlags(),
(*virtual_methods)[i]->GetCodeItem(),
i);
@@ -1474,7 +1486,7 @@ static void DumpClass(const DexFile* dex_file,
}
const dex_ir::StringId* source_file = class_def->SourceFile();
fprintf(out_file_, " source_file_idx : %d (%s)\n\n",
- source_file == nullptr ? 0xffffffffU : source_file->GetOffset(), file_name);
+ source_file == nullptr ? 0xffffffffU : source_file->GetIndex(), file_name);
} else if (options_.output_format_ == kOutputXml) {
fprintf(out_file_, "</class>\n");
}
@@ -1483,6 +1495,96 @@ static void DumpClass(const DexFile* dex_file,
}
/*
+static uint32_t GetDataSectionOffset(dex_ir::Header& header) {
+ return dex_ir::Header::ItemSize() +
+ header.GetCollections().StringIdsSize() * dex_ir::StringId::ItemSize() +
+ header.GetCollections().TypeIdsSize() * dex_ir::TypeId::ItemSize() +
+ header.GetCollections().ProtoIdsSize() * dex_ir::ProtoId::ItemSize() +
+ header.GetCollections().FieldIdsSize() * dex_ir::FieldId::ItemSize() +
+ header.GetCollections().MethodIdsSize() * dex_ir::MethodId::ItemSize() +
+ header.GetCollections().ClassDefsSize() * dex_ir::ClassDef::ItemSize();
+}
+
+static bool Align(File* file, uint32_t& offset) {
+ uint8_t zero_buffer[] = { 0, 0, 0 };
+ uint32_t zeroes = (-offset) & 3;
+ if (zeroes > 0) {
+ if (!file->PwriteFully(zero_buffer, zeroes, offset)) {
+ return false;
+ }
+ offset += zeroes;
+ }
+ return true;
+}
+
+static bool WriteStrings(File* dex_file, dex_ir::Header& header,
+ uint32_t& index_offset, uint32_t& data_offset) {
+ uint32_t index = 0;
+ uint32_t index_buffer[1];
+ uint32_t string_length;
+ uint32_t length_length;
+ uint8_t length_buffer[8];
+ for (std::unique_ptr<dex_ir::StringId>& string_id : header.GetCollections().StringIds()) {
+ string_id->SetOffset(index);
+ index_buffer[0] = data_offset;
+ string_length = strlen(string_id->Data());
+ length_length = UnsignedLeb128Size(string_length);
+ EncodeUnsignedLeb128(length_buffer, string_length);
+
+ if (!dex_file->PwriteFully(index_buffer, 4, index_offset) ||
+ !dex_file->PwriteFully(length_buffer, length_length, data_offset) ||
+ !dex_file->PwriteFully(string_id->Data(), string_length, data_offset + length_length)) {
+ return false;
+ }
+
+ index++;
+ index_offset += 4;
+ data_offset += string_length + length_length;
+ }
+ return true;
+}
+
+static bool WriteTypes(File* dex_file, dex_ir::Header& header, uint32_t& index_offset) {
+ uint32_t index = 0;
+ uint32_t index_buffer[1];
+ for (std::unique_ptr<dex_ir::TypeId>& type_id : header.GetCollections().TypeIds()) {
+ type_id->SetIndex(index);
+ index_buffer[0] = type_id->GetStringId()->GetOffset();
+
+ if (!dex_file->PwriteFully(index_buffer, 4, index_offset)) {
+ return false;
+ }
+
+ index++;
+ index_offset += 4;
+ }
+ return true;
+}
+
+static bool WriteTypeLists(File* dex_file, dex_ir::Header& header, uint32_t& data_offset) {
+ if (!Align(dex_file, data_offset)) {
+ return false;
+ }
+
+ return true;
+}
+
+static void OutputDexFile(dex_ir::Header& header, const char* file_name) {
+ LOG(INFO) << "FILE NAME: " << file_name;
+ std::unique_ptr<File> dex_file(OS::CreateEmptyFileWriteOnly(file_name));
+ if (dex_file == nullptr) {
+ fprintf(stderr, "Can't open %s\n", file_name);
+ return;
+ }
+
+ uint32_t index_offset = dex_ir::Header::ItemSize();
+ uint32_t data_offset = GetDataSectionOffset(header);
+ WriteStrings(dex_file.get(), header, index_offset, data_offset);
+ WriteTypes(dex_file.get(), header, index_offset);
+}
+*/
+
+/*
* Dumps the requested sections of the file.
*/
static void ProcessDexFile(const char* file_name, const DexFile* dex_file) {
@@ -1504,7 +1606,7 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file) {
// Iterate over all classes.
char* package = nullptr;
- const uint32_t class_defs_size = header->ClassDefsSize();
+ const uint32_t class_defs_size = header->GetCollections().ClassDefsSize();
for (uint32_t i = 0; i < class_defs_size; i++) {
DumpClass(dex_file, header.get(), i, &package);
} // for
@@ -1519,6 +1621,14 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file) {
if (options_.output_format_ == kOutputXml) {
fprintf(out_file_, "</api>\n");
}
+
+ /*
+ // Output dex file.
+ if (options_.output_dex_files_) {
+ std::string output_dex_filename = dex_file->GetLocation() + ".out";
+ OutputDexFile(*header, output_dex_filename.c_str());
+ }
+ */
}
/*
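
The commented-out writer above prefixes each string's data with its ULEB128-encoded length via UnsignedLeb128Size/EncodeUnsignedLeb128. As a self-contained reminder of that encoding (a sketch, not the ART helpers): each byte carries seven payload bits, least significant group first, and the high bit flags that another byte follows.

#include <cstdint>
#include <cstddef>

// Minimal ULEB128 encoder: 7 payload bits per byte, low bits first,
// continuation bit (0x80) set on every byte except the last.
// Returns the number of bytes written (e.g. 300 -> 0xAC 0x02).
static size_t EncodeUleb128(uint32_t value, uint8_t* out) {
  size_t count = 0;
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80;  // more bytes follow
    }
    out[count++] = byte;
  } while (value != 0);
  return count;
}

Note that the dex string_data_item records the length in UTF-16 code units; the strlen-based length in the WIP WriteStrings above only matches that for ASCII data.
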
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index bae587dfb2..736d230a99 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -41,6 +41,7 @@ struct Options {
bool disassemble_;
bool exports_only_;
bool ignore_bad_checksum_;
+ bool output_dex_files_;
bool show_annotations_;
bool show_cfg_;
bool show_file_headers_;
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 09fa0ef602..ec5edf4065 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -38,7 +38,7 @@ static const char* kProgramName = "dexlayout";
*/
static void Usage(void) {
fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
- fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]"
+ fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-w]"
" dexfile...\n\n", kProgramName);
fprintf(stderr, " -a : display annotations\n");
fprintf(stderr, " -b : build dex_ir\n");
@@ -51,6 +51,7 @@ static void Usage(void) {
fprintf(stderr, " -i : ignore checksum failures\n");
fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
fprintf(stderr, " -o : output file name (defaults to stdout)\n");
+ fprintf(stderr, " -w : output dex files\n");
}
/*
@@ -68,7 +69,7 @@ int DexlayoutDriver(int argc, char** argv) {
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "abcdefghil:o:");
+ const int ic = getopt(argc, argv, "abcdefghil:o:w");
if (ic < 0) {
break; // done
}
@@ -113,6 +114,9 @@ int DexlayoutDriver(int argc, char** argv) {
case 'o': // output file
options_.output_file_name_ = optarg;
break;
+ case 'w': // output dex files
+ options_.output_dex_files_ = true;
+ break;
default:
want_usage = true;
break;
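
The new -w switch is a plain boolean toggle in the getopt option string (no trailing colon, unlike l: and o:). A stripped-down sketch of that parsing pattern, using an illustrative subset of the options rather than dexlayout's full set:

#include <unistd.h>
#include <cstdio>

int main(int argc, char** argv) {
  bool output_dex_files = false;
  const char* output_file = nullptr;
  int opt;
  // "o:" takes an argument, "w" is a bare flag -- mirroring "abcdefghil:o:w" above.
  while ((opt = getopt(argc, argv, "o:w")) != -1) {
    switch (opt) {
      case 'o': output_file = optarg; break;
      case 'w': output_dex_files = true; break;
      default:  fprintf(stderr, "usage: %s [-o outfile] [-w] file...\n", argv[0]); return 2;
    }
  }
  printf("write dex: %d, outfile: %s\n", output_dex_files, output_file ? output_file : "stdout");
  return 0;
}
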
diff --git a/imgdiag/Android.bp b/imgdiag/Android.bp
index 7837d66913..eaeb78efa4 100644
--- a/imgdiag/Android.bp
+++ b/imgdiag/Android.bp
@@ -26,7 +26,10 @@ cc_defaults {
// that the image it's analyzing be the same ISA as the runtime ISA.
compile_multilib: "both",
- shared_libs: ["libbacktrace"],
+ shared_libs: [
+ "libbacktrace",
+ "libbase",
+ ],
target: {
android: {
shared_libs: ["libcutils"],
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index dd6331c24b..bbe6cc1ac0 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -34,6 +34,7 @@ art_cc_binary {
"libart",
"libart-compiler",
"libart-disassembler",
+ "libbase",
],
}
@@ -47,6 +48,7 @@ art_cc_binary {
"libartd",
"libartd-compiler",
"libartd-disassembler",
+ "libbase",
],
}
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
index 8d8d6d1977..a78f97d54c 100644
--- a/patchoat/Android.bp
+++ b/patchoat/Android.bp
@@ -24,6 +24,9 @@ cc_defaults {
compile_multilib: "prefer32",
},
},
+ shared_libs: [
+ "libbase",
+ ],
}
art_cc_binary {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 5240011901..1af3660e8f 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -37,8 +37,8 @@
#include "elf_file_impl.h"
#include "gc/space/image_space.h"
#include "image-inl.h"
-#include "mirror/abstract_method.h"
#include "mirror/dex_cache.h"
+#include "mirror/executable.h"
#include "mirror/object-inl.h"
#include "mirror/method.h"
#include "mirror/reference.h"
@@ -770,8 +770,8 @@ void PatchOat::VisitObject(mirror::Object* object) {
} else if (object->GetClass() == mirror::Method::StaticClass() ||
object->GetClass() == mirror::Constructor::StaticClass()) {
// Need to go update the ArtMethod.
- auto* dest = down_cast<mirror::AbstractMethod*>(copy);
- auto* src = down_cast<mirror::AbstractMethod*>(object);
+ auto* dest = down_cast<mirror::Executable*>(copy);
+ auto* src = down_cast<mirror::Executable*>(object);
dest->SetArtMethod(RelocatedAddressOfPointer(src->GetArtMethod()));
}
}
diff --git a/profman/Android.bp b/profman/Android.bp
index 322dda2211..2dcbaee456 100644
--- a/profman/Android.bp
+++ b/profman/Android.bp
@@ -32,6 +32,10 @@ cc_defaults {
include_dirs: [
"art/cmdline",
],
+
+ shared_libs: [
+ "libbase",
+ ],
}
art_cc_binary {
diff --git a/profman/profman.cc b/profman/profman.cc
index a5fefa71d4..7722e8041a 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -280,18 +280,11 @@ class ProfMan FINAL {
for (size_t i = 0; i < dex_locations_.size(); ++i) {
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files_for_location;
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(apks_fd_[i],
- dex_locations_[i].c_str(),
- &error_msg));
- if (zip_archive == nullptr) {
- LOG(WARNING) << "OpenFromFd failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
- }
- if (DexFile::OpenFromZip(*zip_archive,
- dex_locations_[i],
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (DexFile::OpenZip(apks_fd_[i],
+ dex_locations_[i],
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
LOG(WARNING) << "OpenFromZip failed for '" << dex_locations_[i] << "' " << error_msg;
continue;
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 8c17653c79..fd9b5b9d94 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -120,10 +120,10 @@ cc_defaults {
"linear_alloc.cc",
"mem_map.cc",
"memory_region.cc",
- "mirror/abstract_method.cc",
"mirror/array.cc",
"mirror/class.cc",
"mirror/dex_cache.cc",
+ "mirror/executable.cc",
"mirror/field.cc",
"mirror/method.cc",
"mirror/object.cc",
@@ -151,9 +151,9 @@ cc_defaults {
"native/java_lang_VMClassLoader.cc",
"native/java_lang_ref_FinalizerReference.cc",
"native/java_lang_ref_Reference.cc",
- "native/java_lang_reflect_AbstractMethod.cc",
"native/java_lang_reflect_Array.cc",
"native/java_lang_reflect_Constructor.cc",
+ "native/java_lang_reflect_Executable.cc",
"native/java_lang_reflect_Field.cc",
"native/java_lang_reflect_Method.cc",
"native/java_lang_reflect_Parameter.cc",
@@ -382,7 +382,8 @@ cc_defaults {
"libnativeloader",
"libbacktrace",
"liblz4",
- // For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
+ "liblog",
+ // For atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
"libcutils",
// For common macros.
"libbase",
@@ -469,6 +470,7 @@ art_cc_library {
srcs: ["common_runtime_test.cc"],
shared_libs: [
"libartd",
+ "libbase",
],
}
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 0b044802b4..5d53062902 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -272,6 +272,15 @@ ENTRY \c_name
END \c_name
.endm
+.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ SETUP_SAVE_EVERYTHING_FRAME r0 @ save all registers as basis for long jump context
+ mov r0, r9 @ pass Thread::Current
+ bl \cxx_name @ \cxx_name(Thread*)
+END \c_name
+.endm
+
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
@@ -281,10 +290,10 @@ ENTRY \c_name
END \c_name
.endm
-.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
+ SETUP_SAVE_EVERYTHING_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -361,7 +370,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo
/*
* Called by managed code to create and deliver a NullPointerException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
* Call installed by a signal handler to create and deliver a NullPointerException.
@@ -398,19 +407,19 @@ END art_quick_throw_null_pointer_exception_from_signal
/*
* Called by managed code to create and deliver an ArithmeticException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
* Called by managed code to create and deliver a StringIndexOutOfBoundsException
* as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode
/*
* Called by managed code to create and deliver a StackOverflowError.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index e9d03d7ceb..eee949da8a 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -434,6 +434,17 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x0, xSELF // pass Thread::Current
bl \cxx_name // \cxx_name(Thread*)
+ brk 0
+END \c_name
+.endm
+
+.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context
+ mov x0, xSELF // pass Thread::Current
+ bl \cxx_name // \cxx_name(Thread*)
+ brk 0
END \c_name
.endm
@@ -447,10 +458,10 @@ ENTRY \c_name
END \c_name
.endm
-.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
bl \cxx_name // \cxx_name(arg1, arg2, Thread*)
brk 0
@@ -466,7 +477,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo
/*
* Called by managed code to create and deliver a NullPointerException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
* Call installed by a signal handler to create and deliver a NullPointerException.
@@ -490,19 +501,19 @@ END art_quick_throw_null_pointer_exception_from_signal
/*
* Called by managed code to create and deliver an ArithmeticException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
* Called by managed code to create and deliver a StringIndexOutOfBoundsException
* as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode
/*
* Called by managed code to create and deliver a StackOverflowError.
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 7955b1d275..948b06ce61 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -26,22 +26,6 @@
// Register holding Thread::Current().
#define rSELF $s1
- // Declare a function called name, sets up $gp.
-.macro ENTRY name
- .type \name, %function
- .global \name
- // Cache alignment for function entry.
- .balign 16
-\name:
- .cfi_startproc
- // Ensure we get a sane starting CFA.
- .cfi_def_cfa $sp,0
- // Load $gp. We expect that ".set noreorder" is in effect.
- .cpload $t9
- // Declare a local convenience label to be branched to when $gp is already set up.
-.L\name\()_gp_set:
-.endm
-
// Declare a function called name, doesn't set up $gp.
.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset
.type \name, %function
@@ -59,6 +43,15 @@
ENTRY_NO_GP_CUSTOM_CFA \name, 0
.endm
+ // Declare a function called name, sets up $gp.
+.macro ENTRY name
+ ENTRY_NO_GP \name
+ // Load $gp. We expect that ".set noreorder" is in effect.
+ .cpload $t9
+ // Declare a local convenience label to be branched to when $gp is already set up.
+.L\name\()_gp_set:
+.endm
+
.macro END name
.cfi_endproc
.size \name, .-\name
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index b6a63ca4f7..1792f31578 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -90,7 +90,7 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void*
sc->sc_regs[mips::RA] = sc->sc_pc + 4; // RA needs to point to gc map location
sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
- sc->sc_regs[mips::T9] = sc->sc_pc; // make sure T9 points to the function
+ // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9.
VLOG(signals) << "Generating null pointer exception";
return true;
}
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 4563004e86..c3c188233b 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -710,8 +710,10 @@ END art_quick_deliver_exception
* Called by managed code to create and deliver a NullPointerException
*/
.extern artThrowNullPointerExceptionFromCode
-ENTRY art_quick_throw_null_pointer_exception
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_null_pointer_exception
+ // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
+ // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
+ SETUP_SAVE_EVERYTHING_FRAME
la $t9, artThrowNullPointerExceptionFromCode
jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -735,8 +737,8 @@ END art_quick_throw_null_pointer_exception_from_signal
* Called by managed code to create and deliver an ArithmeticException
*/
.extern artThrowDivZeroFromCode
-ENTRY art_quick_throw_div_zero
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_div_zero
+ SETUP_SAVE_EVERYTHING_FRAME
la $t9, artThrowDivZeroFromCode
jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -746,8 +748,10 @@ END art_quick_throw_div_zero
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
*/
.extern artThrowArrayBoundsFromCode
-ENTRY art_quick_throw_array_bounds
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_array_bounds
+ // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
+ // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
+ SETUP_SAVE_EVERYTHING_FRAME
la $t9, artThrowArrayBoundsFromCode
jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -758,8 +762,8 @@ END art_quick_throw_array_bounds
* as if thrown from a call to String.charAt().
*/
.extern artThrowStringBoundsFromCode
-ENTRY art_quick_throw_string_bounds
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_string_bounds
+ SETUP_SAVE_EVERYTHING_FRAME
la $t9, artThrowStringBoundsFromCode
jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1123,7 +1127,7 @@ END art_quick_handle_fill_data
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
- beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
+ beqz $a0, art_quick_throw_null_pointer_exception
nop
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
@@ -1133,7 +1137,7 @@ ENTRY art_quick_lock_object
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
- beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
+ beqz $a0, art_quick_throw_null_pointer_exception
nop
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
@@ -1147,7 +1151,7 @@ END art_quick_lock_object_no_inline
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
- beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
+ beqz $a0, art_quick_throw_null_pointer_exception
nop
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
@@ -1157,7 +1161,7 @@ ENTRY art_quick_unlock_object
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
- beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set
+ beqz $a0, art_quick_throw_null_pointer_exception
nop
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
@@ -1280,7 +1284,7 @@ END art_quick_check_cast
ENTRY art_quick_aput_obj_with_null_and_bound_check
bnez $a0, .Lart_quick_aput_obj_with_bound_check_gp_set
nop
- b .Lart_quick_throw_null_pointer_exception_gp_set
+ b art_quick_throw_null_pointer_exception
nop
END art_quick_aput_obj_with_null_and_bound_check
@@ -1290,7 +1294,7 @@ ENTRY art_quick_aput_obj_with_bound_check
bnez $t1, .Lart_quick_aput_obj_gp_set
nop
move $a0, $a1
- b .Lart_quick_throw_array_bounds_gp_set
+ b art_quick_throw_array_bounds
move $a1, $t0
END art_quick_aput_obj_with_bound_check
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index 6c58fcf969..35f20fbf44 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -27,24 +27,6 @@
#define rSELF $s1
- // Declare a function called name, sets up $gp.
- // This macro modifies t8.
-.macro ENTRY name
- .type \name, %function
- .global \name
- // Cache alignment for function entry.
- .balign 16
-\name:
- .cfi_startproc
- // Set up $gp and store the previous $gp value to $t8. It will be pushed to the
- // stack after the frame has been constructed.
- .cpsetup $t9, $t8, \name
- // Ensure we get a sane starting CFA.
- .cfi_def_cfa $sp,0
- // Declare a local convenience label to be branched to when $gp is already set up.
-.L\name\()_gp_set:
-.endm
-
// Declare a function called name, doesn't set up $gp.
.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset
.type \name, %function
@@ -62,6 +44,17 @@
ENTRY_NO_GP_CUSTOM_CFA \name, 0
.endm
+ // Declare a function called name, sets up $gp.
+ // This macro modifies t8.
+.macro ENTRY name
+ ENTRY_NO_GP \name
+ // Set up $gp and store the previous $gp value to $t8. It will be pushed to the
+ // stack after the frame has been constructed.
+ .cpsetup $t9, $t8, \name
+ // Declare a local convenience label to be branched to when $gp is already set up.
+.L\name\()_gp_set:
+.endm
+
.macro END name
.cfi_endproc
.size \name, .-\name
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index e52dc73070..709cab587c 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -91,7 +91,7 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void*
sc->sc_regs[mips64::RA] = sc->sc_pc + 4; // RA needs to point to gc map location
sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
- sc->sc_regs[mips64::T9] = sc->sc_pc; // make sure T9 points to the function
+ // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9.
VLOG(signals) << "Generating null pointer exception";
return true;
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index c16e85543d..8fc7bc31ce 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -817,9 +817,10 @@ END art_quick_deliver_exception
* Called by managed code to create and deliver a NullPointerException
*/
.extern artThrowNullPointerExceptionFromCode
-ENTRY art_quick_throw_null_pointer_exception
-.Lart_quick_throw_null_pointer_exception_gp_set:
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_null_pointer_exception
+ // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
+ // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
+ SETUP_SAVE_EVERYTHING_FRAME
dla $t9, artThrowNullPointerExceptionFromCode
jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -842,8 +843,8 @@ END art_quick_throw_null_pointer_exception
* Called by managed code to create and deliver an ArithmeticException
*/
.extern artThrowDivZeroFromCode
-ENTRY art_quick_throw_div_zero
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_div_zero
+ SETUP_SAVE_EVERYTHING_FRAME
dla $t9, artThrowDivZeroFromCode
jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
move $a0, rSELF # pass Thread::Current
@@ -854,9 +855,10 @@ END art_quick_throw_div_zero
* ArrayIndexOutOfBoundsException
*/
.extern artThrowArrayBoundsFromCode
-ENTRY art_quick_throw_array_bounds
-.Lart_quick_throw_array_bounds_gp_set:
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_array_bounds
+ // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
+ // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
+ SETUP_SAVE_EVERYTHING_FRAME
dla $t9, artThrowArrayBoundsFromCode
jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -867,9 +869,8 @@ END art_quick_throw_array_bounds
* as if thrown from a call to String.charAt().
*/
.extern artThrowStringBoundsFromCode
-ENTRY art_quick_throw_string_bounds
-.Lart_quick_throw_string_bounds_gp_set:
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ENTRY_NO_GP art_quick_throw_string_bounds
+ SETUP_SAVE_EVERYTHING_FRAME
dla $t9, artThrowStringBoundsFromCode
jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
move $a2, rSELF # pass Thread::Current
@@ -1210,18 +1211,20 @@ END art_quick_handle_fill_data
* Entry from managed code that calls artLockObjectFromCode, may block for GC.
*/
.extern artLockObjectFromCode
-ENTRY art_quick_lock_object
- beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ENTRY_NO_GP art_quick_lock_object
+ beq $a0, $zero, art_quick_throw_null_pointer_exception
nop
+ .cpsetup $t9, $t8, art_quick_lock_object
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
jal artLockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
END art_quick_lock_object
-ENTRY art_quick_lock_object_no_inline
- beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ENTRY_NO_GP art_quick_lock_object_no_inline
+ beq $a0, $zero, art_quick_throw_null_pointer_exception
nop
+ .cpsetup $t9, $t8, art_quick_lock_object_no_inline
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
jal artLockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -1232,18 +1235,20 @@ END art_quick_lock_object_no_inline
* Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
*/
.extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object
- beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ENTRY_NO_GP art_quick_unlock_object
+ beq $a0, $zero, art_quick_throw_null_pointer_exception
nop
+ .cpsetup $t9, $t8, art_quick_unlock_object
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
jal artUnlockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
END art_quick_unlock_object
-ENTRY art_quick_unlock_object_no_inline
- beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ENTRY_NO_GP art_quick_unlock_object_no_inline
+ beq $a0, $zero, art_quick_throw_null_pointer_exception
nop
+ .cpsetup $t9, $t8, art_quick_unlock_object_no_inline
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
jal artUnlockObjectFromCode # (Object* obj, Thread*)
move $a1, rSELF # pass Thread::Current
@@ -1362,7 +1367,7 @@ END art_quick_check_cast
ENTRY art_quick_aput_obj_with_null_and_bound_check
bne $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set
nop
- b .Lart_quick_throw_null_pointer_exception_gp_set
+ b art_quick_throw_null_pointer_exception
nop
END art_quick_aput_obj_with_null_and_bound_check
@@ -1372,7 +1377,7 @@ ENTRY art_quick_aput_obj_with_bound_check
bne $t1, $zero, .Lart_quick_aput_obj_gp_set
nop
move $a0, $a1
- b .Lart_quick_throw_array_bounds_gp_set
+ b art_quick_throw_array_bounds
move $a1, $t0
END art_quick_aput_obj_with_bound_check
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 507dbf0130..3424e3c0d2 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -22,6 +22,7 @@
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "imt_conflict_table.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h"
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index f3793e15a7..879d49644b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -327,6 +327,19 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
END_FUNCTION VAR(c_name)
END_MACRO
+MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save all registers as basis for long jump context
+ // Outgoing argument set up
+ subl MACRO_LITERAL(12), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(12)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
+ UNREACHABLE
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
@@ -341,9 +354,9 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
END_FUNCTION VAR(c_name)
END_MACRO
-MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
+ SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -358,7 +371,7 @@ END_MACRO
/*
* Called by managed code to create and deliver a NullPointerException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
* Call installed by a signal handler to create and deliver a NullPointerException.
@@ -384,7 +397,7 @@ END_FUNCTION art_quick_throw_null_pointer_exception
/*
* Called by managed code to create and deliver an ArithmeticException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
* Called by managed code to create and deliver a StackOverflowError.
@@ -401,13 +414,13 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
* Called by managed code to create and deliver a StringIndexOutOfBoundsException
* as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index bfba543d93..a11e4021b4 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -395,6 +395,16 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
END_FUNCTION VAR(c_name)
END_MACRO
+MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context
+ // Outgoing argument set up
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
+ UNREACHABLE
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
@@ -405,9 +415,9 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
END_FUNCTION VAR(c_name)
END_MACRO
-MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
+MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
+ SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(Thread*)
@@ -418,7 +428,7 @@ END_MACRO
/*
* Called by managed code to create and deliver a NullPointerException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
* Call installed by a signal handler to create and deliver a NullPointerException.
@@ -440,7 +450,7 @@ END_FUNCTION art_quick_throw_null_pointer_exception_from_signal
/*
* Called by managed code to create and deliver an ArithmeticException.
*/
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode
/*
* Called by managed code to create and deliver a StackOverflowError.
@@ -457,13 +467,13 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
* Called by managed code to create and deliver a StringIndexOutOfBoundsException
* as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
*/
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 9b4b38aa0f..73cce5ea1a 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -123,10 +123,6 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
return dex_method_index_;
}
-inline uint32_t ArtMethod::GetImtIndex() {
- return GetDexMethodIndex() % ImTable::kSize;
-}
-
inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(PointerSize pointer_size) {
return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
pointer_size);
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index fd6c37a4e5..193bea167f 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -34,8 +34,8 @@
#include "jit/jit_code_cache.h"
#include "jit/profiling_info.h"
#include "jni_internal.h"
-#include "mirror/abstract_method.h"
#include "mirror/class-inl.h"
+#include "mirror/executable.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
@@ -52,9 +52,9 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Th
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
- auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(jlr_method);
- DCHECK(abstract_method != nullptr);
- return abstract_method->GetArtMethod();
+ auto* executable = soa.Decode<mirror::Executable*>(jlr_method);
+ DCHECK(executable != nullptr);
+ return executable->GetArtMethod();
}
mirror::String* ArtMethod::GetNameAsString(Thread* self) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index b1baccded9..3d2db690a7 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -34,6 +34,7 @@
namespace art {
template<class T> class Handle;
+class ImtConflictTable;
union JValue;
class OatQuickMethodHeader;
class ProfilingInfo;
@@ -48,175 +49,6 @@ class IfTable;
class PointerArray;
} // namespace mirror
-// Table to resolve IMT conflicts at runtime. The table is attached to
-// the jni entrypoint of IMT conflict ArtMethods.
-// The table contains a list of pairs of { interface_method, implementation_method }
-// with the last entry being null to make an assembly implementation of a lookup
-// faster.
-class ImtConflictTable {
- enum MethodIndex {
- kMethodInterface,
- kMethodImplementation,
- kMethodCount, // Number of elements in enum.
- };
-
- public:
- // Build a new table copying `other` and adding the new entry formed of
- // the pair { `interface_method`, `implementation_method` }
- ImtConflictTable(ImtConflictTable* other,
- ArtMethod* interface_method,
- ArtMethod* implementation_method,
- PointerSize pointer_size) {
- const size_t count = other->NumEntries(pointer_size);
- for (size_t i = 0; i < count; ++i) {
- SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
- SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size));
- }
- SetInterfaceMethod(count, pointer_size, interface_method);
- SetImplementationMethod(count, pointer_size, implementation_method);
- // Add the null marker.
- SetInterfaceMethod(count + 1, pointer_size, nullptr);
- SetImplementationMethod(count + 1, pointer_size, nullptr);
- }
-
- // num_entries excludes the header.
- ImtConflictTable(size_t num_entries, PointerSize pointer_size) {
- SetInterfaceMethod(num_entries, pointer_size, nullptr);
- SetImplementationMethod(num_entries, pointer_size, nullptr);
- }
-
- // Set an entry at an index.
- void SetInterfaceMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
- SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
- }
-
- void SetImplementationMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
- SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
- }
-
- ArtMethod* GetInterfaceMethod(size_t index, PointerSize pointer_size) const {
- return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
- }
-
- ArtMethod* GetImplementationMethod(size_t index, PointerSize pointer_size) const {
- return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
- }
-
- // Return true if two conflict tables are the same.
- bool Equals(ImtConflictTable* other, PointerSize pointer_size) const {
- size_t num = NumEntries(pointer_size);
- if (num != other->NumEntries(pointer_size)) {
- return false;
- }
- for (size_t i = 0; i < num; ++i) {
- if (GetInterfaceMethod(i, pointer_size) != other->GetInterfaceMethod(i, pointer_size) ||
- GetImplementationMethod(i, pointer_size) !=
- other->GetImplementationMethod(i, pointer_size)) {
- return false;
- }
- }
- return true;
- }
-
- // Visit all of the entries.
- // NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod*
- // and also returns one. The order is <interface, implementation>.
- template<typename Visitor>
- void Visit(const Visitor& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS {
- uint32_t table_index = 0;
- for (;;) {
- ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
- if (interface_method == nullptr) {
- break;
- }
- ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size);
- auto input = std::make_pair(interface_method, implementation_method);
- std::pair<ArtMethod*, ArtMethod*> updated = visitor(input);
- if (input.first != updated.first) {
- SetInterfaceMethod(table_index, pointer_size, updated.first);
- }
- if (input.second != updated.second) {
- SetImplementationMethod(table_index, pointer_size, updated.second);
- }
- ++table_index;
- }
- }
-
- // Lookup the implementation ArtMethod associated to `interface_method`. Return null
- // if not found.
- ArtMethod* Lookup(ArtMethod* interface_method, PointerSize pointer_size) const {
- uint32_t table_index = 0;
- for (;;) {
- ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
- if (current_interface_method == nullptr) {
- break;
- }
- if (current_interface_method == interface_method) {
- return GetImplementationMethod(table_index, pointer_size);
- }
- ++table_index;
- }
- return nullptr;
- }
-
- // Compute the number of entries in this table.
- size_t NumEntries(PointerSize pointer_size) const {
- uint32_t table_index = 0;
- while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
- ++table_index;
- }
- return table_index;
- }
-
- // Compute the size in bytes taken by this table.
- size_t ComputeSize(PointerSize pointer_size) const {
- // Add the end marker.
- return ComputeSize(NumEntries(pointer_size), pointer_size);
- }
-
- // Compute the size in bytes needed for copying the given `table` and add
- // one more entry.
- static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, PointerSize pointer_size) {
- return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
- }
-
- // Compute size with a fixed number of entries.
- static size_t ComputeSize(size_t num_entries, PointerSize pointer_size) {
- return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator.
- }
-
- static size_t EntrySize(PointerSize pointer_size) {
- return static_cast<size_t>(pointer_size) * static_cast<size_t>(kMethodCount);
- }
-
- private:
- ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
- if (pointer_size == PointerSize::k64) {
- return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
- } else {
- return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
- }
- }
-
- void SetMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
- if (pointer_size == PointerSize::k64) {
- data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
- } else {
- data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
- }
- }
-
- // Array of entries that the assembly stubs will iterate over. Note that this is
- // not fixed size, and we allocate data prior to calling the constructor
- // of ImtConflictTable.
- union {
- uint32_t data32_[0];
- uint64_t data64_[0];
- };
-
- DISALLOW_COPY_AND_ASSIGN(ImtConflictTable);
-};
-
class ArtMethod FINAL {
public:
ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
@@ -428,8 +260,6 @@ class ArtMethod FINAL {
ALWAYS_INLINE uint32_t GetDexMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
-
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
dex_method_index_ = new_idx;
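
The conflict table and GetImtIndex removed above are relocated rather than deleted: the table moves into its own header (imt_conflict_table.h) and the slot computation becomes ImTable::GetImtIndex, both of which the class_linker.cc hunk further down starts to include and call. A rough sketch of the resolution path the table serves, assuming the usual mirror::Class::GetImt and ArtMethod::GetImtConflictTable accessors (the function name and structure here are illustrative, and the real conflict stubs do this walk in assembly):

    ArtMethod* ResolveInterfaceCall(mirror::Class* klass,
                                    ArtMethod* interface_method,
                                    PointerSize pointer_size)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      // Hash the interface method into its IMT slot (formerly ArtMethod::GetImtIndex()).
      uint32_t slot = ImTable::GetImtIndex(interface_method);
      ArtMethod* entry = klass->GetImt(pointer_size)->Get(slot, pointer_size);
      if (!entry->IsRuntimeMethod()) {
        return entry;  // Unique implementation in this slot, no conflict table needed.
      }
      // Conflict slot: walk the null-terminated {interface, implementation} pairs.
      ImtConflictTable* table = entry->GetImtConflictTable(pointer_size);
      return table->Lookup(interface_method, pointer_size);
    }
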
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index d2f0fdbd9c..f0811b020b 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -21,13 +21,8 @@
#include <limits>
#include <type_traits>
-// This header is used in the disassembler with libbase's logging. Only include ART logging
-// when no other logging macros are available. b/15436106, b/31338270
-#ifndef CHECK
-#include "base/logging.h"
-#endif
-
#include "base/iteration_range.h"
+#include "base/logging.h"
#include "base/stl_util.h"
namespace art {
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index e00e62d368..17873b541c 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -28,6 +28,7 @@
// Headers for LogMessage::LogLine.
#ifdef ART_TARGET_ANDROID
#include "cutils/log.h"
+#include <android/set_abort_message.h>
#else
#include <sys/types.h>
#include <unistd.h>
@@ -39,24 +40,10 @@ LogVerbosity gLogVerbosity;
unsigned int gAborting = 0;
-static LogSeverity gMinimumLogSeverity = INFO;
static std::unique_ptr<std::string> gCmdLine;
static std::unique_ptr<std::string> gProgramInvocationName;
static std::unique_ptr<std::string> gProgramInvocationShortName;
-// Print INTERNAL_FATAL messages directly instead of at destruction time. This only works on the
-// host right now: for the device, a stream buf collating output into lines and calling LogLine or
-// lower-level logging is necessary.
-#ifdef ART_TARGET_ANDROID
-static constexpr bool kPrintInternalFatalDirectly = false;
-#else
-static constexpr bool kPrintInternalFatalDirectly = !kIsTargetBuild;
-#endif
-
-static bool PrintDirectly(LogSeverity severity) {
- return kPrintInternalFatalDirectly && severity == INTERNAL_FATAL;
-}
-
const char* GetCmdLine() {
return (gCmdLine.get() != nullptr) ? gCmdLine->c_str() : nullptr;
}
@@ -70,6 +57,16 @@ const char* ProgramInvocationShortName() {
: "art";
}
+NO_RETURN
+static void RuntimeAborter(const char* abort_message) {
+#ifdef __ANDROID__
+ android_set_abort_message(abort_message);
+#else
+ UNUSED(abort_message);
+#endif
+ Runtime::Abort();
+}
+
void InitLogging(char* argv[]) {
if (gCmdLine.get() != nullptr) {
return;
@@ -94,150 +91,14 @@ void InitLogging(char* argv[]) {
// TODO: fall back to /proc/self/cmdline when argv is null on Linux.
gCmdLine.reset(new std::string("<unset>"));
}
- const char* tags = getenv("ANDROID_LOG_TAGS");
- if (tags == nullptr) {
- return;
- }
-
- std::vector<std::string> specs;
- Split(tags, ' ', &specs);
- for (size_t i = 0; i < specs.size(); ++i) {
- // "tag-pattern:[vdiwefs]"
- std::string spec(specs[i]);
- if (spec.size() == 3 && StartsWith(spec, "*:")) {
- switch (spec[2]) {
- case 'v':
- gMinimumLogSeverity = VERBOSE;
- continue;
- case 'd':
- gMinimumLogSeverity = DEBUG;
- continue;
- case 'i':
- gMinimumLogSeverity = INFO;
- continue;
- case 'w':
- gMinimumLogSeverity = WARNING;
- continue;
- case 'e':
- gMinimumLogSeverity = ERROR;
- continue;
- case 'f':
- gMinimumLogSeverity = FATAL;
- continue;
- // liblog will even suppress FATAL if you say 's' for silent, but that's crazy!
- case 's':
- gMinimumLogSeverity = FATAL;
- continue;
- }
- }
- LOG(FATAL) << "unsupported '" << spec << "' in ANDROID_LOG_TAGS (" << tags << ")";
- }
-}
-
-// This indirection greatly reduces the stack impact of having
-// lots of checks/logging in a function.
-class LogMessageData {
- public:
- LogMessageData(const char* file, unsigned int line, LogSeverity severity, int error)
- : file_(GetFilenameBase(file)),
- line_number_(line),
- severity_(severity),
- error_(error) {}
- const char * GetFile() const {
- return file_;
- }
-
- unsigned int GetLineNumber() const {
- return line_number_;
- }
-
- LogSeverity GetSeverity() const {
- return severity_;
- }
-
- int GetError() const {
- return error_;
- }
-
- std::ostream& GetBuffer() {
- return buffer_;
- }
-
- std::string ToString() const {
- return buffer_.str();
- }
-
- private:
- std::ostringstream buffer_;
- const char* const file_;
- const unsigned int line_number_;
- const LogSeverity severity_;
- const int error_;
-
- static const char* GetFilenameBase(const char* file) {
- const char* last_slash = strrchr(file, '/');
- return (last_slash == nullptr) ? file : last_slash + 1;
- }
-
- DISALLOW_COPY_AND_ASSIGN(LogMessageData);
-};
-
-
-LogMessage::LogMessage(const char* file, unsigned int line, LogSeverity severity, int error)
- : data_(new LogMessageData(file, line, severity, error)) {
- if (PrintDirectly(severity)) {
- static constexpr char kLogCharacters[] = { 'V', 'D', 'I', 'W', 'E', 'F', 'F' };
- static_assert(arraysize(kLogCharacters) == static_cast<size_t>(INTERNAL_FATAL) + 1,
- "Wrong character array size");
- stream() << ProgramInvocationShortName() << " " << kLogCharacters[static_cast<size_t>(severity)]
- << " " << getpid() << " " << ::art::GetTid() << " " << file << ":" << line << "]";
- }
-}
-LogMessage::~LogMessage() {
- if (PrintDirectly(data_->GetSeverity())) {
- // Add newline at the end to match the not printing directly behavior.
- std::cerr << '\n';
- } else {
- if (data_->GetSeverity() < gMinimumLogSeverity) {
- return; // No need to format something we're not going to output.
- }
-
- // Finish constructing the message.
- if (data_->GetError() != -1) {
- data_->GetBuffer() << ": " << strerror(data_->GetError());
- }
- std::string msg(data_->ToString());
-
- // Do the actual logging with the lock held.
- {
- MutexLock mu(Thread::Current(), *Locks::logging_lock_);
- if (msg.find('\n') == std::string::npos) {
- LogLine(data_->GetFile(), data_->GetLineNumber(), data_->GetSeverity(), msg.c_str());
- } else {
- msg += '\n';
- size_t i = 0;
- while (i < msg.size()) {
- size_t nl = msg.find('\n', i);
- msg[nl] = '\0';
- LogLine(data_->GetFile(), data_->GetLineNumber(), data_->GetSeverity(), &msg[i]);
- i = nl + 1;
- }
- }
- }
- }
-
- // Abort if necessary.
- if (data_->GetSeverity() == FATAL) {
- Runtime::Abort();
- }
-}
-
-std::ostream& LogMessage::stream() {
- if (PrintDirectly(data_->GetSeverity())) {
- return std::cerr;
- }
- return data_->GetBuffer();
+#ifdef __ANDROID__
+#define INIT_LOGGING_DEFAULT_LOGGER android::base::LogdLogger()
+#else
+#define INIT_LOGGING_DEFAULT_LOGGER android::base::StderrLogger
+#endif
+ android::base::InitLogging(argv, INIT_LOGGING_DEFAULT_LOGGER, RuntimeAborter);
+#undef INIT_LOGGING_DEFAULT_LOGGER
}
#ifdef ART_TARGET_ANDROID
@@ -245,31 +106,14 @@ static const android_LogPriority kLogSeverityToAndroidLogPriority[] = {
ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN,
ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL
};
-static_assert(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1,
+static_assert(arraysize(kLogSeverityToAndroidLogPriority) == ::android::base::FATAL + 1,
"Mismatch in size of kLogSeverityToAndroidLogPriority and values in LogSeverity");
#endif
-void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity,
- const char* message) {
-#ifdef ART_TARGET_ANDROID
- const char* tag = ProgramInvocationShortName();
- int priority = kLogSeverityToAndroidLogPriority[static_cast<size_t>(log_severity)];
- if (priority == ANDROID_LOG_FATAL) {
- LOG_PRI(priority, tag, "%s:%u] %s", file, line, message);
- } else {
- LOG_PRI(priority, tag, "%s", message);
- }
-#else
- static const char* log_characters = "VDIWEFF";
- CHECK_EQ(strlen(log_characters), INTERNAL_FATAL + 1U);
- char severity = log_characters[log_severity];
- fprintf(stderr, "%s %c %5d %5d %s:%u] %s\n",
- ProgramInvocationShortName(), severity, getpid(), ::art::GetTid(), file, line, message);
-#endif
-}
-
-void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity,
- const char* message) {
+void LogHelper::LogLineLowStack(const char* file,
+ unsigned int line,
+ LogSeverity log_severity,
+ const char* message) {
#ifdef ART_TARGET_ANDROID
// Use android_writeLog() to avoid stack-based buffers used by android_printLog().
const char* tag = ProgramInvocationShortName();
@@ -292,8 +136,9 @@ void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverit
}
#else
static constexpr char kLogCharacters[] = { 'V', 'D', 'I', 'W', 'E', 'F', 'F' };
- static_assert(arraysize(kLogCharacters) == static_cast<size_t>(INTERNAL_FATAL) + 1,
- "Wrong character array size");
+ static_assert(
+ arraysize(kLogCharacters) == static_cast<size_t>(::android::base::FATAL) + 1,
+ "Wrong character array size");
const char* program_name = ProgramInvocationShortName();
TEMP_FAILURE_RETRY(write(STDERR_FILENO, program_name, strlen(program_name)));
@@ -310,13 +155,4 @@ void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverit
#endif // ART_TARGET_ANDROID
}
-ScopedLogSeverity::ScopedLogSeverity(LogSeverity level) {
- old_ = gMinimumLogSeverity;
- gMinimumLogSeverity = level;
-}
-
-ScopedLogSeverity::~ScopedLogSeverity() {
- gMinimumLogSeverity = old_;
-}
-
} // namespace art
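
With the hand-rolled LogMessage machinery gone, InitLogging keeps only the command-line capture for GetCmdLine() and then hands argv, a default logger (LogdLogger on device, StderrLogger on host) and RuntimeAborter to android::base::InitLogging. A minimal host-side sketch of what callers see after this change, assuming libbase's standard macros:

    #include "base/logging.h"  // pulls in android-base/logging.h and declares art::InitLogging

    int main(int /* argc */, char** argv) {
      art::InitLogging(argv);
      LOG(WARNING) << "printed by StderrLogger on host, LogdLogger on device";
      // A failed CHECK or LOG(FATAL) now aborts through RuntimeAborter, which records the
      // message via android_set_abort_message() on device before calling Runtime::Abort().
      CHECK(argv != nullptr) << "argv must not be null";
      return 0;
    }
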
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 185aa0e577..5f84204a2c 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -18,20 +18,16 @@
#define ART_RUNTIME_BASE_LOGGING_H_
#include <ostream>
+#include <sstream>
+#include "android-base/logging.h"
#include "base/macros.h"
namespace art {
-enum LogSeverity {
- VERBOSE,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
- FATAL,
- INTERNAL_FATAL, // For Runtime::Abort.
-};
+// Make libbase's LogSeverity more easily available.
+using ::android::base::LogSeverity;
+using ::android::base::ScopedLogSeverity;
// The members of this struct are the valid arguments to VLOG and VLOG_IS_ON in code,
// and the "-verbose:" command line argument.
@@ -89,172 +85,28 @@ extern const char* ProgramInvocationName();
// hasn't been performed then just returns "art"
extern const char* ProgramInvocationShortName();
-// Logs a message to logcat on Android otherwise to stderr. If the severity is FATAL it also causes
-// an abort. For example: LOG(FATAL) << "We didn't expect to reach here";
-#define LOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, -1).stream()
-
-// A variant of LOG that also logs the current errno value. To be used when library calls fail.
-#define PLOG(severity) ::art::LogMessage(__FILE__, __LINE__, severity, errno).stream()
+class LogHelper {
+ public:
+ // A logging helper for logging a single line. Can be used with little stack.
+ static void LogLineLowStack(const char* file,
+ unsigned int line,
+ android::base::LogSeverity severity,
+ const char* msg);
-// Marker that code is yet to be implemented.
-#define UNIMPLEMENTED(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
+ private:
+ DISALLOW_ALLOCATION();
+ DISALLOW_COPY_AND_ASSIGN(LogHelper);
+};
// Is verbose logging enabled for the given module? Where the module is defined in LogVerbosity.
#define VLOG_IS_ON(module) UNLIKELY(::art::gLogVerbosity.module)
// Variant of LOG that logs when verbose logging is enabled for a module. For example,
// VLOG(jni) << "A JNI operation was performed";
-#define VLOG(module) \
- if (VLOG_IS_ON(module)) \
- ::art::LogMessage(__FILE__, __LINE__, INFO, -1).stream()
+#define VLOG(module) if (VLOG_IS_ON(module)) LOG(INFO)
// Return the stream associated with logging for the given module.
-#define VLOG_STREAM(module) ::art::LogMessage(__FILE__, __LINE__, INFO, -1).stream()
-
-// Check whether condition x holds and LOG(FATAL) if not. The value of the expression x is only
-// evaluated once. Extra logging can be appended using << after. For example,
-// CHECK(false == true) results in a log message of "Check failed: false == true".
-#define CHECK(x) \
- if (UNLIKELY(!(x))) \
- ::art::LogMessage(__FILE__, __LINE__, ::art::FATAL, -1).stream() \
- << "Check failed: " #x << " "
-
-// Helper for CHECK_xx(x,y) macros.
-#define CHECK_OP(LHS, RHS, OP) \
- for (auto _values = ::art::MakeEagerEvaluator(LHS, RHS); \
- UNLIKELY(!(_values.lhs OP _values.rhs)); /* empty */) \
- ::art::LogMessage(__FILE__, __LINE__, ::art::FATAL, -1).stream() \
- << "Check failed: " << #LHS << " " << #OP << " " << #RHS \
- << " (" #LHS "=" << _values.lhs << ", " #RHS "=" << _values.rhs << ") "
-
-
-// Check whether a condition holds between x and y, LOG(FATAL) if not. The value of the expressions
-// x and y is evaluated once. Extra logging can be appended using << after. For example,
-// CHECK_NE(0 == 1, false) results in "Check failed: false != false (0==1=false, false=false) ".
-#define CHECK_EQ(x, y) CHECK_OP(x, y, ==)
-#define CHECK_NE(x, y) CHECK_OP(x, y, !=)
-#define CHECK_LE(x, y) CHECK_OP(x, y, <=)
-#define CHECK_LT(x, y) CHECK_OP(x, y, <)
-#define CHECK_GE(x, y) CHECK_OP(x, y, >=)
-#define CHECK_GT(x, y) CHECK_OP(x, y, >)
-
-// Helper for CHECK_STRxx(s1,s2) macros.
-#define CHECK_STROP(s1, s2, sense) \
- if (UNLIKELY((strcmp(s1, s2) == 0) != (sense))) \
- LOG(::art::FATAL) << "Check failed: " \
- << "\"" << (s1) << "\"" \
- << ((sense) ? " == " : " != ") \
- << "\"" << (s2) << "\""
-
-// Check for string (const char*) equality between s1 and s2, LOG(FATAL) if not.
-#define CHECK_STREQ(s1, s2) CHECK_STROP(s1, s2, true)
-#define CHECK_STRNE(s1, s2) CHECK_STROP(s1, s2, false)
-
-// Perform the pthread function call(args), LOG(FATAL) on error.
-#define CHECK_PTHREAD_CALL(call, args, what) \
- do { \
- int rc = call args; \
- if (rc != 0) { \
- errno = rc; \
- PLOG(::art::FATAL) << # call << " failed for " << (what); \
- } \
- } while (false)
-
-
-// DCHECKs are debug variants of CHECKs only enabled in debug builds. Generally CHECK should be
-// used unless profiling identifies a CHECK as being in performance critical code.
-#if defined(NDEBUG)
-static constexpr bool kEnableDChecks = false;
-#else
-static constexpr bool kEnableDChecks = true;
-#endif
-
-#define DCHECK(x) if (::art::kEnableDChecks) CHECK(x)
-#define DCHECK_EQ(x, y) if (::art::kEnableDChecks) CHECK_EQ(x, y)
-#define DCHECK_NE(x, y) if (::art::kEnableDChecks) CHECK_NE(x, y)
-#define DCHECK_LE(x, y) if (::art::kEnableDChecks) CHECK_LE(x, y)
-#define DCHECK_LT(x, y) if (::art::kEnableDChecks) CHECK_LT(x, y)
-#define DCHECK_GE(x, y) if (::art::kEnableDChecks) CHECK_GE(x, y)
-#define DCHECK_GT(x, y) if (::art::kEnableDChecks) CHECK_GT(x, y)
-#define DCHECK_STREQ(s1, s2) if (::art::kEnableDChecks) CHECK_STREQ(s1, s2)
-#define DCHECK_STRNE(s1, s2) if (::art::kEnableDChecks) CHECK_STRNE(s1, s2)
-
-// Temporary class created to evaluate the LHS and RHS, used with MakeEagerEvaluator to infer the
-// types of LHS and RHS.
-template <typename LHS, typename RHS>
-struct EagerEvaluator {
- constexpr EagerEvaluator(LHS l, RHS r) : lhs(l), rhs(r) { }
- LHS lhs;
- RHS rhs;
-};
-
-// Helper function for CHECK_xx.
-template <typename LHS, typename RHS>
-constexpr EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
- return EagerEvaluator<LHS, RHS>(lhs, rhs);
-}
-
-// Explicitly instantiate EagerEvalue for pointers so that char*s aren't treated as strings. To
-// compare strings use CHECK_STREQ and CHECK_STRNE. We rely on signed/unsigned warnings to
-// protect you against combinations not explicitly listed below.
-#define EAGER_PTR_EVALUATOR(T1, T2) \
- template <> struct EagerEvaluator<T1, T2> { \
- EagerEvaluator(T1 l, T2 r) \
- : lhs(reinterpret_cast<const void*>(l)), \
- rhs(reinterpret_cast<const void*>(r)) { } \
- const void* lhs; \
- const void* rhs; \
- }
-EAGER_PTR_EVALUATOR(const char*, const char*);
-EAGER_PTR_EVALUATOR(const char*, char*);
-EAGER_PTR_EVALUATOR(char*, const char*);
-EAGER_PTR_EVALUATOR(char*, char*);
-EAGER_PTR_EVALUATOR(const unsigned char*, const unsigned char*);
-EAGER_PTR_EVALUATOR(const unsigned char*, unsigned char*);
-EAGER_PTR_EVALUATOR(unsigned char*, const unsigned char*);
-EAGER_PTR_EVALUATOR(unsigned char*, unsigned char*);
-EAGER_PTR_EVALUATOR(const signed char*, const signed char*);
-EAGER_PTR_EVALUATOR(const signed char*, signed char*);
-EAGER_PTR_EVALUATOR(signed char*, const signed char*);
-EAGER_PTR_EVALUATOR(signed char*, signed char*);
-
-// Data for the log message, not stored in LogMessage to avoid increasing the stack size.
-class LogMessageData;
-
-// A LogMessage is a temporarily scoped object used by LOG and the unlikely part of a CHECK. The
-// destructor will abort if the severity is FATAL.
-class LogMessage {
- public:
- LogMessage(const char* file, unsigned int line, LogSeverity severity, int error);
-
- ~LogMessage(); // TODO: enable REQUIRES(!Locks::logging_lock_).
-
- // Returns the stream associated with the message, the LogMessage performs output when it goes
- // out of scope.
- std::ostream& stream();
-
- // The routine that performs the actual logging.
- static void LogLine(const char* file, unsigned int line, LogSeverity severity, const char* msg);
-
- // A variant of the above for use with little stack.
- static void LogLineLowStack(const char* file, unsigned int line, LogSeverity severity,
- const char* msg);
-
- private:
- const std::unique_ptr<LogMessageData> data_;
-
- DISALLOW_COPY_AND_ASSIGN(LogMessage);
-};
-
-// Allows to temporarily change the minimum severity level for logging.
-class ScopedLogSeverity {
- public:
- explicit ScopedLogSeverity(LogSeverity level);
- ~ScopedLogSeverity();
-
- private:
- LogSeverity old_;
-};
+#define VLOG_STREAM(module) LOG_STREAM(INFO)
} // namespace art
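
logging.h now re-exports libbase's LogSeverity and ScopedLogSeverity and keeps only the ART-specific pieces: the verbosity flags, VLOG, and the low-stack LogHelper. The rewritten macros simply wrap libbase, so a call such as

    VLOG(jni) << "RegisterNatives on " << class_name;

now expands (roughly) to

    if (VLOG_IS_ON(jni)) LOG(INFO) << "RegisterNatives on " << class_name;

with LOG(INFO) being libbase's macro rather than art::LogMessage, and VLOG_STREAM(jni) yielding LOG_STREAM(INFO) for call sites that need the bare std::ostream.
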
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 43c38c4363..9d56954445 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -328,14 +328,20 @@ Mutex::~Mutex() {
bool shutting_down = IsShuttingDown();
#if ART_USE_FUTEXES
if (state_.LoadRelaxed() != 0) {
- LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
+ LOG(shutting_down
+ ? ::android::base::WARNING
+ : ::android::base::FATAL) << "destroying mutex with owner: " << exclusive_owner_;
} else {
if (exclusive_owner_ != 0) {
- LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found an owner on unlocked mutex "
+ LOG(shutting_down
+ ? ::android::base::WARNING
+ : ::android::base::FATAL) << "unexpectedly found an owner on unlocked mutex "
<< name_;
}
if (num_contenders_.LoadSequentiallyConsistent() != 0) {
- LOG(shutting_down ? WARNING : FATAL) << "unexpectedly found a contender on mutex " << name_;
+ LOG(shutting_down
+ ? ::android::base::WARNING
+ : ::android::base::FATAL) << "unexpectedly found a contender on mutex " << name_;
}
}
#else
@@ -346,7 +352,9 @@ Mutex::~Mutex() {
errno = rc;
// TODO: should we just not log at all if shutting down? this could be the logging mutex!
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
- PLOG(shutting_down ? WARNING : FATAL) << "pthread_mutex_destroy failed for " << name_;
+ PLOG(shutting_down
+ ? ::android::base::WARNING
+ : ::android::base::FATAL) << "pthread_mutex_destroy failed for " << name_;
}
#endif
}
@@ -480,9 +488,11 @@ void Mutex::ExclusiveUnlock(Thread* self) {
if (this != Locks::logging_lock_) {
LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_;
} else {
- LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
- StringPrintf("Unexpected state_ %d in unlock for %s",
- cur_state, name_).c_str());
+ LogHelper::LogLineLowStack(__FILE__,
+ __LINE__,
+ ::android::base::FATAL_WITHOUT_ABORT,
+ StringPrintf("Unexpected state_ %d in unlock for %s",
+ cur_state, name_).c_str());
_exit(1);
}
}
@@ -762,7 +772,10 @@ ConditionVariable::~ConditionVariable() {
if (num_waiters_!= 0) {
Runtime* runtime = Runtime::Current();
bool shutting_down = runtime == nullptr || runtime->IsShuttingDown(Thread::Current());
- LOG(shutting_down ? WARNING : FATAL) << "ConditionVariable::~ConditionVariable for " << name_
+ LOG(shutting_down
+ ? ::android::base::WARNING
+ : ::android::base::FATAL)
+ << "ConditionVariable::~ConditionVariable for " << name_
<< " called with " << num_waiters_ << " waiters.";
}
#else
@@ -774,7 +787,9 @@ ConditionVariable::~ConditionVariable() {
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
Runtime* runtime = Runtime::Current();
bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDownLocked();
- PLOG(shutting_down ? WARNING : FATAL) << "pthread_cond_destroy failed for " << name_;
+ PLOG(shutting_down
+ ? ::android::base::WARNING
+ : ::android::base::FATAL) << "pthread_cond_destroy failed for " << name_;
}
#endif
}
diff --git a/runtime/base/stl_util.h b/runtime/base/stl_util.h
index a4cf249861..a53dcea2d7 100644
--- a/runtime/base/stl_util.h
+++ b/runtime/base/stl_util.h
@@ -20,11 +20,7 @@
#include <algorithm>
#include <sstream>
-// This header is used in the disassembler with libbase's logging. Only include ART logging
-// when no other logging macros are available. b/15436106, b/31338270
-#ifndef CHECK
#include "base/logging.h"
-#endif
namespace art {
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index 48e3ceb064..4498198b34 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -64,16 +64,16 @@ FdFile::FdFile(const std::string& path, int flags, mode_t mode, bool check_usage
void FdFile::Destroy() {
if (kCheckSafeUsage && (guard_state_ < GuardState::kNoCheck)) {
if (guard_state_ < GuardState::kFlushed) {
- LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly flushed before destruction.";
+ LOG(ERROR) << "File " << file_path_ << " wasn't explicitly flushed before destruction.";
}
if (guard_state_ < GuardState::kClosed) {
- LOG(::art::ERROR) << "File " << file_path_ << " wasn't explicitly closed before destruction.";
+ LOG(ERROR) << "File " << file_path_ << " wasn't explicitly closed before destruction.";
}
CHECK_GE(guard_state_, GuardState::kClosed);
}
if (auto_close_ && fd_ != -1) {
if (Close() != 0) {
- PLOG(::art::WARNING) << "Failed to close file " << file_path_;
+ PLOG(WARNING) << "Failed to close file " << file_path_;
}
}
}
@@ -104,7 +104,7 @@ void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* wa
if (kCheckSafeUsage) {
if (guard_state_ < GuardState::kNoCheck) {
if (warn_threshold < GuardState::kNoCheck && guard_state_ >= warn_threshold) {
- LOG(::art::ERROR) << warning;
+ LOG(ERROR) << warning;
}
guard_state_ = target;
}
@@ -117,7 +117,7 @@ void FdFile::moveUp(GuardState target, const char* warning) {
if (guard_state_ < target) {
guard_state_ = target;
} else if (target < guard_state_) {
- LOG(::art::ERROR) << warning;
+ LOG(ERROR) << warning;
}
}
}
@@ -350,13 +350,13 @@ int FdFile::FlushCloseOrErase() {
DCHECK(!read_only_mode_);
int flush_result = TEMP_FAILURE_RETRY(Flush());
if (flush_result != 0) {
- LOG(::art::ERROR) << "CloseOrErase failed while flushing a file.";
+ LOG(ERROR) << "CloseOrErase failed while flushing a file.";
Erase();
return flush_result;
}
int close_result = TEMP_FAILURE_RETRY(Close());
if (close_result != 0) {
- LOG(::art::ERROR) << "CloseOrErase failed while closing a file.";
+ LOG(ERROR) << "CloseOrErase failed while closing a file.";
Erase();
return close_result;
}
@@ -367,11 +367,11 @@ int FdFile::FlushClose() {
DCHECK(!read_only_mode_);
int flush_result = TEMP_FAILURE_RETRY(Flush());
if (flush_result != 0) {
- LOG(::art::ERROR) << "FlushClose failed while flushing a file.";
+ LOG(ERROR) << "FlushClose failed while flushing a file.";
}
int close_result = TEMP_FAILURE_RETRY(Close());
if (close_result != 0) {
- LOG(::art::ERROR) << "FlushClose failed while closing a file.";
+ LOG(ERROR) << "FlushClose failed while closing a file.";
}
return (flush_result != 0) ? flush_result : close_result;
}
@@ -383,7 +383,7 @@ void FdFile::MarkUnchecked() {
bool FdFile::ClearContent() {
DCHECK(!read_only_mode_);
if (SetLength(0) < 0) {
- PLOG(art::ERROR) << "Failed to reset the length";
+ PLOG(ERROR) << "Failed to reset the length";
return false;
}
return ResetOffset();
@@ -393,7 +393,7 @@ bool FdFile::ResetOffset() {
DCHECK(!read_only_mode_);
off_t rc = TEMP_FAILURE_RETRY(lseek(fd_, 0, SEEK_SET));
if (rc == static_cast<off_t>(-1)) {
- PLOG(art::ERROR) << "Failed to reset the offset";
+ PLOG(ERROR) << "Failed to reset the offset";
return false;
}
return true;
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 6683f13fd0..a980535c40 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -275,7 +275,7 @@ class ScopedCheck {
return false;
}
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("field operation on invalid %s: %p",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
java_object);
@@ -783,7 +783,7 @@ class ScopedCheck {
}
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("%s is an invalid %s: %p (%p)",
what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
java_object, obj);
@@ -1109,7 +1109,7 @@ class ScopedCheck {
mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("jarray is an invalid %s: %p (%p)",
ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
java_array, a);
@@ -1146,7 +1146,7 @@ class ScopedCheck {
ArtField* f = soa.DecodeField(fid);
// TODO: Better check here.
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass())) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("invalid jfieldID: %p", fid);
return nullptr;
}
@@ -1162,7 +1162,7 @@ class ScopedCheck {
ArtMethod* m = soa.DecodeMethod(mid);
// TODO: Better check here.
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("invalid jmethodID: %p", mid);
return nullptr;
}
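
The DumpSpaces call sites above switch from LOG(ERROR) to LOG_STREAM(ERROR): the libbase LOG macro carries its own severity guard and is not usable as a plain stream expression at an argument position, while LOG_STREAM(severity) yields the underlying stream directly. A short sketch of the pattern, with the helper name being illustrative:

    // Hand a logging stream to a function that expects a std::ostream&.
    void DumpHeapIfInvalid(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
        Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
      }
    }
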
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 845e39aa85..c51b99a986 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -56,6 +56,8 @@
#include "gc/space/image_space.h"
#include "handle_scope-inl.h"
#include "image-inl.h"
+#include "imt_conflict_table.h"
+#include "imtable-inl.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "jit/jit.h"
@@ -2378,6 +2380,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self,
DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
DCHECK(self != nullptr);
self->AssertNoPendingException();
+ self->PoisonObjectPointers();
if (descriptor[1] == '\0') {
// only the descriptors of primitive types should be 1 character long, also avoid class lookup
// for primitive classes that aren't backed by dex files.
@@ -6267,7 +6270,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
// or interface methods in the IMT here they will not create extra conflicts since we compare
// names and signatures in SetIMTRef.
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = interface_method->GetImtIndex();
+ const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
// There is only any conflicts if all of the interface methods for an IMT slot don't have
// the same implementation method, keep track of this to avoid creating a conflict table in
@@ -6321,7 +6324,7 @@ void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table,
}
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = interface_method->GetImtIndex();
+ const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
if (!imt[imt_index]->IsRuntimeMethod() ||
imt[imt_index] == unimplemented_method ||
imt[imt_index] == imt_conflict_method) {
@@ -6780,7 +6783,7 @@ bool ClassLinker::LinkInterfaceMethods(
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = interface_method->GetImtIndex();
+ uint32_t imt_index = ImTable::GetImtIndex(interface_method);
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -7920,7 +7923,7 @@ class DumpClassVisitor : public ClassVisitor {
explicit DumpClassVisitor(int flags) : flags_(flags) {}
bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
- klass->DumpClass(LOG(ERROR), flags_);
+ klass->DumpClass(LOG_STREAM(ERROR), flags_);
return true;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 0a46e2ebed..954af76b97 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1172,6 +1172,7 @@ class ClassLinker {
PointerSize image_pointer_size_;
class FindVirtualMethodHolderVisitor;
+ friend struct CompilationHelper; // For Compile in ImageTest.
friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 5e0ee6fe23..7023081ced 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -28,7 +28,6 @@
#include "experimental_flags.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
-#include "mirror/abstract_method.h"
#include "mirror/accessible_object.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -697,24 +696,18 @@ struct FieldOffsets : public CheckOffsets<mirror::Field> {
struct ExecutableOffsets : public CheckOffsets<mirror::Executable> {
ExecutableOffsets() : CheckOffsets<mirror::Executable>(
false, "Ljava/lang/reflect/Executable;") {
+ addOffset(OFFSETOF_MEMBER(mirror::Executable, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Executable, art_method_), "artMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::Executable, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::Executable, declaring_class_of_overridden_method_),
+ "declaringClassOfOverriddenMethod");
+ addOffset(OFFSETOF_MEMBER(mirror::Executable, dex_method_index_), "dexMethodIndex");
addOffset(OFFSETOF_MEMBER(mirror::Executable, has_real_parameter_data_),
"hasRealParameterData");
addOffset(OFFSETOF_MEMBER(mirror::Executable, parameters_), "parameters");
};
};
-struct AbstractMethodOffsets : public CheckOffsets<mirror::AbstractMethod> {
- AbstractMethodOffsets() : CheckOffsets<mirror::AbstractMethod>(
- false, "Ljava/lang/reflect/AbstractMethod;") {
- addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, access_flags_), "accessFlags");
- addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, art_method_), "artMethod");
- addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_), "declaringClass");
- addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_of_overridden_method_),
- "declaringClassOfOverriddenMethod");
- addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, dex_method_index_), "dexMethodIndex");
- };
-};
-
// C++ fields must exactly match the fields in the Java classes. If this fails,
// reorder the fields in the C++ class. Managed class fields are ordered by
// ClassLinker::LinkFields.
@@ -733,7 +726,6 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
EXPECT_TRUE(AccessibleObjectOffsets().Check());
EXPECT_TRUE(FieldOffsets().Check());
EXPECT_TRUE(ExecutableOffsets().Check());
- EXPECT_TRUE(AbstractMethodOffsets().Check());
}
TEST_F(ClassLinkerTest, FindClassNonexistent) {
@@ -1269,7 +1261,6 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) {
old_dex_file->Size(),
location->ToModifiedUtf8(),
0u,
- nullptr,
nullptr));
{
WriterMutexLock mu(soa.Self(), *class_linker->DexLock());
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 55ff73d0a6..0600876122 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -113,6 +113,10 @@ void ClassTable::Insert(mirror::Class* klass) {
classes_.back().Insert(GcRoot<mirror::Class>(klass));
}
+void ClassTable::InsertWithoutLocks(mirror::Class* klass) {
+ classes_.back().Insert(GcRoot<mirror::Class>(klass));
+}
+
void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) {
WriterMutexLock mu(Thread::Current(), lock_);
classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 66c241fb22..8c91806ae2 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -163,6 +163,8 @@ class ClassTable {
}
private:
+ void InsertWithoutLocks(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS;
+
// Lock to guard inserting and removing.
mutable ReaderWriterMutex lock_;
// We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
@@ -171,6 +173,8 @@ class ClassTable {
// loader which may not be owned by the class loader must be held strongly live. Also dex caches
// are held live to prevent them being unloading once they have classes in them.
std::vector<GcRoot<mirror::Object>> strong_roots_ GUARDED_BY(lock_);
+
+ friend class ImageWriter; // for InsertWithoutLocks.
};
} // namespace art
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 72349527d1..11722b2573 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -58,7 +58,7 @@ int main(int argc, char **argv) {
setenv("ANDROID_LOG_TAGS", "*:e", 1);
art::InitLogging(argv);
- LOG(::art::INFO) << "Running main() from common_runtime_test.cc...";
+ LOG(INFO) << "Running main() from common_runtime_test.cc...";
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
@@ -76,9 +76,10 @@ ScratchFile::ScratchFile() {
file_.reset(new File(fd, GetFilename(), true));
}
-ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix) {
- filename_ = other.GetFilename();
- filename_ += suffix;
+ScratchFile::ScratchFile(const ScratchFile& other, const char* suffix)
+ : ScratchFile(other.GetFilename() + suffix) {}
+
+ScratchFile::ScratchFile(const std::string& filename) : filename_(filename) {
int fd = open(filename_.c_str(), O_RDWR | O_CREAT, 0666);
CHECK_NE(-1, fd);
file_.reset(new File(fd, GetFilename(), true));
@@ -90,6 +91,18 @@ ScratchFile::ScratchFile(File* file) {
file_.reset(file);
}
+ScratchFile::ScratchFile(ScratchFile&& other) {
+ *this = std::move(other);
+}
+
+ScratchFile& ScratchFile::operator=(ScratchFile&& other) {
+ if (GetFile() != other.GetFile()) {
+ std::swap(filename_, other.filename_);
+ std::swap(file_, other.file_);
+ }
+ return *this;
+}
+
ScratchFile::~ScratchFile() {
Unlink();
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index b2090b7e0b..a7948e4e84 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -40,8 +40,14 @@ class ScratchFile {
public:
ScratchFile();
+ explicit ScratchFile(const std::string& filename);
+
ScratchFile(const ScratchFile& other, const char* suffix);
+ explicit ScratchFile(ScratchFile&& other);
+
+ ScratchFile& operator=(ScratchFile&& other);
+
explicit ScratchFile(File* file);
~ScratchFile();
@@ -113,8 +119,7 @@ class CommonRuntimeTestImpl {
std::string GetTestDexFileName(const char* name) const;
- std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name);
std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
REQUIRES_SHARED(Locks::mutator_lock_);
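
ScratchFile grows an explicit filename constructor plus move construction and assignment; the common_runtime_test.cc hunk above implements the moves as swaps of filename_ and file_, so a moved-from object adopts (and later unlinks) whatever the target previously held. A small sketch of the usage this enables, with the path being illustrative:

    ScratchFile image("/tmp/art-test.art");   // new explicit filename constructor
    ScratchFile moved(std::move(image));      // move construction: swaps name and File handle
    ScratchFile other;
    other = std::move(moved);                 // move assignment, same swap-based semantics
    // Each destructor still unlinks whichever file the object ends up owning.
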
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 03223b0d3c..0af086c896 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -52,6 +52,10 @@
namespace art {
+static constexpr OatDexFile* kNoOatDexFile = nullptr;
+
+const char* DexFile::kClassesDex = "classes.dex";
+
const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' };
const uint8_t DexFile::kDexMagicVersions[DexFile::kNumDexVersions][DexFile::kDexVersionLen] = {
{'0', '3', '5', '\0'},
@@ -118,64 +122,6 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
return false;
}
-bool DexFile::Open(const char* filename,
- const char* location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace(std::string("Open dex file ") + location);
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
- uint32_t magic;
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- return DexFile::OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files);
- }
- if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.Release(),
- location,
- /* verify */ true,
- verify_checksum,
- error_msg));
- if (dex_file.get() != nullptr) {
- dex_files->push_back(std::move(dex_file));
- return true;
- } else {
- return false;
- }
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
-static bool ContainsClassesDex(int fd, const char* filename) {
- std::string error_msg;
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, filename, &error_msg));
- if (zip_archive.get() == nullptr) {
- return false;
- }
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(DexFile::kClassesDex, &error_msg));
- return (zip_entry.get() != nullptr);
-}
-
-bool DexFile::MaybeDex(const char* filename) {
- uint32_t magic;
- std::string error_msg;
- File fd = OpenAndReadMagic(filename, &magic, &error_msg);
- if (fd.Fd() == -1) {
- return false;
- }
- if (IsZipMagic(magic)) {
- return ContainsClassesDex(fd.Release(), filename);
- } else if (IsDexMagic(magic)) {
- return true;
- }
- return false;
-}
-
int DexFile::GetPermissions() const {
if (mem_map_.get() == nullptr) {
return 0;
@@ -206,7 +152,9 @@ bool DexFile::DisableWrite() const {
}
}
-std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size,
+
+std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base,
+ size_t size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
@@ -214,72 +162,117 @@ std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size,
bool verify_checksum,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from RAM ") + location);
- std::unique_ptr<const DexFile> dex_file = OpenMemory(base,
- size,
- location,
- location_checksum,
- nullptr,
- oat_dex_file,
- error_msg);
- if (dex_file == nullptr) {
- return nullptr;
- }
-
- if (verify && !DexFileVerifier::Verify(dex_file.get(),
- dex_file->Begin(),
- dex_file->Size(),
- location.c_str(),
- verify_checksum,
- error_msg)) {
- return nullptr;
- }
- return dex_file;
+ return OpenCommon(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ verify,
+ verify_checksum,
+ error_msg);
}
std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
+ std::unique_ptr<MemMap> map,
bool verify,
bool verify_checksum,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- std::unique_ptr<const DexFile> dex_file = OpenMemory(location,
- location_checksum,
- std::move(mem_map),
- error_msg);
- if (dex_file == nullptr) {
- return nullptr;
+ CHECK(map.get() != nullptr);
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ location_checksum,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg);
+ if (dex_file != nullptr) {
+ dex_file->mem_map_.reset(map.release());
}
+ return dex_file;
+}
- if (verify && !DexFileVerifier::Verify(dex_file.get(),
- dex_file->Begin(),
- dex_file->Size(),
- location.c_str(),
- verify_checksum,
- error_msg)) {
- return nullptr;
+bool DexFile::Open(const char* filename,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
+ uint32_t magic;
+ File fd = OpenAndReadMagic(filename, &magic, error_msg);
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
}
- return dex_file;
+ if (IsZipMagic(magic)) {
+ return DexFile::OpenZip(fd.Release(), location, verify_checksum, error_msg, dex_files);
+ }
+ if (IsDexMagic(magic)) {
+ std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.Release(),
+ location,
+ /* verify */ true,
+ verify_checksum,
+ error_msg));
+ if (dex_file.get() != nullptr) {
+ dex_files->push_back(std::move(dex_file));
+ return true;
+ } else {
+ return false;
+ }
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+std::unique_ptr<const DexFile> DexFile::OpenDex(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace("Open dex file " + std::string(location));
+ return OpenFile(fd, location, true /* verify */, verify_checksum, error_msg);
+}
+
+bool DexFile::OpenZip(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ ScopedTrace trace("Dex file open Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
+ if (zip_archive.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ return DexFile::OpenAllDexFilesFromZip(*zip_archive,
+ location,
+ verify_checksum,
+ error_msg,
+ dex_files);
}
std::unique_ptr<const DexFile> DexFile::OpenFile(int fd,
- const char* location,
+ const std::string& location,
bool verify,
bool verify_checksum,
std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file ") + location);
- CHECK(location != nullptr);
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ CHECK(!location.empty());
std::unique_ptr<MemMap> map;
{
File delayed_close(fd, /* check_usage */ false);
struct stat sbuf;
memset(&sbuf, 0, sizeof(sbuf));
if (fstat(fd, &sbuf) == -1) {
- *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location, strerror(errno));
+ *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
+ strerror(errno));
return nullptr;
}
if (S_ISDIR(sbuf.st_mode)) {
- *error_msg = StringPrintf("Attempt to mmap directory '%s'", location);
+ *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
return nullptr;
}
size_t length = sbuf.st_size;
@@ -289,7 +282,7 @@ std::unique_ptr<const DexFile> DexFile::OpenFile(int fd,
fd,
0,
/*low_4gb*/false,
- location,
+ location.c_str(),
error_msg));
if (map == nullptr) {
DCHECK(!error_msg->empty());
@@ -299,72 +292,38 @@ std::unique_ptr<const DexFile> DexFile::OpenFile(int fd,
if (map->Size() < sizeof(DexFile::Header)) {
*error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header", location);
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
return nullptr;
}
const Header* dex_header = reinterpret_cast<const Header*>(map->Begin());
- std::unique_ptr<const DexFile> dex_file(OpenMemory(location,
- dex_header->checksum_,
- std::move(map),
- error_msg));
- if (dex_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location,
- error_msg->c_str());
- return nullptr;
- }
-
- if (verify && !DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
- location,
- verify_checksum,
- error_msg)) {
- return nullptr;
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ dex_header->checksum_,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg);
+ if (dex_file != nullptr) {
+ dex_file->mem_map_.reset(map.release());
}
return dex_file;
}
-const char* DexFile::kClassesDex = "classes.dex";
-
-bool DexFile::OpenZip(int fd,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
- if (zip_archive.get() == nullptr) {
- DCHECK(!error_msg->empty());
- return false;
- }
- return DexFile::OpenFromZip(*zip_archive, location, verify_checksum, error_msg, dex_files);
-}
-
-std::unique_ptr<const DexFile> DexFile::OpenMemory(const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
- std::string* error_msg) {
- return OpenMemory(mem_map->Begin(),
- mem_map->Size(),
- location,
- location_checksum,
- std::move(mem_map),
- nullptr,
- error_msg);
-}
-
-std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code) {
+std::unique_ptr<const DexFile> DexFile::OpenOneDexFileFromZip(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) {
ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
CHECK(!location.empty());
std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry.get() == nullptr) {
+ if (zip_entry == nullptr) {
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
}
@@ -374,32 +333,38 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive,
return nullptr;
}
std::unique_ptr<MemMap> map(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- if (map.get() == nullptr) {
+ if (map == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
return nullptr;
}
- std::unique_ptr<const DexFile> dex_file(OpenMemory(location,
- zip_entry->GetCrc32(),
- std::move(map),
- error_msg));
+ VerifyResult verify_result;
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ zip_entry->GetCrc32(),
+ kNoOatDexFile,
+ /* verify */ true,
+ verify_checksum,
+ error_msg,
+ &verify_result);
if (dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
- error_msg->c_str());
- *error_code = ZipOpenErrorCode::kDexFileError;
+ if (verify_result == VerifyResult::kVerifyNotAttempted) {
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ } else {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ }
return nullptr;
}
+ dex_file->mem_map_.reset(map.release());
if (!dex_file->DisableWrite()) {
*error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
*error_code = ZipOpenErrorCode::kMakeReadOnlyError;
return nullptr;
}
CHECK(dex_file->IsReadOnly()) << location;
- if (!DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
- location.c_str(),
- verify_checksum,
- error_msg)) {
+ if (verify_result != VerifyResult::kVerifySucceeded) {
*error_code = ZipOpenErrorCode::kVerifyError;
return nullptr;
}
@@ -413,16 +378,20 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive,
// seems an excessive number.
static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
-bool DexFile::OpenFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+bool DexFile::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) {
ScopedTrace trace("Dex file open from Zip " + std::string(location));
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
- std::unique_ptr<const DexFile> dex_file(
- Open(zip_archive, kClassesDex, location, verify_checksum, error_msg, &error_code));
+ std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
+ kClassesDex,
+ location,
+ verify_checksum,
+ error_msg,
+ &error_code));
if (dex_file.get() == nullptr) {
return false;
} else {
@@ -437,8 +406,12 @@ bool DexFile::OpenFromZip(const ZipArchive& zip_archive,
for (size_t i = 1; ; ++i) {
std::string name = GetMultiDexClassesDexName(i);
std::string fake_location = GetMultiDexLocation(i, location.c_str());
- std::unique_ptr<const DexFile> next_dex_file(
- Open(zip_archive, name.c_str(), fake_location, verify_checksum, error_msg, &error_code));
+ std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
+ name.c_str(),
+ fake_location,
+ verify_checksum,
+ error_msg,
+ &error_code));
if (next_dex_file.get() == nullptr) {
if (error_code != ZipOpenErrorCode::kEntryNotFound) {
LOG(WARNING) << error_msg;
@@ -464,35 +437,58 @@ bool DexFile::OpenFromZip(const ZipArchive& zip_archive,
}
}
-
-std::unique_ptr<const DexFile> DexFile::OpenMemory(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
- const OatDexFile* oat_dex_file,
- std::string* error_msg) {
- DCHECK(base != nullptr);
- DCHECK_NE(size, 0U);
- CHECK_ALIGNED(base, 4); // various dex file structures must be word aligned
- std::unique_ptr<DexFile> dex_file(
- new DexFile(base, size, location, location_checksum, std::move(mem_map), oat_dex_file));
+std::unique_ptr<DexFile> DexFile::OpenCommon(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ VerifyResult* verify_result) {
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifyNotAttempted;
+ }
+ std::unique_ptr<DexFile> dex_file(new DexFile(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file));
+ if (dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
+ error_msg->c_str());
+ return nullptr;
+ }
if (!dex_file->Init(error_msg)) {
dex_file.reset();
+ return nullptr;
+ }
+ if (verify && !DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ location.c_str(),
+ verify_checksum,
+ error_msg)) {
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifyFailed;
+ }
+ return nullptr;
+ }
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifySucceeded;
}
- return std::unique_ptr<const DexFile>(dex_file.release());
+ return dex_file;
}
-DexFile::DexFile(const uint8_t* base, size_t size,
+DexFile::DexFile(const uint8_t* base,
+ size_t size,
const std::string& location,
uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file)
: begin_(base),
size_(size),
location_(location),
location_checksum_(location_checksum),
- mem_map_(std::move(mem_map)),
header_(reinterpret_cast<const Header*>(base)),
string_ids_(reinterpret_cast<const StringId*>(base + header_->string_ids_off_)),
type_ids_(reinterpret_cast<const TypeId*>(base + header_->type_ids_off_)),
@@ -577,6 +573,34 @@ const DexFile::ClassDef* DexFile::FindClassDef(uint16_t type_idx) const {
return nullptr;
}
+uint32_t DexFile::FindCodeItemOffset(const DexFile::ClassDef& class_def,
+ uint32_t method_idx) const {
+ const uint8_t* class_data = GetClassData(class_def);
+ CHECK(class_data != nullptr);
+ ClassDataItemIterator it(*this, class_data);
+ // Skip fields
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ while (it.HasNextDirectMethod()) {
+ if (it.GetMemberIndex() == method_idx) {
+ return it.GetMethodCodeItemOffset();
+ }
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ if (it.GetMemberIndex() == method_idx) {
+ return it.GetMethodCodeItemOffset();
+ }
+ it.Next();
+ }
+ LOG(FATAL) << "Unable to find method " << method_idx;
+ UNREACHABLE();
+}
+
const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass,
const DexFile::StringId& name,
const DexFile::TypeId& type) const {
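
FindCodeItemOffset, added above, walks a class definition's class_data with ClassDataItemIterator, skipping static and instance fields and then scanning direct and virtual methods for the requested method index; it LOG(FATAL)s if the method is not part of the class. A sketch of a caller, assuming the existing DexFile::GetCodeItem accessor (the wrapper name is illustrative):

    const DexFile::CodeItem* GetCodeItemFor(const DexFile& dex_file,
                                            const DexFile::ClassDef& class_def,
                                            uint32_t method_idx) {
      // Aborts if method_idx does not belong to class_def, so only call for known members.
      uint32_t code_item_offset = dex_file.FindCodeItemOffset(class_def, method_idx);
      return dex_file.GetCodeItem(code_item_offset);
    }
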
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 97c2596de0..29b8c3adef 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -397,15 +397,9 @@ class DexFile {
// Return true if the checksum could be found, false otherwise.
static bool GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg);
- // Opens .dex files found in the container, guessing the container format based on file extension.
- static bool Open(const char* filename,
- const char* location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
// Opens .dex file, backed by existing memory
- static std::unique_ptr<const DexFile> Open(const uint8_t* base, size_t size,
+ static std::unique_ptr<const DexFile> Open(const uint8_t* base,
+ size_t size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
@@ -421,16 +415,25 @@ class DexFile {
bool verify_checksum,
std::string* error_msg);
- // Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
- // If this function returns false, Open will not succeed. The inverse is not true, however.
- static bool MaybeDex(const char* filename);
+ // Opens all .dex files found in the file, guessing the container format based on file extension.
+ static bool Open(const char* filename,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files);
+
+ // Open a single dex file from an fd.
+ static std::unique_ptr<const DexFile> OpenDex(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg);
- // Open all classesXXX.dex files from a zip archive.
- static bool OpenFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
+ // Opens dex files from within a .jar, .zip, or .apk file
+ static bool OpenZip(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files);
// Closes a .dex file.
virtual ~DexFile();
@@ -587,6 +590,9 @@ class DexFile {
const DexFile::StringId& name,
const DexFile::TypeId& type) const;
+ uint32_t FindCodeItemOffset(const DexFile::ClassDef& class_def,
+ uint32_t dex_method_idx) const;
+
// Returns the declaring class descriptor string of a field id.
const char* GetFieldDeclaringClassDescriptor(const FieldId& field_id) const {
const DexFile::TypeId& type_id = GetTypeId(field_id.class_idx_);
@@ -1011,20 +1017,12 @@ class DexFile {
static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right);
private:
- // Opens a .dex file
static std::unique_ptr<const DexFile> OpenFile(int fd,
- const char* location,
+ const std::string& location,
bool verify,
bool verify_checksum,
std::string* error_msg);
- // Opens dex files from within a .jar, .zip, or .apk file
- static bool OpenZip(int fd,
- const std::string& location,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
enum class ZipOpenErrorCode { // private
kNoError,
kEntryNotFound,
@@ -1034,20 +1032,38 @@ class DexFile {
kVerifyError
};
+ // Open all classesXXX.dex files from a zip archive.
+ static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files);
+
// Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
// return.
- static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive,
- const char* entry_name,
+ static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code);
+
+ enum class VerifyResult { // private
+ kVerifyNotAttempted,
+ kVerifySucceeded,
+ kVerifyFailed
+ };
+
+ static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
+ size_t size,
const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
bool verify_checksum,
std::string* error_msg,
- ZipOpenErrorCode* error_code);
+ VerifyResult* verify_result = nullptr);
- // Opens a .dex file at the given address backed by a MemMap
- static std::unique_ptr<const DexFile> OpenMemory(const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
- std::string* error_msg);
// Opens a .dex file at the given address, optionally backed by a MemMap
static std::unique_ptr<const DexFile> OpenMemory(const uint8_t* dex_file,
@@ -1058,10 +1074,10 @@ class DexFile {
const OatDexFile* oat_dex_file,
std::string* error_msg);
- DexFile(const uint8_t* base, size_t size,
+ DexFile(const uint8_t* base,
+ size_t size,
const std::string& location,
uint32_t location_checksum,
- std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file);
// Top-level initializer that calls other Init methods.
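
A sketch of how a caller might drive the reshuffled public Open entry points declared above; the APK path is hypothetical and error handling is reduced to a log line:

    std::vector<std::unique_ptr<const DexFile>> dex_files;
    std::string error_msg;
    int fd = open("/data/app/example.apk", O_RDONLY);  // hypothetical location
    if (fd == -1 ||
        !DexFile::OpenZip(fd, "/data/app/example.apk", /*verify_checksum*/ true,
                          &error_msg, &dex_files)) {
      LOG(ERROR) << "Could not open dex files: " << error_msg;
    }
    // dex_files now holds one DexFile per classesN.dex entry in the archive.
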
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index c6c87fdf36..e0d5337660 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -1027,6 +1027,66 @@ mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
annotation_class);
}
+bool GetParametersMetadataForMethod(ArtMethod* method,
+ MutableHandle<mirror::ObjectArray<mirror::String>>* names,
+ MutableHandle<mirror::IntArray>* access_flags) {
+ const DexFile::AnnotationSetItem* annotation_set =
+ FindAnnotationSetForMethod(method);
+ if (annotation_set == nullptr) {
+ return false;
+ }
+
+ const DexFile* dex_file = method->GetDexFile();
+ const DexFile::AnnotationItem* annotation_item =
+ SearchAnnotationSet(*dex_file,
+ annotation_set,
+ "Ldalvik/annotation/MethodParameters;",
+ DexFile::kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return false;
+ }
+
+ StackHandleScope<5> hs(Thread::Current());
+
+ // Extract the parameters' names String[].
+ mirror::Class* string_class = mirror::String::GetJavaLangString();
+ Handle<mirror::Class> string_array_class(hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class)));
+ if (UNLIKELY(string_array_class.Get() == nullptr)) {
+ return false;
+ }
+
+ Handle<mirror::Class> klass = hs.NewHandle(method->GetDeclaringClass());
+ Handle<mirror::Object> names_obj =
+ hs.NewHandle(GetAnnotationValue(klass,
+ annotation_item,
+ "names",
+ string_array_class,
+ DexFile::kDexAnnotationArray));
+ if (names_obj.Get() == nullptr) {
+ return false;
+ }
+
+ // Extract the parameters' access flags int[].
+ Handle<mirror::Class> int_array_class(hs.NewHandle(mirror::IntArray::GetArrayClass()));
+ if (UNLIKELY(int_array_class.Get() == nullptr)) {
+ return false;
+ }
+ Handle<mirror::Object> access_flags_obj =
+ hs.NewHandle(GetAnnotationValue(klass,
+ annotation_item,
+ "accessFlags",
+ int_array_class,
+ DexFile::kDexAnnotationArray));
+ if (access_flags_obj.Get() == nullptr) {
+ return false;
+ }
+
+ names->Assign(names_obj.Get()->AsObjectArray<mirror::String>());
+ access_flags->Assign(access_flags_obj.Get()->AsIntArray());
+ return true;
+}
+
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
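
For context, a sketch of how a reflection-style caller might consume the new GetParametersMetadataForMethod() helper; it assumes the caller already holds the mutator lock and that StackHandleScope::NewHandle yields MutableHandles (names are illustrative):

    StackHandleScope<2> hs(Thread::Current());
    MutableHandle<mirror::ObjectArray<mirror::String>> names =
        hs.NewHandle<mirror::ObjectArray<mirror::String>>(nullptr);
    MutableHandle<mirror::IntArray> access_flags = hs.NewHandle<mirror::IntArray>(nullptr);
    if (annotations::GetParametersMetadataForMethod(method, &names, &access_flags)) {
      for (int32_t i = 0; i < names->GetLength(); ++i) {
        // names->Get(i) is the i-th parameter name, access_flags->Get(i) its flags.
      }
    }
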
diff --git a/runtime/dex_file_annotations.h b/runtime/dex_file_annotations.h
index 7b4e8564b0..c66c5bdb8b 100644
--- a/runtime/dex_file_annotations.h
+++ b/runtime/dex_file_annotations.h
@@ -30,6 +30,7 @@ namespace mirror {
class ArtField;
class ArtMethod;
class ClassLinker;
+template<class T> class MutableHandle;
namespace annotations {
@@ -58,6 +59,10 @@ mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
uint32_t parameter_idx,
Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
+bool GetParametersMetadataForMethod(ArtMethod* method,
+ MutableHandle<mirror::ObjectArray<mirror::String>>* names,
+ MutableHandle<mirror::IntArray>* access_flags)
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class,
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 6a06177bb6..8dd5f37962 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -26,6 +26,7 @@
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
+#include "utils.h"
namespace art {
@@ -37,65 +38,13 @@ TEST_F(DexFileTest, Open) {
ASSERT_TRUE(dex.get() != nullptr);
}
-static const uint8_t kBase64Map[256] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
- 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
- 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
- 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
- 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255
-};
-
-static inline std::vector<uint8_t> DecodeBase64(const char* src) {
- std::vector<uint8_t> tmp;
- uint32_t t = 0, y = 0;
- int g = 3;
- for (size_t i = 0; src[i] != '\0'; ++i) {
- uint8_t c = kBase64Map[src[i] & 0xFF];
- if (c == 255) continue;
- // the final = symbols are read and used to trim the remaining bytes
- if (c == 254) {
- c = 0;
- // prevent g < 0 which would potentially allow an overflow later
- if (--g < 0) {
- return std::vector<uint8_t>();
- }
- } else if (g != 3) {
- // we only allow = to be at the end
- return std::vector<uint8_t>();
- }
- t = (t << 6) | c;
- if (++y == 4) {
- tmp.push_back((t >> 16) & 255);
- if (g > 1) {
- tmp.push_back((t >> 8) & 255);
- }
- if (g > 2) {
- tmp.push_back(t & 255);
- }
- y = t = 0;
- }
- }
- if (y != 0) {
- return std::vector<uint8_t>();
- }
- return tmp;
+static inline std::vector<uint8_t> DecodeBase64Vec(const char* src) {
+ std::vector<uint8_t> res;
+ size_t size;
+ std::unique_ptr<uint8_t[]> data(DecodeBase64(src, &size));
+ res.resize(size);
+ memcpy(res.data(), data.get(), size);
+ return res;
}
// Although this is the same content logically as the Nested test dex,
@@ -163,10 +112,59 @@ static const char kRawDexZeroLength[] =
"AGNsYXNzZXMuZGV4VVQFAANkDaFXdXgLAAEE5AMBAASIEwAAUEsFBgAAAAABAAEAUQAAAEUAAAAA"
"AA==";
+static const char kRawZipClassesDexPresent[] =
+ "UEsDBBQAAAAIANVRN0ms99lIMQEAACACAAALABwAY2xhc3Nlcy5kZXhVVAkAAwFj5VcUY+VXdXgL"
+ "AAEE5AMBAASIEwAAS0mt4DIwtmDYYdV9csrcks83lpxZN2vD8f/1p1beWX3vabQCEwNDAQMDQ0WY"
+ "iRADFPQwMjBwMEDEWYB4AhADlTEsYEAAZiDeAcRApQwXgNgAyPgApJWAtBYQGwGxGxAHAnEIEEcA"
+ "cS4jRD0T1Fw2KM0ENZMVypZhRLIIqIMdag9CBMFnhtJ1jDA5RrBcMSPE7AIBkIl8UFGgP6Fu4IOa"
+ "wczAZpOZl1lix8Dm45uYmWfNIOSTlViWqJ+TmJeu75+UlZpcYs3ACZLSA4kzMIYxMIX5MAhHIykL"
+ "LinKzEu3ZmDJBSoDOZiPgRlMgv3T2MDygZGRs4OJB8n9MBoWzrAwmQD1Eyy8WZHCmg0pvBkVIGpA"
+ "Yc4oABEHhRuTAsRMUDwwQ9WAwoJBAaIGHE5Q9aB4BgBQSwECHgMUAAAACADVUTdJrPfZSDEBAAAg"
+ "AgAACwAYAAAAAAAAAAAAoIEAAAAAY2xhc3Nlcy5kZXhVVAUAAwFj5Vd1eAsAAQTkAwEABIgTAABQ"
+ "SwUGAAAAAAEAAQBRAAAAdgEAAAAA";
+
+static const char kRawZipClassesDexAbsent[] =
+ "UEsDBBQAAAAIANVRN0ms99lIMQEAACACAAAOABwAbm90Y2xhc3Nlcy5kZXhVVAkAAwFj5VcUY+VX"
+ "dXgLAAEE5AMBAASIEwAAS0mt4DIwtmDYYdV9csrcks83lpxZN2vD8f/1p1beWX3vabQCEwNDAQMD"
+ "Q0WYiRADFPQwMjBwMEDEWYB4AhADlTEsYEAAZiDeAcRApQwXgNgAyPgApJWAtBYQGwGxGxAHAnEI"
+ "EEcAcS4jRD0T1Fw2KM0ENZMVypZhRLIIqIMdag9CBMFnhtJ1jDA5RrBcMSPE7AIBkIl8UFGgP6Fu"
+ "4IOawczAZpOZl1lix8Dm45uYmWfNIOSTlViWqJ+TmJeu75+UlZpcYs3ACZLSA4kzMIYxMIX5MAhH"
+ "IykLLinKzEu3ZmDJBSoDOZiPgRlMgv3T2MDygZGRs4OJB8n9MBoWzrAwmQD1Eyy8WZHCmg0pvBkV"
+ "IGpAYc4oABEHhRuTAsRMUDwwQ9WAwoJBAaIGHE5Q9aB4BgBQSwECHgMUAAAACADVUTdJrPfZSDEB"
+ "AAAgAgAADgAYAAAAAAAAAAAAoIEAAAAAbm90Y2xhc3Nlcy5kZXhVVAUAAwFj5Vd1eAsAAQTkAwEA"
+ "BIgTAABQSwUGAAAAAAEAAQBUAAAAeQEAAAAA";
+
+static const char kRawZipThreeDexFiles[] =
+ "UEsDBBQAAAAIAP1WN0ms99lIMQEAACACAAAMABwAY2xhc3NlczIuZGV4VVQJAAOtbOVXrWzlV3V4"
+ "CwABBOQDAQAEiBMAAEtJreAyMLZg2GHVfXLK3JLPN5acWTdrw/H/9adW3ll972m0AhMDQwEDA0NF"
+ "mIkQAxT0MDIwcDBAxFmAeAIQA5UxLGBAAGYg3gHEQKUMF4DYAMj4AKSVgLQWEBsBsRsQBwJxCBBH"
+ "AHEuI0Q9E9RcNijNBDWTFcqWYUSyCKiDHWoPQgTBZ4bSdYwwOUawXDEjxOwCAZCJfFBRoD+hbuCD"
+ "msHMwGaTmZdZYsfA5uObmJlnzSDkk5VYlqifk5iXru+flJWaXGLNwAmS0gOJMzCGMTCF+TAIRyMp"
+ "Cy4pysxLt2ZgyQUqAzmYj4EZTIL909jA8oGRkbODiQfJ/TAaFs6wMJkA9RMsvFmRwpoNKbwZFSBq"
+ "QGHOKAARB4UbkwLETFA8MEPVgMKCQQGiBhxOUPWgeAYAUEsDBBQAAAAIAABXN0ms99lIMQEAACAC"
+ "AAAMABwAY2xhc3NlczMuZGV4VVQJAAOvbOVXr2zlV3V4CwABBOQDAQAEiBMAAEtJreAyMLZg2GHV"
+ "fXLK3JLPN5acWTdrw/H/9adW3ll972m0AhMDQwEDA0NFmIkQAxT0MDIwcDBAxFmAeAIQA5UxLGBA"
+ "AGYg3gHEQKUMF4DYAMj4AKSVgLQWEBsBsRsQBwJxCBBHAHEuI0Q9E9RcNijNBDWTFcqWYUSyCKiD"
+ "HWoPQgTBZ4bSdYwwOUawXDEjxOwCAZCJfFBRoD+hbuCDmsHMwGaTmZdZYsfA5uObmJlnzSDkk5VY"
+ "lqifk5iXru+flJWaXGLNwAmS0gOJMzCGMTCF+TAIRyMpCy4pysxLt2ZgyQUqAzmYj4EZTIL909jA"
+ "8oGRkbODiQfJ/TAaFs6wMJkA9RMsvFmRwpoNKbwZFSBqQGHOKAARB4UbkwLETFA8MEPVgMKCQQGi"
+ "BhxOUPWgeAYAUEsDBBQAAAAIANVRN0ms99lIMQEAACACAAALABwAY2xhc3Nlcy5kZXhVVAkAAwFj"
+ "5VetbOVXdXgLAAEE5AMBAASIEwAAS0mt4DIwtmDYYdV9csrcks83lpxZN2vD8f/1p1beWX3vabQC"
+ "EwNDAQMDQ0WYiRADFPQwMjBwMEDEWYB4AhADlTEsYEAAZiDeAcRApQwXgNgAyPgApJWAtBYQGwGx"
+ "GxAHAnEIEEcAcS4jRD0T1Fw2KM0ENZMVypZhRLIIqIMdag9CBMFnhtJ1jDA5RrBcMSPE7AIBkIl8"
+ "UFGgP6Fu4IOawczAZpOZl1lix8Dm45uYmWfNIOSTlViWqJ+TmJeu75+UlZpcYs3ACZLSA4kzMIYx"
+ "MIX5MAhHIykLLinKzEu3ZmDJBSoDOZiPgRlMgv3T2MDygZGRs4OJB8n9MBoWzrAwmQD1Eyy8WZHC"
+ "mg0pvBkVIGpAYc4oABEHhRuTAsRMUDwwQ9WAwoJBAaIGHE5Q9aB4BgBQSwECHgMUAAAACAD9VjdJ"
+ "rPfZSDEBAAAgAgAADAAYAAAAAAAAAAAAoIEAAAAAY2xhc3NlczIuZGV4VVQFAAOtbOVXdXgLAAEE"
+ "5AMBAASIEwAAUEsBAh4DFAAAAAgAAFc3Saz32UgxAQAAIAIAAAwAGAAAAAAAAAAAAKCBdwEAAGNs"
+ "YXNzZXMzLmRleFVUBQADr2zlV3V4CwABBOQDAQAEiBMAAFBLAQIeAxQAAAAIANVRN0ms99lIMQEA"
+ "ACACAAALABgAAAAAAAAAAACgge4CAABjbGFzc2VzLmRleFVUBQADAWPlV3V4CwABBOQDAQAEiBMA"
+ "AFBLBQYAAAAAAwADAPUAAABkBAAAAAA=";
+
static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
- std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ std::vector<uint8_t> dex_bytes = DecodeBase64Vec(base64);
CHECK_NE(dex_bytes.size(), 0u);
// write to provided file
@@ -180,29 +178,43 @@ static void DecodeAndWriteDexFile(const char* base64, const char* location) {
}
}
-static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
- const char* location) {
+static bool OpenDexFilesBase64(const char* base64,
+ const char* location,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::string* error_msg) {
DecodeAndWriteDexFile(base64, location);
- // read dex file
+ // read dex file(s)
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
- std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFile::Open(location, location, kVerifyChecksum, &error_msg, &tmp);
+ bool success = DexFile::Open(location, location, kVerifyChecksum, error_msg, &tmp);
+ if (success) {
+ for (std::unique_ptr<const DexFile>& dex_file : tmp) {
+ EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
+ EXPECT_TRUE(dex_file->IsReadOnly());
+ }
+ *dex_files = std::move(tmp);
+ }
+ return success;
+}
+
+static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
+ const char* location) {
+ // read dex files.
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ bool success = OpenDexFilesBase64(base64, location, &dex_files, &error_msg);
CHECK(success) << error_msg;
- EXPECT_EQ(1U, tmp.size());
- std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
- EXPECT_EQ(PROT_READ, dex_file->GetPermissions());
- EXPECT_TRUE(dex_file->IsReadOnly());
- return dex_file;
+ EXPECT_EQ(1U, dex_files.size());
+ return std::move(dex_files[0]);
}
static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base64,
const char* location,
uint32_t location_checksum) {
CHECK(base64 != nullptr);
- std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ std::vector<uint8_t> dex_bytes = DecodeBase64Vec(base64);
CHECK_NE(dex_bytes.size(), 0u);
std::string error_message;
@@ -507,4 +519,31 @@ TEST(DexFileUtilsTest, GetBaseLocationAndMultiDexSuffix) {
EXPECT_EQ(":classes8.dex", DexFile::GetMultiDexSuffix("/foo/bar/baz.jar:classes8.dex"));
}
+TEST_F(DexFileTest, ZipOpenClassesPresent) {
+ ScratchFile tmp;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::string error_msg;
+ ASSERT_TRUE(OpenDexFilesBase64(kRawZipClassesDexPresent, tmp.GetFilename().c_str(), &dex_files,
+ &error_msg));
+ EXPECT_EQ(dex_files.size(), 1u);
+}
+
+TEST_F(DexFileTest, ZipOpenClassesAbsent) {
+ ScratchFile tmp;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::string error_msg;
+ ASSERT_FALSE(OpenDexFilesBase64(kRawZipClassesDexAbsent, tmp.GetFilename().c_str(), &dex_files,
+ &error_msg));
+ EXPECT_EQ(dex_files.size(), 0u);
+}
+
+TEST_F(DexFileTest, ZipOpenThreeDexFiles) {
+ ScratchFile tmp;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::string error_msg;
+ ASSERT_TRUE(OpenDexFilesBase64(kRawZipThreeDexFiles, tmp.GetFilename().c_str(), &dex_files,
+ &error_msg));
+ EXPECT_EQ(dex_files.size(), 3u);
+}
+
} // namespace art
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 71c0ad9295..c5a4d7534c 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -29,34 +29,10 @@
#include "leb128.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
+#include "utils.h"
namespace art {
-static const uint8_t kBase64Map[256] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
- 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
- 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
- 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
- 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255
-};
-
// Make the Dex file version 37.
static void MakeDexVersion37(DexFile* dex_file) {
size_t offset = OFFSETOF_MEMBER(DexFile::Header, magic_) + 6;
@@ -64,52 +40,6 @@ static void MakeDexVersion37(DexFile* dex_file) {
*(const_cast<uint8_t*>(dex_file->Begin()) + offset) = '7';
}
-static inline std::unique_ptr<uint8_t[]> DecodeBase64(const char* src, size_t* dst_size) {
- std::vector<uint8_t> tmp;
- uint32_t t = 0, y = 0;
- int g = 3;
- for (size_t i = 0; src[i] != '\0'; ++i) {
- uint8_t c = kBase64Map[src[i] & 0xFF];
- if (c == 255) continue;
- // the final = symbols are read and used to trim the remaining bytes
- if (c == 254) {
- c = 0;
- // prevent g < 0 which would potentially allow an overflow later
- if (--g < 0) {
- *dst_size = 0;
- return nullptr;
- }
- } else if (g != 3) {
- // we only allow = to be at the end
- *dst_size = 0;
- return nullptr;
- }
- t = (t << 6) | c;
- if (++y == 4) {
- tmp.push_back((t >> 16) & 255);
- if (g > 1) {
- tmp.push_back((t >> 8) & 255);
- }
- if (g > 2) {
- tmp.push_back(t & 255);
- }
- y = t = 0;
- }
- }
- if (y != 0) {
- *dst_size = 0;
- return nullptr;
- }
- std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
- if (dst_size != nullptr) {
- *dst_size = tmp.size();
- } else {
- *dst_size = 0;
- }
- std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst;
-}
-
static void FixUpChecksum(uint8_t* dex_file) {
DexFile::Header* header = reinterpret_cast<DexFile::Header*>(dex_file);
uint32_t expected_size = header->file_size_;
@@ -123,7 +53,7 @@ static void FixUpChecksum(uint8_t* dex_file) {
class DexFileVerifierTest : public CommonRuntimeTest {
protected:
DexFile* GetDexFile(const uint8_t* dex_bytes, size_t length) {
- return new DexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
+ return new DexFile(dex_bytes, length, "tmp", 0, nullptr);
}
void VerifyModification(const char* dex_file_base64_content,
@@ -131,7 +61,7 @@ class DexFileVerifierTest : public CommonRuntimeTest {
std::function<void(DexFile*)> f,
const char* expected_error) {
size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(dex_file_base64_content, &length);
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_base64_content, &length));
CHECK(dex_bytes != nullptr);
// Note: `dex_file` will be destroyed before `dex_bytes`.
std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
@@ -1704,7 +1634,7 @@ TEST_F(DexFileVerifierTest, CircularInterfaceImplementation) {
TEST_F(DexFileVerifierTest, Checksum) {
size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(kGoodTestDex, &length);
+ std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(kGoodTestDex, &length));
CHECK(dex_bytes != nullptr);
// Note: `dex_file` will be destroyed before `dex_bytes`.
std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 14110c24ba..99b8805099 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -26,6 +26,8 @@
#include "dex_file.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "handle_scope-inl.h"
+#include "imt_conflict_table.h"
+#include "imtable-inl.h"
#include "indirect_reference_table.h"
#include "invoke_type.h"
#include "jni_internal.h"
@@ -612,7 +614,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
UNREACHABLE();
}
case kInterface: {
- uint32_t imt_index = resolved_method->GetImtIndex();
+ uint32_t imt_index = ImTable::GetImtIndex(resolved_method);
PointerSize pointer_size = class_linker->GetImagePointerSize();
ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
Get(imt_index, pointer_size);
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 5b9d03b7fa..343343fc3f 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -36,7 +36,7 @@ NO_RETURN static void artDeoptimizeImpl(Thread* self, bool single_frame)
// specialized visitor that will show whether a method is Quick or Shadow.
} else {
LOG(INFO) << "Deopting:";
- self->Dump(LOG(INFO));
+ self->Dump(LOG_STREAM(INFO));
}
}
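
The LOG(...) to LOG_STREAM(...) substitutions that recur through the rest of this change are sketched below; the assumption (from android-base logging) is that LOG_STREAM(severity) evaluates to the underlying std::ostream rather than a complete logging statement:

    self->Dump(LOG_STREAM(INFO));               // hand the raw stream to a Dump(std::ostream&) helper
    LOG(INFO) << "Deopting: " << single_frame;  // ordinary one-shot log statement
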
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 76b545652d..446e3431a9 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -169,22 +169,33 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
HandleScope* handle_scope)
// TODO: NO_THREAD_SAFETY_ANALYSIS as GoToRunnable() is NO_THREAD_SAFETY_ANALYSIS
NO_THREAD_SAFETY_ANALYSIS {
- GoToRunnable(self);
+ bool critical_native = called->IsAnnotatedWithCriticalNative();
+ bool fast_native = called->IsAnnotatedWithFastNative();
+ bool normal_native = !critical_native && !fast_native;
+
+ // @Fast and @CriticalNative do not do a state transition.
+ if (LIKELY(normal_native)) {
+ GoToRunnable(self);
+ }
// We need the mutator lock (i.e., calling GoToRunnable()) before accessing the shorty or the
// locked object.
jobject locked = called->IsSynchronized() ? handle_scope->GetHandle(0).ToJObject() : nullptr;
char return_shorty_char = called->GetShorty()[0];
if (return_shorty_char == 'L') {
if (locked != nullptr) {
+ DCHECK(normal_native) << " @FastNative and synchronized are not supported together";
UnlockJniSynchronizedMethod(locked, self);
}
return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceHandleResult(
result.l, saved_local_ref_cookie, self));
} else {
if (locked != nullptr) {
+ DCHECK(normal_native) << " @FastNative and synchronized are not supported together";
UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
}
- PopLocalReferences(saved_local_ref_cookie, self);
+ if (LIKELY(!critical_native)) {
+ PopLocalReferences(saved_local_ref_cookie, self);
+ }
switch (return_shorty_char) {
case 'F': {
if (kRuntimeISA == kX86) {
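
To make the @FastNative/@CriticalNative distinction above concrete, a hedged sketch of the two native entry-point shapes; the function names are made up and registration via RegisterNatives is omitted:

    #include <jni.h>
    // Normal JNI implementation: JNIEnv* and jclass are passed. A @FastNative method
    // keeps this same signature but, per the change above, skips the state transition.
    extern "C" jint ExampleNormalAdd(JNIEnv* env, jclass clazz, jint a, jint b) {
      return a + b;
    }
    // @CriticalNative implementation (hypothetical): no JNIEnv*, no jclass, primitive
    // arguments only, so the trampoline also skips JniMethodStart/End and the
    // local-reference pop.
    extern "C" jint ExampleCriticalAdd(jint a, jint b) {
      return a + b;
    }
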
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 3c6f807d64..cfd948ebc5 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -23,6 +23,8 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
+#include "imt_conflict_table.h"
+#include "imtable-inl.h"
#include "interpreter/interpreter.h"
#include "linear_alloc.h"
#include "method_reference.h"
@@ -1625,7 +1627,8 @@ class ComputeNativeCallFrameSize {
class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
public:
- ComputeGenericJniFrameSize() : num_handle_scope_references_(0) {}
+ explicit ComputeGenericJniFrameSize(bool critical_native)
+ : num_handle_scope_references_(0), critical_native_(critical_native) {}
// Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
// is at *m = sp. Will update to point to the bottom of the save frame.
@@ -1711,6 +1714,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
private:
uint32_t num_handle_scope_references_;
+ const bool critical_native_;
};
uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
@@ -1720,6 +1724,11 @@ uintptr_t ComputeGenericJniFrameSize::PushHandle(mirror::Object* /* ptr */) {
void ComputeGenericJniFrameSize::WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) {
+ // First 2 parameters are always excluded for @CriticalNative.
+ if (UNLIKELY(critical_native_)) {
+ return;
+ }
+
// JNIEnv
sm->AdvancePointer(nullptr);
@@ -1778,11 +1787,16 @@ class FillNativeCall {
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
+ BuildGenericJniFrameVisitor(Thread* self,
+ bool is_static,
+ bool critical_native,
+ const char* shorty,
+ uint32_t shorty_len,
ArtMethod*** sp)
: QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
- jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
- ComputeGenericJniFrameSize fsc;
+ jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
+ sm_(&jni_call_) {
+ ComputeGenericJniFrameSize fsc(critical_native);
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
@@ -1793,11 +1807,14 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
- // jni environment is always first argument
- sm_.AdvancePointer(self->GetJniEnv());
+ // First 2 parameters are always excluded for CriticalNative methods.
+ if (LIKELY(!critical_native)) {
+ // jni environment is always first argument
+ sm_.AdvancePointer(self->GetJniEnv());
- if (is_static) {
- sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ if (is_static) {
+ sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ } // else "this" reference is already handled by QuickArgumentVisitor.
}
}
@@ -1822,8 +1839,11 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
class FillJniCall FINAL : public FillNativeCall {
public:
FillJniCall(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args,
- HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
- handle_scope_(handle_scope), cur_entry_(0) {}
+ HandleScope* handle_scope, bool critical_native)
+ : FillNativeCall(gpr_regs, fpr_regs, stack_args),
+ handle_scope_(handle_scope),
+ cur_entry_(0),
+ critical_native_(critical_native) {}
uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1839,12 +1859,17 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
while (cur_entry_ < expected_slots) {
handle_scope_->GetMutableHandle(cur_entry_++).Assign(nullptr);
}
- DCHECK_NE(cur_entry_, 0U);
+
+ if (!critical_native_) {
+ // Non-critical natives have at least the self class (jclass) or this (jobject).
+ DCHECK_NE(cur_entry_, 0U);
+ }
}
private:
HandleScope* handle_scope_;
size_t cur_entry_;
+ const bool critical_native_;
};
HandleScope* handle_scope_;
@@ -1924,7 +1949,12 @@ extern "C" void* artFindNativeMethod();
extern "C" void* artFindNativeMethod(Thread* self);
#endif
-uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
+static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
+ uint32_t cookie,
+ bool fast_native ATTRIBUTE_UNUSED,
+ jobject l,
+ jobject lock) {
+ // TODO: add entrypoints for @FastNative returning objects.
if (lock != nullptr) {
return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
} else {
@@ -1932,11 +1962,19 @@ uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, j
}
}
-void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) {
+static void artQuickGenericJniEndJNINonRef(Thread* self,
+ uint32_t cookie,
+ bool fast_native,
+ jobject lock) {
if (lock != nullptr) {
JniMethodEndSynchronized(cookie, lock, self);
+ // Ignore "fast_native" here because synchronized functions aren't very fast.
} else {
- JniMethodEnd(cookie, self);
+ if (UNLIKELY(fast_native)) {
+ JniMethodFastEnd(cookie, self);
+ } else {
+ JniMethodEnd(cookie, self);
+ }
}
}
@@ -1958,9 +1996,17 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod**
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
const char* shorty = called->GetShorty(&shorty_len);
+ bool critical_native = called->IsAnnotatedWithCriticalNative();
+ bool fast_native = called->IsAnnotatedWithFastNative();
+ bool normal_native = !critical_native && !fast_native;
// Run the visitor and update sp.
- BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
+ BuildGenericJniFrameVisitor visitor(self,
+ called->IsStatic(),
+ critical_native,
+ shorty,
+ shorty_len,
+ &sp);
{
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
visitor.VisitArguments();
@@ -1973,20 +2019,30 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod**
self->VerifyStack();
- // Start JNI, save the cookie.
uint32_t cookie;
- if (called->IsSynchronized()) {
- cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
- if (self->IsExceptionPending()) {
- self->PopHandleScope();
- // A negative value denotes an error.
- return GetTwoWordFailureValue();
+ uint32_t* sp32;
+ // Skip calling JniMethodStart for @CriticalNative.
+ if (LIKELY(!critical_native)) {
+ // Start JNI, save the cookie.
+ if (called->IsSynchronized()) {
+ DCHECK(normal_native) << " @FastNative and synchronized are not supported together";
+ cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
+ if (self->IsExceptionPending()) {
+ self->PopHandleScope();
+ // A negative value denotes an error.
+ return GetTwoWordFailureValue();
+ }
+ } else {
+ if (fast_native) {
+ cookie = JniMethodFastStart(self);
+ } else {
+ DCHECK(normal_native);
+ cookie = JniMethodStart(self);
+ }
}
- } else {
- cookie = JniMethodStart(self);
+ sp32 = reinterpret_cast<uint32_t*>(sp);
+ *(sp32 - 1) = cookie;
}
- uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
- *(sp32 - 1) = cookie;
// Retrieve the stored native code.
void* nativeCode = called->GetEntryPointFromJni();
@@ -2007,12 +2063,15 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod**
if (nativeCode == nullptr) {
DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
- // End JNI, as the assembly will move to deliver the exception.
- jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
- if (shorty[0] == 'L') {
- artQuickGenericJniEndJNIRef(self, cookie, nullptr, lock);
- } else {
- artQuickGenericJniEndJNINonRef(self, cookie, lock);
+ // @CriticalNative calls do not need to call back into JniMethodEnd.
+ if (LIKELY(!critical_native)) {
+ // End JNI, as the assembly will move to deliver the exception.
+ jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
+ if (shorty[0] == 'L') {
+ artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock);
+ } else {
+ artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock);
+ }
}
return GetTwoWordFailureValue();
@@ -2182,7 +2241,8 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
// If the dex cache already resolved the interface method, look whether we have
// a match in the ImtConflictTable.
- ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), kRuntimePointerSize);
+ ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
+ kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
DCHECK(current_table != nullptr);
@@ -2234,7 +2294,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
- uint32_t imt_index = interface_method->GetImtIndex();
+ uint32_t imt_index = ImTable::GetImtIndex(interface_method);
ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
if (conflict_method->IsRuntimeMethod()) {
ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index f86921c1b9..f9345b64a8 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -438,7 +438,7 @@ bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo,
// above.
abort();
#endif
- self->DumpJavaStack(LOG(ERROR));
+ self->DumpJavaStack(LOG_STREAM(ERROR));
}
return false; // Return false since we want to propagate the fault to the main signal handler.
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 24a2c1783e..3b6750ebdb 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -283,7 +283,7 @@ class CheckReferenceVisitor {
<< from_space->GetGcRetentionPolicy();
LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
<< to_space->GetGcRetentionPolicy();
- heap->DumpSpaces(LOG(INFO));
+ heap->DumpSpaces(LOG_STREAM(INFO));
LOG(FATAL) << "FATAL ERROR";
}
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index dc4e31253f..0c84224fcb 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -37,6 +37,9 @@ static void art_heap_usage_error(const char* function, void* p);
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "../../../external/dlmalloc/malloc.c"
+// Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
+// of libbase, so undefine it now.
+#undef DEBUG
#pragma GCC diagnostic pop
static void* art_heap_morecore(void* m, intptr_t increment) {
@@ -44,11 +47,11 @@ static void* art_heap_morecore(void* m, intptr_t increment) {
}
static void art_heap_corruption(const char* function) {
- LOG(::art::FATAL) << "Corrupt heap detected in: " << function;
+ LOG(FATAL) << "Corrupt heap detected in: " << function;
}
static void art_heap_usage_error(const char* function, void* p) {
- LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p
+ LOG(FATAL) << "Incorrect use of function '" << function << "' argument " << p
<< " not expected";
}
@@ -69,7 +72,7 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
int rc = madvise(start, length, MADV_DONTNEED);
if (UNLIKELY(rc != 0)) {
errno = rc;
- PLOG(::art::FATAL) << "madvise failed during heap trimming";
+ PLOG(FATAL) << "madvise failed during heap trimming";
}
size_t* reclaimed = reinterpret_cast<size_t*>(arg);
*reclaimed += length;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 2750fea751..ab8942a4b8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -430,7 +430,7 @@ void ConcurrentCopying::FlipThreadRoots() {
TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
if (kVerboseMode) {
LOG(INFO) << "time=" << region_space_->Time();
- region_space_->DumpNonFreeRegions(LOG(INFO));
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
}
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotHeld(self);
@@ -447,7 +447,7 @@ void ConcurrentCopying::FlipThreadRoots() {
QuasiAtomic::ThreadFenceForConstructor();
if (kVerboseMode) {
LOG(INFO) << "time=" << region_space_->Time();
- region_space_->DumpNonFreeRegions(LOG(INFO));
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
LOG(INFO) << "GC end of FlipThreadRoots";
}
}
@@ -1622,7 +1622,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset
if (obj != nullptr) {
LogFromSpaceRefHolder(obj, offset);
}
- ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
+ ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
} else {
AssertToSpaceInvariantInNonMovingSpace(obj, ref);
@@ -1645,13 +1645,13 @@ class RootPrinter {
template <class MirrorType>
void VisitRoot(mirror::Object** root)
REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
+ LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
+ LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
}
};
@@ -1670,19 +1670,19 @@ void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
// No info.
} else if (gc_root_source->HasArtField()) {
ArtField* field = gc_root_source->GetArtField();
- LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
+ LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " " << PrettyField(field);
RootPrinter root_printer;
field->VisitRoots(root_printer);
} else if (gc_root_source->HasArtMethod()) {
ArtMethod* method = gc_root_source->GetArtMethod();
- LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
+ LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " " << PrettyMethod(method);
RootPrinter root_printer;
method->VisitRoots(root_printer, kRuntimePointerSize);
}
- ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
- region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
- PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
- MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
+ ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
} else {
AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index d866106532..6d2f009868 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -139,7 +139,7 @@ inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
REQUIRES_SHARED(Locks::mutator_lock_) {
// Marking a large object, make sure its aligned as a sanity check.
if (!IsAligned<kPageSize>(ref)) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
LOG(FATAL) << ref;
}
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index cbc4dc1df0..ad3dd33303 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -413,11 +413,11 @@ class MarkSweep::MarkObjectSlowPath {
if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
(kIsDebugBuild && large_object_space != nullptr &&
!large_object_space->Contains(obj)))) {
- LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
+ LOG(FATAL_WITHOUT_ABORT) << "Tried to mark " << obj << " not contained by any spaces";
if (holder_ != nullptr) {
size_t holder_size = holder_->SizeOf();
ArtField* field = holder_->FindFieldByOffset(offset_);
- LOG(INTERNAL_FATAL) << "Field info: "
+ LOG(FATAL_WITHOUT_ABORT) << "Field info: "
<< " holder=" << holder_
<< " holder is "
<< (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
@@ -440,13 +440,13 @@ class MarkSweep::MarkObjectSlowPath {
// Print the memory content of the holder.
for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
- LOG(INTERNAL_FATAL) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
+ LOG(FATAL_WITHOUT_ABORT) << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = "
<< std::hex << p[i];
}
}
- PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
- MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
- LOG(INTERNAL_FATAL) << "Attempting see if it's a bad thread root";
+ PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
+ MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
+ LOG(FATAL_WITHOUT_ABORT) << "Attempting to see if it's a bad thread root";
mark_sweep_->VerifySuspendedThreadRoots();
LOG(FATAL) << "Can't mark invalid object";
}
@@ -574,7 +574,7 @@ class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
+ LOG(FATAL_WITHOUT_ABORT) << "Found invalid root: " << root << " " << info;
}
}
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 47e6ca3311..2e971729f0 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -296,7 +296,7 @@ class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
- Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
+ Runtime::Current()->GetHeap()->DumpObject(LOG_STREAM(INFO), obj);
LOG(FATAL) << ref << " found in from space";
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index cb5226b7b6..4e6dd2b13c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -327,9 +327,9 @@ Heap::Heap(size_t initial_size,
continue;
}
- space::ImageSpace::CreateMultiImageLocations(image_file_name,
- boot_classpath,
- &image_file_names);
+ space::ImageSpace::ExtractMultiImageLocations(image_file_name,
+ boot_classpath,
+ &image_file_names);
}
} else {
LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
@@ -630,7 +630,7 @@ Heap::Heap(size_t initial_size,
bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap());
if (!no_gap) {
PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
- MemMap::DumpMaps(LOG(ERROR), true);
+ MemMap::DumpMaps(LOG_STREAM(ERROR), true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
}
@@ -3157,14 +3157,14 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) {
// Dump mod-union tables.
for (const auto& table_pair : mod_union_tables_) {
accounting::ModUnionTable* mod_union_table = table_pair.second;
- mod_union_table->Dump(LOG(ERROR) << mod_union_table->GetName() << ": ");
+ mod_union_table->Dump(LOG_STREAM(ERROR) << mod_union_table->GetName() << ": ");
}
// Dump remembered sets.
for (const auto& table_pair : remembered_sets_) {
accounting::RememberedSet* remembered_set = table_pair.second;
- remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
+ remembered_set->Dump(LOG_STREAM(ERROR) << remembered_set->GetName() << ": ");
}
- DumpSpaces(LOG(ERROR));
+ DumpSpaces(LOG_STREAM(ERROR));
}
return visitor.GetFailureCount();
}
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index 35bf718875..2a1635dff9 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <sstream>
+
#include "common_runtime_test.h"
#include "reference_queue.h"
#include "handle_scope-inl.h"
@@ -65,7 +67,9 @@ TEST_F(ReferenceQueueTest, Dump) {
StackHandleScope<20> hs(self);
Mutex lock("Reference queue lock");
ReferenceQueue queue(&lock);
- queue.Dump(LOG(INFO));
+ std::ostringstream oss;
+ queue.Dump(oss);
+ LOG(INFO) << oss.str();
auto weak_ref_class = hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
ScopedNullHandle<mirror::ClassLoader>()));
@@ -78,10 +82,16 @@ TEST_F(ReferenceQueueTest, Dump) {
ASSERT_TRUE(ref1.Get() != nullptr);
auto ref2(hs.NewHandle(finalizer_ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref2.Get() != nullptr);
+
queue.EnqueueReference(ref1.Get());
- queue.Dump(LOG(INFO));
+ oss.str("");
+ queue.Dump(oss);
+ LOG(INFO) << oss.str();
+
queue.EnqueueReference(ref2.Get());
- queue.Dump(LOG(INFO));
+ oss.str("");
+ queue.Dump(oss);
+ LOG(INFO) << oss.str();
}
} // namespace gc
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e41c53264e..a40e408bc8 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -363,15 +363,29 @@ static bool ChecksumsMatch(const char* image_a, const char* image_b, std::string
return true;
}
-static bool ImageCreationAllowed(bool is_global_cache, std::string* error_msg) {
+static bool CanWriteToDalvikCache(const InstructionSet isa) {
+ const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
+ if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
+ return true;
+ } else if (errno != EACCES) {
+ PLOG(WARNING) << "CanWriteToDalvikCache returned error other than EACCES";
+ }
+ return false;
+}
+
+static bool ImageCreationAllowed(bool is_global_cache,
+ const InstructionSet isa,
+ std::string* error_msg) {
// Anyone can write into a "local" cache.
if (!is_global_cache) {
return true;
}
- // Only the zygote is allowed to create the global boot image.
+ // Only the zygote running as root is allowed to create the global boot image.
+ // If the zygote is running as non-root (and cannot write to the dalvik-cache),
+ // then image creation is not allowed.
if (Runtime::Current()->IsZygote()) {
- return true;
+ return CanWriteToDalvikCache(isa);
}
*error_msg = "Only the zygote can create the global boot image.";
@@ -714,7 +728,7 @@ class ImageSpaceLoader {
VLOG(image) << "ImageSpace::Init exiting " << *space.get();
if (VLOG_IS_ON(image)) {
- logger.Dump(LOG(INFO));
+ logger.Dump(LOG_STREAM(INFO));
}
return space;
}
@@ -1284,7 +1298,7 @@ class ImageSpaceLoader {
}
}
if (VLOG_IS_ON(image)) {
- logger.Dump(LOG(INFO));
+ logger.Dump(LOG_STREAM(INFO));
}
return true;
}
@@ -1410,7 +1424,7 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
// Step 0.a: If we're the zygote, mark boot.
const bool is_zygote = Runtime::Current()->IsZygote();
- if (is_zygote && !secondary_image) {
+ if (is_zygote && !secondary_image && CanWriteToDalvikCache(image_isa)) {
MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
}
@@ -1525,7 +1539,7 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
local_error_msg = "Patching disabled.";
} else if (secondary_image) {
local_error_msg = "Cannot patch a secondary image.";
- } else if (ImageCreationAllowed(is_global_cache, &local_error_msg)) {
+ } else if (ImageCreationAllowed(is_global_cache, image_isa, &local_error_msg)) {
bool patch_success =
RelocateImage(image_location, cache_filename.c_str(), image_isa, &local_error_msg);
if (patch_success) {
@@ -1555,7 +1569,7 @@ std::unique_ptr<ImageSpace> ImageSpace::CreateBootImage(const char* image_locati
local_error_msg = "Image compilation disabled.";
} else if (secondary_image) {
local_error_msg = "Cannot compile a secondary image.";
- } else if (ImageCreationAllowed(is_global_cache, &local_error_msg)) {
+ } else if (ImageCreationAllowed(is_global_cache, image_isa, &local_error_msg)) {
bool compilation_success = GenerateImage(cache_filename, image_isa, &local_error_msg);
if (compilation_success) {
std::unique_ptr<ImageSpace> compiled_space =
@@ -1619,9 +1633,54 @@ void ImageSpace::Dump(std::ostream& os) const {
<< ",name=\"" << GetName() << "\"]";
}
-void ImageSpace::CreateMultiImageLocations(const std::string& input_image_file_name,
- const std::string& boot_classpath,
- std::vector<std::string>* image_file_names) {
+std::string ImageSpace::GetMultiImageBootClassPath(
+ const std::vector<const char*>& dex_locations,
+ const std::vector<const char*>& oat_filenames,
+ const std::vector<const char*>& image_filenames) {
+ DCHECK_GT(oat_filenames.size(), 1u);
+ // If the image filename was adapted (e.g., for our tests), we need to change this here,
+ // too, but need to strip all path components (they will be re-established when loading).
+ std::ostringstream bootcp_oss;
+ bool first_bootcp = true;
+ for (size_t i = 0; i < dex_locations.size(); ++i) {
+ if (!first_bootcp) {
+ bootcp_oss << ":";
+ }
+
+ std::string dex_loc = dex_locations[i];
+ std::string image_filename = image_filenames[i];
+
+ // Use the dex_loc path, but the image_filename name (without path elements).
+ size_t dex_last_slash = dex_loc.rfind('/');
+
+ // npos is max(size_t). That makes this a bit ugly.
+ size_t image_last_slash = image_filename.rfind('/');
+ size_t image_last_at = image_filename.rfind('@');
+ size_t image_last_sep = (image_last_slash == std::string::npos)
+ ? image_last_at
+ : (image_last_at == std::string::npos)
+ ? std::string::npos
+ : std::max(image_last_slash, image_last_at);
+ // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
+
+ if (dex_last_slash == std::string::npos) {
+ dex_loc = image_filename.substr(image_last_sep + 1);
+ } else {
+ dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
+ image_filename.substr(image_last_sep + 1);
+ }
+
+ // Image filenames already end with .art, no need to replace.
+
+ bootcp_oss << dex_loc;
+ first_bootcp = false;
+ }
+ return bootcp_oss.str();
+}
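
A worked example of the splicing performed above (paths are hypothetical); each entry combines the dex location's directory with the image file's base name, and entries are joined with ':':

    // dex_locations[i]   = "/system/framework/core-libart.jar"
    // image_filenames[i] = "/data/dalvik-cache/x86/system@framework@boot-core-libart.art"
    //   image_last_sep points at the last '@', so the kept name is "boot-core-libart.art"
    //   resulting entry  = "/system/framework/boot-core-libart.art"
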
+
+void ImageSpace::ExtractMultiImageLocations(const std::string& input_image_file_name,
+ const std::string& boot_classpath,
+ std::vector<std::string>* image_file_names) {
DCHECK(image_file_names != nullptr);
std::vector<std::string> images;
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index c407259506..0ba131b5bf 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -125,10 +125,14 @@ class ImageSpace : public MemMapSpace {
// Use the input image filename to adapt the names in the given boot classpath to establish
// complete locations for secondary images.
- static void CreateMultiImageLocations(const std::string& input_image_file_name,
+ static void ExtractMultiImageLocations(const std::string& input_image_file_name,
const std::string& boot_classpath,
std::vector<std::string>* image_filenames);
+ static std::string GetMultiImageBootClassPath(const std::vector<const char*>& dex_locations,
+ const std::vector<const char*>& oat_filenames,
+ const std::vector<const char*>& image_filenames);
+
// Return the end of the image which includes non-heap objects such as ArtMethods and ArtFields.
uint8_t* GetImageEnd() const {
return Begin() + GetImageHeader().GetImageSize();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 010f677885..16d1f939d7 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -192,7 +192,7 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
auto it = large_objects_.find(ptr);
if (UNLIKELY(it == large_objects_.end())) {
ScopedObjectAccess soa(self);
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(INTERNAL_FATAL));
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
}
MemMap* mem_map = it->second.mem_map;
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index ad38724e7d..2544914a95 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -98,7 +98,9 @@ void LargeObjectSpaceTest::LargeObjectTest() {
}
}
// Test that dump doesn't crash.
- los->Dump(LOG(INFO));
+ std::ostringstream oss;
+ los->Dump(oss);
+ LOG(INFO) << oss.str();
size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
// Checks that the coalescing works.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index e24741a4a6..23cae7c821 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -38,7 +38,7 @@ RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
if (mem_map.get() == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
- MemMap::DumpMaps(LOG(ERROR));
+ MemMap::DumpMaps(LOG_STREAM(ERROR));
return nullptr;
}
return new RegionSpace(name, mem_map.release());
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index 669649efde..da18ae5a60 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -20,6 +20,7 @@
#include "image.h"
#include "art_method.h"
+#include "imt_conflict_table.h"
#include "imtable.h"
#include "read_barrier-inl.h"
diff --git a/runtime/imt_conflict_table.h b/runtime/imt_conflict_table.h
new file mode 100644
index 0000000000..fdd10fefc4
--- /dev/null
+++ b/runtime/imt_conflict_table.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_IMT_CONFLICT_TABLE_H_
+#define ART_RUNTIME_IMT_CONFLICT_TABLE_H_
+
+#include <cstddef>
+
+#include "base/casts.h"
+#include "base/enums.h"
+#include "base/macros.h"
+
+namespace art {
+
+class ArtMethod;
+
+// Table to resolve IMT conflicts at runtime. The table is attached to
+// the JNI entrypoint of IMT conflict ArtMethods.
+// The table contains a list of { interface_method, implementation_method } pairs,
+// with a null last entry so that an assembly implementation of the lookup can
+// detect the end of the table.
+class ImtConflictTable {
+ enum MethodIndex {
+ kMethodInterface,
+ kMethodImplementation,
+ kMethodCount, // Number of elements in enum.
+ };
+
+ public:
+ // Build a new table by copying `other` and appending the new entry formed by
+ // the pair { `interface_method`, `implementation_method` }.
+ ImtConflictTable(ImtConflictTable* other,
+ ArtMethod* interface_method,
+ ArtMethod* implementation_method,
+ PointerSize pointer_size) {
+ const size_t count = other->NumEntries(pointer_size);
+ for (size_t i = 0; i < count; ++i) {
+ SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size));
+ SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size));
+ }
+ SetInterfaceMethod(count, pointer_size, interface_method);
+ SetImplementationMethod(count, pointer_size, implementation_method);
+ // Add the null marker.
+ SetInterfaceMethod(count + 1, pointer_size, nullptr);
+ SetImplementationMethod(count + 1, pointer_size, nullptr);
+ }
+
+ // num_entries excludes the null end marker.
+ ImtConflictTable(size_t num_entries, PointerSize pointer_size) {
+ SetInterfaceMethod(num_entries, pointer_size, nullptr);
+ SetImplementationMethod(num_entries, pointer_size, nullptr);
+ }
+
+ // Set an entry at an index.
+ void SetInterfaceMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
+ SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method);
+ }
+
+ void SetImplementationMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
+ SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method);
+ }
+
+ ArtMethod* GetInterfaceMethod(size_t index, PointerSize pointer_size) const {
+ return GetMethod(index * kMethodCount + kMethodInterface, pointer_size);
+ }
+
+ ArtMethod* GetImplementationMethod(size_t index, PointerSize pointer_size) const {
+ return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
+ }
+
+ // Return true if two conflict tables are the same.
+ bool Equals(ImtConflictTable* other, PointerSize pointer_size) const {
+ size_t num = NumEntries(pointer_size);
+ if (num != other->NumEntries(pointer_size)) {
+ return false;
+ }
+ for (size_t i = 0; i < num; ++i) {
+ if (GetInterfaceMethod(i, pointer_size) != other->GetInterfaceMethod(i, pointer_size) ||
+ GetImplementationMethod(i, pointer_size) !=
+ other->GetImplementationMethod(i, pointer_size)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Visit all of the entries.
+ // NO_THREAD_SAFETY_ANALYSIS since this may be called with locks held. The visitor is passed a
+ // pair of ArtMethod* and returns the (possibly updated) pair. The order is <interface, implementation>.
+ template<typename Visitor>
+ void Visit(const Visitor& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS {
+ uint32_t table_index = 0;
+ for (;;) {
+ ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size);
+ if (interface_method == nullptr) {
+ break;
+ }
+ ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size);
+ auto input = std::make_pair(interface_method, implementation_method);
+ std::pair<ArtMethod*, ArtMethod*> updated = visitor(input);
+ if (input.first != updated.first) {
+ SetInterfaceMethod(table_index, pointer_size, updated.first);
+ }
+ if (input.second != updated.second) {
+ SetImplementationMethod(table_index, pointer_size, updated.second);
+ }
+ ++table_index;
+ }
+ }
+
+ // Look up the implementation ArtMethod associated with `interface_method`. Return null
+ // if not found.
+ ArtMethod* Lookup(ArtMethod* interface_method, PointerSize pointer_size) const {
+ uint32_t table_index = 0;
+ for (;;) {
+ ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size);
+ if (current_interface_method == nullptr) {
+ break;
+ }
+ if (current_interface_method == interface_method) {
+ return GetImplementationMethod(table_index, pointer_size);
+ }
+ ++table_index;
+ }
+ return nullptr;
+ }
+
+ // Compute the number of entries in this table.
+ size_t NumEntries(PointerSize pointer_size) const {
+ uint32_t table_index = 0;
+ while (GetInterfaceMethod(table_index, pointer_size) != nullptr) {
+ ++table_index;
+ }
+ return table_index;
+ }
+
+ // Compute the size in bytes taken by this table.
+ size_t ComputeSize(PointerSize pointer_size) const {
+ // Add the end marker.
+ return ComputeSize(NumEntries(pointer_size), pointer_size);
+ }
+
+ // Compute the size in bytes needed to copy the given `table` plus room for
+ // one more entry.
+ static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, PointerSize pointer_size) {
+ return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
+ }
+
+ // Compute size with a fixed number of entries.
+ static size_t ComputeSize(size_t num_entries, PointerSize pointer_size) {
+ return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator.
+ }
+
+ static size_t EntrySize(PointerSize pointer_size) {
+ return static_cast<size_t>(pointer_size) * static_cast<size_t>(kMethodCount);
+ }
+
+ private:
+ ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
+ if (pointer_size == PointerSize::k64) {
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
+ } else {
+ return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
+ }
+ }
+
+ void SetMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
+ if (pointer_size == PointerSize::k64) {
+ data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
+ } else {
+ data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
+ }
+ }
+
+ // Array of entries that the assembly stubs will iterate over. Note that this is
+ // not fixed size, and we allocate data prior to calling the constructor
+ // of ImtConflictTable.
+ union {
+ uint32_t data32_[0];
+ uint64_t data64_[0];
+ };
+
+ DISALLOW_COPY_AND_ASSIGN(ImtConflictTable);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_IMT_CONFLICT_TABLE_H_
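
Editor's note: the conflict table above is a flat, null-terminated array of pointer pairs; both the assembly stubs and the C++ `Lookup` walk it until the interface slot is null. A standalone illustration of that layout (plain pointers instead of `ArtMethod*`, so it compiles outside ART):

    #include <cstddef>
    #include <iostream>

    struct Method { const char* name; };

    // Flat layout: [iface0, impl0, iface1, impl1, ..., nullptr, nullptr]
    Method iface_a{"A.foo"}, impl_a{"Impl.foo"};
    Method iface_b{"B.bar"}, impl_b{"Impl.bar"};
    Method* table[] = { &iface_a, &impl_a, &iface_b, &impl_b, nullptr, nullptr };

    Method* Lookup(Method** entries, Method* interface_method) {
      for (size_t i = 0; entries[2 * i] != nullptr; ++i) {   // stop at the null marker
        if (entries[2 * i] == interface_method) {
          return entries[2 * i + 1];                         // implementation slot
        }
      }
      return nullptr;
    }

    int main() {
      Method* impl = Lookup(table, &iface_b);
      std::cout << (impl != nullptr ? impl->name : "not found") << std::endl;  // Impl.bar
      return 0;
    }
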
diff --git a/runtime/imtable-inl.h b/runtime/imtable-inl.h
new file mode 100644
index 0000000000..0cb9b5e4dc
--- /dev/null
+++ b/runtime/imtable-inl.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_IMTABLE_INL_H_
+#define ART_RUNTIME_IMTABLE_INL_H_
+
+#include "imtable.h"
+
+#include "art_method-inl.h"
+
+namespace art {
+
+inline uint32_t ImTable::GetBaseImtHash(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return method->GetDexMethodIndex();
+}
+
+inline uint32_t ImTable::GetImtIndex(ArtMethod* method) {
+ return GetBaseImtHash(method) % ImTable::kSize;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_IMTABLE_INL_H_
+
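
Editor's note: `GetImtIndex` above reduces to a modulo of the dex method index by the table size, a build-time constant (the `IMT_SIZE` flag checked in imtable.h), so distinct interface methods can collide and fall back to the conflict table. A toy sketch of that mapping, with an arbitrary stand-in for the real table size:

    #include <cstdint>
    #include <iostream>

    // Stand-in for ImTable::kSize; the real value comes from the IMT_SIZE build flag.
    constexpr uint32_t kImtSize = 64;

    // Mirrors ImTable::GetImtIndex: the base hash is just the dex method index.
    uint32_t GetImtIndex(uint32_t dex_method_index) {
      return dex_method_index % kImtSize;
    }

    int main() {
      // Two methods whose indices differ by a multiple of kImtSize share a slot and
      // therefore need an ImtConflictTable entry to disambiguate at runtime.
      std::cout << GetImtIndex(7) << " " << GetImtIndex(7 + kImtSize) << std::endl;  // 7 7
      return 0;
    }
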
diff --git a/runtime/imtable.h b/runtime/imtable.h
index 2416621f49..6df890d14b 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -21,9 +21,13 @@
#error IMT_SIZE not defined
#endif
+#include "base/enums.h"
+#include "base/macros.h"
+
namespace art {
class ArtMethod;
+class DexFile;
class ImTable {
public:
@@ -69,6 +73,19 @@ class ImTable {
constexpr static size_t SizeInBytes(PointerSize pointer_size) {
return kSize * static_cast<size_t>(pointer_size);
}
+
+ // Converts a method to the base hash used in GetImtIndex.
+ ALWAYS_INLINE static inline uint32_t GetBaseImtHash(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE static inline uint32_t GetBaseImtHash(const DexFile* dex_file, uint32_t method_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // The (complete) hashing scheme to map an ArtMethod to a slot in the Interface Method Table
+ // (IMT).
+ ALWAYS_INLINE static inline uint32_t GetImtIndex(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE static inline uint32_t GetImtIndex(const DexFile* dex_file, uint32_t method_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace art
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 4f81b59115..1f39a1e695 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -188,7 +188,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
ScopedObjectAccess soa(self);
LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
if (kDumpStackOnNonLocalReference) {
- self->Dump(LOG(WARNING));
+ self->Dump(LOG_STREAM(WARNING));
}
}
return true;
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 77c3f0f4ae..5934f13b30 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -576,9 +576,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Replace calls to String.<init> with equivalent StringFactory call.
if (UNLIKELY(called_method->GetDeclaringClass()->IsStringClass()
&& called_method->IsConstructor())) {
- ScopedObjectAccessUnchecked soa(self);
- jmethodID mid = soa.EncodeMethod(called_method);
- called_method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ called_method = WellKnownClasses::StringInitToStringFactory(called_method);
string_init = true;
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index a8c7d15fd0..cf8d4bd1b5 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -38,8 +38,8 @@ void CheckMterpAsmConstants() {
int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
(uintptr_t) artMterpAsmInstructionStart;
if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
- LOG(art::FATAL) << "ERROR: unexpected asm interp size " << interp_size
- << "(did an instruction handler exceed " << width << " bytes?)";
+ LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
+ << "(did an instruction handler exceed " << width << " bytes?)";
}
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index d505aea10a..98e358b8b8 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -858,7 +858,7 @@ void UnstartedRuntime::UnstartedThreadLocalGet(
ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
"<init>", "()V", cl->GetImagePointerSize());
if (init_method == nullptr) {
- h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+ h_real_to_string_class->DumpClass(LOG_STREAM(FATAL), mirror::Class::kDumpClassFullDetail);
} else {
JValue invoke_result;
EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index 2507fe9e6e..3be7fd6428 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -121,7 +121,7 @@ static JdwpSocketState* SocketStartup(JdwpState* state, uint16_t port, bool prob
netState->listenSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
if (netState->listenSock < 0) {
- PLOG(probe ? ERROR : FATAL) << "Socket create failed";
+ PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL) << "Socket create failed";
goto fail;
}
@@ -129,7 +129,8 @@ static JdwpSocketState* SocketStartup(JdwpState* state, uint16_t port, bool prob
{
int one = 1;
if (setsockopt(netState->listenSock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
- PLOG(probe ? ERROR : FATAL) << "setsockopt(SO_REUSEADDR) failed";
+ PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL)
+ << "setsockopt(SO_REUSEADDR) failed";
goto fail;
}
}
@@ -143,14 +144,15 @@ static JdwpSocketState* SocketStartup(JdwpState* state, uint16_t port, bool prob
inet_aton("127.0.0.1", &addr.addrInet.sin_addr);
if (bind(netState->listenSock, &addr.addrPlain, sizeof(addr)) != 0) {
- PLOG(probe ? ERROR : FATAL) << "Attempt to bind to port " << port << " failed";
+ PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL)
+ << "Attempt to bind to port " << port << " failed";
goto fail;
}
netState->listenPort = port;
if (listen(netState->listenSock, 5) != 0) {
- PLOG(probe ? ERROR : FATAL) << "Listen failed";
+ PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL) << "Listen failed";
goto fail;
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d984f45253..afa52ca2c4 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -337,7 +337,7 @@ bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
Jit::~Jit() {
DCHECK(!profile_saver_options_.IsEnabled() || !ProfileSaver::IsStarted());
if (dump_info_on_shutdown_) {
- DumpInfo(LOG(INFO));
+ DumpInfo(LOG_STREAM(INFO));
}
DeleteThreadPool();
if (jit_compiler_handle_ != nullptr) {
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 42916c3e1e..a4bc3fca47 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -447,7 +447,7 @@ void ProfileSaver::Stop(bool dump_info) {
}
instance_->shutting_down_ = true;
if (dump_info) {
- instance_->DumpInfo(LOG(INFO));
+ instance_->DumpInfo(LOG_STREAM(INFO));
}
}
diff --git a/runtime/jni_env_ext-inl.h b/runtime/jni_env_ext-inl.h
index dc6a3e8f62..685b056e8d 100644
--- a/runtime/jni_env_ext-inl.h
+++ b/runtime/jni_env_ext-inl.h
@@ -32,9 +32,10 @@ inline T JNIEnvExt::AddLocalReference(mirror::Object* obj) {
if (check_jni) {
size_t entry_count = locals.Capacity();
if (entry_count > 16) {
- locals.Dump(LOG(WARNING) << "Warning: more than 16 JNI local references: "
- << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")\n");
- // TODO: LOG(FATAL) in a later release?
+ locals.Dump(LOG_STREAM(WARNING) << "Warning: more than 16 JNI local references: "
+ << entry_count << " (most recent was a "
+ << PrettyTypeOf(obj) << ")\n");
+ // TODO: LOG(FATAL) in a later release?
}
}
}
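
Editor's note: the warning above fires when a single native frame accumulates more than 16 JNI local references. As a reminder of how native code usually keeps that count bounded (standard JNI, independent of ART's internals), a minimal sketch; the function and parameter names are illustrative:

    #include <jni.h>

    // Iterate a Java object array without letting local references pile up.
    // `array` is assumed to be a jobjectArray argument of a JNI entry point.
    void ProcessElements(JNIEnv* env, jobjectArray array) {
      jsize length = env->GetArrayLength(array);
      for (jsize i = 0; i < length; ++i) {
        jobject element = env->GetObjectArrayElement(array, i);  // creates a new local reference
        // ... use `element` ...
        env->DeleteLocalRef(element);  // release it before the next iteration
      }
    }
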
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index a434442d93..a11f9ab31f 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -100,8 +100,9 @@ static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
const char* kind, jint idx, bool return_errors)
REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
- << PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
+ LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
+ << "Failed to register native method in " << PrettyDescriptor(c)
+ << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"%s is null at index %d", kind, idx);
@@ -375,7 +376,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
ArtMethod* m = soa.DecodeMethod(mid);
- mirror::AbstractMethod* method;
+ mirror::Executable* method;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
if (m->IsConstructor()) {
@@ -618,7 +619,8 @@ class JNI {
}
if (c->IsStringClass()) {
// Replace calls to String.<init> with equivalent StringFactory call.
- jmethodID sf_mid = WellKnownClasses::StringInitToStringFactoryMethodID(mid);
+ jmethodID sf_mid = soa.EncodeMethod(
+ WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
mirror::Object* result = c->AllocObject(soa.Self());
@@ -643,7 +645,8 @@ class JNI {
}
if (c->IsStringClass()) {
// Replace calls to String.<init> with equivalent StringFactory call.
- jmethodID sf_mid = WellKnownClasses::StringInitToStringFactoryMethodID(mid);
+ jmethodID sf_mid = soa.EncodeMethod(
+ WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid)));
return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
}
mirror::Object* result = c->AllocObject(soa.Self());
@@ -2226,16 +2229,20 @@ class JNI {
}
if (m == nullptr) {
- LOG(return_errors ? ERROR : INTERNAL_FATAL) << "Failed to register native method "
+ c->DumpClass(
+ LOG_STREAM(return_errors
+ ? ::android::base::ERROR
+ : ::android::base::FATAL_WITHOUT_ABORT),
+ mirror::Class::kDumpClassFullDetail);
+ LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
+ << "Failed to register native method "
<< PrettyDescriptor(c) << "." << name << sig << " in "
<< c->GetDexCache()->GetLocation()->ToModifiedUtf8();
- // Safe to pass in LOG(FATAL) since the log object aborts in destructor and only goes
- // out of scope after the DumpClass is done executing.
- c->DumpClass(LOG(return_errors ? ERROR : FATAL), mirror::Class::kDumpClassFullDetail);
ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static");
return JNI_ERR;
} else if (!m->IsNative()) {
- LOG(return_errors ? ERROR : FATAL) << "Failed to register non-native method "
+ LOG(return_errors ? ::android::base::ERROR : ::android::base::FATAL)
+ << "Failed to register non-native method "
<< PrettyDescriptor(c) << "." << name << sig
<< " as native";
ThrowNoSuchMethodError(soa, c, name, sig, "native");
@@ -2478,7 +2485,7 @@ class JNI {
} else if (kWarnJniAbort && memcmp(array_data, elements, bytes) != 0) {
// Warn if we have JNI_ABORT and the arrays don't match since this is usually an error.
LOG(WARNING) << "Possible incorrect JNI_ABORT in Release*ArrayElements";
- soa.Self()->DumpJavaStack(LOG(WARNING));
+ soa.Self()->DumpJavaStack(LOG_STREAM(WARNING));
}
}
if (mode != JNI_COMMIT) {
@@ -3042,7 +3049,7 @@ std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs) {
os << "JNIWeakGlobalRefType";
return os;
default:
- LOG(::art::FATAL) << "jobjectRefType[" << static_cast<int>(rhs) << "]";
+ LOG(FATAL) << "jobjectRefType[" << static_cast<int>(rhs) << "]";
UNREACHABLE();
}
}
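
Editor's note: the Release*ArrayElements warning above targets callers that pass JNI_ABORT after modifying the buffer. Under the standard JNI contract (not ART-specific), mode 0 copies back and frees, JNI_COMMIT copies back without freeing, and JNI_ABORT frees without copying back. A minimal sketch of a read-only use where JNI_ABORT is the right choice:

    #include <jni.h>

    // Sum an int[] without writing to it; JNI_ABORT tells the VM it may discard
    // the (possibly copied) buffer without copying anything back.
    jlong SumIntArray(JNIEnv* env, jintArray array) {
      jsize length = env->GetArrayLength(array);
      jint* elements = env->GetIntArrayElements(array, /*isCopy=*/nullptr);
      if (elements == nullptr) {
        return 0;  // OutOfMemoryError already pending
      }
      jlong sum = 0;
      for (jsize i = 0; i < length; ++i) {
        sum += elements[i];
      }
      env->ReleaseIntArrayElements(array, elements, JNI_ABORT);  // read-only: no copy back
      return sum;
    }
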
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
deleted file mode 100644
index 9c2061387e..0000000000
--- a/runtime/mirror/abstract_method.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
-#define ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
-
-#include "executable.h"
-#include "gc_root.h"
-#include "object.h"
-#include "object_callbacks.h"
-#include "read_barrier_option.h"
-
-namespace art {
-
-struct AbstractMethodOffsets;
-class ArtMethod;
-
-namespace mirror {
-
-// C++ mirror of java.lang.reflect.AbstractMethod.
-class MANAGED AbstractMethod : public Executable {
- public:
- // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
- template <PointerSize kPointerSize, bool kTransactionActive>
- bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
- ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- // Only used by the image writer.
- template <bool kTransactionActive = false>
- void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
-
- private:
- static MemberOffset ArtMethodOffset() {
- return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, art_method_));
- }
- static MemberOffset DeclaringClassOffset() {
- return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_));
- }
- static MemberOffset DeclaringClassOfOverriddenMethodOffset() {
- return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_of_overridden_method_));
- }
- static MemberOffset AccessFlagsOffset() {
- return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, access_flags_));
- }
- static MemberOffset DexMethodIndexOffset() {
- return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, dex_method_index_));
- }
-
- HeapReference<mirror::Class> declaring_class_;
- HeapReference<mirror::Class> declaring_class_of_overridden_method_;
- uint64_t art_method_;
- uint32_t access_flags_;
- uint32_t dex_method_index_;
-
- friend struct art::AbstractMethodOffsets; // for verifying offset information
- DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod);
-};
-
-} // namespace mirror
-} // namespace art
-
-#endif // ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f21baed70f..2e5f532606 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -147,8 +147,8 @@ void Class::SetDexCache(DexCache* new_dex_cache) {
void Class::SetClassSize(uint32_t new_class_size) {
if (kIsDebugBuild && new_class_size < GetClassSize()) {
- DumpClass(LOG(INTERNAL_FATAL), kDumpClassFullDetail);
- LOG(INTERNAL_FATAL) << new_class_size << " vs " << GetClassSize();
+ DumpClass(LOG_STREAM(FATAL_WITHOUT_ABORT), kDumpClassFullDetail);
+ LOG(FATAL_WITHOUT_ABORT) << new_class_size << " vs " << GetClassSize();
LOG(FATAL) << "class=" << PrettyTypeOf(this);
}
// Not called within a transaction.
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/executable.cc
index b4dce583e1..33ebd817d1 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/executable.cc
@@ -14,15 +14,14 @@
* limitations under the License.
*/
-#include "abstract_method.h"
-
#include "art_method-inl.h"
+#include "executable.h"
namespace art {
namespace mirror {
template <PointerSize kPointerSize, bool kTransactionActive>
-bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
+bool Executable::CreateFromArtMethod(ArtMethod* method) {
auto* interface_method = method->GetInterfaceMethodIfProxy(kPointerSize);
SetArtMethod<kTransactionActive>(method);
SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass());
@@ -33,28 +32,28 @@ bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
return true;
}
-template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, false>(
+template bool Executable::CreateFromArtMethod<PointerSize::k32, false>(
ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, true>(
+template bool Executable::CreateFromArtMethod<PointerSize::k32, true>(
ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, false>(
+template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(
ArtMethod* method);
-template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, true>(
+template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(
ArtMethod* method);
-ArtMethod* AbstractMethod::GetArtMethod() {
+ArtMethod* Executable::GetArtMethod() {
return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
}
template <bool kTransactionActive>
-void AbstractMethod::SetArtMethod(ArtMethod* method) {
+void Executable::SetArtMethod(ArtMethod* method) {
SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
}
-template void AbstractMethod::SetArtMethod<false>(ArtMethod* method);
-template void AbstractMethod::SetArtMethod<true>(ArtMethod* method);
+template void Executable::SetArtMethod<false>(ArtMethod* method);
+template void Executable::SetArtMethod<true>(ArtMethod* method);
-mirror::Class* AbstractMethod::GetDeclaringClass() {
+mirror::Class* Executable::GetDeclaringClass() {
return GetFieldObject<mirror::Class>(DeclaringClassOffset());
}
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index 232fce8693..6c465f6bbb 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -32,9 +32,42 @@ namespace mirror {
// C++ mirror of java.lang.reflect.Executable.
class MANAGED Executable : public AccessibleObject {
+ public:
+ // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
+ template <PointerSize kPointerSize, bool kTransactionActive>
+ bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
+
+ ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Only used by the image writer.
+ template <bool kTransactionActive = false>
+ void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
uint16_t has_real_parameter_data_;
+ HeapReference<mirror::Class> declaring_class_;
+ HeapReference<mirror::Class> declaring_class_of_overridden_method_;
HeapReference<mirror::Array> parameters_;
+ uint64_t art_method_;
+ uint32_t access_flags_;
+ uint32_t dex_method_index_;
+
+ static MemberOffset ArtMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Executable, art_method_));
+ }
+ static MemberOffset DeclaringClassOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Executable, declaring_class_));
+ }
+ static MemberOffset DeclaringClassOfOverriddenMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Executable, declaring_class_of_overridden_method_));
+ }
+ static MemberOffset AccessFlagsOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Executable, access_flags_));
+ }
+ static MemberOffset DexMethodIndexOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Executable, dex_method_index_));
+ }
friend struct art::ExecutableOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Executable);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index ef16719f27..71bac7e3d6 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -56,7 +56,7 @@ Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<AbstractMethod*>(ret)->
+ static_cast<Executable*>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
return ret;
@@ -108,7 +108,7 @@ Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << PrettyMethod(method);
auto* ret = down_cast<Constructor*>(StaticClass()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
- static_cast<AbstractMethod*>(ret)->
+ static_cast<Executable*>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
return ret;
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 6881991736..205ea7a050 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -17,8 +17,8 @@
#ifndef ART_RUNTIME_MIRROR_METHOD_H_
#define ART_RUNTIME_MIRROR_METHOD_H_
-#include "abstract_method.h"
#include "gc_root.h"
+#include "executable.h"
namespace art {
namespace mirror {
@@ -26,7 +26,7 @@ namespace mirror {
class Class;
// C++ mirror of java.lang.reflect.Method.
-class MANAGED Method : public AbstractMethod {
+class MANAGED Method : public Executable {
public:
template <PointerSize kPointerSize, bool kTransactionActive>
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
@@ -58,7 +58,7 @@ class MANAGED Method : public AbstractMethod {
};
// C++ mirror of java.lang.reflect.Constructor.
-class MANAGED Constructor: public AbstractMethod {
+class MANAGED Constructor: public Executable {
public:
template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
diff --git a/runtime/mirror/obj_ptr.h b/runtime/mirror/obj_ptr.h
new file mode 100644
index 0000000000..10378e8bc9
--- /dev/null
+++ b/runtime/mirror/obj_ptr.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_OBJ_PTR_H_
+#define ART_RUNTIME_MIRROR_OBJ_PTR_H_
+
+#include "base/mutex.h" // For Locks::mutator_lock_.
+#include "globals.h"
+#include "mirror/object_reference.h"
+#include "utils.h"
+
+namespace art {
+namespace mirror {
+
+class Object;
+
+// Value type representing a pointer to a mirror::Object of type MirrorType.
+// Pass kPoison as a template boolean for testing in non-debug builds.
+// Note that the functions are not fully thread-safe: validity checks may
+// spuriously pass when racing with thread suspension on other threads.
+template<class MirrorType, bool kPoison = kIsDebugBuild>
+class ObjPtr {
+ static constexpr size_t kCookieShift =
+ sizeof(mirror::HeapReference<mirror::Object>) * kBitsPerByte - kObjectAlignmentShift;
+ static constexpr size_t kCookieBits = sizeof(uintptr_t) * kBitsPerByte - kCookieShift;
+ static constexpr uintptr_t kCookieMask = (static_cast<uintptr_t>(1u) << kCookieBits) - 1;
+
+ static_assert(kCookieBits >= kObjectAlignmentShift,
+ "must have a least kObjectAlignmentShift bits");
+
+ public:
+ ALWAYS_INLINE ObjPtr() REQUIRES_SHARED(Locks::mutator_lock_) : reference_(0u) {}
+
+ ALWAYS_INLINE explicit ObjPtr(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
+ : reference_(Encode(ptr)) {}
+
+ ALWAYS_INLINE explicit ObjPtr(const ObjPtr& other) REQUIRES_SHARED(Locks::mutator_lock_)
+ = default;
+
+ ALWAYS_INLINE ObjPtr& operator=(const ObjPtr& other) {
+ reference_ = other.reference_;
+ return *this;
+ }
+
+ ALWAYS_INLINE ObjPtr& operator=(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
+ Assign(ptr);
+ return *this;
+ }
+
+ ALWAYS_INLINE void Assign(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
+ reference_ = Encode(ptr);
+ }
+
+ ALWAYS_INLINE MirrorType* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Get();
+ }
+
+ ALWAYS_INLINE MirrorType* Get() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Decode();
+ }
+
+ ALWAYS_INLINE bool IsNull() const {
+ return reference_ == 0;
+ }
+
+ ALWAYS_INLINE bool IsValid() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!kPoison || IsNull()) {
+ return true;
+ }
+ return GetCookie() == TrimCookie(Thread::Current()->GetPoisonObjectCookie());
+ }
+
+ ALWAYS_INLINE void AssertValid() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kPoison) {
+ CHECK(IsValid()) << "Stale object pointer, expected cookie "
+ << TrimCookie(Thread::Current()->GetPoisonObjectCookie()) << " but got " << GetCookie();
+ }
+ }
+
+ ALWAYS_INLINE bool operator==(const ObjPtr& ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Decode() == ptr.Decode();
+ }
+
+ ALWAYS_INLINE bool operator==(const MirrorType* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Decode() == ptr;
+ }
+
+ ALWAYS_INLINE bool operator==(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return IsNull();
+ }
+
+ ALWAYS_INLINE bool operator!=(const ObjPtr& ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Decode() != ptr.Decode();
+ }
+
+ ALWAYS_INLINE bool operator!=(const MirrorType* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Decode() != ptr;
+ }
+
+ ALWAYS_INLINE bool operator!=(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return !IsNull();
+ }
+
+ private:
+ // Trim off high bits of thread local cookie.
+ ALWAYS_INLINE static uintptr_t TrimCookie(uintptr_t cookie) {
+ return cookie & kCookieMask;
+ }
+
+ ALWAYS_INLINE uintptr_t GetCookie() const {
+ return reference_ >> kCookieShift;
+ }
+
+ ALWAYS_INLINE static uintptr_t Encode(MirrorType* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uintptr_t ref = reinterpret_cast<uintptr_t>(ptr);
+ if (kPoison && ref != 0) {
+ DCHECK_LE(ref, 0xFFFFFFFFU);
+ ref >>= kObjectAlignmentShift;
+ // Put cookie in high bits.
+ Thread* self = Thread::Current();
+ DCHECK(self != nullptr);
+ ref |= self->GetPoisonObjectCookie() << kCookieShift;
+ }
+ return ref;
+ }
+
+ // Decode makes sure that the object pointer is valid.
+ ALWAYS_INLINE MirrorType* Decode() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ AssertValid();
+ if (kPoison) {
+ return reinterpret_cast<MirrorType*>(
+ static_cast<uintptr_t>(static_cast<uint32_t>(reference_ << kObjectAlignmentShift)));
+ } else {
+ return reinterpret_cast<MirrorType*>(reference_);
+ }
+ }
+
+ // The encoded reference and cookie.
+ uintptr_t reference_;
+};
+
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_OBJ_PTR_H_
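
Editor's note: the poisoning scheme above packs a compressed (right-shifted) 32-bit reference into the low bits of a uintptr_t and a per-thread cookie into the high bits; Decode re-expands the low bits and AssertValid compares cookies to catch references held across a suspend point. A standalone sketch of the same bit layout, assuming 8-byte object alignment and 32-bit heap references, with an explicit cookie instead of Thread::Current() so it compiles outside ART:

    #include <cstdint>
    #include <iostream>

    constexpr unsigned kAlignShift = 3;                   // 8-byte aligned objects
    constexpr unsigned kCookieShift = 32 - kAlignShift;   // 29: low bits hold the compressed ref

    uintptr_t Encode(uint32_t address, uintptr_t cookie) {
      uintptr_t ref = address >> kAlignShift;             // compress the aligned address
      ref |= cookie << kCookieShift;                      // stash the thread cookie in the high bits
      return ref;
    }

    uint32_t Decode(uintptr_t reference) {
      // Shifting left and truncating to 32 bits drops the cookie and restores the address.
      return static_cast<uint32_t>(reference << kAlignShift);
    }

    uintptr_t GetCookie(uintptr_t reference) { return reference >> kCookieShift; }

    int main() {
      uint32_t address = 0x12345678 & ~0x7u;  // pretend heap address, 8-byte aligned
      uintptr_t encoded = Encode(address, /*cookie=*/5);
      std::cout << std::hex << Decode(encoded) << " cookie=" << std::dec
                << GetCookie(encoded) << std::endl;  // 12345678 cookie=5
      return 0;
    }
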
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index afd6115f1b..f4ecfb53b8 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -35,6 +35,7 @@
#include "gc/heap.h"
#include "handle_scope-inl.h"
#include "iftable-inl.h"
+#include "obj_ptr.h"
#include "object-inl.h"
#include "object_array-inl.h"
#include "scoped_thread_state_change.h"
@@ -738,5 +739,62 @@ TEST_F(ObjectTest, IdentityHashCode) {
EXPECT_NE(hash_code, 0);
}
+TEST_F(ObjectTest, ObjectPointer) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader = LoadDex("XandY");
+ StackHandleScope<2> hs(soa.Self());
+ ObjPtr<mirror::Object, /*kPoison*/ true> null_ptr;
+ EXPECT_TRUE(null_ptr.IsNull());
+ EXPECT_TRUE(null_ptr.IsValid());
+ EXPECT_TRUE(null_ptr.Get() == nullptr);
+ EXPECT_TRUE(null_ptr == nullptr);
+ EXPECT_TRUE(null_ptr == null_ptr);
+ EXPECT_FALSE(null_ptr != null_ptr);
+ EXPECT_FALSE(null_ptr != nullptr);
+ null_ptr.AssertValid();
+ Handle<ClassLoader> class_loader(hs.NewHandle(soa.Decode<ClassLoader*>(jclass_loader)));
+ Handle<mirror::Class> h_X(
+ hs.NewHandle(class_linker_->FindClass(soa.Self(), "LX;", class_loader)));
+ ObjPtr<Class, /*kPoison*/ true> X(h_X.Get());
+ EXPECT_TRUE(!X.IsNull());
+ EXPECT_TRUE(X.IsValid());
+ EXPECT_TRUE(X.Get() != nullptr);
+ EXPECT_EQ(h_X.Get(), X.Get());
+ // FindClass may cause thread suspension; it should invalidate X.
+ ObjPtr<Class, /*kPoison*/ true> Y(class_linker_->FindClass(soa.Self(), "LY;", class_loader));
+ EXPECT_TRUE(!Y.IsNull());
+ EXPECT_TRUE(Y.IsValid());
+ EXPECT_TRUE(Y.Get() != nullptr);
+
+ // Should IsNull be safe to call on a stale ObjPtr? I'll allow it for now.
+ EXPECT_TRUE(!X.IsNull());
+ EXPECT_TRUE(!X.IsValid());
+ // Make X valid again by copying out of handle.
+ X.Assign(h_X.Get());
+ EXPECT_TRUE(!X.IsNull());
+ EXPECT_TRUE(X.IsValid());
+ EXPECT_EQ(h_X.Get(), X.Get());
+
+ // Allow thread suspension to invalidate Y.
+ soa.Self()->AllowThreadSuspension();
+ EXPECT_TRUE(!Y.IsNull());
+ EXPECT_TRUE(!Y.IsValid());
+
+ // Test unpoisoned.
+ ObjPtr<mirror::Object, /*kPoison*/ false> unpoisoned;
+ EXPECT_TRUE(unpoisoned.IsNull());
+ EXPECT_TRUE(unpoisoned.IsValid());
+ EXPECT_TRUE(unpoisoned.Get() == nullptr);
+ EXPECT_TRUE(unpoisoned == nullptr);
+ EXPECT_TRUE(unpoisoned == unpoisoned);
+ EXPECT_FALSE(unpoisoned != unpoisoned);
+ EXPECT_FALSE(unpoisoned != nullptr);
+
+ unpoisoned = h_X.Get();
+ EXPECT_FALSE(unpoisoned.IsNull());
+ EXPECT_TRUE(unpoisoned == h_X.Get());
+ EXPECT_EQ(unpoisoned.Get(), h_X.Get());
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 22cc197996..49b83a719d 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -457,7 +457,7 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
if (!Runtime::Current()->IsStarted() || VLOG_IS_ON(monitor)) {
std::ostringstream ss;
self->Dump(ss);
- LOG(Runtime::Current()->IsStarted() ? INFO : ERROR)
+ LOG(Runtime::Current()->IsStarted() ? ::android::base::INFO : ::android::base::ERROR)
<< self->GetException()->Dump() << "\n" << ss.str();
}
va_end(args);
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 8f108faa1f..f09c067cfd 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -240,8 +240,8 @@ static void VMDebug_dumpReferenceTables(JNIEnv* env, jclass) {
ScopedObjectAccess soa(env);
LOG(INFO) << "--- reference table dump ---";
- soa.Env()->DumpReferenceTables(LOG(INFO));
- soa.Vm()->DumpReferenceTables(LOG(INFO));
+ soa.Env()->DumpReferenceTables(LOG_STREAM(INFO));
+ soa.Vm()->DumpReferenceTables(LOG_STREAM(INFO));
LOG(INFO) << "---";
}
diff --git a/runtime/native/java_lang_reflect_AbstractMethod.cc b/runtime/native/java_lang_reflect_AbstractMethod.cc
deleted file mode 100644
index 254f8dbb4c..0000000000
--- a/runtime/native/java_lang_reflect_AbstractMethod.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "java_lang_reflect_AbstractMethod.h"
-
-#include "art_method-inl.h"
-#include "dex_file_annotations.h"
-#include "jni_internal.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "reflection.h"
-#include "scoped_fast_native_object_access.h"
-#include "well_known_classes.h"
-
-namespace art {
-
-static jobjectArray AbstractMethod_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
- ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->GetDeclaringClass()->IsProxyClass()) {
- // Return an empty array instead of a null pointer.
- mirror::Class* annotation_array_class =
- soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
- mirror::ObjectArray<mirror::Object>* empty_array =
- mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
- return soa.AddLocalReference<jobjectArray>(empty_array);
- }
- return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForMethod(method));
-}
-
-static jobject AbstractMethod_getAnnotationNative(JNIEnv* env,
- jobject javaMethod,
- jclass annotationType) {
- ScopedFastNativeObjectAccess soa(env);
- StackHandleScope<1> hs(soa.Self());
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->IsProxyMethod()) {
- return nullptr;
- } else {
- Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
- return soa.AddLocalReference<jobject>(annotations::GetAnnotationForMethod(method, klass));
- }
-}
-
-static jobjectArray AbstractMethod_getSignatureAnnotation(JNIEnv* env, jobject javaMethod) {
- ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->GetDeclaringClass()->IsProxyClass()) {
- return nullptr;
- }
- StackHandleScope<1> hs(soa.Self());
- return soa.AddLocalReference<jobjectArray>(annotations::GetSignatureAnnotationForMethod(method));
-}
-
-
-static jobjectArray AbstractMethod_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
- ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->IsProxyMethod()) {
- return nullptr;
- } else {
- return soa.AddLocalReference<jobjectArray>(annotations::GetParameterAnnotations(method));
- }
-}
-
-static jboolean AbstractMethod_isAnnotationPresentNative(JNIEnv* env,
- jobject javaMethod,
- jclass annotationType) {
- ScopedFastNativeObjectAccess soa(env);
- ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
- if (method->GetDeclaringClass()->IsProxyClass()) {
- return false;
- }
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
- return annotations::IsMethodAnnotationPresent(method, klass);
-}
-
-static JNINativeMethod gMethods[] = {
- NATIVE_METHOD(AbstractMethod, getAnnotationNative,
- "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
- NATIVE_METHOD(AbstractMethod, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"),
- NATIVE_METHOD(AbstractMethod, getParameterAnnotationsNative,
- "!()[[Ljava/lang/annotation/Annotation;"),
- NATIVE_METHOD(AbstractMethod, getSignatureAnnotation, "!()[Ljava/lang/String;"),
- NATIVE_METHOD(AbstractMethod, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
-};
-
-void register_java_lang_reflect_AbstractMethod(JNIEnv* env) {
- REGISTER_NATIVE_METHODS("java/lang/reflect/AbstractMethod");
-}
-
-} // namespace art
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
new file mode 100644
index 0000000000..f345c098e0
--- /dev/null
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_lang_reflect_Executable.h"
+
+#include "art_method-inl.h"
+#include "dex_file_annotations.h"
+#include "handle.h"
+#include "jni_internal.h"
+#include "mirror/class-inl.h"
+#include "mirror/method.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "reflection.h"
+#include "scoped_fast_native_object_access.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+static jobjectArray Executable_getDeclaredAnnotationsNative(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ // Return an empty array instead of a null pointer.
+ mirror::Class* annotation_array_class =
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
+ mirror::ObjectArray<mirror::Object>* empty_array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0);
+ return soa.AddLocalReference<jobjectArray>(empty_array);
+ }
+ return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForMethod(method));
+}
+
+static jobject Executable_getAnnotationNative(JNIEnv* env,
+ jobject javaMethod,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ } else {
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return soa.AddLocalReference<jobject>(annotations::GetAnnotationForMethod(method, klass));
+ }
+}
+
+static jobjectArray Executable_getSignatureAnnotation(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ return soa.AddLocalReference<jobjectArray>(annotations::GetSignatureAnnotationForMethod(method));
+}
+
+
+static jobjectArray Executable_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->IsProxyMethod()) {
+ return nullptr;
+ } else {
+ return soa.AddLocalReference<jobjectArray>(annotations::GetParameterAnnotations(method));
+ }
+}
+
+static jobjectArray Executable_getParameters0(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ Thread* self = soa.Self();
+ StackHandleScope<8> hs(self);
+
+ Handle<mirror::Method> executable = hs.NewHandle(soa.Decode<mirror::Method*>(javaMethod));
+ ArtMethod* art_method = executable.Get()->GetArtMethod();
+ if (art_method->GetDeclaringClass()->IsProxyClass()) {
+ return nullptr;
+ }
+
+ // Find the MethodParameters system annotation.
+ MutableHandle<mirror::ObjectArray<mirror::String>> names =
+ hs.NewHandle<mirror::ObjectArray<mirror::String>>(nullptr);
+ MutableHandle<mirror::IntArray> access_flags = hs.NewHandle<mirror::IntArray>(nullptr);
+ if (!annotations::GetParametersMetadataForMethod(art_method, &names, &access_flags)) {
+ return nullptr;
+ }
+
+ // Validate the MethodParameters system annotation data.
+ if (UNLIKELY(names.Get() == nullptr || access_flags.Get() == nullptr)) {
+ ThrowIllegalArgumentException(
+ StringPrintf("Missing parameter metadata for names or access flags for %s",
+ PrettyMethod(art_method).c_str()).c_str());
+ return nullptr;
+ }
+
+ // Check that the array sizes match each other.
+ int32_t names_count = names.Get()->GetLength();
+ int32_t access_flags_count = access_flags.Get()->GetLength();
+ if (names_count != access_flags_count) {
+ ThrowIllegalArgumentException(
+ StringPrintf(
+ "Inconsistent parameter metadata for %s. names length: %d, access flags length: %d",
+ PrettyMethod(art_method).c_str(),
+ names_count,
+ access_flags_count).c_str());
+ return nullptr;
+ }
+
+ // Instantiate a Parameter[] to hold the result.
+ Handle<mirror::Class> parameter_array_class =
+ hs.NewHandle(
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Parameter__array));
+ Handle<mirror::ObjectArray<mirror::Object>> parameter_array =
+ hs.NewHandle(
+ mirror::ObjectArray<mirror::Object>::Alloc(self,
+ parameter_array_class.Get(),
+ names_count));
+ if (UNLIKELY(parameter_array.Get() == nullptr)) {
+ self->AssertPendingException();
+ return nullptr;
+ }
+
+ Handle<mirror::Class> parameter_class =
+ hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Parameter));
+ ArtMethod* parameter_init =
+ soa.DecodeMethod(WellKnownClasses::java_lang_reflect_Parameter_init);
+
+ // Mutable handles used in the loop below to ensure cleanup without scaling the number of
+ // handles by the number of parameters.
+ MutableHandle<mirror::String> name = hs.NewHandle<mirror::String>(nullptr);
+ MutableHandle<mirror::Object> parameter = hs.NewHandle<mirror::Object>(nullptr);
+
+ // Populate the Parameter[] to return.
+ for (int32_t parameter_index = 0; parameter_index < names_count; parameter_index++) {
+ name.Assign(names.Get()->Get(parameter_index));
+ int32_t modifiers = access_flags.Get()->Get(parameter_index);
+
+ // Allocate / initialize the Parameter to add to parameter_array.
+ parameter.Assign(parameter_class->AllocObject(self));
+ if (UNLIKELY(parameter.Get() == nullptr)) {
+ self->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ uint32_t args[5] = { PointerToLowMemUInt32(parameter.Get()),
+ PointerToLowMemUInt32(name.Get()),
+ static_cast<uint32_t>(modifiers),
+ PointerToLowMemUInt32(executable.Get()),
+ static_cast<uint32_t>(parameter_index)
+ };
+ JValue result;
+ static const char* method_signature = "VLILI"; // return + parameter types
+ parameter_init->Invoke(self, args, sizeof(args), &result, method_signature);
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return nullptr;
+ }
+
+ // Store the Parameter in the Parameter[].
+ parameter_array.Get()->Set(parameter_index, parameter.Get());
+ if (UNLIKELY(self->IsExceptionPending())) {
+ return nullptr;
+ }
+ }
+ return soa.AddLocalReference<jobjectArray>(parameter_array.Get());
+}
+
+static jboolean Executable_isAnnotationPresentNative(JNIEnv* env,
+ jobject javaMethod,
+ jclass annotationType) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ if (method->GetDeclaringClass()->IsProxyClass()) {
+ return false;
+ }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
+ return annotations::IsMethodAnnotationPresent(method, klass);
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(Executable, getAnnotationNative,
+ "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Executable, getDeclaredAnnotationsNative, "!()[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Executable, getParameterAnnotationsNative,
+ "!()[[Ljava/lang/annotation/Annotation;"),
+ NATIVE_METHOD(Executable, getParameters0, "!()[Ljava/lang/reflect/Parameter;"),
+ NATIVE_METHOD(Executable, getSignatureAnnotation, "!()[Ljava/lang/String;"),
+ NATIVE_METHOD(Executable, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
+};
+
+void register_java_lang_reflect_Executable(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/reflect/Executable");
+}
+
+} // namespace art
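
Editor's note: the gMethods table and REGISTER_NATIVE_METHODS call above bind these implementations to java.lang.reflect.Executable; the leading "!" in the signature strings is ART's marker for fast-native registration. In plain JNI terms (a generic sketch, not the ART macro, with an illustrative placeholder body), the same binding is a RegisterNatives call:

    #include <jni.h>

    static jboolean Executable_isAnnotationPresentNative(JNIEnv*, jobject, jclass) {
      return JNI_FALSE;  // placeholder body for illustration
    }

    static const JNINativeMethod kMethods[] = {
      { "isAnnotationPresentNative", "(Ljava/lang/Class;)Z",
        reinterpret_cast<void*>(Executable_isAnnotationPresentNative) },
    };

    jint RegisterExecutableNatives(JNIEnv* env) {
      jclass klass = env->FindClass("java/lang/reflect/Executable");
      if (klass == nullptr) {
        return JNI_ERR;  // ClassNotFoundException pending
      }
      return env->RegisterNatives(klass, kMethods, sizeof(kMethods) / sizeof(kMethods[0]));
    }
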
diff --git a/runtime/native/java_lang_reflect_AbstractMethod.h b/runtime/native/java_lang_reflect_Executable.h
index 222e5a05d0..0cfed62e49 100644
--- a/runtime/native/java_lang_reflect_AbstractMethod.h
+++ b/runtime/native/java_lang_reflect_Executable.h
@@ -14,15 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_
-#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_EXECUTABLE_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_EXECUTABLE_H_
#include <jni.h>
namespace art {
-void register_java_lang_reflect_AbstractMethod(JNIEnv* env);
+void register_java_lang_reflect_Executable(JNIEnv* env);
} // namespace art
-#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_EXECUTABLE_H_
diff --git a/runtime/openjdkjvm/Android.bp b/runtime/openjdkjvm/Android.bp
index 5ed16153ef..37112b61e7 100644
--- a/runtime/openjdkjvm/Android.bp
+++ b/runtime/openjdkjvm/Android.bp
@@ -19,7 +19,10 @@ cc_defaults {
host_supported: true,
srcs: ["OpenjdkJvm.cc"],
include_dirs: ["art/runtime"],
- shared_libs: ["libnativehelper"],
+ shared_libs: [
+ "libbase",
+ "libnativehelper"
+ ],
}
art_cc_library {
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
index 54ec5d32f5..4a62ecdf4d 100644
--- a/runtime/openjdkjvm/OpenjdkJvm.cc
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -61,10 +61,10 @@
#undef LOG_TAG
#define LOG_TAG "artopenjdk"
-using art::WARNING;
-using art::INFO;
-using art::ERROR;
-using art::FATAL;
+using ::android::base::WARNING;
+using ::android::base::INFO;
+using ::android::base::ERROR;
+using ::android::base::FATAL;
/* posix open() with extensions; used by e.g. ZipFile */
JNIEXPORT jint JVM_Open(const char* fname, jint flags, jint mode) {
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index 977ef44de2..d7a6c0a86c 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -17,9 +17,13 @@ cc_defaults {
name: "libopenjdkjvmti_defaults",
defaults: ["art_defaults"],
host_supported: true,
- srcs: ["OpenjdkJvmTi.cc"],
+ srcs: ["OpenjdkJvmTi.cc",
+ "transform.cc"],
include_dirs: ["art/runtime"],
- shared_libs: ["libnativehelper"],
+ shared_libs: [
+ "libbase",
+ "libnativehelper",
+ ],
}
art_cc_library {
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index d3561c1e1b..a1a23619f3 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -29,15 +29,17 @@
* questions.
*/
+#include <string>
+#include <vector>
+
#include <jni.h>
+
#include "openjdkjvmti/jvmti.h"
#include "art_jvmti.h"
-#include "gc_root-inl.h"
-#include "globals.h"
#include "jni_env_ext-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread_list.h"
+#include "runtime.h"
+#include "transform.h"
// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
// easier to create.
@@ -904,6 +906,66 @@ class JvmtiFunctions {
static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
return ERR(NOT_IMPLEMENTED);
}
+
+ // TODO Remove this once events are working.
+ static jvmtiError RetransformClassWithHook(jvmtiEnv* env,
+ jclass klass,
+ jvmtiEventClassFileLoadHook hook) {
+ std::vector<jclass> classes;
+ classes.push_back(klass);
+ return RetransformClassesWithHook(reinterpret_cast<ArtJvmTiEnv*>(env), classes, hook);
+ }
+
+ // TODO This will be called by the event handler for the art::ti class load event.
+ static jvmtiError RetransformClassesWithHook(ArtJvmTiEnv* env,
+ const std::vector<jclass>& classes,
+ jvmtiEventClassFileLoadHook hook) {
+ if (!IsValidEnv(env)) {
+ return ERR(INVALID_ENVIRONMENT);
+ }
+ for (jclass klass : classes) {
+ JNIEnv* jni_env = nullptr;
+ jobject loader = nullptr;
+ std::string name;
+ jobject protection_domain = nullptr;
+ jint data_len = 0;
+ unsigned char* dex_data = nullptr;
+ jvmtiError ret = OK;
+ std::string location;
+ if ((ret = GetTransformationData(env,
+ klass,
+ /*out*/&location,
+ /*out*/&jni_env,
+ /*out*/&loader,
+ /*out*/&name,
+ /*out*/&protection_domain,
+ /*out*/&data_len,
+ /*out*/&dex_data)) != OK) {
+ // TODO Do something more here? Maybe give log statements?
+ return ret;
+ }
+ jint new_data_len = 0;
+ unsigned char* new_dex_data = nullptr;
+ hook(env,
+ jni_env,
+ klass,
+ loader,
+ name.c_str(),
+ protection_domain,
+ data_len,
+ dex_data,
+ /*out*/&new_data_len,
+ /*out*/&new_dex_data);
+ // Check if anything actually changed.
+ if ((new_data_len != 0 || new_dex_data != nullptr) && new_dex_data != dex_data) {
+ MoveTransformedFileIntoRuntime(klass, std::move(location), new_data_len, new_dex_data);
+ env->Deallocate(new_dex_data);
+ }
+ // Deallocate the old dex data.
+ env->Deallocate(dex_data);
+ }
+ return OK;
+ }
};
static bool IsJvmtiVersion(jint version) {
@@ -942,7 +1004,10 @@ extern "C" bool ArtPlugin_Initialize() {
// The actual struct holding all of the entrypoints into the jvmti interface.
const jvmtiInterface_1 gJvmtiInterface = {
- nullptr, // reserved1
+ // SPECIAL FUNCTION: RetransformClassWithHook occupies the slot normally used for reserved1.
+ // TODO Remove once we have events working.
+ reinterpret_cast<void*>(JvmtiFunctions::RetransformClassWithHook),
+ // nullptr, // reserved1
JvmtiFunctions::SetEventNotificationMode,
nullptr, // reserved3
JvmtiFunctions::GetAllThreads,
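
Editor's note: RetransformClassesWithHook above drives a jvmtiEventClassFileLoadHook callback by hand while real event plumbing is still a TODO. A minimal agent-side callback with the standard JVMTI signature is sketched below; the class name checked is purely illustrative, and returning without touching the out-parameters means "no transformation":

    #include <cstring>
    #include <jvmti.h>

    // Standard ClassFileLoadHook shape from jvmti.h. To transform, the agent must
    // Allocate() a new buffer and fill *new_class_data / *new_class_data_len.
    static void JNICALL OnClassFileLoad(jvmtiEnv* jvmti, JNIEnv* /*jni*/,
                                        jclass /*class_being_redefined*/, jobject /*loader*/,
                                        const char* name, jobject /*protection_domain*/,
                                        jint class_data_len, const unsigned char* class_data,
                                        jint* new_class_data_len, unsigned char** new_class_data) {
      if (name == nullptr || std::strcmp(name, "com/example/Victim") != 0) {
        return;  // not a class we care about; leave the bytes unchanged
      }
      unsigned char* copy = nullptr;
      if (jvmti->Allocate(class_data_len, &copy) != JVMTI_ERROR_NONE) {
        return;
      }
      std::memcpy(copy, class_data, class_data_len);
      // ... rewrite `copy` in place here ...
      *new_class_data = copy;
      *new_class_data_len = class_data_len;
    }
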
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
new file mode 100644
index 0000000000..b5622b5cad
--- /dev/null
+++ b/runtime/openjdkjvmti/transform.cc
@@ -0,0 +1,362 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "transform.h"
+
+#include "class_linker.h"
+#include "dex_file.h"
+#include "gc_root-inl.h"
+#include "globals.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti.h"
+#include "linear_alloc.h"
+#include "mem_map.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader-inl.h"
+#include "mirror/string-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread_list.h"
+#include "transform.h"
+#include "utf.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+
+namespace openjdkjvmti {
+
+static bool ReadChecksum(jint data_len, const unsigned char* dex, /*out*/uint32_t* res) {
+ if (data_len < static_cast<jint>(sizeof(art::DexFile::Header))) {
+ return false;
+ }
+ *res = reinterpret_cast<const art::DexFile::Header*>(dex)->checksum_;
+ return true;
+}
+
+static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
+ jint data_len,
+ unsigned char* dex_data) {
+ std::string error_msg;
+ std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
+ art::StringPrintf("%s-transformed", original_location.c_str()).c_str(),
+ nullptr,
+ data_len,
+ PROT_READ|PROT_WRITE,
+ /*low_4gb*/false,
+ /*reuse*/false,
+ &error_msg));
+ if (map == nullptr) {
+ return map;
+ }
+ memcpy(map->Begin(), dex_data, data_len);
+ map->Protect(PROT_READ);
+ return map;
+}
+
+static void InvalidateExistingMethods(art::Thread* self,
+ art::Handle<art::mirror::Class> klass,
+ art::Handle<art::mirror::DexCache> cache,
+ const art::DexFile* dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Create new DexCache with new DexFile.
+ // reset dex_class_def_idx_
+ // for each method reset entry_point_from_quick_compiled_code_ to bridge
+ // for each method reset dex_code_item_offset_
+ // for each method reset dex_method_index_
+ // for each method set dex_cache_resolved_methods_ to new DexCache
+ // for each method set dex_cache_resolved_types_ to new DexCache
+ auto* runtime = art::Runtime::Current();
+ art::ClassLinker* linker = runtime->GetClassLinker();
+ art::PointerSize image_pointer_size = linker->GetImagePointerSize();
+ std::string descriptor_storage;
+ const char* descriptor = klass->GetDescriptor(&descriptor_storage);
+ // Get the new class def
+ const art::DexFile::ClassDef* class_def = art::OatFile::OatDexFile::FindClassDef(
+ *dex_file, descriptor, art::ComputeModifiedUtf8Hash(descriptor));
+ CHECK(class_def != nullptr);
+ const art::DexFile::TypeId& declaring_class_id = dex_file->GetTypeId(class_def->class_idx_);
+ art::StackHandleScope<6> hs(self);
+ const art::DexFile& old_dex_file = klass->GetDexFile();
+ for (art::ArtMethod& method : klass->GetMethods(image_pointer_size)) {
+ // Find the code_item for the method then find the dex_method_index and dex_code_item_offset to
+ // set.
+ const art::DexFile::StringId* new_name_id = dex_file->FindStringId(method.GetName());
+ uint16_t method_return_idx =
+ dex_file->GetIndexForTypeId(*dex_file->FindTypeId(method.GetReturnTypeDescriptor()));
+ const auto* old_type_list = method.GetParameterTypeList();
+ std::vector<uint16_t> new_type_list;
+ for (uint32_t i = 0; old_type_list != nullptr && i < old_type_list->Size(); i++) {
+ new_type_list.push_back(
+ dex_file->GetIndexForTypeId(
+ *dex_file->FindTypeId(
+ old_dex_file.GetTypeDescriptor(
+ old_dex_file.GetTypeId(
+ old_type_list->GetTypeItem(i).type_idx_)))));
+ }
+ const art::DexFile::ProtoId* proto_id = dex_file->FindProtoId(method_return_idx,
+ new_type_list);
+ CHECK(proto_id != nullptr || old_type_list == nullptr);
+ const art::DexFile::MethodId* method_id = dex_file->FindMethodId(declaring_class_id,
+ *new_name_id,
+ *proto_id);
+ CHECK(method_id != nullptr);
+ uint32_t dex_method_idx = dex_file->GetIndexForMethodId(*method_id);
+ method.SetDexMethodIndex(dex_method_idx);
+ linker->SetEntryPointsToInterpreter(&method);
+ method.SetCodeItemOffset(dex_file->FindCodeItemOffset(*class_def, dex_method_idx));
+ method.SetDexCacheResolvedMethods(cache->GetResolvedMethods(), image_pointer_size);
+ method.SetDexCacheResolvedTypes(cache->GetResolvedTypes(), image_pointer_size);
+ }
+
+ // Update the class fields.
+ // Need to update class last since the ArtMethod gets its DexFile from the class (which is needed
+ // to call GetReturnTypeDescriptor and GetParameterTypeList above).
+ klass->SetDexCache(cache.Get());
+ klass->SetDexCacheStrings(cache->GetStrings());
+ klass->SetDexClassDefIndex(dex_file->GetIndexForClassDef(*class_def));
+ klass->SetDexTypeIndex(dex_file->GetIndexForTypeId(*dex_file->FindTypeId(descriptor)));
+}
+
+// Adds the new dex file to a copy of the given cookie array, right after the oat-dex entry.
+static art::mirror::LongArray* InsertDexFileIntoArray(art::Thread* self,
+ const art::DexFile* dex,
+ art::Handle<art::mirror::LongArray>& orig)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::StackHandleScope<1> hs(self);
+ CHECK_GE(orig->GetLength(), 1);
+ art::Handle<art::mirror::LongArray> ret(
+ hs.NewHandle(art::mirror::LongArray::Alloc(self, orig->GetLength() + 1)));
+ CHECK(ret.Get() != nullptr);
+ // Copy the oat-dex.
+ // TODO Should I clear the oatdex element?
+ ret->SetWithoutChecks<false>(0, orig->GetWithoutChecks(0));
+ ret->SetWithoutChecks<false>(1, static_cast<int64_t>(reinterpret_cast<intptr_t>(dex)));
+ ret->Memcpy(2, orig.Get(), 1, orig->GetLength() - 1);
+ return ret.Get();
+}
+
+// TODO Handle all types of class loaders.
+static bool FindDalvikSystemDexFileAndLoaderForClass(
+ art::Handle<art::mirror::Class> klass,
+ /*out*/art::mirror::Object** dex_file,
+ /*out*/art::mirror::ClassLoader** loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ const char* dex_path_list_element_array_name = "[Ldalvik/system/DexPathList$Element;";
+ const char* dex_path_list_element_name = "Ldalvik/system/DexPathList$Element;";
+ const char* dex_file_name = "Ldalvik/system/DexFile;";
+ const char* dex_path_list_name = "Ldalvik/system/DexPathList;";
+ const char* dex_class_loader_name = "Ldalvik/system/BaseDexClassLoader;";
+
+ art::Thread* self = art::Thread::Current();
+ CHECK(!self->IsExceptionPending());
+ art::StackHandleScope<11> hs(self);
+ art::ClassLinker* class_linker = art::Runtime::Current()->GetClassLinker();
+
+ art::Handle<art::mirror::ClassLoader> null_loader(hs.NewHandle<art::mirror::ClassLoader>(
+ nullptr));
+ art::Handle<art::mirror::Class> base_dex_loader_class(hs.NewHandle(class_linker->FindClass(
+ self, dex_class_loader_name, null_loader)));
+
+ art::ArtField* path_list_field = base_dex_loader_class->FindDeclaredInstanceField(
+ "pathList", dex_path_list_name);
+ CHECK(path_list_field != nullptr);
+
+ art::ArtField* dex_path_list_element_field =
+ class_linker->FindClass(self, dex_path_list_name, null_loader)
+ ->FindDeclaredInstanceField("dexElements", dex_path_list_element_array_name);
+ CHECK(dex_path_list_element_field != nullptr);
+
+ art::ArtField* element_dex_file_field =
+ class_linker->FindClass(self, dex_path_list_element_name, null_loader)
+ ->FindDeclaredInstanceField("dexFile", dex_file_name);
+ CHECK(element_dex_file_field != nullptr);
+
+ art::Handle<art::mirror::ClassLoader> h_class_loader(hs.NewHandle(klass->GetClassLoader()));
+ art::Handle<art::mirror::Class> loader_class(hs.NewHandle(h_class_loader->GetClass()));
+ // Check if loader is a BaseDexClassLoader
+ if (!loader_class->IsSubClass(base_dex_loader_class.Get())) {
+ LOG(ERROR) << "The classloader is not a BaseDexClassLoader which is currently the only "
+ << "supported class loader type!";
+ return false;
+ }
+ art::Handle<art::mirror::Object> path_list(
+ hs.NewHandle(path_list_field->GetObject(h_class_loader.Get())));
+ CHECK(path_list.Get() != nullptr);
+ CHECK(!self->IsExceptionPending());
+ art::Handle<art::mirror::ObjectArray<art::mirror::Object>> dex_elements_list(
+ hs.NewHandle(art::down_cast<art::mirror::ObjectArray<art::mirror::Object>*>(
+ dex_path_list_element_field->GetObject(path_list.Get()))));
+ CHECK(!self->IsExceptionPending());
+ CHECK(dex_elements_list.Get() != nullptr);
+ size_t num_elements = dex_elements_list->GetLength();
+ art::MutableHandle<art::mirror::Object> current_element(
+ hs.NewHandle<art::mirror::Object>(nullptr));
+ art::MutableHandle<art::mirror::Object> first_dex_file(
+ hs.NewHandle<art::mirror::Object>(nullptr));
+ for (size_t i = 0; i < num_elements; i++) {
+ current_element.Assign(dex_elements_list->Get(i));
+ CHECK(current_element.Get() != nullptr);
+ CHECK(!self->IsExceptionPending());
+ CHECK(dex_elements_list.Get() != nullptr);
+ CHECK_EQ(current_element->GetClass(), class_linker->FindClass(self,
+ dex_path_list_element_name,
+ null_loader));
+ // TODO It would be cleaner to put the art::DexFile into the dalvik.system.DexFile the class
+ // comes from but it is more annoying because we would need to find this class. It is not
+  // necessary for proper function since we just need to be in front of the class's old dex file
+ // in the path.
+ first_dex_file.Assign(element_dex_file_field->GetObject(current_element.Get()));
+ if (first_dex_file.Get() != nullptr) {
+ *dex_file = first_dex_file.Get();
+ *loader = h_class_loader.Get();
+ return true;
+ }
+ }
+ return false;
+}
+
+// Gets the data surrounding the given class.
+jvmtiError GetTransformationData(ArtJvmTiEnv* env,
+ jclass klass,
+ /*out*/std::string* location,
+ /*out*/JNIEnv** jni_env_ptr,
+ /*out*/jobject* loader,
+ /*out*/std::string* name,
+ /*out*/jobject* protection_domain,
+ /*out*/jint* data_len,
+ /*out*/unsigned char** dex_data) {
+ jint ret = env->art_vm->GetEnv(reinterpret_cast<void**>(jni_env_ptr), JNI_VERSION_1_1);
+ if (ret != JNI_OK) {
+ // TODO Different error might be better?
+ return ERR(INTERNAL);
+ }
+ JNIEnv* jni_env = *jni_env_ptr;
+ art::ScopedObjectAccess soa(jni_env);
+ art::StackHandleScope<3> hs(art::Thread::Current());
+ art::Handle<art::mirror::Class> hs_klass(hs.NewHandle(soa.Decode<art::mirror::Class*>(klass)));
+ *loader = soa.AddLocalReference<jobject>(hs_klass->GetClassLoader());
+ *name = art::mirror::Class::ComputeName(hs_klass)->ToModifiedUtf8();
+ // TODO is this always null?
+ *protection_domain = nullptr;
+ const art::DexFile& dex = hs_klass->GetDexFile();
+ *location = dex.GetLocation();
+ *data_len = static_cast<jint>(dex.Size());
+ // TODO We should maybe change env->Allocate to allow us to mprotect this memory and stop writes.
+ jvmtiError alloc_error = env->Allocate(*data_len, dex_data);
+ if (alloc_error != OK) {
+ return alloc_error;
+ }
+ // Copy the data into a temporary buffer.
+ memcpy(reinterpret_cast<void*>(*dex_data),
+ reinterpret_cast<const void*>(dex.Begin()),
+ *data_len);
+ return OK;
+}
+
+// Install the new dex file.
+// TODO do error checks for bad state (method in a stack, changes to number of methods/fields/etc).
+jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
+ std::string original_location,
+ jint data_len,
+ unsigned char* dex_data) {
+ const char* dex_file_name = "Ldalvik/system/DexFile;";
+ art::Thread* self = art::Thread::Current();
+ art::Runtime* runtime = art::Runtime::Current();
+ art::ThreadList* threads = runtime->GetThreadList();
+ art::ClassLinker* class_linker = runtime->GetClassLinker();
+ uint32_t checksum = 0;
+ if (!ReadChecksum(data_len, dex_data, &checksum)) {
+ return ERR(INVALID_CLASS_FORMAT);
+ }
+
+ std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_location, data_len, dex_data));
+ if (map.get() == nullptr) {
+ return ERR(INTERNAL);
+ }
+ std::string error_msg;
+ // Load the new dex_data in memory (mmap it, etc)
+ std::unique_ptr<const art::DexFile> new_dex_file = art::DexFile::Open(map->GetName(),
+ checksum,
+ std::move(map),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg);
+ CHECK(new_dex_file.get() != nullptr) << "Unable to load dex file! " << error_msg;
+
+ // Get mutator lock. We need the lifetimes of these variables (hs, the classes, etc.) to be longer
+  // than the current lock (since the lock cannot be upgraded), so we don't use soa.
+ art::ThreadState old_state = self->TransitionFromSuspendedToRunnable();
+ // This scope is needed to make sure that the HandleScope dies with mutator_lock_ since we need to
+ // upgrade the mutator_lock during the execution.
+ {
+ art::StackHandleScope<11> hs(self);
+ art::Handle<art::mirror::ClassLoader> null_loader(
+ hs.NewHandle<art::mirror::ClassLoader>(nullptr));
+ CHECK(null_loader.Get() == nullptr);
+ art::ArtField* dex_file_cookie_field = class_linker->
+ FindClass(self, dex_file_name, null_loader)->
+ FindDeclaredInstanceField("mCookie", "Ljava/lang/Object;");
+ art::ArtField* dex_file_internal_cookie_field =
+ class_linker->FindClass(self, dex_file_name, null_loader)
+ ->FindDeclaredInstanceField("mInternalCookie", "Ljava/lang/Object;");
+ CHECK(dex_file_cookie_field != nullptr);
+ art::Handle<art::mirror::Class> klass(
+ hs.NewHandle(art::down_cast<art::mirror::Class*>(self->DecodeJObject(jklass))));
+ art::mirror::Object* dex_file_ptr = nullptr;
+ art::mirror::ClassLoader* class_loader_ptr = nullptr;
+ // Find dalvik.system.DexFile that represents the dex file we are changing.
+ if (!FindDalvikSystemDexFileAndLoaderForClass(klass, &dex_file_ptr, &class_loader_ptr)) {
+ self->TransitionFromRunnableToSuspended(old_state);
+ LOG(ERROR) << "Could not find DexFile.";
+ return ERR(INTERNAL);
+ }
+ art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(dex_file_ptr));
+ art::Handle<art::mirror::ClassLoader> class_loader(hs.NewHandle(class_loader_ptr));
+ art::Handle<art::mirror::LongArray> art_dex_array(
+ hs.NewHandle<art::mirror::LongArray>(
+ dex_file_cookie_field->GetObject(dex_file_obj.Get())->AsLongArray()));
+ art::Handle<art::mirror::LongArray> new_art_dex_array(
+ hs.NewHandle<art::mirror::LongArray>(
+ InsertDexFileIntoArray(self, new_dex_file.get(), art_dex_array)));
+ art::Handle<art::mirror::DexCache> cache(
+ hs.NewHandle(class_linker->RegisterDexFile(*new_dex_file.get(), class_loader.Get())));
+ self->TransitionFromRunnableToSuspended(old_state);
+
+ threads->SuspendAll("moving dex file into runtime", /*long_suspend*/true);
+ // Change the mCookie field. Old value will be GC'd as normal.
+ dex_file_cookie_field->SetObject<false>(dex_file_obj.Get(), new_art_dex_array.Get());
+ dex_file_internal_cookie_field->SetObject<false>(dex_file_obj.Get(), new_art_dex_array.Get());
+ // Invalidate existing methods.
+ InvalidateExistingMethods(self, klass, cache, new_dex_file.release());
+ }
+ threads->ResumeAll();
+ return OK;
+}
+
+} // namespace openjdkjvmti
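A minimal sketch (not part of the patch) of how an agent might reach the temporary entry point parked
in reserved1 above. The hook follows the standard JVMTI ClassFileLoadHook signature; MyHook and
DoRetransform are hypothetical agent-side names, and the function-pointer cast mirrors the
reinterpret_cast used when the slot is filled.

    #include <cstring>
    #include <jvmti.h>

    using RetransformFn = jvmtiError (*)(jvmtiEnv*, jclass, jvmtiEventClassFileLoadHook);

    // Identity transform: hand back an unmodified copy of the dex bytes. A real agent
    // would rewrite the buffer before returning it.
    static void JNICALL MyHook(jvmtiEnv* jvmti, JNIEnv*, jclass, jobject, const char*, jobject,
                               jint len, const unsigned char* data,
                               jint* new_len, unsigned char** new_data) {
      unsigned char* copy = nullptr;
      if (jvmti->Allocate(len, &copy) == JVMTI_ERROR_NONE) {
        memcpy(copy, data, len);
        *new_len = len;
        *new_data = copy;
      }
    }

    static jvmtiError DoRetransform(jvmtiEnv* env, jclass klass) {
      // reserved1 holds RetransformClassWithHook until real events are wired up.
      RetransformFn retransform = reinterpret_cast<RetransformFn>(env->functions->reserved1);
      return retransform(env, klass, MyHook);
    }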
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
new file mode 100644
index 0000000000..85bcb00eca
--- /dev/null
+++ b/runtime/openjdkjvmti/transform.h
@@ -0,0 +1,64 @@
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TRANSFORM_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TRANSFORM_H_
+
+#include <string>
+
+#include <jni.h>
+
+#include "art_jvmti.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+// Gets the data surrounding the given class.
+jvmtiError GetTransformationData(ArtJvmTiEnv* env,
+ jclass klass,
+ /*out*/std::string* location,
+ /*out*/JNIEnv** jni_env_ptr,
+ /*out*/jobject* loader,
+ /*out*/std::string* name,
+ /*out*/jobject* protection_domain,
+ /*out*/jint* data_len,
+ /*out*/unsigned char** dex_data);
+
+// Install the new dex file.
+jvmtiError MoveTransformedFileIntoRuntime(jclass jklass,
+ std::string original_location,
+ jint data_len,
+ unsigned char* dex_data);
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TRANSFORM_H_
+
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index b3f29c28e4..9056d96f79 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -145,7 +145,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
if (kDebugExceptionDelivery) {
mirror::String* msg = exception->GetDetailMessage();
std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
- self_->DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
+ self_->DumpStack(LOG_STREAM(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
<< ": " << str_msg << "\n");
}
StackHandleScope<1> hs(self_);
@@ -218,7 +218,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
DCHECK(handler_method_ != nullptr && handler_method_header_->IsOptimized());
if (kDebugExceptionDelivery) {
- self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
+ self_->DumpStack(LOG_STREAM(INFO) << "Setting catch phis: ");
}
const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
@@ -520,7 +520,7 @@ void QuickExceptionHandler::PrepareForLongJumpToInvokeStubOrInterpreterBridge()
void QuickExceptionHandler::DeoptimizeStack() {
DCHECK(is_deoptimization_);
if (kDebugExceptionDelivery) {
- self_->DumpStack(LOG(INFO) << "Deoptimizing: ");
+ self_->DumpStack(LOG_STREAM(INFO) << "Deoptimizing: ");
}
DeoptimizeStackVisitor visitor(self_, context_, this, false);
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index f04d41dc80..0be79efb76 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -192,6 +192,13 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
} else {
StringAppendF(&extras, " \"%.16s... (%d chars)", utf8.c_str(), s->GetLength());
}
+ } else if (ref->IsReferenceInstance()) {
+ mirror::Object* referent = ref->AsReference()->GetReferent();
+ if (referent == nullptr) {
+ extras = " (referent is null)";
+ } else {
+ extras = StringPrintf(" (referent is a %s)", PrettyTypeOf(referent).c_str());
+ }
}
os << StringPrintf(" %5d: ", idx) << ref << " " << className << extras << "\n";
}
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index fae8e722c3..819e17a619 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -16,11 +16,15 @@
#include "reference_table.h"
+#include "class_linker.h"
#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
#include "mirror/string.h"
#include "primitive.h"
+#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -28,6 +32,39 @@ namespace art {
class ReferenceTableTest : public CommonRuntimeTest {};
+static mirror::Object* CreateWeakReference(mirror::Object* referent)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ StackHandleScope<3> scope(self);
+ Handle<mirror::Object> h_referent(scope.NewHandle<mirror::Object>(referent));
+
+ Handle<mirror::Class> h_ref_class(scope.NewHandle<mirror::Class>(
+ class_linker->FindClass(self,
+ "Ljava/lang/ref/WeakReference;",
+ ScopedNullHandle<mirror::ClassLoader>())));
+ CHECK(h_ref_class.Get() != nullptr);
+ CHECK(class_linker->EnsureInitialized(self, h_ref_class, true, true));
+
+ Handle<mirror::Object> h_ref_instance(scope.NewHandle<mirror::Object>(
+ h_ref_class->AllocObject(self)));
+ CHECK(h_ref_instance.Get() != nullptr);
+
+ ArtMethod* constructor = h_ref_class->FindDeclaredDirectMethod(
+ "<init>", "(Ljava/lang/Object;)V", class_linker->GetImagePointerSize());
+ CHECK(constructor != nullptr);
+
+ uint32_t args[2];
+ args[0] = PointerToLowMemUInt32(h_ref_instance.Get());
+ args[1] = PointerToLowMemUInt32(h_referent.Get());
+ JValue result;
+ constructor->Invoke(self, args, sizeof(uint32_t), &result, constructor->GetShorty());
+ CHECK(!self->IsExceptionPending());
+
+ return h_ref_instance.Get();
+}
+
TEST_F(ReferenceTableTest, Basics) {
ScopedObjectAccess soa(Thread::Current());
mirror::Object* o1 = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello");
@@ -104,6 +141,29 @@ TEST_F(ReferenceTableTest, Basics) {
std::string::npos) << oss.str();
}
}
+
+ // Add a reference and check that the type of the referent is dumped.
+ {
+ mirror::Object* empty_reference = CreateWeakReference(nullptr);
+ ASSERT_TRUE(empty_reference->IsReferenceInstance());
+ rt.Add(empty_reference);
+ std::ostringstream oss;
+ rt.Dump(oss);
+ EXPECT_NE(oss.str().find("java.lang.ref.WeakReference (referent is null)"), std::string::npos)
+ << oss.str();
+ }
+
+ {
+ mirror::Object* string_referent = mirror::String::AllocFromModifiedUtf8(Thread::Current(), "A");
+ mirror::Object* non_empty_reference = CreateWeakReference(string_referent);
+ ASSERT_TRUE(non_empty_reference->IsReferenceInstance());
+ rt.Add(non_empty_reference);
+ std::ostringstream oss;
+ rt.Dump(oss);
+ EXPECT_NE(oss.str().find("java.lang.ref.WeakReference (referent is a java.lang.String)"),
+ std::string::npos)
+ << oss.str();
+ }
}
} // namespace art
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 67e3fe8864..30b10d82f4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -24,8 +24,8 @@
#include "dex_file-inl.h"
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
-#include "mirror/abstract_method.h"
#include "mirror/class-inl.h"
+#include "mirror/executable.h"
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change.h"
@@ -455,7 +455,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
- method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ method = WellKnownClasses::StringInitToStringFactory(method);
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
@@ -486,7 +486,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
- method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ method = WellKnownClasses::StringInitToStringFactory(method);
}
mirror::Object* receiver = method->IsStatic() ? nullptr : soa.Decode<mirror::Object*>(obj);
uint32_t shorty_len = 0;
@@ -518,7 +518,7 @@ JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnab
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
- method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ method = WellKnownClasses::StringInitToStringFactory(method);
receiver = nullptr;
}
uint32_t shorty_len = 0;
@@ -550,7 +550,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
if (is_string_init) {
// Replace calls to String.<init> with equivalent StringFactory call.
- method = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ method = WellKnownClasses::StringInitToStringFactory(method);
receiver = nullptr;
}
uint32_t shorty_len = 0;
@@ -578,9 +578,9 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
return nullptr;
}
- auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(javaMethod);
- const bool accessible = abstract_method->IsAccessible();
- ArtMethod* m = abstract_method->GetArtMethod();
+ auto* executable = soa.Decode<mirror::Executable*>(javaMethod);
+ const bool accessible = executable->IsAccessible();
+ ArtMethod* m = executable->GetArtMethod();
mirror::Class* declaring_class = m->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
@@ -596,8 +596,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
if (!m->IsStatic()) {
// Replace calls to String.<init> with equivalent StringFactory call.
if (declaring_class->IsStringClass() && m->IsConstructor()) {
- jmethodID mid = soa.EncodeMethod(m);
- m = soa.DecodeMethod(WellKnownClasses::StringInitToStringFactoryMethodID(mid));
+ m = WellKnownClasses::StringInitToStringFactory(m);
CHECK(javaReceiver == nullptr);
} else {
// Check that the receiver is non-null and an instance of the field's declaring class.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f144b981d3..5bb38f5af8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -108,9 +108,9 @@
#include "native/java_lang_VMClassLoader.h"
#include "native/java_lang_ref_FinalizerReference.h"
#include "native/java_lang_ref_Reference.h"
-#include "native/java_lang_reflect_AbstractMethod.h"
#include "native/java_lang_reflect_Array.h"
#include "native/java_lang_reflect_Constructor.h"
+#include "native/java_lang_reflect_Executable.h"
#include "native/java_lang_reflect_Field.h"
#include "native/java_lang_reflect_Method.h"
#include "native/java_lang_reflect_Parameter.h"
@@ -255,7 +255,7 @@ Runtime::~Runtime() {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
// to be still alive.
- heap_->DumpGcPerformanceInfo(LOG(INFO));
+ heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
}
Thread* self = Thread::Current();
@@ -433,14 +433,14 @@ void Runtime::Abort() {
// Many people have difficulty distinguishing aborts from crashes,
// so be explicit.
AbortState state;
- LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
+ LOG(FATAL_WITHOUT_ABORT) << Dumpable<AbortState>(state);
// Call the abort hook if we have one.
if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
- LOG(INTERNAL_FATAL) << "Calling abort hook...";
+ LOG(FATAL_WITHOUT_ABORT) << "Calling abort hook...";
Runtime::Current()->abort_();
// notreached
- LOG(INTERNAL_FATAL) << "Unexpectedly returned from abort hook!";
+ LOG(FATAL_WITHOUT_ABORT) << "Unexpectedly returned from abort hook!";
}
#if defined(__GLIBC__)
@@ -856,9 +856,9 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
const OatHeader& boot_oat_header = oat_file->GetOatHeader();
const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
if (boot_cp != nullptr) {
- gc::space::ImageSpace::CreateMultiImageLocations(image_locations[0],
- boot_cp,
- &image_locations);
+ gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
+ boot_cp,
+ &image_locations);
}
}
@@ -1378,9 +1378,9 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_java_lang_DexCache(env);
register_java_lang_Object(env);
register_java_lang_ref_FinalizerReference(env);
- register_java_lang_reflect_AbstractMethod(env);
register_java_lang_reflect_Array(env);
register_java_lang_reflect_Constructor(env);
+ register_java_lang_reflect_Executable(env);
register_java_lang_reflect_Field(env);
register_java_lang_reflect_Method(env);
register_java_lang_reflect_Parameter(env);
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 33600ddba5..aed6a2b1cf 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -34,7 +34,10 @@ struct sigaction old_action;
void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
static bool handling_unexpected_signal = false;
if (handling_unexpected_signal) {
- LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL, "HandleUnexpectedSignal reentered\n");
+ LogHelper::LogLineLowStack(__FILE__,
+ __LINE__,
+ ::android::base::FATAL_WITHOUT_ABORT,
+ "HandleUnexpectedSignal reentered\n");
_exit(1);
}
handling_unexpected_signal = true;
@@ -44,11 +47,11 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
// Print this out first in case DumpObject faults.
- LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
gc::Heap* heap = runtime->GetHeap();
if (kDumpHeapObjectOnSigsevg && heap != nullptr && info != nullptr) {
- LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
- heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ LOG(FATAL_WITHOUT_ABORT) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG_STREAM(FATAL_WITHOUT_ABORT), reinterpret_cast<mirror::Object*>(info->si_addr));
}
}
// Run the old signal handler.
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 60ebabc109..cee73e175a 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -309,7 +309,10 @@ static bool IsTimeoutSignal(int signal_number) {
void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
static bool handlingUnexpectedSignal = false;
if (handlingUnexpectedSignal) {
- LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL, "HandleUnexpectedSignal reentered\n");
+ LogHelper::LogLineLowStack(__FILE__,
+ __LINE__,
+ ::android::base::FATAL_WITHOUT_ABORT,
+ "HandleUnexpectedSignal reentered\n");
if (IsTimeoutSignal(signal_number)) {
// Ignore a recursive timeout.
return;
@@ -334,7 +337,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
UContext thread_context(raw_context);
Backtrace thread_backtrace(raw_context);
- LOG(INTERNAL_FATAL) << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
+ LOG(FATAL_WITHOUT_ABORT) << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
<< StringPrintf("Fatal signal %d (%s), code %d (%s)",
signal_number, GetSignalName(signal_number),
info->si_code,
@@ -346,7 +349,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
<< "Registers:\n" << Dumpable<UContext>(thread_context) << "\n"
<< "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace);
if (kIsDebugBuild && signal_number == SIGSEGV) {
- PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
+ PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
}
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
@@ -354,17 +357,17 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
// Special timeout signal. Try to dump all threads.
// Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
// are of value here.
- runtime->GetThreadList()->Dump(LOG(INTERNAL_FATAL), kDumpNativeStackOnTimeout);
+ runtime->GetThreadList()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT), kDumpNativeStackOnTimeout);
}
gc::Heap* heap = runtime->GetHeap();
- LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage();
+ LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
if (kDumpHeapObjectOnSigsevg && heap != nullptr && info != nullptr) {
- LOG(INTERNAL_FATAL) << "Dump heap object at fault address: ";
- heap->DumpObject(LOG(INTERNAL_FATAL), reinterpret_cast<mirror::Object*>(info->si_addr));
+ LOG(FATAL_WITHOUT_ABORT) << "Dump heap object at fault address: ";
+ heap->DumpObject(LOG_STREAM(FATAL_WITHOUT_ABORT), reinterpret_cast<mirror::Object*>(info->si_addr));
}
}
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
- LOG(INTERNAL_FATAL) << "********************************************************\n"
+ LOG(FATAL_WITHOUT_ABORT) << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name
<< "\""
<< " has been suspended while crashing.\n"
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 6cb795061d..848c0e3b90 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -172,7 +172,7 @@ int SignalCatcher::WaitForSignal(Thread* self, SignalSet& signals) {
LOG(INFO) << *self << ": reacting to signal " << signal_number;
// If anyone's holding locks (which might prevent us from getting back into state Runnable), say so...
- Runtime::Current()->DumpLockHolders(LOG(INFO));
+ Runtime::Current()->DumpLockHolders(LOG_STREAM(INFO));
}
return signal_number;
diff --git a/runtime/simulator/Android.bp b/runtime/simulator/Android.bp
index 49322fcfb8..03e3f1562a 100644
--- a/runtime/simulator/Android.bp
+++ b/runtime/simulator/Android.bp
@@ -25,6 +25,7 @@ cc_defaults {
"code_simulator_arm64.cc",
],
shared_libs: [
+ "libbase",
"liblog",
],
cflags: ["-DVIXL_INCLUDE_SIMULATOR_AARCH64"],
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 216d8a7194..298a974fc1 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -59,6 +59,7 @@ inline void Thread::AllowThreadSuspension() {
if (UNLIKELY(TestAllFlags())) {
CheckSuspend();
}
+ PoisonObjectPointers();
}
inline void Thread::CheckSuspend() {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 89403545fd..d0ea2d7569 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -127,43 +127,6 @@ void Thread::InitTlsEntryPoints() {
InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
}
-void Thread::InitStringEntryPoints() {
- ScopedObjectAccess soa(this);
- QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
- qpoints->pNewEmptyString = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newEmptyString));
- qpoints->pNewStringFromBytes_B = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B));
- qpoints->pNewStringFromBytes_BI = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI));
- qpoints->pNewStringFromBytes_BII = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII));
- qpoints->pNewStringFromBytes_BIII = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII));
- qpoints->pNewStringFromBytes_BIIString = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString));
- qpoints->pNewStringFromBytes_BString = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString));
- qpoints->pNewStringFromBytes_BIICharset = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset));
- qpoints->pNewStringFromBytes_BCharset = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset));
- qpoints->pNewStringFromChars_C = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_C));
- qpoints->pNewStringFromChars_CII = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII));
- qpoints->pNewStringFromChars_IIC = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC));
- qpoints->pNewStringFromCodePoints = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints));
- qpoints->pNewStringFromString = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromString));
- qpoints->pNewStringFromStringBuffer = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer));
- qpoints->pNewStringFromStringBuilder = reinterpret_cast<void(*)()>(
- soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder));
-}
-
void Thread::ResetQuickAllocEntryPointsForThread() {
ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
}
@@ -609,7 +572,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
}
VLOG(threads) << "Creating native thread for " << thread_name;
- self->Dump(LOG(INFO));
+ self->Dump(LOG_STREAM(INFO));
}
Runtime* runtime = Runtime::Current();
@@ -804,7 +767,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
VLOG(threads) << "Attaching unnamed thread.";
}
ScopedObjectAccess soa(self);
- self->Dump(LOG(INFO));
+ self->Dump(LOG_STREAM(INFO));
}
{
@@ -911,8 +874,10 @@ bool Thread::InitStackHwm() {
+ 4 * KB;
if (read_stack_size <= min_stack) {
// Note, as we know the stack is small, avoid operations that could use a lot of stack.
- LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
- "Attempt to attach a thread with a too-small stack");
+ LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
+ __LINE__,
+ ::android::base::ERROR,
+ "Attempt to attach a thread with a too-small stack");
return false;
}
@@ -2382,7 +2347,7 @@ void Thread::ThrowOutOfMemoryError(const char* msg) {
ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
tls32_.throwing_OutOfMemoryError = false;
} else {
- Dump(LOG(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
+ Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
}
}
@@ -2775,16 +2740,16 @@ class ReferenceMapVisitor : public StackVisitor {
bool failed = false;
if (!space->GetLiveBitmap()->Test(klass)) {
failed = true;
- LOG(INTERNAL_FATAL) << "Unmarked object in image " << *space;
+ LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
} else if (!heap->GetLiveBitmap()->Test(klass)) {
failed = true;
- LOG(INTERNAL_FATAL) << "Unmarked object in image through live bitmap " << *space;
+ LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
}
if (failed) {
- GetThread()->Dump(LOG(INTERNAL_FATAL));
- space->AsImageSpace()->DumpSections(LOG(INTERNAL_FATAL));
- LOG(INTERNAL_FATAL) << "Method@" << method->GetDexMethodIndex() << ":" << method
- << " klass@" << klass;
+ GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
+ LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
+ << " klass@" << klass;
// Pretty info last in case it crashes.
LOG(FATAL) << "Method " << PrettyMethod(method) << " klass " << PrettyClass(klass);
}
@@ -2838,7 +2803,7 @@ class ReferenceMapVisitor : public StackVisitor {
if (kIsDebugBuild && ref_addr == nullptr) {
std::string thread_name;
GetThread()->GetThreadName(thread_name);
- LOG(INTERNAL_FATAL) << "On thread " << thread_name;
+ LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
DescribeStack(GetThread());
LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
<< "set in register_mask=" << register_mask << " at " << DescribeLocation();
@@ -2953,7 +2918,7 @@ void Thread::SetStackEndForStackOverflow() {
// However, we seem to have already extended to use the full stack.
LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
<< GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
- DumpStack(LOG(ERROR));
+ DumpStack(LOG_STREAM(ERROR));
LOG(FATAL) << "Recursive stack overflow.";
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 016c2bc7ea..fb6bde622d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -471,6 +471,14 @@ class Thread {
}
void Notify() REQUIRES(!*wait_mutex_);
+ ALWAYS_INLINE void PoisonObjectPointers() {
+ ++poison_object_cookie_;
+ }
+
+ ALWAYS_INLINE uintptr_t GetPoisonObjectCookie() const {
+ return poison_object_cookie_;
+ }
+
private:
void NotifyLocked(Thread* self) REQUIRES(wait_mutex_);
@@ -1528,6 +1536,9 @@ class Thread {
// Debug disable read barrier count, only is checked for debug builds and only in the runtime.
uint8_t debug_disallow_read_barrier_ = 0;
+  // Note that this is not in the packed struct; it may not be accessed during cross compilation.
+ uintptr_t poison_object_cookie_ = 0;
+
// Pending extra checkpoints if checkpoint_function_ is already used.
std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
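A minimal sketch (not part of the patch) of what the poison cookie is for: AllowThreadSuspension()
above now calls PoisonObjectPointers(), so the cookie changes at every suspension point and code can
notice that a raw mirror::Object* was cached across one. "self" and "handle" are assumed locals; the
comparison shown is illustrative, not an existing ART helper.

    uintptr_t cookie = self->GetPoisonObjectCookie();
    mirror::Object* raw = handle.Get();   // Raw pointer snapshot.
    self->AllowThreadSuspension();        // May suspend; bumps the cookie via PoisonObjectPointers().
    if (self->GetPoisonObjectCookie() != cookie) {
      // Objects may have moved while we were suspendable; re-read from the Handle
      // rather than trusting the stale raw pointer.
      raw = handle.Get();
    }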
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 5e6c8a40f1..17c6c2e65d 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -214,7 +214,7 @@ class DumpCheckpoint FINAL : public Closure {
bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
if (timed_out) {
// Avoid a recursive abort.
- LOG((kIsDebugBuild && (gAborting == 0)) ? FATAL : ERROR)
+ LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
<< "Unexpected time out during dump checkpoint.";
}
}
@@ -441,8 +441,8 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
// runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or waiting
// for the thread flip to end at the JNI critical section entry (kWaitingForGcThreadFlip),
ThreadState state = thread->GetState();
- if (state == kWaitingForGcThreadFlip ||
- thread->IsTransitioningToRunnable()) {
+ if ((state == kWaitingForGcThreadFlip || thread->IsTransitioningToRunnable()) &&
+ thread->GetSuspendCount() == 1) {
// The thread will resume right after the broadcast.
thread->ModifySuspendCount(self, -1, nullptr, false);
++runnable_thread_count;
@@ -628,7 +628,8 @@ void ThreadList::SuspendAllInternal(Thread* self,
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
if ((errno != EAGAIN) && (errno != EINTR)) {
if (errno == ETIMEDOUT) {
- LOG(kIsDebugBuild ? FATAL : ERROR) << "Unexpected time out during suspend all.";
+ LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
+ << "Unexpected time out during suspend all.";
} else {
PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
}
@@ -775,7 +776,10 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer,
// ThreadList::WaitForOtherNonDaemonThreadsToExit.
suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
}
- ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
+ ThreadSuspendByPeerWarning(self,
+ ::android::base::WARNING,
+ "No such thread for suspend",
+ peer);
return nullptr;
}
if (!Contains(thread)) {
@@ -822,7 +826,10 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer,
}
const uint64_t total_delay = NanoTime() - start_time;
if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
- ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer);
+ ThreadSuspendByPeerWarning(self,
+ ::android::base::FATAL,
+ "Thread suspension timed out",
+ peer);
if (suspended_thread != nullptr) {
CHECK_EQ(suspended_thread, thread);
suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
@@ -882,7 +889,9 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
<< " no longer in thread list";
// There's a race in inflating a lock and the owner giving up ownership and then dying.
- ThreadSuspendByThreadIdWarning(WARNING, "No such thread id for suspend", thread_id);
+ ThreadSuspendByThreadIdWarning(::android::base::WARNING,
+ "No such thread id for suspend",
+ thread_id);
return nullptr;
}
VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
@@ -923,7 +932,9 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
}
const uint64_t total_delay = NanoTime() - start_time;
if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
- ThreadSuspendByThreadIdWarning(WARNING, "Thread suspension timed out", thread_id);
+ ThreadSuspendByThreadIdWarning(::android::base::WARNING,
+ "Thread suspension timed out",
+ thread_id);
if (suspended_thread != nullptr) {
thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 6f10aaacaf..b52e2f2bca 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -52,6 +52,77 @@
namespace art {
+static const uint8_t kBase64Map[256] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
+ 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
+ 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
+ 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
+ 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255
+};
+
+uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+ std::vector<uint8_t> tmp;
+ uint32_t t = 0, y = 0;
+ int g = 3;
+ for (size_t i = 0; src[i] != '\0'; ++i) {
+ uint8_t c = kBase64Map[src[i] & 0xFF];
+ if (c == 255) continue;
+ // the final = symbols are read and used to trim the remaining bytes
+ if (c == 254) {
+ c = 0;
+ // prevent g < 0 which would potentially allow an overflow later
+ if (--g < 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ } else if (g != 3) {
+ // we only allow = to be at the end
+ *dst_size = 0;
+ return nullptr;
+ }
+ t = (t << 6) | c;
+ if (++y == 4) {
+ tmp.push_back((t >> 16) & 255);
+ if (g > 1) {
+ tmp.push_back((t >> 8) & 255);
+ }
+ if (g > 2) {
+ tmp.push_back(t & 255);
+ }
+ y = t = 0;
+ }
+ }
+ if (y != 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+  std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
+  // Report the decoded length only when the caller asked for it; writing
+  // through a null dst_size would crash.
+  if (dst_size != nullptr) {
+    *dst_size = tmp.size();
+  }
+ std::copy(tmp.begin(), tmp.end(), dst.get());
+ return dst.release();
+}
+
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
diff --git a/runtime/utils.h b/runtime/utils.h
index f3284e8304..e65b947e73 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -116,6 +116,8 @@ inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
return static_cast<typename std::make_unsigned<T>::type>(x);
}
+uint8_t* DecodeBase64(const char* src, size_t* dst_size);
+
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
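A small usage sketch (not part of the patch) for the new DecodeBase64 helper; kBase64Dex stands in
for a base64-encoded dex blob such as a test might embed, and the caller owns the returned buffer.

    size_t decoded_size = 0;
    std::unique_ptr<uint8_t[]> decoded(art::DecodeBase64(kBase64Dex, &decoded_size));
    CHECK(decoded != nullptr);
    CHECK_GT(decoded_size, 0u);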
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index a71578b1c3..9fbf87595b 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -34,7 +34,9 @@ bool VdexFile::Header::IsVersionValid() const {
return (memcmp(version_, kVdexVersion, sizeof(kVdexVersion)) == 0);
}
-VdexFile::Header::Header() {
+VdexFile::Header::Header(uint32_t dex_size, uint32_t verifier_deps_size)
+ : dex_size_(dex_size),
+ verifier_deps_size_(verifier_deps_size) {
memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
memcpy(version_, kVdexVersion, sizeof(kVdexVersion));
DCHECK(IsMagicValid());
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 9215e52b07..6bea153d29 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -42,17 +42,22 @@ class VdexFile {
public:
struct Header {
public:
- Header();
+ Header(uint32_t dex_size, uint32_t verifier_deps_size);
bool IsMagicValid() const;
bool IsVersionValid() const;
+ uint32_t GetDexSize() const { return dex_size_; }
+ uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
+
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
static constexpr uint8_t kVdexVersion[] = { '0', '0', '0', '\0' };
uint8_t magic_[4];
uint8_t version_[4];
+ uint32_t dex_size_;
+ uint32_t verifier_deps_size_;
};
static VdexFile* Open(const std::string& vdex_filename,
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f1d3189309..13ef043174 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -434,14 +434,15 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
severity = LogSeverity::WARNING;
break;
case HardFailLogMode::kLogInternalFatal:
- severity = LogSeverity::INTERNAL_FATAL;
+ severity = LogSeverity::FATAL_WITHOUT_ABORT;
break;
default:
LOG(FATAL) << "Unsupported log-level " << static_cast<uint32_t>(log_level);
UNREACHABLE();
}
- verifier.DumpFailures(LOG(severity) << "Verification error in "
- << PrettyMethod(method_idx, *dex_file) << "\n");
+ verifier.DumpFailures(LOG_STREAM(severity) << "Verification error in "
+ << PrettyMethod(method_idx, *dex_file)
+ << "\n");
}
if (hard_failure_msg != nullptr) {
CHECK(!verifier.failure_messages_.empty());
@@ -3284,7 +3285,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
break;
// Note: the following instructions encode offsets derived from class linking.
- // As such they use Class*/Field*/AbstractMethod* as these offsets only have
+ // As such they use Class*/Field*/Executable* as these offsets only have
// meaning if the class linking and resolution were successful.
case Instruction::IGET_QUICK:
VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true);
diff --git a/runtime/verifier/verifier_log_mode.h b/runtime/verifier/verifier_log_mode.h
index 3744b9b416..e83d1749c8 100644
--- a/runtime/verifier/verifier_log_mode.h
+++ b/runtime/verifier/verifier_log_mode.h
@@ -24,7 +24,7 @@ enum class HardFailLogMode {
kLogNone, // Don't log hard failures at all.
kLogVerbose, // Log with severity VERBOSE.
kLogWarning, // Log with severity WARNING.
- kLogInternalFatal, // Log with severity INTERNAL_FATAL
+ kLogInternalFatal, // Log with severity FATAL_WITHOUT_ABORT
};
} // namespace verifier
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 5f5fbc89f9..e5216fbcff 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -21,6 +21,7 @@
#include <sstream>
#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "mirror/class.h"
#include "mirror/throwable.h"
#include "ScopedLocalRef.h"
@@ -48,10 +49,12 @@ jclass WellKnownClasses::java_lang_IllegalAccessError;
jclass WellKnownClasses::java_lang_NoClassDefFoundError;
jclass WellKnownClasses::java_lang_Object;
jclass WellKnownClasses::java_lang_OutOfMemoryError;
-jclass WellKnownClasses::java_lang_reflect_AbstractMethod;
jclass WellKnownClasses::java_lang_reflect_Constructor;
+jclass WellKnownClasses::java_lang_reflect_Executable;
jclass WellKnownClasses::java_lang_reflect_Field;
jclass WellKnownClasses::java_lang_reflect_Method;
+jclass WellKnownClasses::java_lang_reflect_Parameter;
+jclass WellKnownClasses::java_lang_reflect_Parameter__array;
jclass WellKnownClasses::java_lang_reflect_Proxy;
jclass WellKnownClasses::java_lang_RuntimeException;
jclass WellKnownClasses::java_lang_StackOverflowError;
@@ -86,41 +89,10 @@ jmethodID WellKnownClasses::java_lang_Integer_valueOf;
jmethodID WellKnownClasses::java_lang_Long_valueOf;
jmethodID WellKnownClasses::java_lang_ref_FinalizerReference_add;
jmethodID WellKnownClasses::java_lang_ref_ReferenceQueue_add;
+jmethodID WellKnownClasses::java_lang_reflect_Parameter_init;
jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
jmethodID WellKnownClasses::java_lang_Short_valueOf;
-jmethodID WellKnownClasses::java_lang_String_init;
-jmethodID WellKnownClasses::java_lang_String_init_B;
-jmethodID WellKnownClasses::java_lang_String_init_BI;
-jmethodID WellKnownClasses::java_lang_String_init_BII;
-jmethodID WellKnownClasses::java_lang_String_init_BIII;
-jmethodID WellKnownClasses::java_lang_String_init_BIIString;
-jmethodID WellKnownClasses::java_lang_String_init_BString;
-jmethodID WellKnownClasses::java_lang_String_init_BIICharset;
-jmethodID WellKnownClasses::java_lang_String_init_BCharset;
-jmethodID WellKnownClasses::java_lang_String_init_C;
-jmethodID WellKnownClasses::java_lang_String_init_CII;
-jmethodID WellKnownClasses::java_lang_String_init_IIC;
-jmethodID WellKnownClasses::java_lang_String_init_String;
-jmethodID WellKnownClasses::java_lang_String_init_StringBuffer;
-jmethodID WellKnownClasses::java_lang_String_init_III;
-jmethodID WellKnownClasses::java_lang_String_init_StringBuilder;
-jmethodID WellKnownClasses::java_lang_StringFactory_newEmptyString;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromChars_C;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromString;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints;
-jmethodID WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder;
jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
jmethodID WellKnownClasses::java_lang_Thread_dispatchUncaughtException;
jmethodID WellKnownClasses::java_lang_Thread_init;
@@ -154,7 +126,7 @@ jfieldID WellKnownClasses::java_lang_Throwable_detailMessage;
jfieldID WellKnownClasses::java_lang_Throwable_stackTrace;
jfieldID WellKnownClasses::java_lang_Throwable_stackState;
jfieldID WellKnownClasses::java_lang_Throwable_suppressedExceptions;
-jfieldID WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod;
+jfieldID WellKnownClasses::java_lang_reflect_Executable_artMethod;
jfieldID WellKnownClasses::java_lang_reflect_Proxy_h;
jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity;
jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress;
@@ -182,7 +154,7 @@ static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
if (fid == nullptr) {
ScopedObjectAccess soa(env);
if (soa.Self()->IsExceptionPending()) {
- LOG(INTERNAL_FATAL) << soa.Self()->GetException()->Dump();
+ LOG(FATAL_WITHOUT_ABORT) << soa.Self()->GetException()->Dump();
}
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -199,7 +171,7 @@ jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static,
if (mid == nullptr) {
ScopedObjectAccess soa(env);
if (soa.Self()->IsExceptionPending()) {
- LOG(INTERNAL_FATAL) << soa.Self()->GetException()->Dump();
+ LOG(FATAL_WITHOUT_ABORT) << soa.Self()->GetException()->Dump();
}
std::ostringstream os;
WellKnownClasses::ToClass(c)->DumpClass(os, mirror::Class::kDumpClassFullDetail);
@@ -215,6 +187,76 @@ static jmethodID CachePrimitiveBoxingMethod(JNIEnv* env, char prim_name, const c
StringPrintf("(%c)L%s;", prim_name, boxed_name).c_str());
}
+#define STRING_INIT_LIST(V) \
+ V(java_lang_String_init, "()V", newEmptyString, "newEmptyString", "()Ljava/lang/String;", NewEmptyString) \
+ V(java_lang_String_init_B, "([B)V", newStringFromBytes_B, "newStringFromBytes", "([B)Ljava/lang/String;", NewStringFromBytes_B) \
+ V(java_lang_String_init_BI, "([BI)V", newStringFromBytes_BI, "newStringFromBytes", "([BI)Ljava/lang/String;", NewStringFromBytes_BI) \
+ V(java_lang_String_init_BII, "([BII)V", newStringFromBytes_BII, "newStringFromBytes", "([BII)Ljava/lang/String;", NewStringFromBytes_BII) \
+ V(java_lang_String_init_BIII, "([BIII)V", newStringFromBytes_BIII, "newStringFromBytes", "([BIII)Ljava/lang/String;", NewStringFromBytes_BIII) \
+ V(java_lang_String_init_BIIString, "([BIILjava/lang/String;)V", newStringFromBytes_BIIString, "newStringFromBytes", "([BIILjava/lang/String;)Ljava/lang/String;", NewStringFromBytes_BIIString) \
+ V(java_lang_String_init_BString, "([BLjava/lang/String;)V", newStringFromBytes_BString, "newStringFromBytes", "([BLjava/lang/String;)Ljava/lang/String;", NewStringFromBytes_BString) \
+ V(java_lang_String_init_BIICharset, "([BIILjava/nio/charset/Charset;)V", newStringFromBytes_BIICharset, "newStringFromBytes", "([BIILjava/nio/charset/Charset;)Ljava/lang/String;", NewStringFromBytes_BIICharset) \
+ V(java_lang_String_init_BCharset, "([BLjava/nio/charset/Charset;)V", newStringFromBytes_BCharset, "newStringFromBytes", "([BLjava/nio/charset/Charset;)Ljava/lang/String;", NewStringFromBytes_BCharset) \
+ V(java_lang_String_init_C, "([C)V", newStringFromChars_C, "newStringFromChars", "([C)Ljava/lang/String;", NewStringFromChars_C) \
+ V(java_lang_String_init_CII, "([CII)V", newStringFromChars_CII, "newStringFromChars", "([CII)Ljava/lang/String;", NewStringFromChars_CII) \
+ V(java_lang_String_init_IIC, "(II[C)V", newStringFromChars_IIC, "newStringFromChars", "(II[C)Ljava/lang/String;", NewStringFromChars_IIC) \
+ V(java_lang_String_init_String, "(Ljava/lang/String;)V", newStringFromString, "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;", NewStringFromString) \
+ V(java_lang_String_init_StringBuffer, "(Ljava/lang/StringBuffer;)V", newStringFromStringBuffer, "newStringFromStringBuffer", "(Ljava/lang/StringBuffer;)Ljava/lang/String;", NewStringFromStringBuffer) \
+ V(java_lang_String_init_III, "([III)V", newStringFromCodePoints, "newStringFromCodePoints", "([III)Ljava/lang/String;", NewStringFromCodePoints) \
+ V(java_lang_String_init_StringBuilder, "(Ljava/lang/StringBuilder;)V", newStringFromStringBuilder, "newStringFromStringBuilder", "(Ljava/lang/StringBuilder;)Ljava/lang/String;", NewStringFromStringBuilder) \
+
+#define STATIC_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, ...) \
+ static ArtMethod* init_runtime_name; \
+ static ArtMethod* new_runtime_name;
+ STRING_INIT_LIST(STATIC_STRING_INIT)
+#undef STATIC_STRING_INIT
+
+void WellKnownClasses::InitStringInit(JNIEnv* env) {
+ ScopedObjectAccess soa(Thread::Current());
+ #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, \
+ new_java_name, new_signature, ...) \
+ init_runtime_name = soa.DecodeMethod( \
+ CacheMethod(env, java_lang_String, false, "<init>", init_signature)); \
+ new_runtime_name = soa.DecodeMethod( \
+ CacheMethod(env, java_lang_StringFactory, true, new_java_name, new_signature));
+ STRING_INIT_LIST(LOAD_STRING_INIT)
+ #undef LOAD_STRING_INIT
+}
+
+void Thread::InitStringEntryPoints() {
+ QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
+ #define SET_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name, \
+ new_java_name, new_signature, entry_point_name) \
+ qpoints->p ## entry_point_name = reinterpret_cast<void(*)()>(new_runtime_name);
+ STRING_INIT_LIST(SET_ENTRY_POINT)
+ #undef SET_ENTRY_POINT
+}
+
+ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) {
+ #define TO_STRING_FACTORY(init_runtime_name, init_signature, new_runtime_name, \
+ new_java_name, new_signature, entry_point_name) \
+ if (string_init == init_runtime_name) { \
+ return new_runtime_name; \
+ }
+ STRING_INIT_LIST(TO_STRING_FACTORY)
+ #undef TO_STRING_FACTORY
+ LOG(FATAL) << "Could not find StringFactory method for String.<init>";
+ return nullptr;
+}
+
+uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) {
+ #define TO_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name, \
+ new_java_name, new_signature, entry_point_name) \
+ if (string_init == init_runtime_name) { \
+ return kQuick ## entry_point_name; \
+ }
+ STRING_INIT_LIST(TO_ENTRY_POINT)
+ #undef TO_ENTRY_POINT
+ LOG(FATAL) << "Could not find StringFactory method for String.<init>";
+ return 0;
+}
+#undef STRING_INIT_LIST
+
void WellKnownClasses::Init(JNIEnv* env) {
com_android_dex_Dex = CacheClass(env, "com/android/dex/Dex");
dalvik_annotation_optimization_CriticalNative =
@@ -237,10 +279,12 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_ExceptionInInitializerError = CacheClass(env, "java/lang/ExceptionInInitializerError");
java_lang_IllegalAccessError = CacheClass(env, "java/lang/IllegalAccessError");
java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError");
- java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod");
java_lang_reflect_Constructor = CacheClass(env, "java/lang/reflect/Constructor");
+ java_lang_reflect_Executable = CacheClass(env, "java/lang/reflect/Executable");
java_lang_reflect_Field = CacheClass(env, "java/lang/reflect/Field");
java_lang_reflect_Method = CacheClass(env, "java/lang/reflect/Method");
+ java_lang_reflect_Parameter = CacheClass(env, "java/lang/reflect/Parameter");
+ java_lang_reflect_Parameter__array = CacheClass(env, "[Ljava/lang/reflect/Parameter;");
java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy");
java_lang_RuntimeException = CacheClass(env, "java/lang/RuntimeException");
java_lang_StackOverflowError = CacheClass(env, "java/lang/StackOverflowError");
@@ -273,6 +317,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
ScopedLocalRef<jclass> java_lang_ref_ReferenceQueue(env, env->FindClass("java/lang/ref/ReferenceQueue"));
java_lang_ref_ReferenceQueue_add = CacheMethod(env, java_lang_ref_ReferenceQueue.get(), true, "add", "(Ljava/lang/ref/Reference;)V");
+ java_lang_reflect_Parameter_init = CacheMethod(env, java_lang_reflect_Parameter, false, "<init>", "(Ljava/lang/String;ILjava/lang/reflect/Executable;I)V");
java_lang_reflect_Proxy_invoke = CacheMethod(env, java_lang_reflect_Proxy, true, "invoke", "(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
@@ -284,62 +329,6 @@ void WellKnownClasses::Init(JNIEnv* env) {
org_apache_harmony_dalvik_ddmc_DdmServer_broadcast = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "broadcast", "(I)V");
org_apache_harmony_dalvik_ddmc_DdmServer_dispatch = CacheMethod(env, org_apache_harmony_dalvik_ddmc_DdmServer, true, "dispatch", "(I[BII)Lorg/apache/harmony/dalvik/ddmc/Chunk;");
- java_lang_String_init = CacheMethod(env, java_lang_String, false, "<init>", "()V");
- java_lang_String_init_B = CacheMethod(env, java_lang_String, false, "<init>", "([B)V");
- java_lang_String_init_BI = CacheMethod(env, java_lang_String, false, "<init>", "([BI)V");
- java_lang_String_init_BII = CacheMethod(env, java_lang_String, false, "<init>", "([BII)V");
- java_lang_String_init_BIII = CacheMethod(env, java_lang_String, false, "<init>", "([BIII)V");
- java_lang_String_init_BIIString = CacheMethod(env, java_lang_String, false, "<init>",
- "([BIILjava/lang/String;)V");
- java_lang_String_init_BString = CacheMethod(env, java_lang_String, false, "<init>",
- "([BLjava/lang/String;)V");
- java_lang_String_init_BIICharset = CacheMethod(env, java_lang_String, false, "<init>",
- "([BIILjava/nio/charset/Charset;)V");
- java_lang_String_init_BCharset = CacheMethod(env, java_lang_String, false, "<init>",
- "([BLjava/nio/charset/Charset;)V");
- java_lang_String_init_C = CacheMethod(env, java_lang_String, false, "<init>", "([C)V");
- java_lang_String_init_CII = CacheMethod(env, java_lang_String, false, "<init>", "([CII)V");
- java_lang_String_init_IIC = CacheMethod(env, java_lang_String, false, "<init>", "(II[C)V");
- java_lang_String_init_String = CacheMethod(env, java_lang_String, false, "<init>",
- "(Ljava/lang/String;)V");
- java_lang_String_init_StringBuffer = CacheMethod(env, java_lang_String, false, "<init>",
- "(Ljava/lang/StringBuffer;)V");
- java_lang_String_init_III = CacheMethod(env, java_lang_String, false, "<init>", "([III)V");
- java_lang_String_init_StringBuilder = CacheMethod(env, java_lang_String, false, "<init>",
- "(Ljava/lang/StringBuilder;)V");
- java_lang_StringFactory_newEmptyString = CacheMethod(env, java_lang_StringFactory, true,
- "newEmptyString", "()Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_B = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromBytes", "([B)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BI = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromBytes", "([BI)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BII = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromBytes", "([BII)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BIII = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromBytes", "([BIII)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BIIString = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromBytes", "([BIILjava/lang/String;)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BString = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromBytes", "([BLjava/lang/String;)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BIICharset = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromBytes", "([BIILjava/nio/charset/Charset;)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromBytes_BCharset = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromBytes", "([BLjava/nio/charset/Charset;)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromChars_C = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromChars", "([C)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromChars_CII = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromChars", "([CII)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromChars_IIC = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromChars", "(II[C)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromString = CacheMethod(env, java_lang_StringFactory, true,
- "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromStringBuffer = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromStringBuffer", "(Ljava/lang/StringBuffer;)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromCodePoints = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromCodePoints", "([III)Ljava/lang/String;");
- java_lang_StringFactory_newStringFromStringBuilder = CacheMethod(env, java_lang_StringFactory,
- true, "newStringFromStringBuilder", "(Ljava/lang/StringBuilder;)Ljava/lang/String;");
-
dalvik_system_DexFile_cookie = CacheField(env, dalvik_system_DexFile, false, "mCookie", "Ljava/lang/Object;");
dalvik_system_DexFile_fileName = CacheField(env, dalvik_system_DexFile, false, "mFileName", "Ljava/lang/String;");
dalvik_system_PathClassLoader_pathList = CacheField(env, dalvik_system_PathClassLoader, false, "pathList", "Ldalvik/system/DexPathList;");
@@ -362,7 +351,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", "[Ljava/lang/StackTraceElement;");
java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "backtrace", "Ljava/lang/Object;");
java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;");
- java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "J");
+ java_lang_reflect_Executable_artMethod = CacheField(env, java_lang_reflect_Executable, false, "artMethod", "J");
java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "address", "J");
@@ -384,6 +373,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Long_valueOf = CachePrimitiveBoxingMethod(env, 'J', "java/lang/Long");
java_lang_Short_valueOf = CachePrimitiveBoxingMethod(env, 'S', "java/lang/Short");
+ InitStringInit(env);
Thread::Current()->InitStringEntryPoints();
}
@@ -399,43 +389,4 @@ mirror::Class* WellKnownClasses::ToClass(jclass global_jclass) {
return reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(global_jclass));
}
-jmethodID WellKnownClasses::StringInitToStringFactoryMethodID(jmethodID string_init) {
- // TODO: Prioritize ordering.
- if (string_init == java_lang_String_init) {
- return java_lang_StringFactory_newEmptyString;
- } else if (string_init == java_lang_String_init_B) {
- return java_lang_StringFactory_newStringFromBytes_B;
- } else if (string_init == java_lang_String_init_BI) {
- return java_lang_StringFactory_newStringFromBytes_BI;
- } else if (string_init == java_lang_String_init_BII) {
- return java_lang_StringFactory_newStringFromBytes_BII;
- } else if (string_init == java_lang_String_init_BIII) {
- return java_lang_StringFactory_newStringFromBytes_BIII;
- } else if (string_init == java_lang_String_init_BIIString) {
- return java_lang_StringFactory_newStringFromBytes_BIIString;
- } else if (string_init == java_lang_String_init_BString) {
- return java_lang_StringFactory_newStringFromBytes_BString;
- } else if (string_init == java_lang_String_init_BIICharset) {
- return java_lang_StringFactory_newStringFromBytes_BIICharset;
- } else if (string_init == java_lang_String_init_BCharset) {
- return java_lang_StringFactory_newStringFromBytes_BCharset;
- } else if (string_init == java_lang_String_init_C) {
- return java_lang_StringFactory_newStringFromChars_C;
- } else if (string_init == java_lang_String_init_CII) {
- return java_lang_StringFactory_newStringFromChars_CII;
- } else if (string_init == java_lang_String_init_IIC) {
- return java_lang_StringFactory_newStringFromChars_IIC;
- } else if (string_init == java_lang_String_init_String) {
- return java_lang_StringFactory_newStringFromString;
- } else if (string_init == java_lang_String_init_StringBuffer) {
- return java_lang_StringFactory_newStringFromStringBuffer;
- } else if (string_init == java_lang_String_init_III) {
- return java_lang_StringFactory_newStringFromCodePoints;
- } else if (string_init == java_lang_String_init_StringBuilder) {
- return java_lang_StringFactory_newStringFromStringBuilder;
- }
- LOG(FATAL) << "Could not find StringFactory method for String.<init>";
- return nullptr;
-}
-
} // namespace art
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index ce710ffa29..ddfc5b80f7 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -21,6 +21,9 @@
#include "jni.h"
namespace art {
+
+class ArtMethod;
+
namespace mirror {
class Class;
} // namespace mirror
@@ -35,7 +38,8 @@ struct WellKnownClasses {
public:
static void Init(JNIEnv* env); // Run before native methods are registered.
static void LateInit(JNIEnv* env); // Run after native methods are registered.
- static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init);
+ static ArtMethod* StringInitToStringFactory(ArtMethod* method);
+ static uint32_t StringInitToEntryPoint(ArtMethod* method);
static mirror::Class* ToClass(jclass global_jclass)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -59,10 +63,12 @@ struct WellKnownClasses {
static jclass java_lang_NoClassDefFoundError;
static jclass java_lang_Object;
static jclass java_lang_OutOfMemoryError;
- static jclass java_lang_reflect_AbstractMethod;
static jclass java_lang_reflect_Constructor;
+ static jclass java_lang_reflect_Executable;
static jclass java_lang_reflect_Field;
static jclass java_lang_reflect_Method;
+ static jclass java_lang_reflect_Parameter;
+ static jclass java_lang_reflect_Parameter__array;
static jclass java_lang_reflect_Proxy;
static jclass java_lang_RuntimeException;
static jclass java_lang_StackOverflowError;
@@ -97,41 +103,10 @@ struct WellKnownClasses {
static jmethodID java_lang_Long_valueOf;
static jmethodID java_lang_ref_FinalizerReference_add;
static jmethodID java_lang_ref_ReferenceQueue_add;
+ static jmethodID java_lang_reflect_Parameter_init;
static jmethodID java_lang_reflect_Proxy_invoke;
static jmethodID java_lang_Runtime_nativeLoad;
static jmethodID java_lang_Short_valueOf;
- static jmethodID java_lang_String_init;
- static jmethodID java_lang_String_init_B;
- static jmethodID java_lang_String_init_BI;
- static jmethodID java_lang_String_init_BII;
- static jmethodID java_lang_String_init_BIII;
- static jmethodID java_lang_String_init_BIIString;
- static jmethodID java_lang_String_init_BString;
- static jmethodID java_lang_String_init_BIICharset;
- static jmethodID java_lang_String_init_BCharset;
- static jmethodID java_lang_String_init_C;
- static jmethodID java_lang_String_init_CII;
- static jmethodID java_lang_String_init_IIC;
- static jmethodID java_lang_String_init_String;
- static jmethodID java_lang_String_init_StringBuffer;
- static jmethodID java_lang_String_init_III;
- static jmethodID java_lang_String_init_StringBuilder;
- static jmethodID java_lang_StringFactory_newEmptyString;
- static jmethodID java_lang_StringFactory_newStringFromBytes_B;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BI;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BII;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BIII;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BIIString;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BString;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BIICharset;
- static jmethodID java_lang_StringFactory_newStringFromBytes_BCharset;
- static jmethodID java_lang_StringFactory_newStringFromChars_C;
- static jmethodID java_lang_StringFactory_newStringFromChars_CII;
- static jmethodID java_lang_StringFactory_newStringFromChars_IIC;
- static jmethodID java_lang_StringFactory_newStringFromString;
- static jmethodID java_lang_StringFactory_newStringFromStringBuffer;
- static jmethodID java_lang_StringFactory_newStringFromCodePoints;
- static jmethodID java_lang_StringFactory_newStringFromStringBuilder;
static jmethodID java_lang_System_runFinalization;
static jmethodID java_lang_Thread_dispatchUncaughtException;
static jmethodID java_lang_Thread_init;
@@ -148,7 +123,7 @@ struct WellKnownClasses {
static jfieldID dalvik_system_DexPathList_dexElements;
static jfieldID dalvik_system_DexPathList__Element_dexFile;
static jfieldID dalvik_system_PathClassLoader_pathList;
- static jfieldID java_lang_reflect_AbstractMethod_artMethod;
+ static jfieldID java_lang_reflect_Executable_artMethod;
static jfieldID java_lang_reflect_Proxy_h;
static jfieldID java_lang_Thread_daemon;
static jfieldID java_lang_Thread_group;
@@ -177,6 +152,9 @@ struct WellKnownClasses {
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_length;
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_offset;
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_type;
+
+ private:
+ static void InitStringInit(JNIEnv* env);
};
} // namespace art
diff --git a/test/005-annotations/build b/test/005-annotations/build
index 216843d619..8b9f55065e 100644
--- a/test/005-annotations/build
+++ b/test/005-annotations/build
@@ -30,7 +30,8 @@ rm 'classes/android/test/anno/ClassWithInnerAnnotationClass$MissingInnerAnnotati
if [ ${USE_JACK} = "true" ]; then
jar cf classes.jill.jar -C classes .
- ${JACK} --import classes.jill.jar --output-dex .
+ # Jack needs to emit annotations with CLASS retention.
+ ${JACK} -D jack.dex.annotation.class-retention=true --import classes.jill.jar --output-dex .
else
if [ ${NEED_DEX} = "true" ]; then
${DX} -JXmx256m --debug --dex --output=classes.dex classes
diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java
index 346e13d110..39e69a3066 100644
--- a/test/031-class-attributes/src/ClassAttrs.java
+++ b/test/031-class-attributes/src/ClassAttrs.java
@@ -1,9 +1,9 @@
import otherpackage.OtherPackageClass;
import java.io.Serializable;
-import java.lang.reflect.AbstractMethod;
import java.lang.reflect.AccessibleObject;
import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -223,7 +223,7 @@ public class ClassAttrs {
try {
Class<?> c = obj.getClass();
if (c == Method.class || c == Constructor.class) {
- c = AbstractMethod.class;
+ c = Executable.class;
}
method = c.getDeclaredMethod("getSignatureAttribute");
method.setAccessible(true);
diff --git a/test/099-vmdebug/check b/test/099-vmdebug/check
index 57111bcc99..d124ce8cfd 100755
--- a/test/099-vmdebug/check
+++ b/test/099-vmdebug/check
@@ -15,6 +15,6 @@
# limitations under the License.
# Strip the process pids and line numbers from exact error messages.
-sed -e '/^art E.*\] /d' "$2" > "$2.tmp"
+sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/118-noimage-dex2oat/check b/test/118-noimage-dex2oat/check
index 57111bcc99..4f4608316d 100755
--- a/test/118-noimage-dex2oat/check
+++ b/test/118-noimage-dex2oat/check
@@ -15,6 +15,6 @@
# limitations under the License.
# Strip the process pids and line numbers from exact error messages.
-sed -e '/^art E.*\] /d' "$2" > "$2.tmp"
+sed -e '/^dalvikvm.* E.*\] /d' "$2" > "$2.tmp"
diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/119-noimage-patchoat/check b/test/119-noimage-patchoat/check
index 57111bcc99..d124ce8cfd 100755
--- a/test/119-noimage-patchoat/check
+++ b/test/119-noimage-patchoat/check
@@ -15,6 +15,6 @@
# limitations under the License.
# Strip the process pids and line numbers from exact error messages.
-sed -e '/^art E.*\] /d' "$2" > "$2.tmp"
+sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 9f1499e38a..113b35f98d 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -91,7 +91,7 @@ static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
static void MoreErrorInfo(pid_t pid, bool sig_quit_on_fail) {
printf("Secondary pid is %d\n", pid);
- PrintFileToLog(StringPrintf("/proc/%d/maps", pid), ERROR);
+ PrintFileToLog(StringPrintf("/proc/%d/maps", pid), ::android::base::ERROR);
if (sig_quit_on_fail) {
int res = kill(pid, SIGQUIT);
diff --git a/test/143-string-value/check b/test/143-string-value/check
index 92f6e90023..2a3476c2ab 100755
--- a/test/143-string-value/check
+++ b/test/143-string-value/check
@@ -15,6 +15,6 @@
# limitations under the License.
# Strip error log messages.
-sed -e '/^art E.*\] /d' "$2" > "$2.tmp"
+sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/149-suspend-all-stress/suspend_all.cc b/test/149-suspend-all-stress/suspend_all.cc
index dfd944a267..c1c0ff9f5f 100644
--- a/test/149-suspend-all-stress/suspend_all.cc
+++ b/test/149-suspend-all-stress/suspend_all.cc
@@ -42,14 +42,16 @@ extern "C" JNIEXPORT void JNICALL Java_Main_suspendAndResume(JNIEnv*, jclass) {
break;
}
case kOPDumpStack: {
- Runtime::Current()->GetThreadList()->Dump(LOG(INFO));
+ Runtime::Current()->GetThreadList()->Dump(LOG_STREAM(INFO));
usleep(500);
break;
}
case kOPSuspendAllDumpStack: {
// Not yet supported.
- // ScopedSuspendAll ssa(__FUNCTION__);
- // Runtime::Current()->GetThreadList()->Dump(LOG(INFO));
+ if ((false)) {
+ ScopedSuspendAll ssa(__FUNCTION__);
+ Runtime::Current()->GetThreadList()->Dump(LOG_STREAM(INFO));
+ }
break;
}
case kOPNumber:
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 5b1473523b..40baa15938 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -1178,16 +1178,32 @@ public class Main {
* remove the second.
*/
- /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier (before)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<NotArg:i\d+>> Select [<<Const1>>,<<Const0>>,<<Arg>>]
- /// CHECK-DAG: <<NotNotArg:i\d+>> Select [<<Const1>>,<<Const0>>,<<NotArg>>]
+ /// CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: <<NotResult:i\d+>> Xor [<<Result>>,<<Const1>>]
+ /// CHECK-DAG: Return [<<NotResult>>]
+
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: <<NotResult:z\d+>> BooleanNot [<<Result>>]
+ /// CHECK-DAG: Return [<<NotResult>>]
+
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ /// CHECK-DAG: <<NotNotArg:z\d+>> BooleanNot [<<NotArg>>]
/// CHECK-DAG: Return [<<NotNotArg>>]
/// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) instruction_simplifier$after_bce (after)
/// CHECK-DAG: <<Arg:z\d+>> ParameterValue
+ /// CHECK-DAG: <<NotArg:z\d+>> BooleanNot [<<Arg>>]
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ /// CHECK-START: boolean Main.$noinline$NotNotBool(boolean) dead_code_elimination$final (after)
+ /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
/// CHECK-DAG: Return [<<Arg>>]
public static boolean NegateValue(boolean arg) {
diff --git a/test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali b/test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali
new file mode 100644
index 0000000000..765d0eb663
--- /dev/null
+++ b/test/463-checker-boolean-simplifier/smali/BooleanNotDx.smali
@@ -0,0 +1,65 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LBooleanNotSmali;
+.super Ljava/lang/Object;
+
+#
+# Elementary test negating a boolean. Verifies that blocks are merged and
+# empty branches removed.
+#
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (before)
+## CHECK-DAG: <<Param:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: If [<<Param>>]
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Const1>>]
+## CHECK-DAG: Return [<<Phi>>]
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (before)
+## CHECK: Goto
+## CHECK: Goto
+## CHECK: Goto
+## CHECK-NOT: Goto
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (after)
+## CHECK-DAG: <<Param:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<NotParam:i\d+>> Select [<<Const1>>,<<Const0>>,<<Param>>]
+## CHECK-DAG: Return [<<NotParam>>]
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (after)
+## CHECK-NOT: If
+## CHECK-NOT: Phi
+
+## CHECK-START: boolean BooleanNotSmali.BooleanNot(boolean) select_generator (after)
+## CHECK: Goto
+## CHECK-NOT: Goto
+
+.method public static BooleanNot(Z)Z
+ .registers 2
+
+ if-eqz v1, :true_start
+ const/4 v0, 0x0
+
+:return_start
+ return v0
+
+:true_start
+ const/4 v0, 0x1
+ goto :return_start
+
+.end method
diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java
index f0fe1b172f..9368488056 100644
--- a/test/463-checker-boolean-simplifier/src/Main.java
+++ b/test/463-checker-boolean-simplifier/src/Main.java
@@ -32,42 +32,14 @@ public class Main {
}
}
- /*
- * Elementary test negating a boolean. Verifies that blocks are merged and
- * empty branches removed.
- */
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (before)
- /// CHECK-DAG: <<Param:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: If [<<Param>>]
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,<<Const1>>]
- /// CHECK-DAG: Return [<<Phi>>]
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (before)
- /// CHECK: Goto
- /// CHECK: Goto
- /// CHECK: Goto
- /// CHECK-NOT: Goto
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (after)
- /// CHECK-DAG: <<Param:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<NotParam:i\d+>> Select [<<Const1>>,<<Const0>>,<<Param>>]
- /// CHECK-DAG: Return [<<NotParam>>]
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (after)
- /// CHECK-NOT: If
- /// CHECK-NOT: Phi
-
- /// CHECK-START: boolean Main.BooleanNot(boolean) select_generator (after)
- /// CHECK: Goto
- /// CHECK-NOT: Goto
-
- public static boolean BooleanNot(boolean x) {
- return !x;
+ // Invoke a method written in smali that implements the boolean ! operator. This method
+ // uses the if/else pattern generated by dx (while Jack generates a different pattern).
+ // Since this method is in a smali-generated class, we invoke it through reflection.
+ public static boolean SmaliBooleanNot(boolean x) throws Exception {
+ Class<?> c = Class.forName("BooleanNotSmali");
+ java.lang.reflect.Method method = c.getMethod("BooleanNot", boolean.class);
+ Object retValue = method.invoke(null, new Object[] { Boolean.valueOf(x) });
+ return ((Boolean) retValue).booleanValue();
}
/*
@@ -357,9 +329,9 @@ public class Main {
return x ? 42 : (write_field = 43);
}
- public static void main(String[] args) {
- assertBoolEquals(false, BooleanNot(true));
- assertBoolEquals(true, BooleanNot(false));
+ public static void main(String[] args) throws Exception {
+ assertBoolEquals(false, SmaliBooleanNot(true));
+ assertBoolEquals(true, SmaliBooleanNot(false));
assertBoolEquals(true, GreaterThan(10, 5));
assertBoolEquals(false, GreaterThan(10, 10));
assertBoolEquals(false, GreaterThan(5, 10));
diff --git a/test/555-checker-regression-x86const/build b/test/555-checker-regression-x86const/build
deleted file mode 100644
index 92ddfc9a58..0000000000
--- a/test/555-checker-regression-x86const/build
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# We can't use src-ex testing infrastructure because src and src-ex are compiled
-# with javac independetely and can't share code (without reflection).
-
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-
-mkdir classes-ex
-mv classes/UnresolvedClass.class classes-ex
-
-if [ ${USE_JACK} = "true" ]; then
- jar cf classes.jill.jar -C classes .
- jar cf classes-ex.jill.jar -C classes-ex .
-
- ${JACK} --import classes.jill.jar --output-dex .
- zip $TEST_NAME.jar classes.dex
- ${JACK} --import classes-ex.jill.jar --output-dex .
- zip ${TEST_NAME}-ex.jar classes.dex
-else
- if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
- zip $TEST_NAME.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
- zip ${TEST_NAME}-ex.jar classes.dex
- fi
-fi
diff --git a/test/555-checker-regression-x86const/expected.txt b/test/555-checker-regression-x86const/expected.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/555-checker-regression-x86const/expected.txt
+++ /dev/null
diff --git a/test/555-checker-regression-x86const/info.txt b/test/555-checker-regression-x86const/info.txt
deleted file mode 100644
index c4037fa88f..0000000000
--- a/test/555-checker-regression-x86const/info.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Check that X86 FP constant-area handling handles intrinsics with CurrentMethod
-on the call.
diff --git a/test/555-checker-regression-x86const/src/Main.java b/test/555-checker-regression-x86const/src/Main.java
deleted file mode 100644
index 914cfde74f..0000000000
--- a/test/555-checker-regression-x86const/src/Main.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main extends UnresolvedClass {
-
- /// CHECK-START: float Main.callAbs(float) register (before)
- /// CHECK: <<CurrentMethod:[ij]\d+>> CurrentMethod
- /// CHECK: <<ParamValue:f\d+>> ParameterValue
- /// CHECK: InvokeStaticOrDirect [<<ParamValue>>,<<CurrentMethod>>] method_name:java.lang.Math.abs
- static public float callAbs(float f) {
- // An intrinsic invoke in a method that has unresolved references will still
- // have a CurrentMethod as an argument. The X86 pc_relative_fixups_x86 pass
- // must be able to handle Math.abs invokes that have a CurrentMethod, as both
- // the CurrentMethod and the HX86LoadFromConstantTable (for the bitmask)
- // expect to be in the 'SpecialInputIndex' input index.
- return Math.abs(f);
- }
-
- static public void main(String[] args) {
- expectEquals(callAbs(-6.5f), 6.5f);
- }
-
- public static void expectEquals(float expected, float result) {
- if (expected != result) {
- throw new Error("Expected: " + expected + ", found: " + result);
- }
- }
-}
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index 811c280474..5ccc648076 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -70,20 +70,19 @@ public class Main {
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK: <<And:i\d+>> And [<<Select2>>,<<Select1>>]
+ /// CHECK-DAG: <<NotP1:i\d+>> Xor [<<P1>>,<<Const1>>]
+ /// CHECK-DAG: <<NotP2:i\d+>> Xor [<<P2>>,<<Const1>>]
+ /// CHECK: <<And:i\d+>> And [<<NotP1>>,<<NotP2>>]
/// CHECK: Return [<<And>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
- /// CHECK: <<Or:i\d+>> Or [<<Cond2>>,<<Cond1>>]
+ /// CHECK: <<Or:i\d+>> Or [<<Cond1>>,<<Cond2>>]
/// CHECK: <<BooleanNot:z\d+>> BooleanNot [<<Or>>]
/// CHECK: Return [<<BooleanNot>>]
@@ -138,20 +137,19 @@ public class Main {
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK: <<Or:i\d+>> Or [<<Select2>>,<<Select1>>]
+ /// CHECK: <<NotP1:i\d+>> Xor [<<P1>>,<<Const1>>]
+ /// CHECK: <<NotP2:i\d+>> Xor [<<P2>>,<<Const1>>]
+ /// CHECK: <<Or:i\d+>> Or [<<NotP1>>,<<NotP2>>]
/// CHECK: Return [<<Or>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
- /// CHECK: <<And:i\d+>> And [<<Cond2>>,<<Cond1>>]
+ /// CHECK: <<And:i\d+>> And [<<Cond1>>,<<Cond2>>]
/// CHECK: <<BooleanNot:z\d+>> BooleanNot [<<And>>]
/// CHECK: Return [<<BooleanNot>>]
@@ -246,20 +244,19 @@ public class Main {
* same pass.
*/
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (before)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier (before)
/// CHECK: <<P1:z\d+>> ParameterValue
/// CHECK: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK: <<Xor:i\d+>> Xor [<<Select2>>,<<Select1>>]
+ /// CHECK: <<NotP1:i\d+>> Xor [<<P1>>,<<Const1>>]
+ /// CHECK: <<NotP2:i\d+>> Xor [<<P2>>,<<Const1>>]
+ /// CHECK: <<Xor:i\d+>> Xor [<<NotP1>>,<<NotP2>>]
/// CHECK: Return [<<Xor>>]
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
+ /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier (after)
/// CHECK: <<Cond1:z\d+>> ParameterValue
/// CHECK: <<Cond2:z\d+>> ParameterValue
- /// CHECK: <<Xor:i\d+>> Xor [<<Cond2>>,<<Cond1>>]
+ /// CHECK: <<Xor:i\d+>> Xor [<<Cond1>>,<<Cond2>>]
/// CHECK: Return [<<Xor>>]
/// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 3bb372529f..b8324e54e3 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -70,4 +70,5 @@ b/28187158
b/29778499 (1)
b/29778499 (2)
b/30458218
+b/31313170
Done!
diff --git a/test/800-smali/smali/b_31313170.smali b/test/800-smali/smali/b_31313170.smali
new file mode 100644
index 0000000000..327942a365
--- /dev/null
+++ b/test/800-smali/smali/b_31313170.smali
@@ -0,0 +1,22 @@
+.class public LB31313170;
+.super Ljava/lang/Object;
+
+
+.method public constructor <init>()V
+.registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static run()I
+.registers 4
+ const/4 v0, 0
+ const/4 v1, 1
+ sget v2, LB31313170;->a:I
+ if-nez v2, :exit
+ move-object v1, v0
+ :exit
+ return v1
+.end method
+
+.field static public a:I
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 34f2580dde..8d39f0971f 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -181,6 +181,7 @@ public class Main {
testCases.add(new TestCase("b/29778499 (2)", "B29778499_2", "run", null,
new IncompatibleClassChangeError(), null));
testCases.add(new TestCase("b/30458218", "B30458218", "run", null, null, null));
+ testCases.add(new TestCase("b/31313170", "B31313170", "run", null, null, 0));
}
public void runTests() {
@@ -228,7 +229,7 @@ public class Main {
tc.testName);
} else if (tc.expectedReturn == null && retValue != null) {
errorReturn = new IllegalStateException("Expected a null result in test " +
- tc.testName);
+ tc.testName + " got " + retValue);
} else if (tc.expectedReturn != null &&
(retValue == null || !tc.expectedReturn.equals(retValue))) {
errorReturn = new IllegalStateException("Expected return " +
diff --git a/test/555-checker-regression-x86const/run b/test/902-hello-transformation/build
index 63fdb8c749..898e2e54a2 100644..100755
--- a/test/555-checker-regression-x86const/run
+++ b/test/902-hello-transformation/build
@@ -1,12 +1,12 @@
#!/bin/bash
#
-# Copyright (C) 2015 The Android Open Source Project
+# Copyright 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,5 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Use secondary switch to add secondary dex file to class path.
-exec ${RUN} "${@}" --secondary
+./default-build "$@" --experimental agents
diff --git a/test/902-hello-transformation/expected.txt b/test/902-hello-transformation/expected.txt
new file mode 100644
index 0000000000..e86e814cab
--- /dev/null
+++ b/test/902-hello-transformation/expected.txt
@@ -0,0 +1,3 @@
+Hello
+modifying class 'Transform'
+Goodbye
diff --git a/test/902-hello-transformation/info.txt b/test/902-hello-transformation/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/902-hello-transformation/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/902-hello-transformation/run b/test/902-hello-transformation/run
new file mode 100755
index 0000000000..204e4cc5d1
--- /dev/null
+++ b/test/902-hello-transformation/run
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+plugin=libopenjdkjvmtid.so
+agent=libtiagentd.so
+lib=tiagentd
+if [[ "$@" == *"-O"* ]]; then
+ agent=libtiagent.so
+ plugin=libopenjdkjvmti.so
+ lib=tiagent
+fi
+
+if [[ "$@" == *"--jvm"* ]]; then
+ arg="jvm"
+else
+ arg="art"
+fi
+
+if [[ "$@" != *"--debuggable"* ]]; then
+ other_args=" -Xcompiler-option --debuggable "
+else
+ other_args=""
+fi
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --runtime-option -agentpath:${agent}=902-hello-transformation,${arg} \
+ --android-runtime-option -Xplugin:${plugin} \
+ ${other_args} \
+ --args ${lib}
diff --git a/test/902-hello-transformation/src/Main.java b/test/902-hello-transformation/src/Main.java
new file mode 100644
index 0000000000..204b6e757d
--- /dev/null
+++ b/test/902-hello-transformation/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.loadLibrary(args[1]);
+ doTest(new Transform());
+ }
+
+ public static void doTest(Transform t) {
+ t.sayHi();
+ doClassTransformation(Transform.class);
+ t.sayHi();
+ }
+
+ // Transforms the class
+ private static native void doClassTransformation(Class target);
+}
diff --git a/test/902-hello-transformation/src/Transform.java b/test/902-hello-transformation/src/Transform.java
new file mode 100644
index 0000000000..dc0a0c4f04
--- /dev/null
+++ b/test/902-hello-transformation/src/Transform.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Transform {
+ public void sayHi() {
+ System.out.println("Hello");
+ }
+}
diff --git a/test/902-hello-transformation/transform.cc b/test/902-hello-transformation/transform.cc
new file mode 100644
index 0000000000..e0d623e6e1
--- /dev/null
+++ b/test/902-hello-transformation/transform.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <pthread.h>
+#include <stdio.h>
+#include <vector>
+
+#include "art_method-inl.h"
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "utils.h"
+
+namespace art {
+namespace Test902HelloTransformation {
+
+static bool RuntimeIsJvm = false;
+
+jvmtiEnv* jvmti_env;
+bool IsJVM() {
+ return RuntimeIsJvm;
+}
+
+// base64 encoded class/dex file for
+//
+// class Transform {
+// public void sayHi() {
+// System.out.println("Goodbye");
+// }
+// }
+const char* class_file_base64 =
+ "yv66vgAAADQAHAoABgAOCQAPABAIABEKABIAEwcAFAcAFQEABjxpbml0PgEAAygpVgEABENvZGUB"
+ "AA9MaW5lTnVtYmVyVGFibGUBAAVzYXlIaQEAClNvdXJjZUZpbGUBAA5UcmFuc2Zvcm0uamF2YQwA"
+ "BwAIBwAWDAAXABgBAAdHb29kYnllBwAZDAAaABsBAAlUcmFuc2Zvcm0BABBqYXZhL2xhbmcvT2Jq"
+ "ZWN0AQAQamF2YS9sYW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2ph"
+ "dmEvaW8vUHJpbnRTdHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWACAABQAG"
+ "AAAAAAACAAAABwAIAAEACQAAAB0AAQABAAAABSq3AAGxAAAAAQAKAAAABgABAAAAEQABAAsACAAB"
+ "AAkAAAAlAAIAAQAAAAmyAAISA7YABLEAAAABAAoAAAAKAAIAAAATAAgAFAABAAwAAAACAA0=";
+
+const char* dex_file_base64 =
+ "ZGV4CjAzNQCLXSBQ5FiS3f16krSYZFF8xYZtFVp0GRXMAgAAcAAAAHhWNBIAAAAAAAAAACwCAAAO"
+ "AAAAcAAAAAYAAACoAAAAAgAAAMAAAAABAAAA2AAAAAQAAADgAAAAAQAAAAABAACsAQAAIAEAAGIB"
+ "AABqAQAAcwEAAIABAACXAQAAqwEAAL8BAADTAQAA4wEAAOYBAADqAQAA/gEAAAMCAAAMAgAAAgAA"
+ "AAMAAAAEAAAABQAAAAYAAAAIAAAACAAAAAUAAAAAAAAACQAAAAUAAABcAQAABAABAAsAAAAAAAAA"
+ "AAAAAAAAAAANAAAAAQABAAwAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAHAAAAAAAAAB4CAAAA"
+ "AAAAAQABAAEAAAATAgAABAAAAHAQAwAAAA4AAwABAAIAAAAYAgAACQAAAGIAAAAbAQEAAABuIAIA"
+ "EAAOAAAAAQAAAAMABjxpbml0PgAHR29vZGJ5ZQALTFRyYW5zZm9ybTsAFUxqYXZhL2lvL1ByaW50"
+ "U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xh"
+ "bmcvU3lzdGVtOwAOVHJhbnNmb3JtLmphdmEAAVYAAlZMABJlbWl0dGVyOiBqYWNrLTMuMzYAA291"
+ "dAAHcHJpbnRsbgAFc2F5SGkAEQAHDgATAAcOhQAAAAEBAICABKACAQG4Ag0AAAAAAAAAAQAAAAAA"
+ "AAABAAAADgAAAHAAAAACAAAABgAAAKgAAAADAAAAAgAAAMAAAAAEAAAAAQAAANgAAAAFAAAABAAA"
+ "AOAAAAAGAAAAAQAAAAABAAABIAAAAgAAACABAAABEAAAAQAAAFwBAAACIAAADgAAAGIBAAADIAAA"
+ "AgAAABMCAAAAIAAAAQAAAB4CAAAAEAAAAQAAACwCAAA=";
+
+static void JNICALL transformationHook(jvmtiEnv *jvmtienv,
+ JNIEnv* jni_env ATTRIBUTE_UNUSED,
+ jclass class_being_redefined ATTRIBUTE_UNUSED,
+ jobject loader ATTRIBUTE_UNUSED,
+ const char* name,
+ jobject protection_domain ATTRIBUTE_UNUSED,
+ jint class_data_len ATTRIBUTE_UNUSED,
+ const unsigned char* class_data ATTRIBUTE_UNUSED,
+ jint* new_class_data_len,
+ unsigned char** new_class_data) {
+ if (strcmp("Transform", name)) {
+ return;
+ }
+ printf("modifying class '%s'\n", name);
+ bool is_jvm = IsJVM();
+ size_t decode_len = 0;
+ unsigned char* new_data;
+ std::unique_ptr<uint8_t[]> file_data(
+ DecodeBase64((is_jvm) ? class_file_base64 : dex_file_base64, &decode_len));
+ jvmtiError ret = JVMTI_ERROR_NONE;
+ if ((ret = jvmtienv->Allocate(static_cast<jlong>(decode_len), &new_data)) != JVMTI_ERROR_NONE) {
+ printf("Unable to allocate buffer!\n");
+ return;
+ }
+ memcpy(new_data, file_data.get(), decode_len);
+ *new_class_data_len = static_cast<jint>(decode_len);
+ *new_class_data = new_data;
+ return;
+}
+
+using RetransformWithHookFunction = jvmtiError (*)(jvmtiEnv*, jclass, jvmtiEventClassFileLoadHook);
+static void DoClassTransformation(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jclass target) {
+ if (IsJVM()) {
+ UNUSED(jnienv);
+ jvmtienv->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_FILE_LOAD_HOOK, nullptr);
+ jvmtiError ret = jvmtienv->RetransformClasses(1, &target);
+ if (ret != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmtienv->GetErrorName(ret, &err);
+ printf("Error transforming: %s\n", err);
+ }
+ } else {
+ RetransformWithHookFunction f =
+ reinterpret_cast<RetransformWithHookFunction>(jvmtienv->functions->reserved1);
+ if (f(jvmtienv, target, transformationHook) != JVMTI_ERROR_NONE) {
+ printf("Failed to tranform class!");
+ return;
+ }
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_doClassTransformation(JNIEnv* env,
+ jclass,
+ jclass target) {
+ JavaVM* vm;
+ if (env->GetJavaVM(&vm)) {
+ printf("Unable to get javaVM!\n");
+ return;
+ }
+ DoClassTransformation(jvmti_env, env, target);
+}
+
+// Set up the jvmti environment; on the RI, also register the ClassFileLoadHook callback.
+jint OnLoad(JavaVM* vm,
+ char* options,
+ void* reserved ATTRIBUTE_UNUSED) {
+ jvmtiCapabilities caps;
+ RuntimeIsJvm = (strcmp("jvm", options) == 0);
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti_env), JVMTI_VERSION_1_0)) {
+ printf("Unable to get jvmti env!\n");
+ return 1;
+ }
+ if (IsJVM()) {
+ jvmti_env->GetPotentialCapabilities(&caps);
+ jvmti_env->AddCapabilities(&caps);
+ jvmtiEventCallbacks cbs;
+ memset(&cbs, 0, sizeof(cbs));
+ cbs.ClassFileLoadHook = transformationHook;
+ jvmti_env->SetEventCallbacks(&cbs, sizeof(jvmtiEventCallbacks));
+ }
+ return 0;
+}
+
+} // namespace Test902HelloTransformation
+} // namespace art
+
diff --git a/test/902-hello-transformation/transform.h b/test/902-hello-transformation/transform.h
new file mode 100644
index 0000000000..661058dd99
--- /dev/null
+++ b/test/902-hello-transformation/transform.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
+#define ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
+
+#include <jni.h>
+
+namespace art {
+namespace Test902HelloTransformation {
+
+jint OnLoad(JavaVM* vm, char* options, void* reserved);
+
+} // namespace Test902HelloTransformation
+} // namespace art
+
+#endif // ART_TEST_902_HELLO_TRANSFORMATION_TRANSFORM_H_
diff --git a/test/961-default-iface-resolut-generated/build b/test/961-default-iface-resolution-gen/build
index ccebbe4ac9..ccebbe4ac9 100755
--- a/test/961-default-iface-resolut-generated/build
+++ b/test/961-default-iface-resolution-gen/build
diff --git a/test/961-default-iface-resolut-generated/expected.txt b/test/961-default-iface-resolution-gen/expected.txt
index 1ddd65d177..1ddd65d177 100644
--- a/test/961-default-iface-resolut-generated/expected.txt
+++ b/test/961-default-iface-resolution-gen/expected.txt
diff --git a/test/961-default-iface-resolut-generated/info.txt b/test/961-default-iface-resolution-gen/info.txt
index 2cd2cc75b7..2cd2cc75b7 100644
--- a/test/961-default-iface-resolut-generated/info.txt
+++ b/test/961-default-iface-resolution-gen/info.txt
diff --git a/test/961-default-iface-resolut-generated/util-src/generate_java.py b/test/961-default-iface-resolution-gen/util-src/generate_java.py
index a205cd6ce0..a205cd6ce0 100755
--- a/test/961-default-iface-resolut-generated/util-src/generate_java.py
+++ b/test/961-default-iface-resolution-gen/util-src/generate_java.py
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-gen/build
index ccebbe4ac9..ccebbe4ac9 100755
--- a/test/964-default-iface-init-generated/build
+++ b/test/964-default-iface-init-gen/build
diff --git a/test/964-default-iface-init-generated/expected.txt b/test/964-default-iface-init-gen/expected.txt
index 1ddd65d177..1ddd65d177 100644
--- a/test/964-default-iface-init-generated/expected.txt
+++ b/test/964-default-iface-init-gen/expected.txt
diff --git a/test/964-default-iface-init-generated/info.txt b/test/964-default-iface-init-gen/info.txt
index 5805a86854..5805a86854 100644
--- a/test/964-default-iface-init-generated/info.txt
+++ b/test/964-default-iface-init-gen/info.txt
diff --git a/test/964-default-iface-init-generated/src/Displayer.java b/test/964-default-iface-init-gen/src/Displayer.java
index 4be0ab2732..4be0ab2732 100644
--- a/test/964-default-iface-init-generated/src/Displayer.java
+++ b/test/964-default-iface-init-gen/src/Displayer.java
diff --git a/test/964-default-iface-init-generated/util-src/generate_java.py b/test/964-default-iface-init-gen/util-src/generate_java.py
index b2df49f70e..b2df49f70e 100755
--- a/test/964-default-iface-init-generated/util-src/generate_java.py
+++ b/test/964-default-iface-init-gen/util-src/generate_java.py
diff --git a/test/968-default-part-compile-generated/build b/test/968-default-partial-compile-gen/build
index 1e9f8aadd5..1e9f8aadd5 100755
--- a/test/968-default-part-compile-generated/build
+++ b/test/968-default-partial-compile-gen/build
diff --git a/test/968-default-part-compile-generated/expected.txt b/test/968-default-partial-compile-gen/expected.txt
index 1ddd65d177..1ddd65d177 100644
--- a/test/968-default-part-compile-generated/expected.txt
+++ b/test/968-default-partial-compile-gen/expected.txt
diff --git a/test/968-default-part-compile-generated/info.txt b/test/968-default-partial-compile-gen/info.txt
index bc1c42816e..bc1c42816e 100644
--- a/test/968-default-part-compile-generated/info.txt
+++ b/test/968-default-partial-compile-gen/info.txt
diff --git a/test/968-default-part-compile-generated/util-src/generate_java.py b/test/968-default-partial-compile-gen/util-src/generate_java.py
index 35290efe1d..35290efe1d 100755
--- a/test/968-default-part-compile-generated/util-src/generate_java.py
+++ b/test/968-default-partial-compile-gen/util-src/generate_java.py
diff --git a/test/968-default-part-compile-generated/util-src/generate_smali.py b/test/968-default-partial-compile-gen/util-src/generate_smali.py
index 9855bcf854..9855bcf854 100755
--- a/test/968-default-part-compile-generated/util-src/generate_smali.py
+++ b/test/968-default-partial-compile-gen/util-src/generate_smali.py
diff --git a/test/970-iface-superresolution-generated/build b/test/970-iface-super-resolution-gen/build
index fd1b271c1c..fd1b271c1c 100755
--- a/test/970-iface-superresolution-generated/build
+++ b/test/970-iface-super-resolution-gen/build
diff --git a/test/970-iface-superresolution-generated/expected.txt b/test/970-iface-super-resolution-gen/expected.txt
index 1ddd65d177..1ddd65d177 100644
--- a/test/970-iface-superresolution-generated/expected.txt
+++ b/test/970-iface-super-resolution-gen/expected.txt
diff --git a/test/970-iface-superresolution-generated/info.txt b/test/970-iface-super-resolution-gen/info.txt
index 2cd2cc75b7..2cd2cc75b7 100644
--- a/test/970-iface-superresolution-generated/info.txt
+++ b/test/970-iface-super-resolution-gen/info.txt
diff --git a/test/970-iface-superresolution-generated/util-src/generate_java.py b/test/970-iface-super-resolution-gen/util-src/generate_java.py
index c12f10d790..c12f10d790 100755
--- a/test/970-iface-superresolution-generated/util-src/generate_java.py
+++ b/test/970-iface-super-resolution-gen/util-src/generate_java.py
diff --git a/test/970-iface-superresolution-generated/util-src/generate_smali.py b/test/970-iface-super-resolution-gen/util-src/generate_smali.py
index cb7b0fa4f2..cb7b0fa4f2 100755
--- a/test/970-iface-superresolution-generated/util-src/generate_smali.py
+++ b/test/970-iface-super-resolution-gen/util-src/generate_smali.py
diff --git a/test/Android.bp b/test/Android.bp
index ff408f41a7..72dcbba9e2 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -40,6 +40,9 @@ art_cc_defaults {
enabled: false,
},
},
+ cflags: [
+ "-Wno-frame-larger-than=",
+ ],
}
art_cc_defaults {
@@ -60,6 +63,7 @@ art_cc_defaults {
"libvixld-arm64",
"libart-gtest",
+ "libbase",
"libicuuc",
"libicui18n",
"libnativehelper",
@@ -201,6 +205,7 @@ cc_defaults {
],
shared_libs: [
"libbacktrace",
+ "libbase",
"libnativehelper",
],
target: {
@@ -239,9 +244,11 @@ art_cc_test_library {
srcs: [
"ti-agent/common_load.cc",
"901-hello-ti-agent/basics.cc",
+ "902-hello-transformation/transform.cc",
],
shared_libs: [
"libart",
+ "libbase",
"libopenjdkjvmti",
],
}
@@ -255,6 +262,7 @@ art_cc_test_library {
srcs: [
"ti-agent/common_load.cc",
"901-hello-ti-agent/basics.cc",
+ "902-hello-transformation/transform.cc",
],
shared_libs: [
"libartd",
@@ -301,6 +309,7 @@ cc_defaults {
],
shared_libs: [
"libbacktrace",
+ "libbase",
"libnativehelper",
],
target: {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 559e96359b..211a69fa76 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -263,9 +263,12 @@ endif
# 147-stripped-dex-fallback isn't supported on device because --strip-dex
# requires the zip command.
# 569-checker-pattern-replacement tests behaviour present only on host.
+# 902-hello-transformation isn't supported in current form due to linker
+# restrictions. See b/31681198
TEST_ART_BROKEN_TARGET_TESTS := \
147-stripped-dex-fallback \
- 569-checker-pattern-replacement
+ 569-checker-pattern-replacement \
+ 902-hello-transformation
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -278,11 +281,11 @@ TEST_ART_BROKEN_TARGET_TESTS :=
# Tests that require python3.
TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
960-default-smali \
- 961-default-iface-resolut-generated \
- 964-default-iface-init-generated \
- 968-default-part-compile-generated \
+ 961-default-iface-resolution-gen \
+ 964-default-iface-init-gen \
+ 968-default-partial-compile-gen \
969-iface-super \
- 970-iface-superresolution-generated \
+ 970-iface-super-resolution-gen \
971-iface-super
# Check if we have python3 to run our tests.
@@ -346,9 +349,7 @@ endif
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
-# 004-JniTest is disabled because @CriticalNative is unsupported by generic JNI b/31400248
TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
- 004-JniTest \
137-cfi
ifneq (,$(filter interp-ac,$(COMPILER_TYPES)))
@@ -362,13 +363,13 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
# Tests that are broken with GC stress.
# * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
# hope the second process got into the expected state. The slowness of gcstress makes this bad.
-# * 961-default-iface-resolut-generated and 964-default-iface-init-generated are very long tests
-# that often will take more than the timeout to run when gcstress is enabled. This is because
-# gcstress slows down allocations significantly which these tests do a lot.
+# * 961-default-iface-resolution-gen and 964-default-iface-init-gen are very long tests that often
+# will take more than the timeout to run when gcstress is enabled. This is because gcstress
+# slows down allocations significantly which these tests do a lot.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
137-cfi \
- 961-default-iface-resolut-generated \
- 964-default-iface-init-generated
+ 961-default-iface-resolution-gen \
+ 964-default-iface-init-gen
ifneq (,$(filter gcstress,$(GC_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -407,11 +408,9 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREB
# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
# Therefore we shouldn't run them in situations where we actually don't have these since they
# explicitly test for them. These all also assume we have an image.
-# 004-JniTest is disabled because @CriticalNative is unsupported by generic JNI b/31400248
# 147-stripped-dex-fallback is disabled because it requires --prebuild.
# 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save.
TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \
- 004-JniTest \
116-nodex2oat \
117-nopatchoat \
118-noimage-dex2oat \
@@ -485,9 +484,7 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Known broken tests for the JIT.
# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
# also uses Generic JNI instead of the JNI compiler.
-# 004-JniTest is disabled because @CriticalNative is unsupported by generic JNI b/31400248
TEST_ART_BROKEN_JIT_RUN_TESTS := \
- 004-JniTest \
137-cfi
ifneq (,$(filter jit,$(COMPILER_TYPES)))
diff --git a/test/555-checker-regression-x86const/src/Unresolved.java b/test/ImageLayoutA/ImageLayoutA.java
index e98bdbf8fb..0784ec267f 100644
--- a/test/555-checker-regression-x86const/src/Unresolved.java
+++ b/test/ImageLayoutA/ImageLayoutA.java
@@ -14,5 +14,8 @@
* limitations under the License.
*/
-class UnresolvedClass {
+import java.util.HashMap;
+
+class MyClass {
+ static int i = 123;
}
diff --git a/test/ImageLayoutB/ImageLayoutB.java b/test/ImageLayoutB/ImageLayoutB.java
new file mode 100644
index 0000000000..a21c5e20fc
--- /dev/null
+++ b/test/ImageLayoutB/ImageLayoutB.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.HashMap;
+
+class MyClass {
+ public static String string = "ASDF_UNIQUE_STRING";
+ public static HashMap<String, String> map = new HashMap<String, String>();
+ static {
+ map.put("KEY_FOR_HASH_MAP", "VALUE_FOR_HASH_MAP");
+ }
+}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index a445f4d630..c51cb0db2a 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -310,8 +310,9 @@ if [ "$DEBUGGER" = "y" ]; then
fi
if [ "$USE_JVM" = "y" ]; then
+ export LD_LIBRARY_PATH=${ANDROID_HOST_OUT}/lib64
# Xmx is necessary since we don't pass down the ART flags to JVM.
- cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes ${FLAGS} $MAIN $@"
+ cmdline="${JAVA} ${DEBUGGER_OPTS} ${JVM_VERIFY_ARG} -Xmx256m -classpath classes ${FLAGS} $MAIN $@ ${ARGS}"
if [ "$DEV_MODE" = "y" ]; then
echo $cmdline
fi
diff --git a/test/run-test b/test/run-test
index ae53f9ecc0..7a4afafa4e 100755
--- a/test/run-test
+++ b/test/run-test
@@ -476,7 +476,7 @@ elif [ "$runtime" = "art" ]; then
run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}:${ANDROID_HOST_OUT}/nativetest${suffix64}"
else
guess_target_arch_name
- run_args="${run_args} --runtime-option -Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}:/system/lib${suffix64}"
+ run_args="${run_args} --runtime-option -Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}:${android_root}/lib${suffix64}"
run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art"
fi
if [ "$relocate" = "yes" ]; then
@@ -743,9 +743,7 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
fi
fi
-if [ "$runtime" != "jvm" ]; then
run_args="${run_args} --testlib ${testlib}"
-fi
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
build_file_size_limit=2048
@@ -939,6 +937,7 @@ if [ "$bisection_search" = "yes" -a "$good" != "yes" ]; then
--raw-cmd="$raw_cmd" \
--check-script="$cwd/check" \
--expected-output="$cwd/expected.txt" \
+ --logfile="$cwd/bisection_log.txt" \
--timeout=300
fi
fi
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index ed280e4fdc..53bb1533e7 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "901-hello-ti-agent/basics.h"
+#include "902-hello-transformation/transform.h"
namespace art {
@@ -39,6 +40,7 @@ struct AgentLib {
// A list of all the agents we have for testing.
AgentLib agents[] = {
{ "901-hello-ti-agent", Test901HelloTi::OnLoad, nullptr },
+ { "902-hello-transformation", Test902HelloTransformation::OnLoad, nullptr },
};
static AgentLib* FindAgent(char* name) {
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 4003ee08c5..ebf087d859 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -73,7 +73,7 @@ AHAT_TEST_DUMP_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) \
$(HOST_OUT_EXECUTABLES)/art \
- $(HOST_CORE_IMG_OUT_BASE)-optimizing-pic$(CORE_IMG_SUFFIX)
+ $(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index ecf9e5383d..8604ff0cbb 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -9,7 +9,6 @@ Usage:
Serve pages on the given port. Defaults to 7100.
TODO:
- * Show GC Root paths.
* Have a way to diff two heap dumps.
* Add more tips to the help page.
@@ -76,6 +75,7 @@ Things to move to perflib:
Release History:
0.8 Pending
+ Show sample path from GC root with field names in place of dominator path.
0.7 Aug 16, 2016
Launch ahat server before processing the heap dump.
diff --git a/tools/ahat/src/HeapTable.java b/tools/ahat/src/HeapTable.java
index ed11d1724a..5b840489d2 100644
--- a/tools/ahat/src/HeapTable.java
+++ b/tools/ahat/src/HeapTable.java
@@ -84,10 +84,10 @@ class HeapTable {
for (Heap heap : heaps) {
long size = config.getSize(elem, heap);
total += size;
- vals.add(DocString.format("%,14d", size));
+ vals.add(size == 0 ? DocString.text("") : DocString.format("%,14d", size));
}
if (showTotal) {
- vals.add(DocString.format("%,14d", total));
+ vals.add(total == 0 ? DocString.text("") : DocString.format("%,14d", total));
}
for (ValueConfig<T> value : values) {
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index 8769d115ff..94934a2831 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -19,11 +19,17 @@ package com.android.ahat;
import com.android.tools.perflib.heap.ArrayInstance;
import com.android.tools.perflib.heap.ClassInstance;
import com.android.tools.perflib.heap.ClassObj;
+import com.android.tools.perflib.heap.Field;
import com.android.tools.perflib.heap.Heap;
import com.android.tools.perflib.heap.Instance;
+import com.android.tools.perflib.heap.RootObj;
import com.android.tools.perflib.heap.Type;
import java.awt.image.BufferedImage;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
/**
* Utilities for extracting information from hprof instances.
@@ -179,7 +185,7 @@ class InstanceUtils {
* Read a reference field of an instance.
* Returns null if the field value is null, or if the field couldn't be read.
*/
- private static Instance getRefField(Instance inst, String fieldName) {
+ public static Instance getRefField(Instance inst, String fieldName) {
Object value = getField(inst, fieldName);
if (!(value instanceof Instance)) {
return null;
@@ -357,4 +363,90 @@ class InstanceUtils {
}
return new NativeAllocation(size, inst.getHeap(), pointer, referent);
}
+
+ public static class PathElement {
+ public final Instance instance;
+ public final String field;
+ public boolean isDominator;
+
+ public PathElement(Instance instance, String field) {
+ this.instance = instance;
+ this.field = field;
+ this.isDominator = false;
+ }
+ }
+
+ /**
+ * Returns a sample path from a GC root to this instance.
+ * The given instance is included as the last element of the path with an
+ * empty field description.
+ */
+ public static List<PathElement> getPathFromGcRoot(Instance inst) {
+ List<PathElement> path = new ArrayList<PathElement>();
+
+ Instance dom = inst;
+ for (PathElement elem = new PathElement(inst, ""); elem != null;
+ elem = getNextPathElementToGcRoot(elem.instance)) {
+ if (elem.instance == dom) {
+ elem.isDominator = true;
+ dom = dom.getImmediateDominator();
+ }
+ path.add(elem);
+ }
+ Collections.reverse(path);
+ return path;
+ }
+
+ /**
+ * Returns the next instance to GC root from this object and a string
+ * description of which field of that object refers to the given instance.
+ * Returns null if the given instance has no next instance to the gc root.
+ */
+ private static PathElement getNextPathElementToGcRoot(Instance inst) {
+ Instance parent = inst.getNextInstanceToGcRoot();
+ if (parent == null || parent instanceof RootObj) {
+ return null;
+ }
+
+ // Search the parent for the reference to the child.
+ // TODO: This seems terribly inefficient. Can we use data structures to
+ // help us here?
+ String description = ".???";
+ if (parent instanceof ArrayInstance) {
+ ArrayInstance array = (ArrayInstance)parent;
+ Object[] values = array.getValues();
+ for (int i = 0; i < values.length; i++) {
+ if (values[i] instanceof Instance) {
+ Instance ref = (Instance)values[i];
+ if (ref.getId() == inst.getId()) {
+ description = String.format("[%d]", i);
+ break;
+ }
+ }
+ }
+ } else if (parent instanceof ClassObj) {
+ ClassObj cls = (ClassObj)parent;
+ for (Map.Entry<Field, Object> entries : cls.getStaticFieldValues().entrySet()) {
+ if (entries.getValue() instanceof Instance) {
+ Instance ref = (Instance)entries.getValue();
+ if (ref.getId() == inst.getId()) {
+ description = "." + entries.getKey().getName();
+ break;
+ }
+ }
+ }
+ } else if (parent instanceof ClassInstance) {
+ ClassInstance obj = (ClassInstance)parent;
+ for (ClassInstance.FieldValue fields : obj.getValues()) {
+ if (fields.getValue() instanceof Instance) {
+ Instance ref = (Instance)fields.getValue();
+ if (ref.getId() == inst.getId()) {
+ description = "." + fields.getField().getName();
+ break;
+ }
+ }
+ }
+ }
+ return new PathElement(parent, description);
+ }
}
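The getPathFromGcRoot() addition above walks perflib's next-instance-to-GC-root links from the target object back towards a root, records which field of each parent holds the reference, and flags the entries that also lie on the immediate-dominator chain (only those later get a retained-size cell). A minimal sketch of that loop, written here in Python with hypothetical next_to_gc_root(obj) and immediate_dominator(obj) helpers standing in for the perflib calls:

    # Hedged sketch only: next_to_gc_root(obj) returns (parent, field) or None and
    # immediate_dominator(obj) returns the dominator; both are hypothetical
    # stand-ins for the perflib calls used in InstanceUtils.java above.
    def path_from_gc_root(target, next_to_gc_root, immediate_dominator):
        path = []
        dom = target                   # next dominator we expect to encounter
        node, field = target, ""       # the target ends the path with an empty field
        while node is not None:
            is_dominator = node is dom
            if is_dominator:
                dom = immediate_dominator(node)
            path.append((node, field, is_dominator))
            step = next_to_gc_root(node)
            node, field = step if step is not None else (None, None)
        path.reverse()                 # root first, target last
        return path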
diff --git a/tools/ahat/src/ObjectHandler.java b/tools/ahat/src/ObjectHandler.java
index 4df1be5ac2..78aac178e9 100644
--- a/tools/ahat/src/ObjectHandler.java
+++ b/tools/ahat/src/ObjectHandler.java
@@ -22,7 +22,6 @@ import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Field;
import com.android.tools.perflib.heap.Heap;
import com.android.tools.perflib.heap.Instance;
-import com.android.tools.perflib.heap.RootObj;
import com.android.tools.perflib.heap.RootType;
import java.io.IOException;
import java.util.ArrayList;
@@ -32,6 +31,8 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
+import static com.android.ahat.InstanceUtils.PathElement;
+
class ObjectHandler implements AhatHandler {
private static final String ARRAY_ELEMENTS_ID = "elements";
@@ -62,7 +63,7 @@ class ObjectHandler implements AhatHandler {
doc.big(Value.render(mSnapshot, inst));
printAllocationSite(doc, query, inst);
- printDominatorPath(doc, query, inst);
+ printGcRootPath(doc, query, inst);
doc.section("Object Info");
ClassObj cls = inst.getClassObj();
@@ -202,43 +203,43 @@ class ObjectHandler implements AhatHandler {
}
}
- private void printDominatorPath(Doc doc, Query query, Instance inst) {
- doc.section("Dominator Path from Root");
- List<Instance> path = new ArrayList<Instance>();
- for (Instance parent = inst;
- parent != null && !(parent instanceof RootObj);
- parent = parent.getImmediateDominator()) {
- path.add(parent);
- }
+ private void printGcRootPath(Doc doc, Query query, Instance inst) {
+ doc.section("Sample Path from GC Root");
+ List<PathElement> path = InstanceUtils.getPathFromGcRoot(inst);
// Add 'null' as a marker for the root.
- path.add(null);
- Collections.reverse(path);
+ path.add(0, null);
- HeapTable.TableConfig<Instance> table = new HeapTable.TableConfig<Instance>() {
+ HeapTable.TableConfig<PathElement> table = new HeapTable.TableConfig<PathElement>() {
public String getHeapsDescription() {
- return "Bytes Retained by Heap";
+ return "Bytes Retained by Heap (Dominators Only)";
}
- public long getSize(Instance element, Heap heap) {
+ public long getSize(PathElement element, Heap heap) {
if (element == null) {
return mSnapshot.getHeapSize(heap);
}
- int index = mSnapshot.getHeapIndex(heap);
- return element.getRetainedSize(index);
+ if (element.isDominator) {
+ int index = mSnapshot.getHeapIndex(heap);
+ return element.instance.getRetainedSize(index);
+ }
+ return 0;
}
- public List<HeapTable.ValueConfig<Instance>> getValueConfigs() {
- HeapTable.ValueConfig<Instance> value = new HeapTable.ValueConfig<Instance>() {
+ public List<HeapTable.ValueConfig<PathElement>> getValueConfigs() {
+ HeapTable.ValueConfig<PathElement> value = new HeapTable.ValueConfig<PathElement>() {
public String getDescription() {
- return "Object";
+ return "Path Element";
}
- public DocString render(Instance element) {
+ public DocString render(PathElement element) {
if (element == null) {
return DocString.link(DocString.uri("rooted"), DocString.text("ROOT"));
} else {
- return DocString.text("→ ").append(Value.render(mSnapshot, element));
+ DocString label = DocString.text(" → ");
+ label.append(Value.render(mSnapshot, element.instance));
+ label.append(element.field);
+ return label;
}
}
};
diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java
index 3936f296d3..e08df67b13 100644
--- a/tools/ahat/test-dump/Main.java
+++ b/tools/ahat/test-dump/Main.java
@@ -29,6 +29,16 @@ public class Main {
// collected before we take the heap dump.
public static DumpedStuff stuff;
+ public static class ObjectTree {
+ public ObjectTree left;
+ public ObjectTree right;
+
+ public ObjectTree(ObjectTree left, ObjectTree right) {
+ this.left = left;
+ this.right = right;
+ }
+ }
+
// We will take a heap dump that includes a single instance of this
// DumpedStuff class. Objects stored as fields in this class can be easily
// found in the hprof dump by searching for the instance of the DumpedStuff
@@ -42,6 +52,11 @@ public class Main {
public PhantomReference aPhantomReference = new PhantomReference(anObject, referenceQueue);
public WeakReference aWeakReference = new WeakReference(anObject, referenceQueue);
public byte[] bigArray;
+ public ObjectTree[] gcPathArray = new ObjectTree[]{null, null,
+ new ObjectTree(
+ new ObjectTree(null, new ObjectTree(null, null)),
+ new ObjectTree(null, null)),
+ null};
DumpedStuff() {
int N = 1000000;
@@ -53,6 +68,8 @@ public class Main {
NativeAllocationRegistry registry = new NativeAllocationRegistry(
Main.class.getClassLoader(), 0x12345, 42);
registry.registerNativeAllocation(anObject, 0xABCDABCD);
+
+ gcPathArray[2].right.left = gcPathArray[2].left.right;
}
}
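The gcPathArray field added above is shaped so that one node is reachable through two different routes: the constructor builds gcPathArray[2] with a left and a right subtree, and the last line of DumpedStuff() aliases right.left to left.right. A small Python model of that graph (illustration only, not the dump code) shows why the GC-root path test further below has to accept either branch:

    # Illustrative model of the dumped object graph; Node is a stand-in for ObjectTree.
    class Node:
        def __init__(self, left=None, right=None):
            self.left, self.right = left, right

    base = Node(left=Node(right=Node()), right=Node())   # like gcPathArray[2]
    base.right.left = base.left.right                    # mirrors right.left = left.right
    target = base.left.right
    # Two distinct paths now reach 'target': base.left.right and base.right.left.
    assert base.right.left is target and base.left.right is target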
diff --git a/tools/ahat/test/InstanceUtilsTest.java b/tools/ahat/test/InstanceUtilsTest.java
index 59b1c90e10..ec77e70da1 100644
--- a/tools/ahat/test/InstanceUtilsTest.java
+++ b/tools/ahat/test/InstanceUtilsTest.java
@@ -16,11 +16,16 @@
package com.android.ahat;
+import com.android.tools.perflib.heap.ArrayInstance;
+import com.android.tools.perflib.heap.ClassObj;
import com.android.tools.perflib.heap.Instance;
import java.io.IOException;
+import java.util.List;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import org.junit.Test;
public class InstanceUtilsTest {
@@ -123,4 +128,55 @@ public class InstanceUtilsTest {
assertEquals(referent, InstanceUtils.getReferent(wref));
assertNull(InstanceUtils.getReferent(referent));
}
+
+ @Test
+ public void gcRootPath() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ ClassObj main = dump.getAhatSnapshot().findClass("Main");
+ ArrayInstance gcPathArray = (ArrayInstance)dump.getDumpedThing("gcPathArray");
+ Object[] values = gcPathArray.getValues();
+ Instance base = (Instance)values[2];
+ Instance left = InstanceUtils.getRefField(base, "left");
+ Instance right = InstanceUtils.getRefField(base, "right");
+ Instance target = InstanceUtils.getRefField(left, "right");
+
+ List<InstanceUtils.PathElement> path = InstanceUtils.getPathFromGcRoot(target);
+ assertEquals(6, path.size());
+
+ assertEquals(main, path.get(0).instance);
+ assertEquals(".stuff", path.get(0).field);
+ assertTrue(path.get(0).isDominator);
+
+ assertEquals(".gcPathArray", path.get(1).field);
+ assertTrue(path.get(1).isDominator);
+
+ assertEquals(gcPathArray, path.get(2).instance);
+ assertEquals("[2]", path.get(2).field);
+ assertTrue(path.get(2).isDominator);
+
+ assertEquals(base, path.get(3).instance);
+ assertTrue(path.get(3).isDominator);
+
+ // There are two possible paths. Either it can go through the 'left' node,
+ // or the 'right' node.
+ if (path.get(3).field.equals(".left")) {
+ assertEquals(".left", path.get(3).field);
+
+ assertEquals(left, path.get(4).instance);
+ assertEquals(".right", path.get(4).field);
+ assertFalse(path.get(4).isDominator);
+
+ } else {
+ assertEquals(".right", path.get(3).field);
+
+ assertEquals(right, path.get(4).instance);
+ assertEquals(".left", path.get(4).field);
+ assertFalse(path.get(4).isDominator);
+ }
+
+ assertEquals(target, path.get(5).instance);
+ assertEquals("", path.get(5).field);
+ assertTrue(path.get(5).isDominator);
+ }
}
diff --git a/tools/bisection_search/README.md b/tools/bisection_search/README.md
index 64ccb206c4..d641102d1f 100644
--- a/tools/bisection_search/README.md
+++ b/tools/bisection_search/README.md
@@ -21,12 +21,14 @@ There are two supported invocation modes:
./bisection_search.py -cp classes.dex --expected-output out_int --class Test
-2. Raw-cmd invocation, dalvikvm command is accepted as an argument. The command
- has to start with an executable.
+2. Raw-cmd invocation, dalvikvm command is accepted as an argument.
Extra dalvikvm arguments will be placed on second position in the command
by default. {ARGS} tag can be used to specify a custom position.
+ If used in device mode, the command has to exec a dalvikvm instance. Bisection
+ will fail if the PID of the process started by raw-cmd differs from the PID of the runtime.
+
./bisection_search.py --raw-cmd='run.sh -cp classes.dex Test' --expected-retcode SUCCESS
./bisection_search.py --raw-cmd='/bin/sh art {ARGS} -cp classes.dex Test' --expected-retcode SUCCESS
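As described above, extra dalvikvm arguments are spliced into the raw command either at the {ARGS} tag or, by default, right after the executable. A rough sketch of that placement rule (an illustration under those assumptions, not the script's own _PrepareCmd):

    # Hedged sketch of {ARGS} placement; raw_cmd is the already-split command list.
    def insert_runtime_args(raw_cmd, extra_args, tag='{ARGS}'):
        if tag in raw_cmd:
            pos = raw_cmd.index(tag)
            return raw_cmd[:pos] + extra_args + raw_cmd[pos + 1:]
        return raw_cmd[:1] + extra_args + raw_cmd[1:]   # default: second position

    print(insert_runtime_args(['/bin/sh', 'art', '{ARGS}', '-cp', 'classes.dex', 'Test'],
                              ['-Xmx512m']))
    # -> ['/bin/sh', 'art', '-Xmx512m', '-cp', 'classes.dex', 'Test']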
diff --git a/tools/bisection_search/bisection_search.py b/tools/bisection_search/bisection_search.py
index 0d36aa4622..27bd599aaa 100755
--- a/tools/bisection_search/bisection_search.py
+++ b/tools/bisection_search/bisection_search.py
@@ -24,17 +24,23 @@ Example usage:
import abc
import argparse
+import os
import re
import shlex
-from subprocess import call
import sys
+
+from subprocess import call
from tempfile import NamedTemporaryFile
-from common import DeviceTestEnv
-from common import FatalError
-from common import GetEnvVariableOrError
-from common import HostTestEnv
-from common import RetCode
+sys.path.append(os.path.dirname(os.path.dirname(
+ os.path.realpath(__file__))))
+
+from common.common import DeviceTestEnv
+from common.common import FatalError
+from common.common import GetEnvVariableOrError
+from common.common import HostTestEnv
+from common.common import LogSeverity
+from common.common import RetCode
# Passes that are never disabled during search process because disabling them
@@ -57,6 +63,9 @@ NON_PASSES = ['builder', 'prepare_for_register_allocation',
# position in the command.
RAW_CMD_RUNTIME_ARGS_TAG = '{ARGS}'
+# Default core image path relative to ANDROID_HOST_OUT.
+DEFAULT_IMAGE_RELATIVE_PATH = '/framework/core.art'
+
class Dex2OatWrapperTestable(object):
"""Class representing a testable compilation.
@@ -104,10 +113,9 @@ class Dex2OatWrapperTestable(object):
print('Testing methods: {0} passes: {1}.'.format(
compiled_methods, passes_to_run))
cmd = self._PrepareCmd(compiled_methods=compiled_methods,
- passes_to_run=passes_to_run,
- verbose_compiler=False)
+ passes_to_run=passes_to_run)
(output, ret_code) = self._test_env.RunCommand(
- cmd, {'ANDROID_LOG_TAGS': '*:e'})
+ cmd, LogSeverity.ERROR)
res = True
if self._expected_retcode:
res = self._expected_retcode == ret_code
@@ -126,8 +134,8 @@ class Dex2OatWrapperTestable(object):
Raises:
FatalError: An error occurred when retrieving methods list.
"""
- cmd = self._PrepareCmd(verbose_compiler=True)
- (output, _) = self._test_env.RunCommand(cmd, {'ANDROID_LOG_TAGS': '*:i'})
+ cmd = self._PrepareCmd()
+ (output, _) = self._test_env.RunCommand(cmd, LogSeverity.INFO)
match_methods = re.findall(r'Building ([^\n]+)\n', output)
if not match_methods:
raise FatalError('Failed to retrieve methods list. '
@@ -146,17 +154,15 @@ class Dex2OatWrapperTestable(object):
Raises:
FatalError: An error occurred when retrieving passes list.
"""
- cmd = self._PrepareCmd(compiled_methods=[compiled_method],
- verbose_compiler=True)
- (output, _) = self._test_env.RunCommand(cmd, {'ANDROID_LOG_TAGS': '*:i'})
+ cmd = self._PrepareCmd(compiled_methods=[compiled_method])
+ (output, _) = self._test_env.RunCommand(cmd, LogSeverity.INFO)
match_passes = re.findall(r'Starting pass: ([^\n]+)\n', output)
if not match_passes:
raise FatalError('Failed to retrieve passes list. '
'Not recognized output format.')
return [p for p in match_passes if p not in NON_PASSES]
- def _PrepareCmd(self, compiled_methods=None, passes_to_run=None,
- verbose_compiler=False):
+ def _PrepareCmd(self, compiled_methods=None, passes_to_run=None):
"""Prepare command to run."""
cmd = self._base_cmd[0:self._arguments_position]
# insert additional arguments before the first argument
@@ -168,9 +174,8 @@ class Dex2OatWrapperTestable(object):
self._test_env.WriteLines(self._passes_to_run_path, passes_to_run)
cmd += ['-Xcompiler-option', '--run-passes={0}'.format(
self._passes_to_run_path)]
- if verbose_compiler:
- cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option',
- '-verbose:compiler', '-Xcompiler-option', '-j1']
+ cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option',
+ '-verbose:compiler', '-Xcompiler-option', '-j1']
cmd += self._base_cmd[self._arguments_position:]
return cmd
@@ -289,8 +294,7 @@ def BugSearch(testable):
if faulty_method_idx == len(all_methods) + 1:
return (None, None)
if faulty_method_idx == 0:
- raise FatalError('Testable fails with no methods compiled. '
- 'Perhaps issue lies outside of compiler.')
+ raise FatalError('Testable fails with no methods compiled.')
faulty_method = all_methods[faulty_method_idx - 1]
all_passes = testable.GetAllPassesForMethod(faulty_method)
faulty_pass_idx = BinarySearch(
@@ -361,8 +365,8 @@ def PrepareBaseCommand(args, classpath):
if not args.device:
base_cmd += ['-XXlib:{0}'.format(args.lib)]
if not args.image:
- image_path = '{0}/framework/core-optimizing-pic.art'.format(
- GetEnvVariableOrError('ANDROID_HOST_OUT'))
+ image_path = (GetEnvVariableOrError('ANDROID_HOST_OUT') +
+ DEFAULT_IMAGE_RELATIVE_PATH)
else:
image_path = args.image
base_cmd += ['-Ximage:{0}'.format(image_path)]
@@ -410,7 +414,11 @@ def main():
try:
testable = Dex2OatWrapperTestable(base_cmd, test_env, args.expected_retcode,
output_checker, args.verbose)
- (method, opt_pass) = BugSearch(testable)
+ if testable.Test(compiled_methods=[]):
+ (method, opt_pass) = BugSearch(testable)
+ else:
+ print('Testable fails with no methods compiled.')
+ sys.exit(1)
except Exception as e:
print('Error occurred.\nLogfile: {0}'.format(test_env.logfile.name))
test_env.logfile.write('Exception: {0}\n'.format(e))
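The guard added above only starts BugSearch() once the testable passes with no methods compiled, because the whole bisection rests on that baseline: it then narrows down the first prefix of compiled methods that breaks the run. A simplified sketch of that narrowing step (the real script bisects methods and then optimization passes through its own BinarySearch helper):

    # Hedged sketch; passes(prefix) is a hypothetical predicate that compiles only
    # the given methods and reports whether the testable still behaves correctly.
    def find_culprit(methods, passes):
        lo, hi = 0, len(methods)             # passes(methods[:0]) already verified
        if passes(methods[:hi]):
            return None                      # compiling everything still works
        while hi - lo > 1:                   # invariant: prefix lo passes, prefix hi fails
            mid = (lo + hi) // 2
            if passes(methods[:mid]):
                lo = mid
            else:
                hi = mid
        return methods[hi - 1]               # first method whose compilation triggers the failure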
diff --git a/tools/javafuzz/__init__.py b/tools/common/__init__.py
index 3955c712ab..3955c712ab 100644
--- a/tools/javafuzz/__init__.py
+++ b/tools/common/__init__.py
diff --git a/tools/bisection_search/common.py b/tools/common/common.py
index b69b60668b..b822dcadb7 100755
--- a/tools/bisection_search/common.py
+++ b/tools/common/common.py
@@ -21,7 +21,12 @@ import os
import signal
import shlex
import shutil
+import time
+from enum import Enum
+from enum import unique
+
+from subprocess import DEVNULL
from subprocess import check_call
from subprocess import PIPE
from subprocess import Popen
@@ -31,9 +36,6 @@ from subprocess import TimeoutExpired
from tempfile import mkdtemp
from tempfile import NamedTemporaryFile
-from enum import Enum
-from enum import unique
-
# Temporary directory path on device.
DEVICE_TMP_PATH = '/data/local/tmp'
@@ -51,6 +53,48 @@ class RetCode(Enum):
NOTRUN = 4
+@unique
+class LogSeverity(Enum):
+ VERBOSE = 0
+ DEBUG = 1
+ INFO = 2
+ WARNING = 3
+ ERROR = 4
+ FATAL = 5
+ SILENT = 6
+
+ @property
+ def symbol(self):
+ return self.name[0]
+
+ @classmethod
+ def FromSymbol(cls, s):
+ for log_severity in LogSeverity:
+ if log_severity.symbol == s:
+ return log_severity
+ raise ValueError("{0} is not a valid log severity symbol".format(s))
+
+ def __ge__(self, other):
+ if self.__class__ is other.__class__:
+ return self.value >= other.value
+ return NotImplemented
+
+ def __gt__(self, other):
+ if self.__class__ is other.__class__:
+ return self.value > other.value
+ return NotImplemented
+
+ def __le__(self, other):
+ if self.__class__ is other.__class__:
+ return self.value <= other.value
+ return NotImplemented
+
+ def __lt__(self, other):
+ if self.__class__ is other.__class__:
+ return self.value < other.value
+ return NotImplemented
+
+
def GetEnvVariableOrError(variable_name):
"""Gets value of an environmental variable.
@@ -72,6 +116,14 @@ def GetEnvVariableOrError(variable_name):
return top
+def GetJackClassPath():
+ """Returns Jack's classpath."""
+ top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
+ libdir = top + '/out/host/common/obj/JAVA_LIBRARIES'
+ return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \
+ + libdir + '/core-oj-hostdex_intermediates/classes.jack'
+
+
def _DexArchCachePaths(android_data_path):
"""Returns paths to architecture specific caches.
@@ -116,23 +168,44 @@ def RunCommandForOutput(cmd, env, stdout, stderr, timeout=60):
return (output, stderr_output, retcode)
-def _RunCommandForOutputAndLog(cmd, env, logfile, timeout=60):
- """Runs command and logs its output. Returns the output.
+def _LogCmdOutput(logfile, cmd, output, retcode):
+ """Logs output of a command.
Args:
- cmd: list of strings, command to run.
- env: shell environment to run the command with.
logfile: file handle to logfile.
- timeout: int, timeout in seconds.
-
- Returns:
- tuple (string, string, RetCode) stdout output, stderr output, normalized
- return code.
+ cmd: list of strings, command.
+ output: command output.
+ retcode: RetCode, normalized retcode.
"""
- (output, _, retcode) = RunCommandForOutput(cmd, env, PIPE, STDOUT, timeout)
logfile.write('Command:\n{0}\n{1}\nReturn code: {2}\n'.format(
CommandListToCommandString(cmd), output, retcode))
- return (output, retcode)
+
+
+def RunCommand(cmd, out, err, timeout=5):
+ """Executes a command, and returns its return code.
+
+ Args:
+ cmd: list of strings, a command to execute
+ out: string, file name to open for stdout (or None)
+ err: string, file name to open for stderr (or None)
+ timeout: int, time out in seconds
+ Returns:
+ RetCode, return code of running command (forced RetCode.TIMEOUT
+ on timeout)
+ """
+ devnull = DEVNULL
+ outf = devnull
+ if out is not None:
+ outf = open(out, mode='w')
+ errf = devnull
+ if err is not None:
+ errf = open(err, mode='w')
+ (_, _, retcode) = RunCommandForOutput(cmd, None, outf, errf, timeout)
+ if outf != devnull:
+ outf.close()
+ if errf != devnull:
+ errf.close()
+ return retcode
def CommandListToCommandString(cmd):
@@ -187,15 +260,14 @@ class ITestEnv(object):
"""
@abc.abstractmethod
- def RunCommand(self, cmd, env_updates=None):
- """Runs command in environment with updated environmental variables.
+ def RunCommand(self, cmd, log_severity=LogSeverity.ERROR):
+ """Runs command in environment.
Args:
cmd: list of strings, command to run.
- env_updates: dict, string to string, maps names of variables to their
- updated values.
+ log_severity: LogSeverity, minimum severity of logs included in output.
Returns:
- tuple (string, string, int) stdout output, stderr output, return code.
+ tuple (string, int) output, return code.
"""
@abc.abstractproperty
@@ -262,13 +334,17 @@ class HostTestEnv(ITestEnv):
f.writelines('{0}\n'.format(line) for line in lines)
return
- def RunCommand(self, cmd, env_updates=None):
- if not env_updates:
- env_updates = {}
+ def RunCommand(self, cmd, log_severity=LogSeverity.ERROR):
self._EmptyDexCache()
env = self._shell_env.copy()
- env.update(env_updates)
- return _RunCommandForOutputAndLog(cmd, env, self._logfile, self._timeout)
+ env.update({'ANDROID_LOG_TAGS':'*:' + log_severity.symbol.lower()})
+ (output, err_output, retcode) = RunCommandForOutput(
+ cmd, env, PIPE, PIPE, self._timeout)
+ # We append err_output to output to stay consistent with DeviceTestEnv
+ # implementation.
+ output += err_output
+ _LogCmdOutput(self._logfile, cmd, output, retcode)
+ return (output, retcode)
@property
def logfile(self):
@@ -341,26 +417,63 @@ class DeviceTestEnv(ITestEnv):
self._AdbPush(temp_file.name, file_path)
return
- def RunCommand(self, cmd, env_updates=None):
- if not env_updates:
- env_updates = {}
+ def _ExtractPid(self, brief_log_line):
+ """Extracts PID from a single logcat line in brief format."""
+ pid_start_idx = brief_log_line.find('(') + 2
+ if pid_start_idx == -1:
+ return None
+ pid_end_idx = brief_log_line.find(')', pid_start_idx)
+ if pid_end_idx == -1:
+ return None
+ return brief_log_line[pid_start_idx:pid_end_idx]
+
+ def _ExtractSeverity(self, brief_log_line):
+ """Extracts LogSeverity from a single logcat line in brief format."""
+ if not brief_log_line:
+ return None
+ return LogSeverity.FromSymbol(brief_log_line[0])
+
+ def RunCommand(self, cmd, log_severity=LogSeverity.ERROR):
self._EmptyDexCache()
- if 'ANDROID_DATA' not in env_updates:
- env_updates['ANDROID_DATA'] = self._device_env_path
- env_updates_cmd = ' '.join(['{0}={1}'.format(var, val) for var, val
- in env_updates.items()])
- cmd = CommandListToCommandString(cmd)
- adb = 'adb'
+ env_vars_cmd = 'ANDROID_DATA={0} ANDROID_LOG_TAGS=*:i'.format(
+ self._device_env_path)
+ adb_cmd = ['adb']
if self._specific_device:
- adb += ' -s ' + self._specific_device
- cmd = '{0} shell "logcat -c && {1} {2}"'.format(
- adb, env_updates_cmd, cmd)
- (output, retcode) = _RunCommandForOutputAndLog(
- shlex.split(cmd), self._shell_env, self._logfile, self._timeout)
- logcat_cmd = 'adb shell "logcat -d -s -b main dex2oat:* dex2oatd:*"'
- (err_output, _) = _RunCommandForOutputAndLog(
- shlex.split(logcat_cmd), self._shell_env, self._logfile)
- return (output + err_output, retcode)
+ adb_cmd += ['-s', self._specific_device]
+ logcat_cmd = adb_cmd + ['logcat', '-v', 'brief', '-s', '-b', 'main',
+ '-T', '1', 'dex2oat:*', 'dex2oatd:*']
+ logcat_proc = Popen(logcat_cmd, stdout=PIPE, stderr=STDOUT,
+ universal_newlines=True)
+ cmd_str = CommandListToCommandString(cmd)
+ # Print PID of the shell and exec command. We later retrieve this PID and
+ # use it to filter dex2oat logs, keeping those with matching parent PID.
+ device_cmd = ('echo $$ && ' + env_vars_cmd + ' exec ' + cmd_str)
+ cmd = adb_cmd + ['shell', device_cmd]
+ (output, _, retcode) = RunCommandForOutput(cmd, self._shell_env, PIPE,
+ STDOUT, self._timeout)
+ # We need to make sure to only kill logcat once all relevant logs arrive.
+ # Sleep is used for simplicity.
+ time.sleep(0.5)
+ logcat_proc.kill()
+ end_of_first_line = output.find('\n')
+ if end_of_first_line != -1:
+ parent_pid = output[:end_of_first_line]
+ output = output[end_of_first_line + 1:]
+ logcat_output, _ = logcat_proc.communicate()
+ logcat_lines = logcat_output.splitlines(keepends=True)
+ dex2oat_pids = []
+ for line in logcat_lines:
+ # Dex2oat was started by our runtime instance.
+ if 'Running dex2oat (parent PID = ' + parent_pid in line:
+ dex2oat_pids.append(self._ExtractPid(line))
+ break
+ if dex2oat_pids:
+ for line in logcat_lines:
+ if (self._ExtractPid(line) in dex2oat_pids and
+ self._ExtractSeverity(line) >= log_severity):
+ output += line
+ _LogCmdOutput(self._logfile, cmd, output, retcode)
+ return (output, retcode)
@property
def logfile(self):
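DeviceTestEnv.RunCommand() above now reads dex2oat output from `logcat -v brief`, keeps only lines whose PID belongs to a dex2oat spawned by our shell (matched via the echoed parent PID), and drops anything below the requested LogSeverity. A small illustration of that filtering, using the LogSeverity enum introduced in this file; the sample lines are made up, and the snippet assumes tools/ is on sys.path as the scripts in this patch arrange:

    # Illustration only, not the module's own parser; brief format is
    # "<severity>/<tag>(<pid>): <message>".
    from common.common import LogSeverity

    def keep_line(line, wanted_pids, min_severity):
        sev = LogSeverity.FromSymbol(line[0])
        pid = line[line.find('(') + 1:line.find(')')].strip()
        return pid in wanted_pids and sev >= min_severity

    lines = ['I/dex2oat( 1234): Running dex2oat (parent PID = 1200)',
             'E/dex2oat( 1234): an error message from the compiler',
             'W/installd( 4321): unrelated message']
    print([l for l in lines if keep_line(l, {'1234'}, LogSeverity.WARNING)])
    # -> only the E/dex2oat line survives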
diff --git a/tools/javafuzz/Android.mk b/tools/jfuzz/Android.mk
index 63db57ab18..c7002d67ec 100644
--- a/tools/javafuzz/Android.mk
+++ b/tools/jfuzz/Android.mk
@@ -12,14 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Java fuzzer tool.
+# Fuzzer tool.
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := javafuzz.cc
+LOCAL_SRC_FILES := jfuzz.cc
LOCAL_CFLAGS += -O0 -g -Wall
LOCAL_MODULE_HOST_OS := darwin linux windows
-LOCAL_MODULE := javafuzz
+LOCAL_MODULE := jfuzz
include $(BUILD_HOST_EXECUTABLE)
diff --git a/tools/javafuzz/README.md b/tools/jfuzz/README.md
index b08075a9d8..c87e714050 100644
--- a/tools/javafuzz/README.md
+++ b/tools/jfuzz/README.md
@@ -1,20 +1,20 @@
-JavaFuzz
-========
+JFuzz
+=====
-JavaFuzz is a tool for generating random Java programs with the objective
-of fuzz testing the ART infrastructure. Each randomly generated Java program
+JFuzz is a tool for generating random programs with the objective
+of fuzz testing the ART infrastructure. Each randomly generated program
can be run under various modes of execution, such as using the interpreter,
using the optimizing compiler, using an external reference implementation,
or using various target architectures. Any difference between the outputs
(**divergence**) may indicate a bug in one of the execution modes.
-JavaFuzz can be combined with dexfuzz to get multi-layered fuzz testing.
+JFuzz can be combined with DexFuzz to get multi-layered fuzz testing.
-How to run JavaFuzz
-===================
+How to run JFuzz
+================
- javafuzz [-s seed] [-d expr-depth] [-l stmt-length]
- [-i if-nest] [-n loop-nest]
+ jfuzz [-s seed] [-d expr-depth] [-l stmt-length]
+ [-i if-nest] [-n loop-nest] [-v] [-h]
where
@@ -28,33 +28,67 @@ where
(higher values yield deeper nested conditionals)
-n : defines a fuzzing nest for for/while/do-while loops
(higher values yield deeper nested loops)
+ -v : prints version number and exits
+ -h : prints help and exits
-The current version of JavaFuzz sends all output to stdout, and uses
+The current version of JFuzz sends all output to stdout, and uses
a fixed testing class named Test. So a typical test run looks as follows.
- javafuzz > Test.java
+ jfuzz > Test.java
jack -cp ${JACK_CLASSPATH} --output-dex . Test.java
art -classpath classes.dex Test
-How to start the JavaFuzz tests
-===============================
+How to start JFuzz testing
+==========================
- run_java_fuzz_test.py
- [--num_tests=#TESTS]
+ run_jfuzz_test.py
+ [--num_tests=NUM_TESTS]
[--device=DEVICE]
[--mode1=MODE] [--mode2=MODE]
+ [--report_script=SCRIPT]
+ [--jfuzz_arg=ARG]
+ [--true_divergence]
where
- --num_tests : number of tests to run (10000 by default)
- --device : target device serial number (passed to adb -s)
- --mode1 : m1
- --mode2 : m2, with m1 != m2, and values one of
+ --num_tests : number of tests to run (10000 by default)
+ --device : target device serial number (passed to adb -s)
+ --mode1 : m1
+ --mode2 : m2, with m1 != m2, and values one of
ri = reference implementation on host (default for m1)
hint = Art interpreter on host
hopt = Art optimizing on host (default for m2)
tint = Art interpreter on target
topt = Art optimizing on target
+ --report_script : path to script called for each divergence
+ --jfuzz_arg : argument for jfuzz
+ --true_divergence : don't bisect timeout divergences
+
+How to start JFuzz nightly testing
+==================================
+
+ run_jfuzz_test_nightly.py
+ [--num_proc NUM_PROC]
+
+where
+
+ --num_proc : number of run_jfuzz_test.py instances to run (8 by default)
+
+Remaining arguments are passed to run\_jfuzz_test.py.
+
+How to start J/DexFuzz testing (multi-layered)
+==============================================
+
+ run_dex_fuzz_test.py
+ [--num_tests=NUM_TESTS]
+ [--num_inputs=NUM_INPUTS]
+ [--device=DEVICE]
+
+where
+
+ --num_tests : number of tests to run (10000 by default)
+ --num_inputs: number of JFuzz programs to generate
+ --device : target device serial number (passed to adb -s)
Background
==========
diff --git a/build/Android.common_utils.mk b/tools/jfuzz/__init__.py
index 8069c3a9b4..3955c712ab 100644
--- a/build/Android.common_utils.mk
+++ b/tools/jfuzz/__init__.py
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2014 The Android Open Source Project
+# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,13 +14,4 @@
# limitations under the License.
#
-ifndef ART_ANDROID_COMMON_UTILS_MK
-ART_ANDROID_COMMON_UTILS_MK = true
-
-#
-# Convert a string into an uppercase string.
-#
-# $(1): a string which should be made uppercase
-art-string-to-uppercase = $(shell echo $(1) | tr '[:lower:]' '[:upper:]')
-
-endif # ART_ANDROID_COMMON_UTILS_MK
+# This file is intentionally left empty. It indicates that the directory is a Python package.
\ No newline at end of file
diff --git a/tools/javafuzz/javafuzz.cc b/tools/jfuzz/jfuzz.cc
index 161ae0a178..82683f2186 100644
--- a/tools/javafuzz/javafuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -21,12 +21,13 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
-#include <time.h>
+
+#include <sys/time.h>
namespace {
/*
- * Java operators.
+ * Operators.
*/
#define EMIT(x) fputs((x)[random0(sizeof(x)/sizeof(const char*))], out_);
@@ -49,33 +50,34 @@ static constexpr const char* kBoolRelOps[] = { "==", "!=" };
static constexpr const char* kRelOps[] = { "==", "!=", ">", ">=", "<", "<=" };
/*
- * Version of JavaFuzz. Increase this each time changes are made to the program
- * to preserve the property that a given version of JavaFuzz yields the same
- * fuzzed Java program for a deterministic random seed.
+ * Version of JFuzz. Increase this each time changes are made to the program
+ * to preserve the property that a given version of JFuzz yields the same
+ * fuzzed program for a deterministic random seed.
*/
-const char* VERSION = "1.1";
+const char* VERSION = "1.2";
-static const uint32_t MAX_DIMS[11] = { 0, 1000, 32, 10, 6, 4, 3, 3, 2, 2, 2 };
+/*
+ * Maximum number of array dimensions, together with corresponding maximum size
+ * within each dimension (to keep memory/runtime requirements roughly the same).
+ */
+static const uint32_t kMaxDim = 10;
+static const uint32_t kMaxDimSize[kMaxDim + 1] = { 0, 1000, 32, 10, 6, 4, 3, 3, 2, 2, 2 };
/**
- * A class that generates a random Java program that compiles correctly. The program
+ * A class that generates a random program that compiles correctly. The program
* is generated using rules that generate various programming constructs. Each rule
* has a fixed probability to "fire". Running a generated program yields deterministic
 * output, making it suited to test various modes of execution (e.g. an interpreter vs.
 * a compiler or two different runtimes) for divergences.
- *
- * TODO: Due to the original scope of this project, the generated Java program is heavy
- * on loops, arrays, and basic operations; fuzzing other aspects of Java programs,
- * like elaborate typing, class hierarchies, and interfaces is still TBD.
*/
-class JavaFuzz {
+class JFuzz {
public:
- JavaFuzz(FILE* out,
- uint32_t seed,
- uint32_t expr_depth,
- uint32_t stmt_length,
- uint32_t if_nest,
- uint32_t loop_nest)
+ JFuzz(FILE* out,
+ uint32_t seed,
+ uint32_t expr_depth,
+ uint32_t stmt_length,
+ uint32_t if_nest,
+ uint32_t loop_nest)
: out_(out),
fuzz_random_engine_(seed),
fuzz_seed_(seed),
@@ -85,8 +87,8 @@ class JavaFuzz {
fuzz_loop_nest_(loop_nest),
return_type_(randomType()),
array_type_(randomType()),
- array_dim_(random1(10)),
- array_size_(random1(MAX_DIMS[array_dim_])),
+ array_dim_(random1(kMaxDim)),
+ array_size_(random1(kMaxDimSize[array_dim_])),
indentation_(0),
expr_depth_(0),
stmt_length_(0),
@@ -98,9 +100,10 @@ class JavaFuzz {
int_local_(0),
long_local_(0),
float_local_(0),
- double_local_(0) { }
+ double_local_(0),
+ in_inner_(false) { }
- ~JavaFuzz() { }
+ ~JFuzz() { }
void emitProgram() {
emitHeader();
@@ -378,6 +381,27 @@ class JavaFuzz {
}
}
+ // Emit a method call (out type given).
+ void emitMethodCall(Type tp) {
+ if (tp != kBoolean && !in_inner_) {
+ // Accept all numerical types (implicit conversion) and when not
+ // declaring inner classes (to avoid infinite recursion).
+ switch (random1(8)) {
+ case 1: fputs("mA.a()", out_); break;
+ case 2: fputs("mB.a()", out_); break;
+ case 3: fputs("mB.x()", out_); break;
+ case 4: fputs("mBX.x()", out_); break;
+ case 5: fputs("mC.s()", out_); break;
+ case 6: fputs("mC.c()", out_); break;
+ case 7: fputs("mC.x()", out_); break;
+ case 8: fputs("mCX.x()", out_); break;
+ }
+ } else {
+ // Fall back to intrinsic.
+ emitIntrinsic(tp);
+ }
+ }
+
// Emit unboxing boxed object.
void emitUnbox(Type tp) {
fputc('(', out_);
@@ -392,7 +416,7 @@ class JavaFuzz {
// Emit miscellaneous constructs.
void emitMisc(Type tp) {
if (tp == kBoolean) {
- fputs("this instanceof Test", out_);
+ fprintf(out_, "this instanceof %s", in_inner_ ? "X" : "Test");
} else if (isInteger(tp)) {
const char* prefix = tp == kLong ? "Long" : "Integer";
switch (random1(2)) {
@@ -572,10 +596,14 @@ class JavaFuzz {
emitIntrinsic(tp);
break;
case 7:
+ // Method call: mA.a()
+ emitMethodCall(tp);
+ break;
+ case 8:
// Emit unboxing boxed value: (int) Integer(x)
emitUnbox(tp);
break;
- case 8:
+ case 9:
// Miscellaneous constructs: a.length
emitMisc(tp);
break;
@@ -870,8 +898,52 @@ class JavaFuzz {
return true;
}
+ // Emit interface and class declarations.
+ void emitClassDecls() {
+ in_inner_ = true;
+ fputs(" private interface X {\n", out_);
+ fputs(" int x();\n", out_);
+ fputs(" }\n\n", out_);
+ fputs(" private class A {\n", out_);
+ fputs(" public int a() {\n", out_);
+ fputs(" return ", out_);
+ emitExpression(kInt);
+ fputs(";\n }\n", out_);
+ fputs(" }\n\n", out_);
+ fputs(" private class B extends A implements X {\n", out_);
+ fputs(" public int a() {\n", out_);
+ fputs(" return super.a() + ", out_);
+ emitExpression(kInt);
+ fputs(";\n }\n", out_);
+ fputs(" public int x() {\n", out_);
+ fputs(" return ", out_);
+ emitExpression(kInt);
+ fputs(";\n }\n", out_);
+ fputs(" }\n\n", out_);
+ fputs(" private static class C implements X {\n", out_);
+ fputs(" public static int s() {\n", out_);
+ fputs(" return ", out_);
+ emitLiteral(kInt);
+ fputs(";\n }\n", out_);
+ fputs(" public int c() {\n", out_);
+ fputs(" return ", out_);
+ emitLiteral(kInt);
+ fputs(";\n }\n", out_);
+ fputs(" public int x() {\n", out_);
+ fputs(" return ", out_);
+ emitLiteral(kInt);
+ fputs(";\n }\n", out_);
+ fputs(" }\n\n", out_);
+ in_inner_ = false;
+ }
+
// Emit field declarations.
void emitFieldDecls() {
+ fputs(" private A mA = new B();\n", out_);
+ fputs(" private B mB = new B();\n", out_);
+ fputs(" private X mBX = new B();\n", out_);
+ fputs(" private C mC = new C();\n", out_);
+ fputs(" private X mCX = new C();\n\n", out_);
fputs(" private boolean mZ = false;\n", out_);
fputs(" private int mI = 0;\n", out_);
fputs(" private long mJ = 0;\n", out_);
@@ -978,10 +1050,10 @@ class JavaFuzz {
// Emit program header. Emit command line options in the comments.
void emitHeader() {
- fputs("\n/**\n * AOSP Java Fuzz Tester.\n", out_);
- fputs(" * Automatically generated Java program.\n", out_);
+ fputs("\n/**\n * AOSP JFuzz Tester.\n", out_);
+ fputs(" * Automatically generated program.\n", out_);
fprintf(out_,
- " * javafuzz -s %u -d %u -l %u -i %u -n %u (version %s)\n */\n\n",
+ " * jfuzz -s %u -d %u -l %u -i %u -n %u (version %s)\n */\n\n",
fuzz_seed_,
fuzz_expr_depth_,
fuzz_stmt_length_,
@@ -995,6 +1067,7 @@ class JavaFuzz {
void emitTestClassWithMain() {
fputs("public class Test {\n\n", out_);
indentation_ += 2;
+ emitClassDecls();
emitFieldDecls();
emitArrayDecl();
emitTestConstructor();
@@ -1053,13 +1126,18 @@ class JavaFuzz {
uint32_t long_local_;
uint32_t float_local_;
uint32_t double_local_;
+ bool in_inner_;
};
} // anonymous namespace
int32_t main(int32_t argc, char** argv) {
+ // Time-based seed.
+ struct timeval tp;
+ gettimeofday(&tp, NULL);
+
// Defaults.
- uint32_t seed = time(NULL);
+ uint32_t seed = (tp.tv_sec * 1000000 + tp.tv_usec);
uint32_t expr_depth = 1;
uint32_t stmt_length = 8;
uint32_t if_nest = 2;
@@ -1067,7 +1145,7 @@ int32_t main(int32_t argc, char** argv) {
// Parse options.
while (1) {
- int32_t option = getopt(argc, argv, "s:d:l:i:n:h");
+ int32_t option = getopt(argc, argv, "s:d:l:i:n:vh");
if (option < 0) {
break; // done
}
@@ -1087,12 +1165,15 @@ int32_t main(int32_t argc, char** argv) {
case 'n':
loop_nest = strtoul(optarg, nullptr, 0);
break;
+ case 'v':
+ fprintf(stderr, "jfuzz version %s\n", VERSION);
+ return 0;
case 'h':
default:
fprintf(stderr,
"usage: %s [-s seed] "
"[-d expr-depth] [-l stmt-length] "
- "[-i if-nest] [-n loop-nest] [-h]\n",
+ "[-i if-nest] [-n loop-nest] [-v] [-h]\n",
argv[0]);
return 1;
}
@@ -1101,8 +1182,8 @@ int32_t main(int32_t argc, char** argv) {
// Seed global random generator.
srand(seed);
- // Generate fuzzed Java program.
- JavaFuzz fuzz(stdout, seed, expr_depth, stmt_length, if_nest, loop_nest);
+ // Generate fuzzed program.
+ JFuzz fuzz(stdout, seed, expr_depth, stmt_length, if_nest, loop_nest);
fuzz.emitProgram();
return 0;
}
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
new file mode 100755
index 0000000000..56cdf02d15
--- /dev/null
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3.4
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import shutil
+import sys
+
+from subprocess import check_call
+from tempfile import mkdtemp
+
+sys.path.append(os.path.dirname(os.path.dirname(
+ os.path.realpath(__file__))))
+
+from common.common import FatalError
+from common.common import GetJackClassPath
+from common.common import RetCode
+from common.common import RunCommand
+
+
+#
+# Tester class.
+#
+
+
+class DexFuzzTester(object):
+ """Tester that feeds JFuzz programs into DexFuzz testing."""
+
+ def __init__(self, num_tests, num_inputs, device):
+ """Constructor for the tester.
+
+ Args:
+ num_tests: int, number of tests to run
+ num_inputs: int, number of JFuzz programs to generate
+ device: string, target device serial number (or None)
+ """
+ self._num_tests = num_tests
+ self._num_inputs = num_inputs
+ self._device = device
+ self._save_dir = None
+ self._results_dir = None
+ self._dexfuzz_dir = None
+ self._inputs_dir = None
+
+ def __enter__(self):
+ """On entry, enters new temp directory after saving current directory.
+
+ Raises:
+ FatalError: error when temp directory cannot be constructed
+ """
+ self._save_dir = os.getcwd()
+ self._results_dir = mkdtemp(dir='/tmp/')
+ self._dexfuzz_dir = mkdtemp(dir=self._results_dir)
+ self._inputs_dir = mkdtemp(dir=self._dexfuzz_dir)
+ if self._results_dir is None or self._dexfuzz_dir is None or \
+ self._inputs_dir is None:
+ raise FatalError('Cannot obtain temp directory')
+ os.chdir(self._dexfuzz_dir)
+ return self
+
+ def __exit__(self, etype, evalue, etraceback):
+ """On exit, re-enters previously saved current directory and cleans up."""
+ os.chdir(self._save_dir)
+ # TODO: detect divergences or shutil.rmtree(self._results_dir)
+
+ def Run(self):
+ """Feeds JFuzz programs into DexFuzz testing."""
+ print()
+ print('**\n**** JFuzz Testing\n**')
+ print()
+ print('#Tests :', self._num_tests)
+ print('Device :', self._device)
+ print('Directory :', self._results_dir)
+ print()
+ self.GenerateJFuzzPrograms()
+ self.RunDexFuzz()
+
+
+ def GenerateJFuzzPrograms(self):
+ """Generates JFuzz programs.
+
+ Raises:
+ FatalError: error when generation fails
+ """
+ os.chdir(self._inputs_dir)
+ for i in range(1, self._num_inputs + 1):
+ jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.', 'Test.java']
+ if RunCommand(['jfuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
+ raise FatalError('Unexpected error while running JFuzz')
+ if RunCommand(['jack'] + jack_args, out=None, err='jackerr.txt',
+ timeout=30) != RetCode.SUCCESS:
+ raise FatalError('Unexpected error while running Jack')
+ shutil.move('Test.java', '../Test' + str(i) + '.java')
+ shutil.move('classes.dex', 'classes' + str(i) + '.dex')
+ os.unlink('jackerr.txt')
+
+ def RunDexFuzz(self):
+ """Starts the DexFuzz testing."""
+ os.chdir(self._dexfuzz_dir)
+ os.environ['ANDROID_DATA'] = self._dexfuzz_dir
+ dexfuzz_args = ['--inputs=' + self._inputs_dir, '--execute',
+ '--execute-class=Test', '--repeat=' + str(self._num_tests),
+ '--dump-output', '--interpreter', '--optimizing']
+ if self._device is not None:
+ dexfuzz_args += ['--device=' + self._device, '--allarm']
+ else:
+ dexfuzz_args += ['--host'] # Assume host otherwise.
+ check_call(['dexfuzz'] + dexfuzz_args)
+ # TODO: summarize findings.
+
+
+def main():
+ # Handle arguments.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--num_tests', default=10000,
+ type=int, help='number of tests to run')
+ parser.add_argument('--num_inputs', default=50,
+ type=int, help='number of JFuzz programs to generate')
+ parser.add_argument('--device', help='target device serial number')
+ args = parser.parse_args()
+ # Run the DexFuzz tester.
+ with DexFuzzTester(args.num_tests, args.num_inputs, args.device) as fuzzer:
+ fuzzer.Run()
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/javafuzz/run_java_fuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
index 51d00be373..54f9bb494e 100755
--- a/tools/javafuzz/run_java_fuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -17,69 +17,31 @@
import abc
import argparse
import filecmp
-
-from glob import glob
-
import os
import shlex
import shutil
import subprocess
import sys
+from glob import glob
+from subprocess import DEVNULL
from tempfile import mkdtemp
sys.path.append(os.path.dirname(os.path.dirname(
os.path.realpath(__file__))))
-from bisection_search.common import RetCode
-from bisection_search.common import CommandListToCommandString
-from bisection_search.common import FatalError
-from bisection_search.common import GetEnvVariableOrError
-from bisection_search.common import RunCommandForOutput
-from bisection_search.common import DeviceTestEnv
+from common.common import RetCode
+from common.common import CommandListToCommandString
+from common.common import FatalError
+from common.common import GetJackClassPath
+from common.common import GetEnvVariableOrError
+from common.common import RunCommand
+from common.common import RunCommandForOutput
+from common.common import DeviceTestEnv
# Return codes supported by bisection bug search.
BISECTABLE_RET_CODES = (RetCode.SUCCESS, RetCode.ERROR, RetCode.TIMEOUT)
-#
-# Utility methods.
-#
-
-
-def RunCommand(cmd, out, err, timeout=5):
- """Executes a command, and returns its return code.
-
- Args:
- cmd: list of strings, a command to execute
- out: string, file name to open for stdout (or None)
- err: string, file name to open for stderr (or None)
- timeout: int, time out in seconds
- Returns:
- RetCode, return code of running command (forced RetCode.TIMEOUT
- on timeout)
- """
- devnull = subprocess.DEVNULL
- outf = devnull
- if out is not None:
- outf = open(out, mode='w')
- errf = devnull
- if err is not None:
- errf = open(err, mode='w')
- (_, _, retcode) = RunCommandForOutput(cmd, None, outf, errf, timeout)
- if outf != devnull:
- outf.close()
- if errf != devnull:
- errf.close()
- return retcode
-
-
-def GetJackClassPath():
- """Returns Jack's classpath."""
- top = GetEnvVariableOrError('ANDROID_BUILD_TOP')
- libdir = top + '/out/host/common/obj/JAVA_LIBRARIES'
- return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \
- + libdir + '/core-oj-hostdex_intermediates/classes.jack'
-
def GetExecutionModeRunner(device, mode):
"""Returns a runner for the given execution mode.
@@ -104,6 +66,7 @@ def GetExecutionModeRunner(device, mode):
return TestRunnerArtOptOnTarget(device)
raise FatalError('Unknown execution mode')
+
#
# Execution mode classes.
#
@@ -248,7 +211,7 @@ class TestRunnerArtOnTarget(TestRunner):
device: string, target device serial number (or None)
extra_args: list of strings, extra arguments for dalvikvm
"""
- self._test_env = DeviceTestEnv('javafuzz_', specific_device=device)
+ self._test_env = DeviceTestEnv('jfuzz_', specific_device=device)
self._dalvik_cmd = ['dalvikvm']
if extra_args is not None:
self._dalvik_cmd += extra_args
@@ -335,14 +298,15 @@ class TestRunnerArtOptOnTarget(TestRunnerArtOnTarget):
#
-# Tester classes.
+# Tester class.
#
-class JavaFuzzTester(object):
- """Tester that runs JavaFuzz many times and report divergences."""
+class JFuzzTester(object):
+ """Tester that runs JFuzz many times and reports divergences."""
- def __init__(self, num_tests, device, mode1, mode2):
+ def __init__(self, num_tests, device, mode1, mode2, jfuzz_args,
+ report_script, true_divergence_only):
"""Constructor for the tester.
Args:
@@ -350,13 +314,20 @@ class JavaFuzzTester(object):
device: string, target device serial number (or None)
mode1: string, execution mode for first runner
mode2: string, execution mode for second runner
+ jfuzz_args: list of strings, additional arguments for jfuzz
+ report_script: string, path to script called for each divergence
+ true_divergence_only: boolean, if True don't bisect timeout divergences
"""
self._num_tests = num_tests
self._device = device
self._runner1 = GetExecutionModeRunner(device, mode1)
self._runner2 = GetExecutionModeRunner(device, mode2)
+ self._jfuzz_args = jfuzz_args
+ self._report_script = report_script
+ self._true_divergence_only = true_divergence_only
self._save_dir = None
- self._tmp_dir = None
+ self._results_dir = None
+ self._jfuzz_dir = None
# Statistics.
self._test = 0
self._num_success = 0
@@ -373,23 +344,23 @@ class JavaFuzzTester(object):
"""
self._save_dir = os.getcwd()
self._results_dir = mkdtemp(dir='/tmp/')
- self._tmp_dir = mkdtemp(dir=self._results_dir)
- if self._tmp_dir is None or self._results_dir is None:
+ self._jfuzz_dir = mkdtemp(dir=self._results_dir)
+ if self._results_dir is None or self._jfuzz_dir is None:
raise FatalError('Cannot obtain temp directory')
- os.chdir(self._tmp_dir)
+ os.chdir(self._jfuzz_dir)
return self
def __exit__(self, etype, evalue, etraceback):
"""On exit, re-enters previously saved current directory and cleans up."""
os.chdir(self._save_dir)
- shutil.rmtree(self._tmp_dir)
+ shutil.rmtree(self._jfuzz_dir)
if self._num_divergences == 0:
shutil.rmtree(self._results_dir)
def Run(self):
- """Runs JavaFuzz many times and report divergences."""
+ """Runs JFuzz many times and reports divergences."""
print()
- print('**\n**** JavaFuzz Testing\n**')
+ print('**\n**** JFuzz Testing\n**')
print()
print('#Tests :', self._num_tests)
print('Device :', self._device)
@@ -399,7 +370,7 @@ class JavaFuzzTester(object):
print()
self.ShowStats()
for self._test in range(1, self._num_tests + 1):
- self.RunJavaFuzzTest()
+ self.RunJFuzzTest()
self.ShowStats()
if self._num_divergences == 0:
print('\n\nsuccess (no divergences)\n')
@@ -408,16 +379,17 @@ class JavaFuzzTester(object):
def ShowStats(self):
"""Shows current statistics (on same line) while tester is running."""
- print('\rTests:', self._test, \
- 'Success:', self._num_success, \
- 'Not-compiled:', self._num_not_compiled, \
- 'Not-run:', self._num_not_run, \
- 'Timed-out:', self._num_timed_out, \
- 'Divergences:', self._num_divergences, end='')
+ print('\rTests:', self._test,
+ 'Success:', self._num_success,
+ 'Not-compiled:', self._num_not_compiled,
+ 'Not-run:', self._num_not_run,
+ 'Timed-out:', self._num_timed_out,
+ 'Divergences:', self._num_divergences,
+ end='')
sys.stdout.flush()
- def RunJavaFuzzTest(self):
- """Runs a single JavaFuzz test, comparing two execution modes."""
+ def RunJFuzzTest(self):
+ """Runs a single JFuzz test, comparing two execution modes."""
self.ConstructTest()
retc1 = self._runner1.CompileAndRunTest()
retc2 = self._runner2.CompileAndRunTest()
@@ -425,13 +397,14 @@ class JavaFuzzTester(object):
self.CleanupTest()
def ConstructTest(self):
- """Use JavaFuzz to generate next Test.java test.
+ """Uses JFuzz to generate the next Test.java test.
Raises:
- FatalError: error when javafuzz fails
+ FatalError: error when jfuzz fails
"""
- if RunCommand(['javafuzz'], out='Test.java', err=None) != RetCode.SUCCESS:
- raise FatalError('Unexpected error while running JavaFuzz')
+ if (RunCommand(['jfuzz'] + self._jfuzz_args, out='Test.java', err=None)
+ != RetCode.SUCCESS):
+ raise FatalError('Unexpected error while running JFuzz')
def CheckForDivergence(self, retc1, retc2):
"""Checks for divergences and updates statistics.
@@ -478,8 +451,47 @@ class JavaFuzzTester(object):
for f in glob('*.txt') + ['Test.java']:
shutil.copy(f, ddir)
# Maybe run bisection bug search.
- if retc1 in BISECTABLE_RET_CODES and retc2 in BISECTABLE_RET_CODES:
+ if (retc1 in BISECTABLE_RET_CODES and retc2 in BISECTABLE_RET_CODES and
+ not (self._true_divergence_only and RetCode.TIMEOUT in (retc1, retc2))):
self.MaybeBisectDivergence(retc1, retc2, is_output_divergence)
+ # Call reporting script.
+ if self._report_script:
+ self.RunReportScript(retc1, retc2, is_output_divergence)
+
+ def RunReportScript(self, retc1, retc2, is_output_divergence):
+ """Runs report script."""
+ try:
+ title = "Divergence between {0} and {1} (found with fuzz testing)".format(
+ self._runner1.description, self._runner2.description)
+ # Prepare divergence comment.
+ jfuzz_cmd_and_version = subprocess.check_output(
+ ['grep', '-o', 'jfuzz.*', 'Test.java'], universal_newlines=True)
+ (jfuzz_cmd_str, jfuzz_ver) = jfuzz_cmd_and_version.split('(')
+ # Strip right parenthesis and new line.
+ jfuzz_ver = jfuzz_ver[:-2]
+ jfuzz_args = ['\'-{0}\''.format(arg)
+ for arg in jfuzz_cmd_str.strip().split(' -')][1:]
+ wrapped_args = ['--jfuzz_arg={0}'.format(opt) for opt in jfuzz_args]
+ repro_cmd_str = (os.path.basename(__file__) + ' --num_tests 1 ' +
+ ' '.join(wrapped_args))
+ comment = 'jfuzz {0}\nReproduce test:\n{1}\nReproduce divergence:\n{2}\n'.format(
+ jfuzz_ver, jfuzz_cmd_str, repro_cmd_str)
+ if is_output_divergence:
+ (output, _, _) = RunCommandForOutput(
+ ['diff', self._runner1.output_file, self._runner2.output_file],
+ None, subprocess.PIPE, subprocess.STDOUT)
+ comment += 'Diff:\n' + output
+ else:
+ comment += '{0} vs {1}\n'.format(retc1, retc2)
+ # Prepare report script command.
+ script_cmd = [self._report_script, title, comment]
+ ddir = self.GetCurrentDivergenceDir()
+ bisection_out_files = glob(ddir + '/*_bisection_out.txt')
+ if bisection_out_files:
+ script_cmd += ['--bisection_out', bisection_out_files[0]]
+ subprocess.check_call(script_cmd, stdout=DEVNULL, stderr=DEVNULL)
+ except subprocess.CalledProcessError as err:
+ print('Failed to run report script.\n', err)
def RunBisectionSearch(self, args, expected_retcode, expected_output,
runner_id):
@@ -515,12 +527,12 @@ class JavaFuzzTester(object):
def CleanupTest(self):
"""Cleans up after a single test run."""
- for file_name in os.listdir(self._tmp_dir):
- file_path = os.path.join(self._tmp_dir, file_name)
- if os.path.isfile(file_path):
- os.unlink(file_path)
- elif os.path.isdir(file_path):
- shutil.rmtree(file_path)
+ for file_name in os.listdir(self._jfuzz_dir):
+ file_path = os.path.join(self._jfuzz_dir, file_name)
+ if os.path.isfile(file_path):
+ os.unlink(file_path)
+ elif os.path.isdir(file_path):
+ shutil.rmtree(file_path)
def main():
@@ -533,12 +545,19 @@ def main():
help='execution mode 1 (default: ri)')
parser.add_argument('--mode2', default='hopt',
help='execution mode 2 (default: hopt)')
+ parser.add_argument('--report_script', help='script called for each '
+ 'divergence')
+ parser.add_argument('--jfuzz_arg', default=[], dest='jfuzz_args',
+ action='append', help='argument for jfuzz')
+ parser.add_argument('--true_divergence', default=False, action='store_true',
+ help='don\'t bisect timeout divergences')
args = parser.parse_args()
if args.mode1 == args.mode2:
raise FatalError('Identical execution modes given')
- # Run the JavaFuzz tester.
- with JavaFuzzTester(args.num_tests, args.device,
- args.mode1, args.mode2) as fuzzer:
+ # Run the JFuzz tester.
+ with JFuzzTester(args.num_tests, args.device, args.mode1, args.mode2,
+ args.jfuzz_args, args.report_script,
+ args.true_divergence) as fuzzer:
fuzzer.Run()
if __name__ == '__main__':
diff --git a/tools/jfuzz/run_jfuzz_test_nightly.py b/tools/jfuzz/run_jfuzz_test_nightly.py
new file mode 100755
index 0000000000..cd338fb136
--- /dev/null
+++ b/tools/jfuzz/run_jfuzz_test_nightly.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3.4
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import subprocess
+import sys
+
+from tempfile import TemporaryFile
+
+# Default arguments for run_jfuzz_test.py.
+DEFAULT_ARGS = ['--num_tests=20000']
+
+# run_jfuzz_test.py success string.
+SUCCESS_STRING = 'success (no divergences)'
+
+# Constant returned by string find() method when search fails.
+NOT_FOUND = -1
+
+def main(argv):
+ cwd = os.path.dirname(os.path.realpath(__file__))
+ cmd = [cwd + '/run_jfuzz_test.py'] + DEFAULT_ARGS
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--num_proc', default=8,
+ type=int, help='number of processes to run')
+ # Unknown arguments are passed to run_jfuzz_test.py.
+ (args, unknown_args) = parser.parse_known_args()
+ output_files = [TemporaryFile('wb+') for _ in range(args.num_proc)]
+ processes = []
+ for output_file in output_files:
+ processes.append(subprocess.Popen(cmd + unknown_args, stdout=output_file,
+ stderr=subprocess.STDOUT))
+ try:
+ # Wait for processes to terminate.
+ for proc in processes:
+ proc.wait()
+ except KeyboardInterrupt:
+ for proc in processes:
+ proc.kill()
+ # Output results.
+ for i, output_file in enumerate(output_files):
+ output_file.seek(0)
+ output_str = output_file.read().decode('ascii')
+ output_file.close()
+ print('Tester', i)
+ if output_str.find(SUCCESS_STRING) == NOT_FOUND:
+ print(output_str)
+ else:
+ print(SUCCESS_STRING)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt
index 95f0c2dcf2..0e289a66d7 100644
--- a/tools/libcore_failures_concurrent_collector.txt
+++ b/tools/libcore_failures_concurrent_collector.txt
@@ -10,11 +10,4 @@
*/
[
-{
- description: "Assertion failing on the concurrent collector configuration.",
- result: EXEC_FAILED,
- names: ["jsr166.LinkedTransferQueueTest#testTransfer2",
- "jsr166.LinkedTransferQueueTest#testWaitingConsumer"],
- bug: 25883050
-}
]