703 files changed, 38226 insertions, 11636 deletions
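A note before the patch body: one pattern recurs throughout this change. build/Android.common_build.mk (below) introduces ART_TARGET_CODEGEN_ARCHS / ART_HOST_CODEGEN_ARCHS and turns each selected architecture into a -DART_ENABLE_CODEGEN_<arch> define, and the per-architecture source lists in compiler/Android.mk and build/Android.gtest.mk key off the same variables. A minimal stand-alone sketch of that expansion, assuming a hypothetical value of "arm64 x86_64" (the demo target is illustrative and not part of the patch):

    # Editor's sketch (GNU Make), not part of the patch.
    ART_TARGET_CODEGEN_ARCHS := arm64 x86_64

    # As in the patch: a 64-bit back end pulls in its 32-bit counterpart.
    ifneq (,$(filter arm64,$(ART_TARGET_CODEGEN_ARCHS)))
      ART_TARGET_CODEGEN_ARCHS += arm
    endif
    ifneq (,$(filter x86_64,$(ART_TARGET_CODEGEN_ARCHS)))
      ART_TARGET_CODEGEN_ARCHS += x86
    endif
    ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS))

    # One define per architecture, mirroring how art_target_cflags is built below.
    art_target_cflags := $(foreach target_arch,$(ART_TARGET_CODEGEN_ARCHS),-DART_ENABLE_CODEGEN_$(target_arch))

    demo:
    	@echo $(art_target_cflags)
    # 'make demo' prints:
    # -DART_ENABLE_CODEGEN_arm -DART_ENABLE_CODEGEN_arm64 -DART_ENABLE_CODEGEN_x86 -DART_ENABLE_CODEGEN_x86_64

These defines let individual code-generator back ends be compiled in or out per configuration; the 'svelte' setting in the patch restricts the set to the device's own architectures.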
diff --git a/Android.mk b/Android.mk
index 54a33b2305..49b61bb221 100644
--- a/Android.mk
+++ b/Android.mk
@@ -78,6 +78,8 @@ include $(art_path)/build/Android.cpplint.mk
 include $(art_path)/runtime/Android.mk
 include $(art_path)/compiler/Android.mk
+include $(art_path)/dexdump/Android.mk
+include $(art_path)/dexlist/Android.mk
 include $(art_path)/dex2oat/Android.mk
 include $(art_path)/disassembler/Android.mk
 include $(art_path)/oatdump/Android.mk
@@ -166,7 +168,8 @@ test-art-host-vixl: $(VIXL_TEST_DEPENDENCY)
 # "mm test-art-host" to build and run all host tests.
 .PHONY: test-art-host
-test-art-host: test-art-host-gtest test-art-host-run-test test-art-host-vixl
+test-art-host: test-art-host-gtest test-art-host-run-test \
+               test-art-host-vixl test-art-host-dexdump
 	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
 
 # All host tests that run solely with the default compiler.
@@ -235,6 +238,11 @@ test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(
 	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
 endif
 
+# Dexdump/list regression test.
+.PHONY: test-art-host-dexdump
+test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist2)
+	ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests
+
 # Valgrind. Currently only 32b gtests.
 .PHONY: valgrind-test-art-host
 valgrind-test-art-host: valgrind-test-art-host-gtest32
@@ -419,6 +427,7 @@ use-art-full:
 	adb shell setprop dalvik.vm.dex2oat-filter \"\"
 	adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	adb shell setprop dalvik.vm.usejit false
 	adb shell start
 
 .PHONY: use-artd-full
@@ -429,16 +438,18 @@ use-artd-full:
 	adb shell setprop dalvik.vm.dex2oat-filter \"\"
 	adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+	adb shell setprop dalvik.vm.usejit false
 	adb shell start
 
-.PHONY: use-art-verify-at-runtime
-use-art-verify-at-runtime:
+.PHONY: use-art-jit
+use-art-jit:
 	adb root
 	adb wait-for-device shell stop
 	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
 	adb shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
 	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	adb shell setprop dalvik.vm.usejit true
 	adb shell start
 
 .PHONY: use-art-interpret-only
@@ -449,6 +460,7 @@ use-art-interpret-only:
 	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
 	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	adb shell setprop dalvik.vm.usejit false
 	adb shell start
 
 .PHONY: use-artd-interpret-only
@@ -459,6 +471,7 @@ use-artd-interpret-only:
 	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
 	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
+	adb shell setprop dalvik.vm.usejit false
 	adb shell start
 
 .PHONY: use-art-verify-none
@@ -469,6 +482,7 @@ use-art-verify-none:
 	adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
 	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
 	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
+	adb shell setprop dalvik.vm.usejit false
 	adb shell start
 
 ########################################################################
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 0f756eff2d..6952d691ba 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -20,6 +20,15 @@ ART_ANDROID_COMMON_MK = true
 
 ART_TARGET_SUPPORTED_ARCH := arm arm64 mips mips64 x86 x86_64
 ART_HOST_SUPPORTED_ARCH := x86 x86_64
+ifneq ($(HOST_OS),darwin)
+  ART_HOST_SUPPORTED_ARCH := x86 x86_64
+else
+  # Mac OS doesn't support low-4GB allocation in a 64-bit process. So we won't be able to create
+  # our heaps.
+  ART_HOST_SUPPORTED_ARCH := x86
+  ART_MULTILIB_OVERRIDE_host := 32
+endif
+
 ART_COVERAGE := false
 
 ifeq ($(ART_COVERAGE),true)
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 02ef0c6206..ad2feebf12 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -33,7 +33,22 @@ ART_BUILD_TARGET_NDEBUG ?= true
 ART_BUILD_TARGET_DEBUG ?= true
 ART_BUILD_HOST_NDEBUG ?= true
 ART_BUILD_HOST_DEBUG ?= true
-ART_BUILD_HOST_STATIC ?= true
+
+# Enable the static builds only for checkbuilds.
+ifneq (,$(filter checkbuild,$(MAKECMDGOALS)))
+  ART_BUILD_HOST_STATIC ?= true
+else
+  ART_BUILD_HOST_STATIC ?= false
+endif
+
+# Asan does not support static linkage
+ifdef SANITIZE_HOST
+  ART_BUILD_HOST_STATIC := false
+endif
+
+ifneq ($(HOST_OS),linux)
+  ART_BUILD_HOST_STATIC := false
+endif
 
 ifeq ($(ART_BUILD_TARGET_NDEBUG),false)
 $(info Disabling ART_BUILD_TARGET_NDEBUG)
@@ -51,6 +66,11 @@ ifeq ($(ART_BUILD_HOST_STATIC),true)
 $(info Enabling ART_BUILD_HOST_STATIC)
 endif
 
+ifeq ($(ART_TEST_DEBUG_GC),true)
+  ART_DEFAULT_GC_TYPE := SS
+  ART_USE_TLAB := true
+endif
+
 #
 # Used to enable JIT
 #
@@ -113,12 +133,8 @@ ART_TARGET_CLANG_CFLAGS_mips64 :=
 ART_TARGET_CLANG_CFLAGS_x86 :=
 ART_TARGET_CLANG_CFLAGS_x86_64 :=
 
-# These are necessary for Clang ARM64 ART builds. TODO: remove.
-ART_TARGET_CLANG_CFLAGS_arm64 += \
-  -DNVALGRIND
-
 # Warn about thread safety violations with clang.
-art_clang_cflags := -Wthread-safety
+art_clang_cflags := -Wthread-safety -Wthread-safety-negative
 
 # Warn if switch fallthroughs aren't annotated.
 art_clang_cflags += -Wimplicit-fallthrough
@@ -189,6 +205,11 @@ ART_C_INCLUDES := \
   external/vixl/src \
   external/zlib \
 
+# We optimize Thread::Current() with a direct TLS access. This requires access to a private
+# Bionic header.
+# Note: technically we only need this on device, but this avoids the duplication of the includes.
+ART_C_INCLUDES += bionic/libc/private
+
 # Base set of cflags used by all things ART.
 art_cflags := \
   -fno-rtti \
@@ -206,6 +227,59 @@ art_cflags := \
   -fvisibility=protected \
   $(art_default_gc_type_cflags)
 
+# The architectures the compiled tools are able to run on. Setting this to 'all' will cause all
+# architectures to be included.
+ART_TARGET_CODEGEN_ARCHS ?= all
+ART_HOST_CODEGEN_ARCHS ?= all
+
+ifeq ($(ART_TARGET_CODEGEN_ARCHS),all)
+  ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH))
+  # We need to handle the fact that some compiler tests mix code from different architectures.
+  ART_TARGET_COMPILER_TESTS ?= true
+else
+  ART_TARGET_COMPILER_TESTS := false
+  ifeq ($(ART_TARGET_CODEGEN_ARCHS),svelte)
+    ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_ARCH_64) $(ART_TARGET_ARCH_32))
+  endif
+endif
+ifeq ($(ART_HOST_CODEGEN_ARCHS),all)
+  ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH))
+  ART_HOST_COMPILER_TESTS ?= true
+else
+  ART_HOST_COMPILER_TESTS := false
+  ifeq ($(ART_HOST_CODEGEN_ARCHS),svelte)
+    ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS) $(ART_HOST_ARCH_64) $(ART_HOST_ARCH_32))
+  endif
+endif
+
+ifneq (,$(filter arm64,$(ART_TARGET_CODEGEN_ARCHS)))
+  ART_TARGET_CODEGEN_ARCHS += arm
+endif
+ifneq (,$(filter mips64,$(ART_TARGET_CODEGEN_ARCHS)))
+  ART_TARGET_CODEGEN_ARCHS += mips
+endif
+ifneq (,$(filter x86_64,$(ART_TARGET_CODEGEN_ARCHS)))
+  ART_TARGET_CODEGEN_ARCHS += x86
+endif
+ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS))
+ifneq (,$(filter arm64,$(ART_HOST_CODEGEN_ARCHS)))
+  ART_HOST_CODEGEN_ARCHS += arm
+endif
+ifneq (,$(filter mips64,$(ART_HOST_CODEGEN_ARCHS)))
+  ART_HOST_CODEGEN_ARCHS += mips
+endif
+ifneq (,$(filter x86_64,$(ART_HOST_CODEGEN_ARCHS)))
+  ART_HOST_CODEGEN_ARCHS += x86
+endif
+ART_HOST_CODEGEN_ARCHS := $(sort $(ART_HOST_CODEGEN_ARCHS))
+
+# Base set of cflags used by target build only
+art_target_cflags := \
+  $(foreach target_arch,$(strip $(ART_TARGET_CODEGEN_ARCHS)), -DART_ENABLE_CODEGEN_$(target_arch))
+# Base set of cflags used by host build only
+art_host_cflags := \
+  $(foreach host_arch,$(strip $(ART_HOST_CODEGEN_ARCHS)), -DART_ENABLE_CODEGEN_$(host_arch))
+
 # Base set of asflags used by all things ART.
 art_asflags :=
@@ -257,10 +331,10 @@ ifeq ($(HOST_OS),linux)
   # Larger frame-size for host clang builds today
   ifneq ($(ART_COVERAGE),true)
     ifneq ($(NATIVE_COVERAGE),true)
-      ifndef SANITIZE_HOST
-        art_host_non_debug_cflags += -Wframe-larger-than=2700
-      endif
-      ifndef SANITIZE_TARGET
+      art_host_non_debug_cflags += -Wframe-larger-than=2700
+      ifdef SANITIZE_TARGET
+        art_target_non_debug_cflags += -Wframe-larger-than=5450
+      else
         art_target_non_debug_cflags += -Wframe-larger-than=1728
       endif
     endif
@@ -271,13 +345,30 @@ ifndef LIBART_IMG_HOST_BASE_ADDRESS
   $(error LIBART_IMG_HOST_BASE_ADDRESS unset)
 endif
 ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS)
-ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default
+ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default $(art_host_cflags)
 ART_HOST_ASFLAGS += $(art_asflags)
 
+# Disable -Wpessimizing-move: triggered for art/runtime/base/variant_map.h:261
+# Adding this flag to art_clang_cflags doesn't work because -Wall gets added to
+# ART_HOST_CFLAGS (as a part of art_cflags) after
+# -Wno-pessimizing-move. Instead, add the flag here to both
+# ART_TARGET_CLANG_CFLAGS and ART_HOST_CFLAGS
+ifeq ($(ART_HOST_CLANG),true)
+ART_HOST_CFLAGS += -Wno-pessimizing-move
+endif
+ART_TARGET_CLANG_CFLAGS += -Wno-pessimizing-move
+
+# The latest clang update trips over many of the files in art and never finishes
+# compiling for aarch64 with -O3 (or -O2). Drop back to -O1 while we investigate
+# to stop punishing the build server.
+# Bug: http://b/23256622
+ART_TARGET_CLANG_CFLAGS_arm64 += -O1
+
 ifndef LIBART_IMG_TARGET_BASE_ADDRESS
   $(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
 endif
 ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS)
+ART_TARGET_CFLAGS += $(art_target_cflags)
 ART_TARGET_ASFLAGS += $(art_asflags)
 
 ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags)
@@ -309,6 +400,8 @@ ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_A
 # Clear locals now they've served their purpose.
 art_cflags :=
 art_asflags :=
+art_host_cflags :=
+art_target_cflags :=
 art_debug_cflags :=
 art_non_debug_cflags :=
 art_host_non_debug_cflags :=
@@ -338,7 +431,7 @@ define set-target-local-cflags-vars
   endif
 
   LOCAL_CLANG_CFLAGS := $(ART_TARGET_CLANG_CFLAGS)
-  $(foreach arch,$(ART_SUPPORTED_ARCH),
+  $(foreach arch,$(ART_TARGET_SUPPORTED_ARCH),
     LOCAL_CLANG_CFLAGS_$(arch) += $$(ART_TARGET_CLANG_CFLAGS_$(arch)))
 
   # Clear locally used variables.
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 183f4e3ec3..a561c5f4c5 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -88,4 +88,8 @@ TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT
 HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
 TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+
+# Classpath for Jack compilation: we only need core-libart.
+HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
+TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
 endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 45b649047f..2f43f5f809 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -45,6 +45,7 @@ ART_TEST_DEFAULT_COMPILER ?= true
 
 # Do you want interpreter tests run?
 ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
+ART_TEST_INTERPRETER_ACCESS_CHECKS ?= $(ART_TEST_FULL)
 
 # Do you want JIT tests run?
 ART_TEST_JIT ?= $(ART_TEST_FULL)
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index a251c92464..72cf978339 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -127,6 +127,10 @@ define build-art-executable
     LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
   endif
 
+  ifdef ART_MULTILIB_OVERRIDE_$$(art_target_or_host)
+    art_multilib := $$(ART_MULTILIB_OVERRIDE_$$(art_target_or_host))
+  endif
+
   LOCAL_MULTILIB := $$(art_multilib)
 
   art_out_binary_name := $$(LOCAL_MODULE)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9f6dc9af18..566d289696 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -19,6 +19,7 @@ LOCAL_PATH := art/test
 
 include art/build/Android.common_test.mk
 include art/build/Android.common_path.mk
+include art/build/Android.common_build.mk
 
 # Subdirectories in art/test which contain dex files used as inputs for gtests.
 GTEST_DEX_DIRECTORIES := \
@@ -93,6 +94,28 @@ ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
 # TODO: document why this is needed.
 ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
 
+# The dexdump test requires an image and the dexdump utility.
+# TODO: rename into dexdump when migration completes
+ART_GTEST_dexdump_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_default_no-pic_64) \
+  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_OUT_EXECUTABLES)/dexdump2
+ART_GTEST_dexdump_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_default_no-pic_64) \
+  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  dexdump2
+
+# The dexlist test requires an image and the dexlist utility.
+# TODO: rename into dexlist when migration completes
+ART_GTEST_dexlist_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_default_no-pic_64) \
+  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_OUT_EXECUTABLES)/dexlist2
+ART_GTEST_dexlist_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_default_no-pic_64) \
+  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  dexlist2
+
 # The imgdiag test has dependencies on core.oat since it needs to load it during the test.
 # For the host, also add the installed tool (in the base size, that should suffice). For the
 # target, just the module is fine, the sync will happen late enough.
@@ -120,6 +143,8 @@ LOCAL_PATH := art
 
 RUNTIME_GTEST_COMMON_SRC_FILES := \
   cmdline/cmdline_parser_test.cc \
+  dexdump/dexdump_test.cc \
+  dexlist/dexlist_test.cc \
   imgdiag/imgdiag_test.cc \
   oatdump/oatdump_test.cc \
   runtime/arch/arch_test.cc \
@@ -209,37 +234,25 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
   compiler/dex/local_value_numbering_test.cc \
   compiler/dex/mir_graph_test.cc \
   compiler/dex/mir_optimization_test.cc \
-  compiler/dex/quick/quick_cfi_test.cc \
   compiler/dex/type_inference_test.cc \
   compiler/dwarf/dwarf_test.cc \
   compiler/driver/compiler_driver_test.cc \
   compiler/elf_writer_test.cc \
   compiler/image_test.cc \
-  compiler/jni/jni_cfi_test.cc \
   compiler/jni/jni_compiler_test.cc \
-  compiler/linker/arm64/relative_patcher_arm64_test.cc \
-  compiler/linker/arm/relative_patcher_thumb2_test.cc \
-  compiler/linker/x86/relative_patcher_x86_test.cc \
-  compiler/linker/x86_64/relative_patcher_x86_64_test.cc \
   compiler/oat_test.cc \
   compiler/optimizing/bounds_check_elimination_test.cc \
-  compiler/optimizing/codegen_test.cc \
-  compiler/optimizing/dead_code_elimination_test.cc \
-  compiler/optimizing/constant_folding_test.cc \
   compiler/optimizing/dominator_test.cc \
   compiler/optimizing/find_loops_test.cc \
   compiler/optimizing/graph_checker_test.cc \
   compiler/optimizing/graph_test.cc \
   compiler/optimizing/gvn_test.cc \
-  compiler/optimizing/linearize_test.cc \
-  compiler/optimizing/liveness_test.cc \
+  compiler/optimizing/licm_test.cc \
   compiler/optimizing/live_interval_test.cc \
-  compiler/optimizing/live_ranges_test.cc \
   compiler/optimizing/nodes_test.cc \
-  compiler/optimizing/optimizing_cfi_test.cc \
   compiler/optimizing/parallel_move_test.cc \
   compiler/optimizing/pretty_printer_test.cc \
-  compiler/optimizing/register_allocator_test.cc \
+  compiler/optimizing/side_effects_test.cc \
   compiler/optimizing/ssa_test.cc \
   compiler/optimizing/stack_map_test.cc \
   compiler/optimizing/suspend_check_test.cc \
@@ -248,10 +261,38 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
   compiler/utils/dedupe_set_test.cc \
   compiler/utils/swap_space_test.cc \
   compiler/utils/test_dex_file_builder_test.cc \
+
+COMPILER_GTEST_COMMON_SRC_FILES_all := \
+  compiler/dex/quick/quick_cfi_test.cc \
+  compiler/jni/jni_cfi_test.cc \
+  compiler/optimizing/codegen_test.cc \
+  compiler/optimizing/constant_folding_test.cc \
+  compiler/optimizing/dead_code_elimination_test.cc \
+  compiler/optimizing/linearize_test.cc \
+  compiler/optimizing/liveness_test.cc \
+  compiler/optimizing/live_ranges_test.cc \
+  compiler/optimizing/optimizing_cfi_test.cc \
+  compiler/optimizing/register_allocator_test.cc \
+
+COMPILER_GTEST_COMMON_SRC_FILES_arm := \
+  compiler/linker/arm/relative_patcher_thumb2_test.cc \
+  compiler/utils/arm/managed_register_arm_test.cc \
+
+COMPILER_GTEST_COMMON_SRC_FILES_arm64 := \
+  compiler/linker/arm64/relative_patcher_arm64_test.cc \
+  compiler/utils/arm64/managed_register_arm64_test.cc \
+
+COMPILER_GTEST_COMMON_SRC_FILES_mips := \
+
+COMPILER_GTEST_COMMON_SRC_FILES_mips64 := \
+
+COMPILER_GTEST_COMMON_SRC_FILES_x86 := \
+  compiler/linker/x86/relative_patcher_x86_test.cc \
+  compiler/utils/x86/managed_register_x86_test.cc \
+
+COMPILER_GTEST_COMMON_SRC_FILES_x86_64 := \
+  compiler/linker/x86_64/relative_patcher_x86_64_test.cc \
+
 RUNTIME_GTEST_TARGET_SRC_FILES := \
   $(RUNTIME_GTEST_COMMON_SRC_FILES)
 
@@ -261,15 +302,67 @@ RUNTIME_GTEST_HOST_SRC_FILES := \
 COMPILER_GTEST_TARGET_SRC_FILES := \
   $(COMPILER_GTEST_COMMON_SRC_FILES)
 
+COMPILER_GTEST_TARGET_SRC_FILES_all := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_all) \
+
+COMPILER_GTEST_TARGET_SRC_FILES_arm := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_arm) \
+
+COMPILER_GTEST_TARGET_SRC_FILES_arm64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_arm64) \
+
+COMPILER_GTEST_TARGET_SRC_FILES_mips := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
+
+COMPILER_GTEST_TARGET_SRC_FILES_mips64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
+
+COMPILER_GTEST_TARGET_SRC_FILES_x86 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \
+
+COMPILER_GTEST_TARGET_SRC_FILES_x86_64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \
+
+$(foreach arch,$(ART_TARGET_CODEGEN_ARCHS),$(eval COMPILER_GTEST_TARGET_SRC_FILES += $$(COMPILER_GTEST_TARGET_SRC_FILES_$(arch))))
+ifeq (true,$(ART_TARGET_COMPILER_TESTS))
+  COMPILER_GTEST_TARGET_SRC_FILES += $(COMPILER_GTEST_TARGET_SRC_FILES_all)
+endif
+
 COMPILER_GTEST_HOST_SRC_FILES := \
   $(COMPILER_GTEST_COMMON_SRC_FILES) \
-  compiler/dex/quick/x86/quick_assemble_x86_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_all := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_all) \
+
+COMPILER_GTEST_HOST_SRC_FILES_arm := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_arm) \
   compiler/utils/arm/assembler_arm32_test.cc \
   compiler/utils/arm/assembler_thumb2_test.cc \
   compiler/utils/assembler_thumb_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_arm64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_arm64) \
+
+COMPILER_GTEST_HOST_SRC_FILES_mips := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
+
+COMPILER_GTEST_HOST_SRC_FILES_mips64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
+
+COMPILER_GTEST_HOST_SRC_FILES_x86 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \
+  compiler/dex/quick/x86/quick_assemble_x86_test.cc \
+  compiler/utils/x86/assembler_x86_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_x86_64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \
   compiler/utils/x86_64/assembler_x86_64_test.cc
 
+$(foreach arch,$(ART_HOST_CODEGEN_ARCHS),$(eval COMPILER_GTEST_HOST_SRC_FILES += $$(COMPILER_GTEST_HOST_SRC_FILES_$(arch))))
+ifeq (true,$(ART_HOST_COMPILER_TESTS))
+  COMPILER_GTEST_HOST_SRC_FILES += $(COMPILER_GTEST_HOST_SRC_FILES_all)
+endif
+
 ART_TEST_CFLAGS :=
 
 include $(CLEAR_VARS)
@@ -532,6 +625,7 @@ valgrind-test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_VALGRIND_GTEST
   2nd_library_path :=
 endef  # define-art-gtest
 
+
 ifeq ($(ART_BUILD_TARGET),true)
   $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace)))
   $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace)))
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 728469c2c4..3a3cb990ca 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -23,15 +23,21 @@
 include art/build/Android.common_build.mk
 
+LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION :=
 ifeq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
-  DEX2OAT_HOST_INSTRUCTION_SET_FEATURES := default
+  LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default
+else
+  LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES)
 endif
+LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION :=
 ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
-  $(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES := default
+  LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default
+else
+  LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES)
 endif
 
 # Use dex2oat debug version for better error reporting
-# $(1): compiler - default, optimizing, jit or interpreter.
+# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
 # $(2): pic/no-pic
 # $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
 # $(4): wrapper, e.g., valgrind.
@@ -64,12 +70,16 @@ define create-core-oat-host-rules
     core_compile_options += --compiler-filter=interpret-only
     core_infix := -interpreter
   endif
+  ifeq ($(1),interp-ac)
+    core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
+    core_infix := -interp-ac
+  endif
   ifeq ($(1),default)
    # Default has no infix, no compile options.
  endif
-  ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
+  ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),)
    #Technically this test is not precise, but hopefully good enough.
-    $$(error found $(1) expected default, interpreter, jit or optimizing)
+    $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
  endif
 
  ifeq ($(2),pic)
@@ -112,7 +122,7 @@ $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
 	  $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
 	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
 	  --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(3)ART_HOST_ARCH) \
-	  --instruction-set-features=$$($(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) \
+	  $$(LOCAL_$(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
 	  --host --android-root=$$(HOST_OUT) --include-patch-information --generate-debug-info \
 	  $$(PRIVATE_CORE_COMPILE_OPTIONS)
 
@@ -127,7 +137,7 @@ $$(core_oat_name): $$(core_image_name)
   core_pic_infix :=
 endef  # create-core-oat-host-rules
 
-# $(1): compiler - default, optimizing, jit or interpreter.
+# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
 # $(2): wrapper.
 # $(3): dex2oat suffix.
 define create-core-oat-host-rule-combination
@@ -143,12 +153,14 @@ endef
 
 $(eval $(call create-core-oat-host-rule-combination,default,,))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,,))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,,))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,,))
 
 valgrindHOST_CORE_IMG_OUTS :=
 valgrindHOST_CORE_OAT_OUTS :=
 $(eval $(call create-core-oat-host-rule-combination,default,valgrind,32))
 $(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32))
 $(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32))
 
 valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
 
@@ -178,12 +190,16 @@ define create-core-oat-target-rules
     core_compile_options += --compiler-filter=interpret-only
     core_infix := -interpreter
   endif
+  ifeq ($(1),interp-ac)
+    core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
+    core_infix := -interp-ac
+  endif
   ifeq ($(1),default)
     # Default has no infix, no compile options.
   endif
-  ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
+  ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),)
    # Technically this test is not precise, but hopefully good enough.
-    $$(error found $(1) expected default, interpreter, jit or optimizing)
+    $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
  endif
 
  ifeq ($(2),pic)
@@ -246,7 +262,7 @@ $$(core_oat_name): $$(core_image_name)
   core_pic_infix :=
 endef  # create-core-oat-target-rules
 
-# $(1): compiler - default, optimizing, jit or interpreter.
+# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
 # $(2): wrapper.
 # $(3): dex2oat suffix.
 define create-core-oat-target-rule-combination
@@ -262,12 +278,14 @@ endef
 
 $(eval $(call create-core-oat-target-rule-combination,default,,))
 $(eval $(call create-core-oat-target-rule-combination,optimizing,,))
 $(eval $(call create-core-oat-target-rule-combination,interpreter,,))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac,,))
 
 valgrindTARGET_CORE_IMG_OUTS :=
 valgrindTARGET_CORE_OAT_OUTS :=
 $(eval $(call create-core-oat-target-rule-combination,default,valgrind,32))
 $(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
 $(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32))
 
 valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
 
diff --git a/cmdline/cmdline_parse_result.h b/cmdline/cmdline_parse_result.h
index 717642f18c..982f17866d 100644
--- a/cmdline/cmdline_parse_result.h
+++ b/cmdline/cmdline_parse_result.h
@@ -126,7 +126,7 @@ struct CmdlineParseResult : CmdlineResult {
       : CmdlineResult(kSuccess), value_(value), has_value_(true) {}
   explicit CmdlineParseResult(T&& value)
       : CmdlineResult(kSuccess), value_(std::forward<T>(value)), has_value_(true) {}
-  explicit CmdlineParseResult()
+  CmdlineParseResult()
       : CmdlineResult(kSuccess), value_(), has_value_(false) {}
 
   T value_;
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index cebba65aca..cfc096728f 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -497,11 +497,10 @@ struct CmdlineParser {
   friend struct Builder;
 
   // Construct a new parser from the builder. Move all the arguments.
-  explicit CmdlineParser(bool ignore_unrecognized,
-                         std::vector<const char*>&& ignore_list,
-                         std::shared_ptr<SaveDestination> save_destination,
-                         std::vector<std::unique_ptr<detail::CmdlineParseArgumentAny>>&&
-                             completed_arguments)
+  CmdlineParser(bool ignore_unrecognized,
+                std::vector<const char*>&& ignore_list,
+                std::shared_ptr<SaveDestination> save_destination,
+                std::vector<std::unique_ptr<detail::CmdlineParseArgumentAny>>&& completed_arguments)
     : ignore_unrecognized_(ignore_unrecognized),
       ignore_list_(std::move(ignore_list)),
       save_destination_(save_destination),
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 98fd327f02..52df7deb25 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -216,9 +216,6 @@ TEST_F(CmdlineParserTest, TestSimpleSuccesses) {
   EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote);
   EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
   EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
-  EXPECT_SINGLE_PARSE_VALUE(false, "-Xverify:none", M::Verify);
-  EXPECT_SINGLE_PARSE_VALUE(true, "-Xverify:remote", M::Verify);
-  EXPECT_SINGLE_PARSE_VALUE(true, "-Xverify:all", M::Verify);
   EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize);
   EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize);
   EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM);
@@ -550,6 +547,14 @@ TEST_F(CmdlineParserTest, TestExperimentalLambdas) {
       M::ExperimentalLambdas);
 }
 
+// -Xverify:_
+TEST_F(CmdlineParserTest, TestVerify) {
+  EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kNone, "-Xverify:none", M::Verify);
+  EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:remote", M::Verify);
+  EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:all", M::Verify);
+  EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kSoftFail, "-Xverify:softfail", M::Verify);
+}
+
 TEST_F(CmdlineParserTest, TestIgnoreUnrecognized) {
   RuntimeParser::Builder parserBuilder;
 
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 81ef36b086..3009b32162 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -300,9 +300,9 @@ namespace art {
       // be able to parse arguments.
       template <typename TArg>
       struct CmdlineParseArgument : CmdlineParseArgumentAny {
-        explicit CmdlineParseArgument(CmdlineParserArgumentInfo<TArg>&& argument_info,
-                                      std::function<void(TArg&)>&& save_argument,
-                                      std::function<TArg&(void)>&& load_argument)
+        CmdlineParseArgument(CmdlineParserArgumentInfo<TArg>&& argument_info,
+                             std::function<void(TArg&)>&& save_argument,
+                             std::function<TArg&(void)>&& load_argument)
           : argument_info_(std::forward<decltype(argument_info)>(argument_info)),
             save_argument_(std::forward<decltype(save_argument)>(save_argument)),
             load_argument_(std::forward<decltype(load_argument)>(load_argument)) {
diff --git a/cmdline/token_range.h b/cmdline/token_range.h
index 5b54384e42..335806795a 100644
--- a/cmdline/token_range.h
+++ b/cmdline/token_range.h
@@ -45,7 +45,7 @@ struct TokenRange {
 
   // Copying-from-iterator constructor
   template <typename ForwardIterator>
-  explicit TokenRange(ForwardIterator it_begin, ForwardIterator it_end)
+  TokenRange(ForwardIterator it_begin, ForwardIterator it_end)
     : token_list_(new TokenList(it_begin, it_end)),
       begin_(token_list_->begin()),
       end_(token_list_->end())
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 39470785ab..7d368a2e80 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -24,18 +24,6 @@ LIBART_COMPILER_SRC_FILES := \
   dex/gvn_dead_code_elimination.cc \
   dex/local_value_numbering.cc \
   dex/type_inference.cc \
-  dex/quick/arm/assemble_arm.cc \
-  dex/quick/arm/call_arm.cc \
-  dex/quick/arm/fp_arm.cc \
-  dex/quick/arm/int_arm.cc \
-  dex/quick/arm/target_arm.cc \
-  dex/quick/arm/utility_arm.cc \
-  dex/quick/arm64/assemble_arm64.cc \
-  dex/quick/arm64/call_arm64.cc \
-  dex/quick/arm64/fp_arm64.cc \
-  dex/quick/arm64/int_arm64.cc \
-  dex/quick/arm64/target_arm64.cc \
-  dex/quick/arm64/utility_arm64.cc \
   dex/quick/codegen_util.cc \
   dex/quick/dex_file_method_inliner.cc \
   dex/quick/dex_file_to_method_inliner_map.cc \
@@ -44,22 +32,10 @@ LIBART_COMPILER_SRC_FILES := \
   dex/quick/gen_loadstore.cc \
   dex/quick/lazy_debug_frame_opcode_writer.cc \
   dex/quick/local_optimizations.cc \
-  dex/quick/mips/assemble_mips.cc \
-  dex/quick/mips/call_mips.cc \
-  dex/quick/mips/fp_mips.cc \
-  dex/quick/mips/int_mips.cc \
-  dex/quick/mips/target_mips.cc \
-  dex/quick/mips/utility_mips.cc \
   dex/quick/mir_to_lir.cc \
   dex/quick/quick_compiler.cc \
   dex/quick/ralloc_util.cc \
   dex/quick/resource_mask.cc \
-  dex/quick/x86/assemble_x86.cc \
-  dex/quick/x86/call_x86.cc \
-  dex/quick/x86/fp_x86.cc \
-  dex/quick/x86/int_x86.cc \
-  dex/quick/x86/target_x86.cc \
-  dex/quick/x86/utility_x86.cc \
   dex/dex_to_dex_compiler.cc \
   dex/bb_optimizations.cc \
   dex/compiler_ir.cc \
@@ -82,30 +58,13 @@ LIBART_COMPILER_SRC_FILES := \
   driver/compiler_options.cc \
   driver/dex_compilation_unit.cc \
   linker/relative_patcher.cc \
-  linker/arm/relative_patcher_arm_base.cc \
-  linker/arm/relative_patcher_thumb2.cc \
-  linker/arm64/relative_patcher_arm64.cc \
-  linker/x86/relative_patcher_x86_base.cc \
-  linker/x86/relative_patcher_x86.cc \
-  linker/x86_64/relative_patcher_x86_64.cc \
   jit/jit_compiler.cc \
-  jni/quick/arm/calling_convention_arm.cc \
-  jni/quick/arm64/calling_convention_arm64.cc \
-  jni/quick/mips/calling_convention_mips.cc \
-  jni/quick/mips64/calling_convention_mips64.cc \
-  jni/quick/x86/calling_convention_x86.cc \
-  jni/quick/x86_64/calling_convention_x86_64.cc \
   jni/quick/calling_convention.cc \
   jni/quick/jni_compiler.cc \
   optimizing/boolean_simplifier.cc \
   optimizing/builder.cc \
   optimizing/bounds_check_elimination.cc \
   optimizing/code_generator.cc \
-  optimizing/code_generator_arm.cc \
-  optimizing/code_generator_arm64.cc \
-  optimizing/code_generator_mips64.cc \
-  optimizing/code_generator_x86.cc \
-  optimizing/code_generator_x86_64.cc \
   optimizing/code_generator_utils.cc \
   optimizing/constant_folding.cc \
   optimizing/dead_code_elimination.cc \
@@ -115,10 +74,6 @@ LIBART_COMPILER_SRC_FILES := \
   optimizing/inliner.cc \
   optimizing/instruction_simplifier.cc \
   optimizing/intrinsics.cc \
-  optimizing/intrinsics_arm.cc \
-  optimizing/intrinsics_arm64.cc \
-  optimizing/intrinsics_x86.cc \
-  optimizing/intrinsics_x86_64.cc \
   optimizing/licm.cc \
   optimizing/locations.cc \
   optimizing/nodes.cc \
@@ -136,49 +91,130 @@ LIBART_COMPILER_SRC_FILES := \
   optimizing/stack_map_stream.cc \
   trampolines/trampoline_compiler.cc \
   utils/arena_bit_vector.cc \
+  utils/assembler.cc \
+  utils/swap_space.cc \
+  buffered_output_stream.cc \
+  compiler.cc \
+  elf_writer.cc \
+  elf_writer_debug.cc \
+  elf_writer_quick.cc \
+  file_output_stream.cc \
+  image_writer.cc \
+  oat_writer.cc \
+  output_stream.cc \
+  vector_output_stream.cc
+
+LIBART_COMPILER_SRC_FILES_arm := \
+  dex/quick/arm/assemble_arm.cc \
+  dex/quick/arm/call_arm.cc \
+  dex/quick/arm/fp_arm.cc \
+  dex/quick/arm/int_arm.cc \
+  dex/quick/arm/target_arm.cc \
+  dex/quick/arm/utility_arm.cc \
+  jni/quick/arm/calling_convention_arm.cc \
+  linker/arm/relative_patcher_arm_base.cc \
+  linker/arm/relative_patcher_thumb2.cc \
+  optimizing/code_generator_arm.cc \
+  optimizing/intrinsics_arm.cc \
   utils/arm/assembler_arm.cc \
   utils/arm/assembler_arm32.cc \
   utils/arm/assembler_thumb2.cc \
   utils/arm/managed_register_arm.cc \
+
+# TODO We should really separate out those files that are actually needed for both variants of an
+# architecture into its own category. Currently we just include all of the 32bit variant in the
+# 64bit variant. It also might be good to allow one to compile only the 64bit variant without the
+# 32bit one.
+LIBART_COMPILER_SRC_FILES_arm64 := \
+  $(LIBART_COMPILER_SRC_FILES_arm) \
+  dex/quick/arm64/assemble_arm64.cc \
+  dex/quick/arm64/call_arm64.cc \
+  dex/quick/arm64/fp_arm64.cc \
+  dex/quick/arm64/int_arm64.cc \
+  dex/quick/arm64/target_arm64.cc \
+  dex/quick/arm64/utility_arm64.cc \
+  jni/quick/arm64/calling_convention_arm64.cc \
+  linker/arm64/relative_patcher_arm64.cc \
+  optimizing/code_generator_arm64.cc \
+  optimizing/intrinsics_arm64.cc \
   utils/arm64/assembler_arm64.cc \
   utils/arm64/managed_register_arm64.cc \
-  utils/assembler.cc \
+
+LIBART_COMPILER_SRC_FILES_mips := \
+  dex/quick/mips/assemble_mips.cc \
+  dex/quick/mips/call_mips.cc \
+  dex/quick/mips/fp_mips.cc \
+  dex/quick/mips/int_mips.cc \
+  dex/quick/mips/target_mips.cc \
+  dex/quick/mips/utility_mips.cc \
+  jni/quick/mips/calling_convention_mips.cc \
   utils/mips/assembler_mips.cc \
   utils/mips/managed_register_mips.cc \
+
+LIBART_COMPILER_SRC_FILES_mips64 := \
+  $(LIBART_COMPILER_SRC_FILES_mips) \
+  jni/quick/mips64/calling_convention_mips64.cc \
+  optimizing/code_generator_mips64.cc \
   utils/mips64/assembler_mips64.cc \
   utils/mips64/managed_register_mips64.cc \
+
+
+LIBART_COMPILER_SRC_FILES_x86 := \
+  dex/quick/x86/assemble_x86.cc \
+  dex/quick/x86/call_x86.cc \
+  dex/quick/x86/fp_x86.cc \
+  dex/quick/x86/int_x86.cc \
+  dex/quick/x86/target_x86.cc \
+  dex/quick/x86/utility_x86.cc \
+  jni/quick/x86/calling_convention_x86.cc \
+  linker/x86/relative_patcher_x86.cc \
+  linker/x86/relative_patcher_x86_base.cc \
+  optimizing/code_generator_x86.cc \
+  optimizing/intrinsics_x86.cc \
   utils/x86/assembler_x86.cc \
   utils/x86/managed_register_x86.cc \
+
+LIBART_COMPILER_SRC_FILES_x86_64 := \
+  $(LIBART_COMPILER_SRC_FILES_x86) \
+  jni/quick/x86_64/calling_convention_x86_64.cc \
+  linker/x86_64/relative_patcher_x86_64.cc \
+  optimizing/intrinsics_x86_64.cc \
+  optimizing/code_generator_x86_64.cc \
   utils/x86_64/assembler_x86_64.cc \
   utils/x86_64/managed_register_x86_64.cc \
-  utils/swap_space.cc \
-  buffered_output_stream.cc \
-  compiler.cc \
-  elf_writer.cc \
-  elf_writer_debug.cc \
-  elf_writer_quick.cc \
-  file_output_stream.cc \
-  image_writer.cc \
-  oat_writer.cc \
-  output_stream.cc \
-  vector_output_stream.cc
+
 
 LIBART_COMPILER_CFLAGS :=
 
 LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
-  dex/quick/arm/arm_lir.h \
-  dex/quick/arm64/arm64_lir.h \
-  dex/quick/mips/mips_lir.h \
   dex/quick/resource_mask.h \
   dex/compiler_enums.h \
+  dex/dex_to_dex_compiler.h \
   dex/global_value_numbering.h \
   dex/pass_me.h \
   driver/compiler_driver.h \
   driver/compiler_options.h \
   image_writer.h \
-  optimizing/locations.h \
+  optimizing/locations.h
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm := \
+  dex/quick/arm/arm_lir.h \
   utils/arm/constants_arm.h
 
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
+  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm) \
+  dex/quick/arm64/arm64_lir.h
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
+  dex/quick/mips/mips_lir.h
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
+  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips)
+
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86 :=
+LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86_64 := \
+  $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86)
+
 # $(1): target or host
 # $(2): ndebug or debug
 # $(3): static or shared (empty means shared, applies only for host)
@@ -201,6 +237,9 @@ define build-libart-compiler
   include $(CLEAR_VARS)
   ifeq ($$(art_target_or_host),host)
     LOCAL_IS_HOST_MODULE := true
+    art_codegen_targets := $(ART_HOST_CODEGEN_ARCHS)
+  else
+    art_codegen_targets := $(ART_TARGET_CODEGEN_ARCHS)
   endif
   LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
   ifeq ($$(art_ndebug_or_debug),ndebug)
@@ -229,10 +268,14 @@ define build-libart-compiler
     LOCAL_MODULE_CLASS := SHARED_LIBRARIES
   endif
 
-  LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES)
+  # Sort removes duplicates.
+  LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES) \
+    $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_SRC_FILES_$$(arch))))
 
   GENERATED_SRC_DIR := $$(call local-generated-sources-dir)
-  ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,$$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES))
+  ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,\
+    $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES) \
+    $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_$$(arch)))))
   ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES))
 
 $$(ENUM_OPERATOR_OUT_GEN): art/tools/generate-operator-out.py
@@ -325,6 +368,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
   art_target_or_host :=
   art_ndebug_or_debug :=
   art_static_or_shared :=
+  art_codegen_targets :=
 endef
 
 # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index d215662645..dc2bc5c3f4 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -46,12 +46,12 @@ class CommonCompilerTest : public CommonRuntimeTest {
   // Create an OatMethod based on pointers (for unit tests).
   OatFile::OatMethod CreateOatMethod(const void* code);
 
-  void MakeExecutable(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   static void MakeExecutable(const void* code_start, size_t code_length);
 
   void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
  protected:
   virtual void SetUp();
@@ -76,17 +76,17 @@ class CommonCompilerTest : public CommonRuntimeTest {
   virtual void TearDown();
 
   void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void CompileMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                            const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
                             const char* method_name, const char* signature)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   void ReserveImageSpace();
 
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index d1acada6dd..74ef35e740 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -23,20 +23,12 @@ CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instr
                            const ArrayRef<const uint8_t>& quick_code, bool owns_code_array)
     : compiler_driver_(compiler_driver), instruction_set_(instruction_set),
       owns_code_array_(owns_code_array), quick_code_(nullptr) {
-  SetCode(&quick_code);
-}
-
-void CompiledCode::SetCode(const ArrayRef<const uint8_t>* quick_code) {
-  if (quick_code != nullptr) {
-    CHECK(!quick_code->empty());
-    if (owns_code_array_) {
-      // If we are supposed to own the code, don't deduplicate it.
-      CHECK(quick_code_ == nullptr);
-      quick_code_ = new SwapVector<uint8_t>(quick_code->begin(), quick_code->end(),
-                                            compiler_driver_->GetSwapSpaceAllocator());
-    } else {
-      quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
-    }
+  if (owns_code_array_) {
+    // If we are supposed to own the code, don't deduplicate it.
+    quick_code_ = new SwapVector<uint8_t>(quick_code.begin(), quick_code.end(),
+                                          compiler_driver_->GetSwapSpaceAllocator());
+  } else {
+    quick_code_ = compiler_driver_->DeduplicateCode(quick_code);
   }
 }
 
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 45a62bc6c7..a4d2387030 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -47,8 +47,6 @@ class CompiledCode {
     return quick_code_;
   }
 
-  void SetCode(const ArrayRef<const uint8_t>* quick_code);
-
   bool operator==(const CompiledCode& rhs) const;
 
   // To align an offset from a page-aligned value to make it suitable
diff --git a/compiler/compiler.h b/compiler/compiler.h
index e5d1aff08c..01ca46efd3 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -58,7 +58,7 @@ class Compiler {
                           const DexFile& dex_file) const = 0;
 
   virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+      SHARED_REQUIRES(Locks::mutator_lock_) = 0;
 
   uint64_t GetMaximumCompilationTimeBeforeWarning() const {
     return maximum_compilation_time_before_warning_;
@@ -89,7 +89,7 @@ class Compiler {
                                      const DexFile& dex_file);
 
  protected:
-  explicit Compiler(CompilerDriver* driver, uint64_t warning) :
+  Compiler(CompilerDriver* driver, uint64_t warning) :
       driver_(driver), maximum_compilation_time_before_warning_(warning) {
   }
 
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index d28df1dcce..5203355d06 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -129,7 +129,7 @@ struct OptionContent {
    * Union containing the option value of either type.
    */
   union OptionContainer {
-    explicit OptionContainer(const OptionContainer& c, OptionType t) {
+    OptionContainer(const OptionContainer& c, OptionType t) {
       if (t == kString) {
         DCHECK(c.s != nullptr);
         s = strndup(c.s, kOptionStringMaxLength);
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index bd590467e3..603130ab96 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -14,10 +14,13 @@
  * limitations under the License.
  */
 
+#include "dex_to_dex_compiler.h"
+
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/logging.h"
 #include "base/mutex.h"
+#include "compiled_method.h"
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
 #include "driver/compiler_driver.h"
@@ -34,6 +37,13 @@ const bool kEnableQuickening = true;
 
 // Control check-cast elision.
 const bool kEnableCheckCastEllision = true;
 
+struct QuickenedInfo {
+  QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+
+  uint32_t dex_pc;
+  uint16_t dex_member_index;
+};
+
 class DexCompiler {
  public:
   DexCompiler(art::CompilerDriver& compiler,
@@ -47,13 +57,17 @@ class DexCompiler {
 
   void Compile();
 
+  const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
+    return quickened_info_;
+  }
+
  private:
   const DexFile& GetDexFile() const {
     return *unit_.GetDexFile();
   }
 
   bool PerformOptimizations() const {
-    return dex_to_dex_compilation_level_ >= kOptimize;
+    return dex_to_dex_compilation_level_ >= DexToDexCompilationLevel::kOptimize;
   }
 
   // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
@@ -87,11 +101,16 @@ class DexCompiler {
   const DexCompilationUnit& unit_;
   const DexToDexCompilationLevel dex_to_dex_compilation_level_;
 
+  // Filled by the compiler when quickening, in order to encode that information
+  // in the .oat file. The runtime will use that information to get to the original
+  // opcodes.
+  std::vector<QuickenedInfo> quickened_info_;
+
   DISALLOW_COPY_AND_ASSIGN(DexCompiler);
 };
 
 void DexCompiler::Compile() {
-  DCHECK_GE(dex_to_dex_compilation_level_, kRequired);
+  DCHECK_GE(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kRequired);
   const DexFile::CodeItem* code_item = unit_.GetCodeItem();
   const uint16_t* insns = code_item->insns_;
   const uint32_t insns_size = code_item->insns_size_in_code_units_;
@@ -248,6 +267,7 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
     inst->SetOpcode(new_opcode);
     // Replace field index by field offset.
     inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+    quickened_info_.push_back(QuickenedInfo(dex_pc, field_idx));
   }
 }
 
@@ -287,24 +307,61 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
     } else {
       inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
     }
+    quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
   }
 }
 
-}  // namespace optimizer
-}  // namespace art
-
-extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
-                              uint32_t access_flags, art::InvokeType invoke_type,
-                              uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
-                              const art::DexFile& dex_file,
-                              art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
-  UNUSED(invoke_type);
-  if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
+CompiledMethod* ArtCompileDEX(
+    CompilerDriver* driver,
+    const DexFile::CodeItem* code_item,
+    uint32_t access_flags,
+    InvokeType invoke_type ATTRIBUTE_UNUSED,
+    uint16_t class_def_idx,
+    uint32_t method_idx,
+    jobject class_loader,
+    const DexFile& dex_file,
+    DexToDexCompilationLevel dex_to_dex_compilation_level) {
+  DCHECK(driver != nullptr);
+  if (dex_to_dex_compilation_level != DexToDexCompilationLevel::kDontDexToDexCompile) {
     art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
                                  dex_file, code_item, class_def_idx, method_idx, access_flags,
-                                 driver.GetVerifiedMethod(&dex_file, method_idx));
-    art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
+                                 driver->GetVerifiedMethod(&dex_file, method_idx));
+    art::optimizer::DexCompiler dex_compiler(*driver, unit, dex_to_dex_compilation_level);
     dex_compiler.Compile();
+    if (dex_compiler.GetQuickenedInfo().empty()) {
+      // No need to create a CompiledMethod if there are no quickened opcodes.
+      return nullptr;
+    }
+
+    // Create a `CompiledMethod`, with the quickened information in the vmap table.
+    Leb128EncodingVector builder;
+    for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) {
+      builder.PushBackUnsigned(info.dex_pc);
+      builder.PushBackUnsigned(info.dex_member_index);
+    }
+    InstructionSet instruction_set = driver->GetInstructionSet();
+    if (instruction_set == kThumb2) {
+      // Don't use the thumb2 instruction set to avoid the one off code delta.
+      instruction_set = kArm;
+    }
+    return CompiledMethod::SwapAllocCompiledMethod(
+        driver,
+        instruction_set,
+        ArrayRef<const uint8_t>(),                   // no code
+        0,
+        0,
+        0,
+        nullptr,                                     // src_mapping_table
+        ArrayRef<const uint8_t>(),                   // mapping_table
+        ArrayRef<const uint8_t>(builder.GetData()),  // vmap_table
+        ArrayRef<const uint8_t>(),                   // gc_map
+        ArrayRef<const uint8_t>(),                   // cfi data
+        ArrayRef<const LinkerPatch>());
   }
+  return nullptr;
 }
+
+}  // namespace optimizer
+
+}  // namespace art
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
new file mode 100644
index 0000000000..3fad6d4c95
--- /dev/null
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
+#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
+
+#include "jni.h"
+
+#include "dex_file.h"
+#include "invoke_type.h"
+
+namespace art {
+
+class CompiledMethod;
+class CompilerDriver;
+
+namespace optimizer {
+
+enum class DexToDexCompilationLevel {
+  kDontDexToDexCompile,  // Only meaning wrt image time interpretation.
+  kRequired,             // Dex-to-dex compilation required for correctness.
+  kOptimize              // Perform required transformation and peep-hole optimizations.
+};
+std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
+
+CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
+                              const DexFile::CodeItem* code_item,
+                              uint32_t access_flags,
+                              InvokeType invoke_type,
+                              uint16_t class_def_idx,
+                              uint32_t method_idx,
+                              jobject class_loader,
+                              const DexFile& dex_file,
+                              DexToDexCompilationLevel dex_to_dex_compilation_level);
+
+}  // namespace optimizer
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index b1f5d870d4..4de3410616 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -74,7 +74,7 @@ inline void GvnDeadCodeElimination::MIRData::RemovePrevChange(int v_reg, MIRData
 
 GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
     : num_vregs_(num_vregs),
       vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
-      vreg_high_words_(num_vregs, false, Allocator::GetNoopAllocator(),
+      vreg_high_words_(false, Allocator::GetNoopAllocator(),
                        BitVector::BitsToWords(num_vregs),
                        alloc->AllocArray<uint32_t>(BitVector::BitsToWords(num_vregs))),
       mir_data_(alloc->Adapter()) {
@@ -715,6 +715,7 @@ void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_
   // Try to find a MOVE to a vreg that wasn't changed since check_change.
   uint16_t value_name = data->wide_def ? lvn_->GetSregValueWide(dest_s_reg)
                                        : lvn_->GetSregValue(dest_s_reg);
+  uint32_t dest_v_reg = mir_graph_->SRegToVReg(dest_s_reg);
   for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
     MIRData* d = vreg_chains_.GetMIRData(c);
     if (d->is_move && d->wide_def == data->wide_def &&
@@ -731,8 +732,21 @@ void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_
       if (!vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg, mir_graph_) &&
           (!d->wide_def ||
            !vreg_chains_.IsVRegUsed(check_change + 1u, c, new_dest_v_reg + 1, mir_graph_))) {
-        RecordPassKillMoveByRenamingSrcDef(check_change, c);
-        return;
+        // If the move's destination vreg changed, check if the vreg we're trying
+        // to rename is unused after that change.
+        uint16_t dest_change = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg, c);
+        if (d->wide_def) {
+          uint16_t dest_change_high = vreg_chains_.FindFirstChangeAfter(new_dest_v_reg + 1, c);
+          if (dest_change_high != kNPos &&
+              (dest_change == kNPos || dest_change_high < dest_change)) {
+            dest_change = dest_change_high;
+          }
+        }
+        if (dest_change == kNPos ||
+            !vreg_chains_.IsVRegUsed(dest_change + 1u, size, dest_v_reg, mir_graph_)) {
+          RecordPassKillMoveByRenamingSrcDef(check_change, c);
+          return;
+        }
       }
     }
   }
@@ -1192,7 +1206,6 @@ bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
     case Instruction::CONST_WIDE_32:
     case Instruction::CONST_WIDE:
     case Instruction::CONST_WIDE_HIGH16:
-    case Instruction::ARRAY_LENGTH:
    case Instruction::CMPL_FLOAT:
    case Instruction::CMPG_FLOAT:
    case Instruction::CMPL_DOUBLE:
@@ -1316,6 +1329,13 @@ bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
      }
      break;
 
+    case Instruction::ARRAY_LENGTH:
+      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+        must_keep = true;
+        uses_all_vregs = true;
+      }
+      break;
+
    case Instruction::AGET_OBJECT:
    case Instruction::AGET:
    case Instruction::AGET_WIDE:
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
index 461c844a60..4df0a8b98d 100644
--- a/compiler/dex/gvn_dead_code_elimination_test.cc
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -1933,6 +1933,78 @@ TEST_F(GvnDeadCodeEliminationTestDiamond, LongOverlaps1) {
   }
 }
 
+TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps2) {
+  static const MIRDef mirs[] = {
+    DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+    DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+    DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
+  };
+
+  // The last insn should overlap the first and second.
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  static const int32_t wide_sregs[] = { 0, 2, 4 };
+  MarkAsWideSRegs(wide_sregs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+  EXPECT_EQ(value_names_[0], value_names_[2]);
+
+  static const bool eliminated[] = {
+    false, true, true,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the CONST_WIDE registers have been correctly renamed.
+  MIR* const_wide = &mirs_[0];
+  ASSERT_EQ(2u, const_wide->ssa_rep->num_defs);
+  EXPECT_EQ(4, const_wide->ssa_rep->defs[0]);
+  EXPECT_EQ(5, const_wide->ssa_rep->defs[1]);
+  EXPECT_EQ(1u, const_wide->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, LongOverlaps3) {
+  static const MIRDef mirs[] = {
+    DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+    DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+    DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 4u, 2u),
+  };
+
+  // The last insn should overlap the first and second.
+ static const int32_t sreg_to_vreg_map[] = { 2, 3, 0, 1, 1, 2 }; + PrepareSRegToVRegMap(sreg_to_vreg_map); + + PrepareMIRs(mirs); + static const int32_t wide_sregs[] = { 0, 2, 4 }; + MarkAsWideSRegs(wide_sregs); + PerformGVN_DCE(); + + ASSERT_EQ(arraysize(mirs), value_names_.size()); + EXPECT_EQ(value_names_[0], value_names_[1]); + EXPECT_EQ(value_names_[0], value_names_[2]); + + static const bool eliminated[] = { + false, true, true, + }; + static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch"); + for (size_t i = 0; i != arraysize(eliminated); ++i) { + bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop); + EXPECT_EQ(eliminated[i], actually_eliminated) << i; + } + // Check that the CONST_WIDE registers have been correctly renamed. + MIR* const_wide = &mirs_[0]; + ASSERT_EQ(2u, const_wide->ssa_rep->num_defs); + EXPECT_EQ(4, const_wide->ssa_rep->defs[0]); + EXPECT_EQ(5, const_wide->ssa_rep->defs[1]); + EXPECT_EQ(1u, const_wide->dalvikInsn.vA); +} + TEST_F(GvnDeadCodeEliminationTestSimple, MixedOverlaps1) { static const MIRDef mirs[] = { DEF_CONST(3, Instruction::CONST, 0u, 1000u), @@ -2066,4 +2138,64 @@ TEST_F(GvnDeadCodeEliminationTestSimple, UnusedRegs2) { } } +TEST_F(GvnDeadCodeEliminationTestSimple, ArrayLengthThrows) { + static const MIRDef mirs[] = { + DEF_CONST(3, Instruction::CONST, 0u, 0), // null + DEF_UNOP(3, Instruction::ARRAY_LENGTH, 1u, 0u), // null.length + DEF_CONST(3, Instruction::CONST, 2u, 1000u), // Overwrite the array-length dest. + }; + + static const int32_t sreg_to_vreg_map[] = { 0, 1, 1 }; + PrepareSRegToVRegMap(sreg_to_vreg_map); + + PrepareMIRs(mirs); + PerformGVN_DCE(); + + ASSERT_EQ(arraysize(mirs), value_names_.size()); + static const size_t diff_indexes[] = { 0, 1, 2 }; + ExpectValueNamesNE(diff_indexes); + + static const bool eliminated[] = { + false, false, false, + }; + static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch"); + for (size_t i = 0; i != arraysize(eliminated); ++i) { + bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop); + EXPECT_EQ(eliminated[i], actually_eliminated) << i; + } +} + +TEST_F(GvnDeadCodeEliminationTestSimple, Dependancy) { + static const MIRDef mirs[] = { + DEF_MOVE(3, Instruction::MOVE, 5u, 1u), // move v5,v1 + DEF_MOVE(3, Instruction::MOVE, 6u, 1u), // move v12,v1 + DEF_MOVE(3, Instruction::MOVE, 7u, 0u), // move v13,v0 + DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 8u, 2u), // move v0_1,v2_3 + DEF_MOVE(3, Instruction::MOVE, 10u, 6u), // move v3,v12 + DEF_MOVE(3, Instruction::MOVE, 11u, 4u), // move v2,v4 + DEF_MOVE(3, Instruction::MOVE, 12u, 7u), // move v4,v13 + DEF_MOVE(3, Instruction::MOVE, 13, 11u), // move v12,v2 + DEF_MOVE(3, Instruction::MOVE, 14u, 10u), // move v2,v3 + DEF_MOVE(3, Instruction::MOVE, 15u, 5u), // move v3,v5 + DEF_MOVE(3, Instruction::MOVE, 16u, 12u), // move v5,v4 + }; + + static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 12, 13, 0, 1, 3, 2, 4, 12, 2, 3, 5 }; + PrepareSRegToVRegMap(sreg_to_vreg_map); + + PrepareMIRs(mirs); + static const int32_t wide_sregs[] = { 2, 8 }; + MarkAsWideSRegs(wide_sregs); + PerformGVN_DCE(); + + static const bool eliminated[] = { + false, false, false, false, false, false, false, true, true, false, false, + }; + static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch"); + for (size_t i = 0; i != arraysize(eliminated); ++i) { + bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop); + 
EXPECT_EQ(eliminated[i], actually_eliminated) << i; + } +} + } // namespace art diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h index e4570fd8d3..053029d839 100644 --- a/compiler/dex/mir_field_info.h +++ b/compiler/dex/mir_field_info.h @@ -135,10 +135,10 @@ class MirIFieldLoweringInfo : public MirFieldInfo { // with IGET/IPUT. For fast path fields, retrieve the field offset. static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit, MirIFieldLoweringInfo* field_infos, size_t count) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Construct an unresolved instance field lowering info. - explicit MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened) + MirIFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type, bool is_quickened) : MirFieldInfo(field_idx, kFlagIsVolatile | (is_quickened ? kFlagIsQuickened : 0u), type), // Without kFlagIsStatic. @@ -192,10 +192,10 @@ class MirSFieldLoweringInfo : public MirFieldInfo { // and the type index of the declaring class in the compiled method's dex file. static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit, MirSFieldLoweringInfo* field_infos, size_t count) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Construct an unresolved static field lowering info. - explicit MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type) + MirSFieldLoweringInfo(uint16_t field_idx, DexMemAccessType type) : MirFieldInfo(field_idx, kFlagIsVolatile | kFlagIsStatic, type), field_offset_(0u), storage_index_(DexFile::kDexNoIndex) { diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index dbe906280f..8bf709ab86 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -173,7 +173,17 @@ enum OatMethodAttributes { typedef uint16_t BasicBlockId; static const BasicBlockId NullBasicBlockId = 0; -static constexpr bool kLeafOptimization = false; + +// Leaf optimization is basically the removal of suspend checks from leaf methods. +// This is incompatible with SuspendCheckElimination (SCE) which eliminates suspend +// checks from loops that call any non-intrinsic method, since a loop that calls +// only a leaf method would end up without any suspend checks at all. So turning +// this on automatically disables the SCE in MIRGraph::EliminateSuspendChecksGate(). +// +// Since the Optimizing compiler is actually applying the same optimization, Quick +// must not run SCE anyway, so we enable this optimization as a way to disable SCE +// while keeping a consistent behavior across the backends, b/22657404. +static constexpr bool kLeafOptimization = true; /* * In general, vreg/sreg describe Dalvik registers that originated with dx. 
However, @@ -251,7 +261,7 @@ class MIR : public ArenaObject<kArenaAllocMIR> { uint32_t arg[5]; /* vC/D/E/F/G in invoke or filled-new-array */ Instruction::Code opcode; - explicit DecodedInstruction():vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) { + DecodedInstruction() : vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) { } /* @@ -343,7 +353,7 @@ class MIR : public ArenaObject<kArenaAllocMIR> { uint32_t method_lowering_info; } meta; - explicit MIR() : offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId), + MIR() : offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId), next(nullptr), ssa_rep(nullptr) { memset(&meta, 0, sizeof(meta)); } diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h index 946c74becf..4512f35a99 100644 --- a/compiler/dex/mir_method_info.h +++ b/compiler/dex/mir_method_info.h @@ -99,7 +99,7 @@ class MirMethodLoweringInfo : public MirMethodInfo { // path methods, retrieve the method's vtable index and direct code and method when applicable. static void Resolve(CompilerDriver* compiler_driver, const DexCompilationUnit* mUnit, MirMethodLoweringInfo* method_infos, size_t count) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); MirMethodLoweringInfo(uint16_t method_idx, InvokeType type, bool is_quickened) : MirMethodInfo(method_idx, diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 5bb0ce3ba5..80b7ac1e5b 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -1724,7 +1724,8 @@ void MIRGraph::StringChange() { bool MIRGraph::EliminateSuspendChecksGate() { - if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled. + if (kLeafOptimization || // Incompatible (could create loops without suspend checks). + (cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled. GetMaxNestedLoops() == 0u || // Nothing to do. GetMaxNestedLoops() >= 32u || // Only 32 bits in suspend_checks_in_loops_[.]. // Exclude 32 as well to keep bit shifts well-defined. diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc index 10a4337cf5..47123ba28c 100644 --- a/compiler/dex/mir_optimization_test.cc +++ b/compiler/dex/mir_optimization_test.cc @@ -467,8 +467,17 @@ class SuspendCheckEliminationTest : public MirOptimizationTest { cu_.mir_graph->ComputeDominators(); cu_.mir_graph->ComputeTopologicalSortOrder(); cu_.mir_graph->SSATransformationEnd(); + bool gate_result = cu_.mir_graph->EliminateSuspendChecksGate(); - ASSERT_TRUE(gate_result); + ASSERT_NE(gate_result, kLeafOptimization); + if (kLeafOptimization) { + // Even with kLeafOptimization on and Gate() refusing to allow SCE, we want + // to run the SCE test to avoid bitrot, so we need to initialize explicitly. 
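+ // (suspend_checks_in_loops_ holds one 32-bit word per basic block; each bit stands
+ // for one enclosing loop, which is why the gate above rejects 32 or more nested loops.)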
+ cu_.mir_graph->suspend_checks_in_loops_ = + cu_.mir_graph->arena_->AllocArray<uint32_t>(cu_.mir_graph->GetNumBlocks(), + kArenaAllocMisc); + } + TopologicalSortIterator iterator(cu_.mir_graph.get()); bool change = false; for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) { diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h index cbe4a02edb..d0af71c061 100644 --- a/compiler/dex/pass_driver_me.h +++ b/compiler/dex/pass_driver_me.h @@ -36,7 +36,7 @@ class PassManagerOptions; class PassDriverME: public PassDriver { public: - explicit PassDriverME(const PassManager* const pass_manager, CompilationUnit* cu) + PassDriverME(const PassManager* const pass_manager, CompilationUnit* cu) : PassDriver(pass_manager), pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") { pass_me_data_holder_.bb = nullptr; pass_me_data_holder_.c_unit = cu; @@ -314,4 +314,3 @@ class PassDriverME: public PassDriver { }; } // namespace art #endif // ART_COMPILER_DEX_PASS_DRIVER_ME_H_ - diff --git a/compiler/dex/pass_driver_me_opts.h b/compiler/dex/pass_driver_me_opts.h index e94c1894c9..c8093d0a02 100644 --- a/compiler/dex/pass_driver_me_opts.h +++ b/compiler/dex/pass_driver_me_opts.h @@ -29,9 +29,9 @@ class PassManager; class PassDriverMEOpts : public PassDriverME { public: - explicit PassDriverMEOpts(const PassManager* const manager, - const PassManager* const post_opt_pass_manager, - CompilationUnit* cu) + PassDriverMEOpts(const PassManager* const manager, + const PassManager* const post_opt_pass_manager, + CompilationUnit* cu) : PassDriverME(manager, cu), post_opt_pass_manager_(post_opt_pass_manager) { } diff --git a/compiler/dex/pass_driver_me_post_opt.h b/compiler/dex/pass_driver_me_post_opt.h index 9e03c4e73e..94176dbf0f 100644 --- a/compiler/dex/pass_driver_me_post_opt.h +++ b/compiler/dex/pass_driver_me_post_opt.h @@ -28,7 +28,7 @@ class PassDataHolder; class PassDriverMEPostOpt : public PassDriverME { public: - explicit PassDriverMEPostOpt(const PassManager* const manager, CompilationUnit* cu) + PassDriverMEPostOpt(const PassManager* const manager, CompilationUnit* cu) : PassDriverME(manager, cu) { } diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc index df4a9f2048..5f911db382 100644 --- a/compiler/dex/quick/arm/assemble_arm.cc +++ b/compiler/dex/quick/arm/assemble_arm.cc @@ -1298,7 +1298,7 @@ void ArmMir2Lir::AssembleLIR() { */ delta &= ~0x3; } - DCHECK_EQ((delta & 0x3), 0); + DCHECK_ALIGNED(delta, 4); // First, a sanity check for cases we shouldn't see now if (kIsDebugBuild && (((lir->opcode == kThumbAddPcRel) && (delta > 1020)) || ((lir->opcode == kThumbLdrPcRel) && (delta > 1020)))) { diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index cf0188456d..db76cc6f53 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -593,13 +593,20 @@ bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) { return true; } + // At this point lit != 1 (which is a power of two). + DCHECK_NE(lit, 1); if (IsPowerOfTwo(lit - 1)) { op->op = kOpAdd; op->shift = CTZ(lit - 1); return true; } - if (IsPowerOfTwo(lit + 1)) { + if (lit == -1) { + // Can be created as neg. 
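+ // (Multiplying by -1 needs no shift-add sequence: x * -1 == -x.)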
+ op->op = kOpNeg;
+ op->shift = 0;
+ return true;
+ } else if (IsPowerOfTwo(lit + 1)) {
op->op = kOpRsub;
op->shift = CTZ(lit + 1);
return true;
@@ -612,21 +619,26 @@ bool ArmMir2Lir::GetEasyMultiplyOp(int lit, ArmMir2Lir::EasyMultiplyOp* op) { // Try to convert *lit to 1~2 RegRegRegShift/RegRegShift forms. bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
+ DCHECK_NE(lit, 1); // A case of "1" should have been folded.
+ DCHECK_NE(lit, -1); // A case of "-1" should have been folded.
if (GetEasyMultiplyOp(lit, &ops[0])) {
ops[1].op = kOpInvalid;
ops[1].shift = 0;
return true;
}
- int lit1 = lit;
- uint32_t shift = CTZ(lit1);
+ DCHECK_NE(lit, 0); // Should be handled above.
+ DCHECK(!IsPowerOfTwo(lit)); // Same.
+
+ int lit1 = lit; // With the DCHECKs, it's clear we don't get "0", "1" or "-1" for
+ uint32_t shift = CTZ(lit1); // lit1.
if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
ops[1].op = kOpLsl;
ops[1].shift = shift;
return true;
}
- lit1 = lit - 1;
+ lit1 = lit - 1; // With the DCHECKs, it's clear we don't get "0" or "1" for lit1.
shift = CTZ(lit1);
if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
ops[1].op = kOpAdd;
@@ -634,7 +646,7 @@ bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) { return true; }
- lit1 = lit + 1;
+ lit1 = lit + 1; // With the DCHECKs, it's clear we don't get "0" here.
shift = CTZ(lit1);
if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
ops[1].op = kOpRsub;
@@ -652,7 +664,7 @@ bool ArmMir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) { // Additional temporary register is required, // if it needs to generate 2 instructions and src/dest overlap. void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
- // tmp1 = ( src << shift1) + [ src | -src | 0 ]
+ // tmp1 = (( src << shift1) + [ src | -src | 0 ] ) | -src
// dest = (tmp1 << shift2) + [ src | -src | 0 ]
RegStorage r_tmp1;
@@ -674,6 +686,9 @@ void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, Easy case kOpRsub: OpRegRegRegShift(kOpRsub, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift)); break;
+ case kOpNeg:
+ OpRegReg(kOpNeg, r_tmp1, r_src);
+ break;
default:
DCHECK_EQ(ops[0].op, kOpInvalid);
break;
@@ -691,6 +706,7 @@ void ArmMir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, Easy case kOpRsub: OpRegRegRegShift(kOpRsub, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift)); break;
+ // No negation allowed in second op.
default:
LOG(FATAL) << "Unexpected opcode passed to GenEasyMultiplyTwoOps";
break;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index 2ef92f851b..062f7aff66 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -880,7 +880,7 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor LIR* ArmMir2Lir::LoadStoreUsingInsnWithOffsetImm8Shl2(ArmOpcode opcode, RegStorage r_base, int displacement, RegStorage r_src_dest, RegStorage r_work) {
- DCHECK_EQ(displacement & 3, 0);
+ DCHECK_ALIGNED(displacement, 4);
constexpr int kOffsetMask = 0xff << 2;
int encoded_disp = (displacement & kOffsetMask) >> 2; // Within range of the instruction.
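// Illustration with hypothetical values (not part of the patch): displacement 0x3F4 is
// 4-byte aligned, and (0x3F4 & kOffsetMask) >> 2 == 0xFD fits the 8-bit immediate field.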
RegStorage r_ptr = r_base; @@ -942,7 +942,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag already_generated = true; break; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); scale = 2; if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) && (displacement >= 0)) { @@ -959,14 +959,14 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag } break; case kUnsignedHalf: - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); scale = 1; short_form = all_low && (displacement >> (5 + scale)) == 0; opcode16 = kThumbLdrhRRI5; opcode32 = kThumb2LdrhRRI12; break; case kSignedHalf: - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); scale = 1; DCHECK_EQ(opcode16, kThumbBkpt); // Not available. opcode32 = kThumb2LdrshRRI12; @@ -1096,7 +1096,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora already_generated = true; break; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); scale = 2; if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) { short_form = true; @@ -1109,7 +1109,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora break; case kUnsignedHalf: case kSignedHalf: - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); scale = 1; short_form = all_low && (displacement >> (5 + scale)) == 0; opcode16 = kThumbStrhRRI5; diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc index b78fb80aa0..25c69d19e5 100644 --- a/compiler/dex/quick/arm64/assemble_arm64.cc +++ b/compiler/dex/quick/arm64/assemble_arm64.cc @@ -909,7 +909,7 @@ void Arm64Mir2Lir::AssembleLIR() { CodeOffset target = target_lir->offset + ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment); int32_t delta = target - pc; - DCHECK_EQ(delta & 0x3, 0); + DCHECK_ALIGNED(delta, 4); if (!IS_SIGNED_IMM26(delta >> 2)) { LOG(FATAL) << "Invalid jump range in kFixupT1Branch"; } @@ -933,7 +933,7 @@ void Arm64Mir2Lir::AssembleLIR() { CodeOffset target = target_lir->offset + ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment); int32_t delta = target - pc; - DCHECK_EQ(delta & 0x3, 0); + DCHECK_ALIGNED(delta, 4); if (!IS_SIGNED_IMM19(delta >> 2)) { LOG(FATAL) << "Invalid jump range in kFixupLoad"; } @@ -965,7 +965,7 @@ void Arm64Mir2Lir::AssembleLIR() { CodeOffset target = target_lir->offset + ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment); int32_t delta = target - pc; - DCHECK_EQ(delta & 0x3, 0); + DCHECK_ALIGNED(delta, 4); // Check if branch offset can be encoded in tbz/tbnz. 
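// (tbz/tbnz encode a signed 14-bit word offset, giving roughly a +/-32 KiB branch range.)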
if (!IS_SIGNED_IMM14(delta >> 2)) { DexOffset dalvik_offset = lir->dalvik_offset; diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc index 2568ee3064..42b792ca1a 100644 --- a/compiler/dex/quick/dex_file_method_inliner.cc +++ b/compiler/dex/quick/dex_file_method_inliner.cc @@ -38,6 +38,7 @@ static constexpr bool kIntrinsicIsStatic[] = { true, // kIntrinsicFloatCvt true, // kIntrinsicReverseBits true, // kIntrinsicReverseBytes + true, // kIntrinsicNumberOfLeadingZeros true, // kIntrinsicAbsInt true, // kIntrinsicAbsLong true, // kIntrinsicAbsFloat @@ -55,6 +56,7 @@ static constexpr bool kIntrinsicIsStatic[] = { false, // kIntrinsicReferenceGetReferent false, // kIntrinsicCharAt false, // kIntrinsicCompareTo + false, // kIntrinsicEquals false, // kIntrinsicGetCharsNoCheck false, // kIntrinsicIsEmptyOrLength false, // kIntrinsicIndexOf @@ -75,6 +77,8 @@ static_assert(kIntrinsicIsStatic[kIntrinsicDoubleCvt], "DoubleCvt must be static static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static"); static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be static"); static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfLeadingZeros], + "NumberOfLeadingZeros must be static"); static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static"); static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static"); static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static"); @@ -92,6 +96,7 @@ static_assert(kIntrinsicIsStatic[kIntrinsicRoundDouble], "RoundDouble must be st static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicEquals], "String equals must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicGetCharsNoCheck], "GetCharsNoCheck must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static"); @@ -189,6 +194,7 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = { "getReferent", // kNameCacheReferenceGet "charAt", // kNameCacheCharAt "compareTo", // kNameCacheCompareTo + "equals", // kNameCacheEquals "getCharsNoCheck", // kNameCacheGetCharsNoCheck "isEmpty", // kNameCacheIsEmpty "indexOf", // kNameCacheIndexOf @@ -225,6 +231,7 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = { "putObjectVolatile", // kNameCachePutObjectVolatile "putOrderedObject", // kNameCachePutOrderedObject "arraycopy", // kNameCacheArrayCopy + "numberOfLeadingZeros", // kNameCacheNumberOfLeadingZeros }; const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = { @@ -280,6 +287,8 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = { { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheLong } }, // kProtoCacheJS_V { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheShort } }, + // kProtoCacheObject_Z + { kClassCacheBoolean, 1, { kClassCacheJavaLangObject } }, // kProtoCacheObjectJII_Z { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheInt, kClassCacheInt } }, @@ -368,6 +377,9 @@ const 
DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods INTRINSIC(JavaLangInteger, Reverse, I_I, kIntrinsicReverseBits, k32), INTRINSIC(JavaLangLong, Reverse, J_J, kIntrinsicReverseBits, k64), + INTRINSIC(JavaLangInteger, NumberOfLeadingZeros, I_I, kIntrinsicNumberOfLeadingZeros, k32), + INTRINSIC(JavaLangLong, NumberOfLeadingZeros, J_I, kIntrinsicNumberOfLeadingZeros, k64), + INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0), INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0), INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0), @@ -411,6 +423,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0), INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0), + INTRINSIC(JavaLangString, Equals, Object_Z, kIntrinsicEquals, 0), INTRINSIC(JavaLangString, GetCharsNoCheck, IICharArrayI_V, kIntrinsicGetCharsNoCheck, 0), INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty), INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone), @@ -581,6 +594,9 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) { return backend->GenInlinedCharAt(info); case kIntrinsicCompareTo: return backend->GenInlinedStringCompareTo(info); + case kIntrinsicEquals: + // Quick does not implement this intrinsic. + return false; case kIntrinsicGetCharsNoCheck: return backend->GenInlinedStringGetCharsNoCheck(info); case kIntrinsicIsEmptyOrLength: @@ -614,6 +630,8 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) { intrinsic.d.data & kIntrinsicFlagIsOrdered); case kIntrinsicSystemArrayCopyCharArray: return backend->GenInlinedArrayCopyCharArray(info); + case kIntrinsicNumberOfLeadingZeros: + return false; // not implemented in quick default: LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode; return false; // avoid warning "control reaches end of non-void function" diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h index 26b41bf54d..d6c8bfbdb6 100644 --- a/compiler/dex/quick/dex_file_method_inliner.h +++ b/compiler/dex/quick/dex_file_method_inliner.h @@ -62,49 +62,49 @@ class DexFileMethodInliner { * @return true if the method is a candidate for inlining, false otherwise. */ bool AnalyseMethodCode(verifier::MethodVerifier* verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); /** * Check whether a particular method index corresponds to an intrinsic or special function. */ - InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_); + InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) REQUIRES(!lock_); /** * Check whether a particular method index corresponds to an intrinsic function. */ - bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) LOCKS_EXCLUDED(lock_); + bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_); /** * Generate code for an intrinsic function invocation. */ - bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) LOCKS_EXCLUDED(lock_); + bool GenIntrinsic(Mir2Lir* backend, CallInfo* info) REQUIRES(!lock_); /** * Check whether a particular method index corresponds to a special function. 
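* In this inliner, a special function is a trivially reducible pattern, e.g. an empty
* body, a return of an argument, a constant return, or a simple getter/setter.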
*/ - bool IsSpecial(uint32_t method_index) LOCKS_EXCLUDED(lock_); + bool IsSpecial(uint32_t method_index) REQUIRES(!lock_); /** * Generate code for a special function. */ - bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) LOCKS_EXCLUDED(lock_); + bool GenSpecial(Mir2Lir* backend, uint32_t method_idx) REQUIRES(!lock_); /** * Try to inline an invoke. */ bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); /** * Gets the thread pointer entrypoint offset for a string init method index and pointer size. */ uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); /** * Check whether a particular method index is a string init. */ - bool IsStringInitMethodIndex(uint32_t method_index) LOCKS_EXCLUDED(lock_); + bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_); /** * To avoid multiple lookups of a class by its descriptor, we cache its @@ -170,6 +170,7 @@ class DexFileMethodInliner { kNameCacheReferenceGetReferent, kNameCacheCharAt, kNameCacheCompareTo, + kNameCacheEquals, kNameCacheGetCharsNoCheck, kNameCacheIsEmpty, kNameCacheIndexOf, @@ -206,6 +207,7 @@ class DexFileMethodInliner { kNameCachePutObjectVolatile, kNameCachePutOrderedObject, kNameCacheArrayCopy, + kNameCacheNumberOfLeadingZeros, kNameCacheLast }; @@ -242,6 +244,7 @@ class DexFileMethodInliner { kProtoCacheJJ_J, kProtoCacheJJ_V, kProtoCacheJS_V, + kProtoCacheObject_Z, kProtoCacheObjectJII_Z, kProtoCacheObjectJJJ_Z, kProtoCacheObjectJObjectObject_Z, @@ -351,11 +354,11 @@ class DexFileMethodInliner { * * Only DexFileToMethodInlinerMap may call this function to initialize the inliner. */ - void FindIntrinsics(const DexFile* dex_file) EXCLUSIVE_LOCKS_REQUIRED(lock_); + void FindIntrinsics(const DexFile* dex_file) REQUIRES(lock_); friend class DexFileToMethodInlinerMap; - bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) LOCKS_EXCLUDED(lock_); + bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_); static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, MIR* move_result, const InlineMethod& method); diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h index 94ffd7f957..3e9fb96bfa 100644 --- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h +++ b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h @@ -40,12 +40,11 @@ class LazyDebugFrameOpCodeWriter FINAL const ArenaVector<uint8_t>* Patch(size_t code_size); - explicit LazyDebugFrameOpCodeWriter(LIR** last_lir_insn, bool enable_writes, - ArenaAllocator* allocator) - : Base(enable_writes, allocator->Adapter()), - last_lir_insn_(last_lir_insn), - advances_(allocator->Adapter()), - patched_(false) { + LazyDebugFrameOpCodeWriter(LIR** last_lir_insn, bool enable_writes, ArenaAllocator* allocator) + : Base(enable_writes, allocator->Adapter()), + last_lir_insn_(last_lir_insn), + advances_(allocator->Adapter()), + patched_(false) { } private: diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index da12d8e3bf..853980d10a 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -24,6 +24,7 @@ #include "dex/quick/dex_file_to_method_inliner_map.h" #include "dex/quick/mir_to_lir-inl.h" #include "driver/compiler_driver.h" +#include "driver/compiler_options.h" #include "entrypoints/quick/quick_entrypoints.h" #include 
"gc/accounting/card_table.h" #include "mips_lir.h" @@ -285,12 +286,25 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) RegStorage check_reg = AllocPtrSizeTemp(); RegStorage new_sp = AllocPtrSizeTemp(); const RegStorage rs_sp = TargetPtrReg(kSp); + const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(target); + const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes; + bool generate_explicit_stack_overflow_check = large_frame || + !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks(); + if (!skip_overflow_check) { - // Load stack limit. - if (cu_->target64) { - LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg); + if (generate_explicit_stack_overflow_check) { + // Load stack limit. + if (cu_->target64) { + LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg); + } else { + Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg); + } } else { - Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg); + // Implicit stack overflow check. + // Generate a load from [sp, #-overflowsize]. If this is in the stack + // redzone we will get a segmentation fault. + Load32Disp(rs_sp, -kStackOverflowReservedUsableBytes, rs_rZERO); + MarkPossibleStackOverflowException(); } } // Spill core callee saves. @@ -298,7 +312,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) // NOTE: promotion of FP regs currently unsupported, thus no FP spill. DCHECK_EQ(num_fp_spills_, 0); const int frame_sub = frame_size_ - spill_count * ptr_size; - if (!skip_overflow_check) { + if (!skip_overflow_check && generate_explicit_stack_overflow_check) { class StackOverflowSlowPath : public LIRSlowPath { public: StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) @@ -329,6 +343,8 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OpRegCopy(rs_sp, new_sp); // Establish stack. cfi_.AdjustCFAOffset(frame_sub); } else { + // Here if skip_overflow_check or doing implicit stack overflow check. + // Just make room on the stack for the frame now. OpRegImm(kOpSub, rs_sp, frame_sub); cfi_.AdjustCFAOffset(frame_sub); } diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index 713264e0d9..43fbcbdd2b 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -79,6 +79,7 @@ class MipsMir2Lir FINAL : public Mir2Lir { OVERRIDE; LIR* CheckSuspendUsingLoad() OVERRIDE; RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE; + void ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide); LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, VolatileKind is_volatile) OVERRIDE; LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index f5ad7c7c33..1099303f7d 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -691,6 +691,9 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, reg_len = AllocTemp(); // Get len. Load32Disp(rl_array.reg, len_offset, reg_len); + MarkPossibleNullPointerException(opt_flags); + } else { + ForceImplicitNullCheck(rl_array.reg, opt_flags, false); } // reg_ptr -> array data. 
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset); @@ -781,6 +784,9 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, // NOTE: max live temps(4) here. // Get len. Load32Disp(rl_array.reg, len_offset, reg_len); + MarkPossibleNullPointerException(opt_flags); + } else { + ForceImplicitNullCheck(rl_array.reg, opt_flags, false); } // reg_ptr -> array data. OpRegImm(kOpAdd, reg_ptr, data_offset); diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc index 4c0bd8378b..ec4bad778c 100644 --- a/compiler/dex/quick/mips/target_mips.cc +++ b/compiler/dex/quick/mips/target_mips.cc @@ -49,9 +49,11 @@ static constexpr RegStorage reserved_regs_arr_32[] = static constexpr RegStorage core_temps_arr_32[] = {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32, rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8}; -static constexpr RegStorage sp_temps_arr_32[] = +static constexpr RegStorage sp_fr0_temps_arr_32[] = {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10, rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15}; +static constexpr RegStorage sp_fr1_temps_arr_32[] = + {rs_rF0, rs_rF2, rs_rF4, rs_rF6, rs_rF8, rs_rF10, rs_rF12, rs_rF14}; static constexpr RegStorage dp_fr0_temps_arr_32[] = {rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0, rs_rD7_fr0}; @@ -130,7 +132,8 @@ static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32); static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32); static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32); static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32); -static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32); +static constexpr ArrayRef<const RegStorage> sp_fr0_temps_32(sp_fr0_temps_arr_32); +static constexpr ArrayRef<const RegStorage> sp_fr1_temps_32(sp_fr1_temps_arr_32); static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32); static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32); @@ -591,22 +594,22 @@ void MipsMir2Lir::ClobberCallerSave() { Clobber(rs_rFP); Clobber(rs_rRA); Clobber(rs_rF0); - Clobber(rs_rF1); Clobber(rs_rF2); - Clobber(rs_rF3); Clobber(rs_rF4); - Clobber(rs_rF5); Clobber(rs_rF6); - Clobber(rs_rF7); Clobber(rs_rF8); - Clobber(rs_rF9); Clobber(rs_rF10); - Clobber(rs_rF11); Clobber(rs_rF12); - Clobber(rs_rF13); Clobber(rs_rF14); - Clobber(rs_rF15); if (fpuIs32Bit_) { + Clobber(rs_rF1); + Clobber(rs_rF3); + Clobber(rs_rF5); + Clobber(rs_rF7); + Clobber(rs_rF9); + Clobber(rs_rF11); + Clobber(rs_rF13); + Clobber(rs_rF15); Clobber(rs_rD0_fr0); Clobber(rs_rD1_fr0); Clobber(rs_rD2_fr0); @@ -717,24 +720,26 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() { fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32, reserved_regs_32, empty_pool, // reserved64 core_temps_32, empty_pool, // core64_temps - sp_temps_32, + fpuIs32Bit_ ? sp_fr0_temps_32 : sp_fr1_temps_32, fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32)); // Alias single precision floats to appropriate half of overlapping double. for (RegisterInfo* info : reg_pool_->sp_regs_) { int sp_reg_num = info->GetReg().GetRegNum(); int dp_reg_num = sp_reg_num & ~1; - RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num); - RegisterInfo* dp_reg_info = GetRegInfo(dp_reg); - // Double precision register's master storage should refer to itself. 
- DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
- // Redirect single precision's master storage to master.
- info->SetMaster(dp_reg_info);
- // Singles should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- if (sp_reg_num & 1) {
- // For odd singles, change to user the high word of the backing double.
- info->SetStorageMask(0x2);
+ if (fpuIs32Bit_ || (sp_reg_num == dp_reg_num)) {
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ if (sp_reg_num & 1) {
+ // For odd singles, change to use the high word of the backing double.
+ info->SetStorageMask(0x2);
+ }
}
}
}
@@ -791,6 +796,7 @@ LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorag RegStorage reg_ptr = TargetReg(kArg0); OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement); RegStorage r_tgt = LoadHelper(kQuickA64Load);
+ ForceImplicitNullCheck(reg_ptr, 0, true); // is_wide = true
LIR *ret = OpReg(kOpBlx, r_tgt);
RegStorage reg_ret;
if (cu_->target64) {
@@ -813,6 +819,7 @@ LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStora LockCallTemps(); // Using fixed registers. RegStorage temp_ptr = AllocTemp(); OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ ForceImplicitNullCheck(temp_ptr, 0, true); // is_wide = true
RegStorage temp_value = AllocTempWide();
OpRegCopyWide(temp_value, r_src);
if (cu_->target64) {
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 95c61cd4ed..ec2475a7f7 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -21,7 +21,9 @@ #include "base/logging.h" #include "dex/quick/mir_to_lir-inl.h" #include "dex/reg_storage_eq.h"
+#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "mips_lir.h"
namespace art {
@@ -712,7 +714,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora } else { opcode = kMipsFldc1; }
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
}
is64bit = true;
@@ -734,15 +736,15 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora DCHECK(r_dest.IsDouble()); } }
- DCHECK_EQ((displacement & 0x3), 0);
+ DCHECK_ALIGNED(displacement, 4);
break;
case kUnsignedHalf:
opcode = kMipsLhu;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kSignedHalf:
opcode = kMipsLh;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kUnsignedByte:
opcode = kMipsLbu;
@@ -830,6 +832,22 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora return res; }
+void MipsMir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags, bool is_wide) {
+ if (cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
+ if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
+ return;
+ }
+ // Force an implicit null check by performing a memory operation (load) from the given
+ // register with offset 0.
This will cause a signal if the register contains 0 (null). + LIR* load = Load32Disp(reg, LOWORD_OFFSET, rs_rZERO); + MarkSafepointPC(load); + if (is_wide) { + load = Load32Disp(reg, HIWORD_OFFSET, rs_rZERO); + MarkSafepointPC(load); + } + } +} + LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, VolatileKind is_volatile) { if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble)) @@ -873,7 +891,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStor } else { opcode = kMipsFsdc1; } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; } is64bit = true; @@ -895,12 +913,12 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStor DCHECK(r_src.IsDouble()); } } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kUnsignedHalf: case kSignedHalf: opcode = kMipsSh; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kUnsignedByte: case kSignedByte: diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 7ca03cf0ee..c50246d182 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -193,7 +193,8 @@ void Mir2Lir::LoadArgDirect(size_t in_position, RegLocation rl_dest) { } if (!reg_arg.Valid()) { - LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, rl_dest.wide ? k64 : k32, kNotVolatile); + OpSize op_size = rl_dest.wide ? k64 : (rl_dest.ref ? kReference : k32); + LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, op_size, kNotVolatile); } else { if (rl_dest.wide) { OpRegCopyWide(rl_dest.reg, reg_arg); diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc index dd68dd40c6..16c161e320 100644 --- a/compiler/dex/quick/quick_cfi_test.cc +++ b/compiler/dex/quick/quick_cfi_test.cc @@ -36,7 +36,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class QuickCFITest : public CFITest { public: @@ -56,6 +56,8 @@ class QuickCFITest : public CFITest { CompilerOptions::kDefaultSmallMethodThreshold, CompilerOptions::kDefaultTinyMethodThreshold, CompilerOptions::kDefaultNumDexMethodsThreshold, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, false, CompilerOptions::kDefaultTopKProfileThreshold, false, @@ -134,6 +136,6 @@ TEST_ISA(kX86_64) TEST_ISA(kMips) TEST_ISA(kMips64) -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc index 39496a4b30..6e73ae7be2 100644 --- a/compiler/dex/quick/quick_compiler.cc +++ b/compiler/dex/quick/quick_compiler.cc @@ -43,10 +43,21 @@ #include "runtime.h" // Specific compiler backends. 
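// Each backend header is pulled in only when its ART_ENABLE_CODEGEN_<isa> macro is
// defined, matching the ifdef'd cases in the GetCodeGenerator() switch below.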
+#ifdef ART_ENABLE_CODEGEN_arm #include "dex/quick/arm/backend_arm.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 #include "dex/quick/arm64/backend_arm64.h" +#endif + +#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64) #include "dex/quick/mips/backend_mips.h" +#endif + +#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64) #include "dex/quick/x86/backend_x86.h" +#endif namespace art { @@ -844,22 +855,42 @@ Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_ UNUSED(compilation_unit); Mir2Lir* mir_to_lir = nullptr; switch (cu->instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm case kThumb2: mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena); break; +#endif // ART_ENABLE_CODEGEN_arm +#ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena); break; +#endif // ART_ENABLE_CODEGEN_arm64 +#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64) + // Intentional 2 level ifdef. Want to fail on mips64 if it is not enabled, even if mips is + // and vice versa. +#ifdef ART_ENABLE_CODEGEN_mips case kMips: // Fall-through. +#endif // ART_ENABLE_CODEGEN_mips +#ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: +#endif // ART_ENABLE_CODEGEN_mips64 mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena); break; +#endif // ART_ENABLE_CODEGEN_mips || ART_ENABLE_CODEGEN_mips64 +#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64) + // Intentional 2 level ifdef. Want to fail on x86_64 if it is not enabled, even if x86 is + // and vice versa. +#ifdef ART_ENABLE_CODEGEN_x86 case kX86: // Fall-through. +#endif // ART_ENABLE_CODEGEN_x86 +#ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: +#endif // ART_ENABLE_CODEGEN_x86_64 mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena); break; +#endif // ART_ENABLE_CODEGEN_x86 || ART_ENABLE_CODEGEN_x86_64 default: LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set; } diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h index 43dd5786af..4a39ab3565 100644 --- a/compiler/dex/quick/quick_compiler.h +++ b/compiler/dex/quick/quick_compiler.h @@ -50,7 +50,7 @@ class QuickCompiler : public Compiler { const DexFile& dex_file) const OVERRIDE; uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit); diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index d993d934a5..d1fe167bb4 100755 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -1336,9 +1336,24 @@ bool X86Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) { } OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh()); OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low); + // Free up at least one input register if it was a temp. Otherwise we may be in the bad + // situation of not having a temp available for SwapBits. Make sure it's not overlapping + // with the output, though. if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) { + // There's definitely a free temp after this. FreeTemp(r_i_low); + } else { + // We opportunistically release both here. That saves duplication of the register state + // lookup (to see if it's actually a temp). 
+ if (rl_i.reg.GetLowReg() != rl_result.reg.GetHighReg()) { + FreeTemp(rl_i.reg.GetLow()); + } + if (rl_i.reg.GetHighReg() != rl_result.reg.GetLowReg() && + rl_i.reg.GetHighReg() != rl_result.reg.GetHighReg()) { + FreeTemp(rl_i.reg.GetHigh()); + } } + SwapBits(rl_result.reg.GetLow(), 1, 0x55555555); SwapBits(rl_result.reg.GetLow(), 2, 0x33333333); SwapBits(rl_result.reg.GetLow(), 4, 0x0f0f0f0f); diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc index 798e23fbac..98e9f38d52 100644 --- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc +++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc @@ -39,6 +39,8 @@ class QuickAssembleX86TestBase : public testing::Test { CompilerOptions::kDefaultSmallMethodThreshold, CompilerOptions::kDefaultTinyMethodThreshold, CompilerOptions::kDefaultNumDexMethodsThreshold, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, false, CompilerOptions::kDefaultTopKProfileThreshold, false, diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index 61a1becac1..b16ae982f2 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -659,7 +659,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86Mov32RA : kX86Mov32RM; } // TODO: double store is to unaligned address - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kWord: if (cu_->target64) { @@ -677,15 +677,15 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86MovssRA : kX86MovssRM; DCHECK(r_dest.IsFloat()); } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kUnsignedHalf: opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kSignedHalf: opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM; - DCHECK_EQ((displacement & 0x1), 0); + DCHECK_ALIGNED(displacement, 2); break; case kUnsignedByte: opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM; @@ -812,7 +812,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86Mov32AR : kX86Mov32MR; } // TODO: double store is to unaligned address - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); break; case kWord: if (cu_->target64) { @@ -831,13 +831,13 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int opcode = is_array ? kX86MovssAR : kX86MovssMR; DCHECK(r_src.IsSingle()); } - DCHECK_EQ((displacement & 0x3), 0); + DCHECK_ALIGNED(displacement, 4); consider_non_temporal = true; break; case kUnsignedHalf: case kSignedHalf: opcode = is_array ? 
kX86Mov16AR : kX86Mov16MR;
- DCHECK_EQ((displacement & 0x1), 0);
+ DCHECK_ALIGNED(displacement, 2);
break;
case kUnsignedByte:
case kSignedByte:
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h index d692d26229..03bf57bded 100644 --- a/compiler/dex/quick_compiler_callbacks.h +++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks { ~QuickCompilerCallbacks() { }
bool MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
+ SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
void ClassRejected(ClassReference ref) OVERRIDE;
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h index 7fc2a2363d..9934f6b13b 100644 --- a/compiler/dex/verification_results.h +++ b/compiler/dex/verification_results.h
@@ -43,15 +43,15 @@ class VerificationResults { ~VerificationResults();
bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- LOCKS_EXCLUDED(verified_methods_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!verified_methods_lock_);
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
- LOCKS_EXCLUDED(verified_methods_lock_);
- void RemoveVerifiedMethod(MethodReference ref) LOCKS_EXCLUDED(verified_methods_lock_);
+ REQUIRES(!verified_methods_lock_);
+ void RemoveVerifiedMethod(MethodReference ref) REQUIRES(!verified_methods_lock_);
- void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
- bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_);
+ void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+ bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h index bf11839cf0..f7d6d67368 100644 --- a/compiler/dex/verified_method.h +++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@ class VerifiedMethod { typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
~VerifiedMethod() = default;
const std::vector<uint8_t>& GetDexGcMap() const {
@@ -107,15 +107,15 @@ class VerifiedMethod { // Generate devirtualization map into devirt_map_.
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_. Returns false if there is an error.
bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Generate safe cast set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::vector<uint8_t> dex_gc_map_; DevirtualizationMap devirt_map_; diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 7890108f41..fa25a17481 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -39,6 +39,7 @@ #include "compiler_driver-inl.h" #include "dex_compilation_unit.h" #include "dex_file-inl.h" +#include "dex/dex_to_dex_compiler.h" #include "dex/verification_results.h" #include "dex/verified_method.h" #include "dex/quick/dex_file_method_inliner.h" @@ -167,69 +168,69 @@ class CompilerDriver::AOTCompilationStats { #define STATS_LOCK() #endif - void TypeInDexCache() { + void TypeInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); types_in_dex_cache_++; } - void TypeNotInDexCache() { + void TypeNotInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); types_not_in_dex_cache_++; } - void StringInDexCache() { + void StringInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); strings_in_dex_cache_++; } - void StringNotInDexCache() { + void StringNotInDexCache() REQUIRES(!stats_lock_) { STATS_LOCK(); strings_not_in_dex_cache_++; } - void TypeDoesntNeedAccessCheck() { + void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_types_++; } - void TypeNeedsAccessCheck() { + void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_types_++; } - void ResolvedInstanceField() { + void ResolvedInstanceField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_instance_fields_++; } - void UnresolvedInstanceField() { + void UnresolvedInstanceField() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_instance_fields_++; } - void ResolvedLocalStaticField() { + void ResolvedLocalStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_local_static_fields_++; } - void ResolvedStaticField() { + void ResolvedStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_static_fields_++; } - void UnresolvedStaticField() { + void UnresolvedStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_static_fields_++; } // Indicate that type information from the verifier led to devirtualization. - void PreciseTypeDevirtualization() { + void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) { STATS_LOCK(); type_based_devirtualization_++; } // Indicate that a method of the given type was resolved at compile time. - void ResolvedMethod(InvokeType type) { + void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) { DCHECK_LE(type, kMaxInvokeType); STATS_LOCK(); resolved_methods_[type]++; @@ -237,7 +238,7 @@ class CompilerDriver::AOTCompilationStats { // Indicate that a method of the given type was unresolved at compile time as it was in an // unknown dex file. - void UnresolvedMethod(InvokeType type) { + void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) { DCHECK_LE(type, kMaxInvokeType); STATS_LOCK(); unresolved_methods_[type]++; @@ -245,27 +246,27 @@ class CompilerDriver::AOTCompilationStats { // Indicate that a type of virtual method dispatch has been converted into a direct method // dispatch. - void VirtualMadeDirect(InvokeType type) { + void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) { DCHECK(type == kVirtual || type == kInterface || type == kSuper); STATS_LOCK(); virtual_made_direct_[type]++; } // Indicate that a method of the given type was able to call directly into boot. 
- void DirectCallsToBoot(InvokeType type) {
+ void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_calls_to_boot_[type]++;
}
// Indicate that a method of the given type was able to be resolved directly from boot.
- void DirectMethodsToBoot(InvokeType type) {
+ void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) {
DCHECK_LE(type, kMaxInvokeType);
STATS_LOCK();
direct_methods_to_boot_[type]++;
}
- void ProcessedInvoke(InvokeType type, int flags) {
+ void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) {
STATS_LOCK();
if (flags == 0) {
unresolved_methods_[type]++;
@@ -290,13 +291,13 @@ class CompilerDriver::AOTCompilationStats { }
// A check-cast could be eliminated due to verifier type analysis.
- void SafeCast() {
+ void SafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
safe_casts_++;
}
// A check-cast couldn't be eliminated due to verifier type analysis.
- void NotASafeCast() {
+ void NotASafeCast() REQUIRES(!stats_lock_) {
STATS_LOCK();
not_safe_casts_++;
}
@@ -334,16 +335,6 @@ class CompilerDriver::AOTCompilationStats { DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats); };
-
-extern "C" art::CompiledMethod* ArtCompileDEX(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-
CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
@@ -394,8 +385,6 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options, DCHECK(verification_results_ != nullptr); DCHECK(method_inliner_map_ != nullptr);
- dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX);
-
compiler_->Init();
CHECK_EQ(image_, image_classes_.get() != nullptr);
@@ -508,13 +497,14 @@ void CompilerDriver::CompileAll(jobject class_loader, } }
-DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel(
- Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
+static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
+ Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file, const DexFile::ClassDef& class_def)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
auto* const runtime = Runtime::Current();
- if (runtime->UseJit() || GetCompilerOptions().VerifyAtRuntime()) {
+ if (runtime->UseJit() || driver.GetCompilerOptions().VerifyAtRuntime()) {
// Verify at runtime shouldn't dex to dex since we didn't resolve or verify.
- return kDontDexToDexCompile;
+ return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = runtime->GetClassLinker();
@@ -522,7 +512,7 @@ DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel( if (klass == nullptr) { CHECK(self->IsExceptionPending()); self->ClearException();
- return kDontDexToDexCompile;
+ return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
// DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic
// references with actual offsets. We cannot re-verify such instructions.
@@ -532,14 +522,142 @@ DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel( // optimize when a class has been fully verified before.
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
- return kOptimize;
+ return optimizer::DexToDexCompilationLevel::kOptimize;
} else if (klass->IsCompileTimeVerified()) {
// Class verification has soft-failed. Anyway, ensure at least correctness.
DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
- return kRequired;
+ return optimizer::DexToDexCompilationLevel::kRequired;
} else {
// Class verification has failed: do not run DEX-to-DEX compilation.
- return kDontDexToDexCompile;
+ return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ }
+}
+
+static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
+ Thread* self,
+ const CompilerDriver& driver,
+ jobject jclass_loader,
+ const DexFile& dex_file,
+ const DexFile::ClassDef& class_def) {
+ ScopedObjectAccess soa(self);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ return GetDexToDexCompilationLevel(self, driver, class_loader, dex_file, class_def);
+}
+
+// Does the runtime for the InstructionSet provide an implementation returned by
+// GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler?
+static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kArm64:
+ case kThumb2:
+ case kMips:
+ case kMips64:
+ case kX86:
+ case kX86_64: return true;
+ default: return false;
+ }
+}
+
+static void CompileMethod(Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled)
+ REQUIRES(!driver->compiled_methods_lock_) {
+ DCHECK(driver != nullptr);
+ CompiledMethod* compiled_method = nullptr;
+ uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
+ MethodReference method_ref(&dex_file, method_idx);
+
+ if ((access_flags & kAccNative) != 0) {
+ // Are we interpreting only and have support for generic JNI down calls?
+ if (!driver->GetCompilerOptions().IsCompilationEnabled() &&
+ InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
+ // Leaving this empty will trigger the generic JNI version.
+ } else {
+ compiled_method = driver->GetCompiler()->JniCompile(access_flags, method_idx, dex_file);
+ CHECK(compiled_method != nullptr);
+ }
+ } else if ((access_flags & kAccAbstract) != 0) {
+ // Abstract methods don't have code.
+ } else {
+ bool has_verified_method = driver->GetVerificationResults()
+ ->GetVerifiedMethod(method_ref) != nullptr;
+ bool compile = compilation_enabled &&
+ // Basic checks, e.g., not <clinit>.
+ driver->GetVerificationResults()
+ ->IsCandidateForCompilation(method_ref, access_flags) &&
+ // Did not fail to create VerifiedMethod metadata.
+ has_verified_method &&
+ // Is eligible for compilation by methods-to-compile filter.
+ driver->IsMethodToCompile(method_ref);
+ if (compile) {
+ // NOTE: if compiler declines to compile this method, it will return null.
+ compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type, + class_def_idx, method_idx, class_loader, + dex_file); + } + if (compiled_method == nullptr && + dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) { + // TODO: add a command-line option to disable DEX-to-DEX compilation ? + // Do not optimize if a VerifiedMethod is missing. SafeCast elision, for example, relies on + // it. + compiled_method = optimizer::ArtCompileDEX( + driver, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file, + has_verified_method + ? dex_to_dex_compilation_level + : optimizer::DexToDexCompilationLevel::kRequired); + } + } + if (kTimeCompileMethod) { + uint64_t duration_ns = NanoTime() - start_ns; + if (duration_ns > MsToNs(driver->GetCompiler()->GetMaximumCompilationTimeBeforeWarning())) { + LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file) + << " took " << PrettyDuration(duration_ns); + } + } + + if (compiled_method != nullptr) { + // Count non-relative linker patches. + size_t non_relative_linker_patch_count = 0u; + for (const LinkerPatch& patch : compiled_method->GetPatches()) { + if (!patch.IsPcRelative()) { + ++non_relative_linker_patch_count; + } + } + bool compile_pic = driver->GetCompilerOptions().GetCompilePic(); // Off by default + // When compiling with PIC, there should be zero non-relative linker patches + CHECK(!compile_pic || non_relative_linker_patch_count == 0u); + + driver->AddCompiledMethod(method_ref, compiled_method, non_relative_linker_patch_count); + } + + // Done compiling, delete the verified method to reduce native memory usage. Do not delete in + // optimizing compiler, which may need the verified method again for inlining. + if (driver->GetCompilerKind() != Compiler::kOptimizing) { + driver->GetVerificationResults()->RemoveVerifiedMethod(method_ref); + } + + if (self->IsExceptionPending()) { + ScopedObjectAccess soa(self); + LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n" + << self->GetException()->Dump(); } } @@ -570,24 +688,30 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t PreCompile(jclass_loader, dex_files, thread_pool.get(), timings); // Can we run DEX-to-DEX compiler on this class ? 
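The PIC invariant checked above is compact enough to restate on its own: position-independent code may only carry PC-relative patches, so the non-relative count has to be zero under --compile-pic. A sketch of the counting loop (the helper name is illustrative; LinkerPatch and GetPatches are as shown in the patch):

static size_t CountNonRelativeLinkerPatches(const CompiledMethod* compiled_method) {
  size_t count = 0u;
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (!patch.IsPcRelative()) {
      ++count;  // Non-relative patches need absolute addresses at link time.
    }
  }
  return count;  // Must be zero whenever PIC compilation is in effect.
}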
- DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; - { - ScopedObjectAccess soa(self); - const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx); - StackHandleScope<1> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - dex_to_dex_compilation_level = GetDexToDexCompilationlevel(self, class_loader, *dex_file, - class_def); - } - CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx, - jclass_loader, *dex_file, dex_to_dex_compilation_level, true); + optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level = + GetDexToDexCompilationLevel(self, + *this, + jclass_loader, + *dex_file, + dex_file->GetClassDef(class_def_idx)); + + CompileMethod(self, + this, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + jclass_loader, + *dex_file, + dex_to_dex_compilation_level, + true); self->GetJniEnv()->DeleteGlobalRef(jclass_loader); self->TransitionFromSuspendedToRunnable(); } -CompiledMethod* CompilerDriver::CompileMethod(Thread* self, ArtMethod* method) { +CompiledMethod* CompilerDriver::CompileArtMethod(Thread* self, ArtMethod* method) { const uint32_t method_idx = method->GetDexMethodIndex(); const uint32_t access_flags = method->GetAccessFlags(); const InvokeType invoke_type = method->GetInvokeType(); @@ -598,12 +722,21 @@ CompiledMethod* CompilerDriver::CompileMethod(Thread* self, ArtMethod* method) { const DexFile* dex_file = method->GetDexFile(); const uint16_t class_def_idx = method->GetClassDefIndex(); const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx); - DexToDexCompilationLevel dex_to_dex_compilation_level = - GetDexToDexCompilationlevel(self, class_loader, *dex_file, class_def); + optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level = + GetDexToDexCompilationLevel(self, *this, class_loader, *dex_file, class_def); const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); self->TransitionFromRunnableToSuspended(kNative); - CompileMethod(self, code_item, access_flags, invoke_type, class_def_idx, method_idx, - jclass_loader, *dex_file, dex_to_dex_compilation_level, true); + CompileMethod(self, + this, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + jclass_loader, + *dex_file, + dex_to_dex_compilation_level, + true); auto* compiled_method = GetCompiledMethod(MethodReference(dex_file, method_idx)); self->TransitionFromSuspendedToRunnable(); return compiled_method; @@ -690,70 +823,79 @@ bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end(); } -static void ResolveExceptionsForMethod( - ArtMethod* method_handle, std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); - if (code_item == nullptr) { - return; // native or abstract method - } - if (code_item->tries_size_ == 0) { - return; // nothing to process - } - const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); - size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list); - for (size_t i = 0; i < num_encoded_catch_handlers; i++) { - int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); - bool has_catch_all = false; - if (encoded_catch_handler_size 
<= 0) { - encoded_catch_handler_size = -encoded_catch_handler_size; - has_catch_all = true; - } - for (int32_t j = 0; j < encoded_catch_handler_size; j++) { - uint16_t encoded_catch_handler_handlers_type_idx = - DecodeUnsignedLeb128(&encoded_catch_handler_list); - // Add to set of types to resolve if not already in the dex cache resolved types - if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { - exceptions_to_resolve.insert( - std::pair<uint16_t, const DexFile*>(encoded_catch_handler_handlers_type_idx, - method_handle->GetDexFile())); - } - // ignore address associated with catch handler - DecodeUnsignedLeb128(&encoded_catch_handler_list); +class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { + public: + ResolveCatchBlockExceptionsClassVisitor( + std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) + : exceptions_to_resolve_(exceptions_to_resolve) {} + + virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + for (auto& m : c->GetVirtualMethods(pointer_size)) { + ResolveExceptionsForMethod(&m); } - if (has_catch_all) { - // ignore catch all address - DecodeUnsignedLeb128(&encoded_catch_handler_list); + for (auto& m : c->GetDirectMethods(pointer_size)) { + ResolveExceptionsForMethod(&m); } + return true; } -} -static bool ResolveCatchBlockExceptionsClassVisitor(mirror::Class* c, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - auto* exceptions_to_resolve = - reinterpret_cast<std::set<std::pair<uint16_t, const DexFile*>>*>(arg); - const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - for (auto& m : c->GetVirtualMethods(pointer_size)) { - ResolveExceptionsForMethod(&m, *exceptions_to_resolve); + private: + void ResolveExceptionsForMethod(ArtMethod* method_handle) SHARED_REQUIRES(Locks::mutator_lock_) { + const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); + if (code_item == nullptr) { + return; // native or abstract method + } + if (code_item->tries_size_ == 0) { + return; // nothing to process + } + const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); + size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list); + for (size_t i = 0; i < num_encoded_catch_handlers; i++) { + int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); + bool has_catch_all = false; + if (encoded_catch_handler_size <= 0) { + encoded_catch_handler_size = -encoded_catch_handler_size; + has_catch_all = true; + } + for (int32_t j = 0; j < encoded_catch_handler_size; j++) { + uint16_t encoded_catch_handler_handlers_type_idx = + DecodeUnsignedLeb128(&encoded_catch_handler_list); + // Add to set of types to resolve if not already in the dex cache resolved types + if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { + exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx, + method_handle->GetDexFile()); + } + // ignore address associated with catch handler + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + if (has_catch_all) { + // ignore catch all address + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + } } - for (auto& m : c->GetDirectMethods(pointer_size)) { - ResolveExceptionsForMethod(&m, *exceptions_to_resolve); + + std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve_; +}; + +class 
RecordImageClassesVisitor : public ClassVisitor { + public: + explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes) + : image_classes_(image_classes) {} + + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::string temp; + image_classes_->insert(klass->GetDescriptor(&temp)); + return true; } - return true; -} -static bool RecordImageClassesVisitor(mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - std::unordered_set<std::string>* image_classes = - reinterpret_cast<std::unordered_set<std::string>*>(arg); - std::string temp; - image_classes->insert(klass->GetDescriptor(&temp)); - return true; -} + private: + std::unordered_set<std::string>* const image_classes_; +}; // Make a list of descriptors for classes to include in the image -void CompilerDriver::LoadImageClasses(TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_) { +void CompilerDriver::LoadImageClasses(TimingLogger* timings) { CHECK(timings != nullptr); if (!IsImage()) { return; @@ -788,8 +930,8 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"))); do { unresolved_exception_types.clear(); - class_linker->VisitClasses(ResolveCatchBlockExceptionsClassVisitor, - &unresolved_exception_types); + ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types); + class_linker->VisitClasses(&visitor); for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) { uint16_t exception_type_idx = exception_type.first; const DexFile* dex_file = exception_type.second; @@ -812,14 +954,15 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) // We walk the roots looking for classes so that we'll pick up the // above classes plus any classes they depend on, such as super // classes, interfaces, and the required ClassLinker roots. - class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get()); + RecordImageClassesVisitor visitor(image_classes_.get()); + class_linker->VisitClasses(&visitor); CHECK_NE(image_classes_->size(), 0U); } static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::unordered_set<std::string>* image_classes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); StackHandleScope<1> hs(self); // Make a copy of the handle so that we don't clobber it doing Assign. @@ -876,7 +1019,7 @@ class ClinitImageUpdate { // Visitor for VisitReferences. void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset); if (ref != nullptr) { VisitClinitClassesObject(ref); @@ -884,10 +1027,15 @@ class ClinitImageUpdate { } // java.lang.Reference visitor for VisitReferences. - void operator()(mirror::Class* /* klass */, mirror::Reference* /* ref */) const { - } + void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED) + const {} + + // Ignore class native roots.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} - void Walk() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Walk() SHARED_REQUIRES(Locks::mutator_lock_) { // Use the initial classes as roots for a search. for (mirror::Class* klass_root : image_classes_) { VisitClinitClassesObject(klass_root); @@ -895,9 +1043,32 @@ class ClinitImageUpdate { } private: + class FindImageClassesVisitor : public ClassVisitor { + public: + explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {} + + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::string temp; + const char* name = klass->GetDescriptor(&temp); + if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) { + data_->image_classes_.push_back(klass); + } else { + // Check whether it is initialized and has a clinit. They must be kept, too. + if (klass->IsInitialized() && klass->FindClassInitializer( + Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) { + data_->image_classes_.push_back(klass); + } + } + return true; + } + + private: + ClinitImageUpdate* const data_; + }; + ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self, ClassLinker* linker) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + SHARED_REQUIRES(Locks::mutator_lock_) : image_class_descriptors_(image_class_descriptors), self_(self) { CHECK(linker != nullptr); CHECK(image_class_descriptors != nullptr); @@ -911,29 +1082,12 @@ class ClinitImageUpdate { // Find all the already-marked classes. WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); - linker->VisitClasses(FindImageClasses, this); - } - - static bool FindImageClasses(mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClinitImageUpdate* data = reinterpret_cast<ClinitImageUpdate*>(arg); - std::string temp; - const char* name = klass->GetDescriptor(&temp); - if (data->image_class_descriptors_->find(name) != data->image_class_descriptors_->end()) { - data->image_classes_.push_back(klass); - } else { - // Check whether it is initialized and has a clinit. They must be kept, too. - if (klass->IsInitialized() && klass->FindClassInitializer( - Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) { - data->image_classes_.push_back(klass); - } - } - - return true; + FindImageClassesVisitor visitor(this); + linker->VisitClasses(&visitor); } void VisitClinitClassesObject(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(object != nullptr); if (marked_objects_.find(object) != marked_objects_.end()) { // Already processed. @@ -955,7 +1109,7 @@ class ClinitImageUpdate { // If it is not a DexCache, visit all references. 
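Every visitor conversion in this change follows the same shape, worth seeing once in isolation: the old C-style callback smuggling state through a void* argument becomes a ClassVisitor subclass holding typed fields. The class below is a hypothetical illustration, not from the patch:

class CountLoadedClassesVisitor : public ClassVisitor {
 public:
  bool Visit(mirror::Class* klass ATTRIBUTE_UNUSED) OVERRIDE
      SHARED_REQUIRES(Locks::mutator_lock_) {
    ++count_;     // Typed state instead of reinterpret_cast<size_t*>(arg).
    return true;  // Returning true keeps the class walk going.
  }

  size_t count_ = 0u;
};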
mirror::Class* klass = object->GetClass(); if (klass != dex_cache_class_) { - object->VisitReferences<false /* visit class */>(*this, *this); + object->VisitReferences(*this, *this); } } @@ -1569,10 +1723,14 @@ bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc return result; } -class ParallelCompilationManager { +class CompilationVisitor { public: - typedef void Callback(const ParallelCompilationManager* manager, size_t index); + virtual ~CompilationVisitor() {} + virtual void Visit(size_t index) = 0; +}; +class ParallelCompilationManager { + public: ParallelCompilationManager(ClassLinker* class_linker, jobject class_loader, CompilerDriver* compiler, @@ -1610,14 +1768,15 @@ class ParallelCompilationManager { return dex_files_; } - void ForAll(size_t begin, size_t end, Callback callback, size_t work_units) { + void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units) + REQUIRES(!*Locks::mutator_lock_) { Thread* self = Thread::Current(); self->AssertNoPendingException(); CHECK_GT(work_units, 0U); index_.StoreRelaxed(begin); for (size_t i = 0; i < work_units; ++i) { - thread_pool_->AddTask(self, new ForAllClosure(this, end, callback)); + thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor)); } thread_pool_->StartWorkers(self); @@ -1636,10 +1795,10 @@ class ParallelCompilationManager { private: class ForAllClosure : public Task { public: - ForAllClosure(ParallelCompilationManager* manager, size_t end, Callback* callback) + ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor) : manager_(manager), end_(end), - callback_(callback) {} + visitor_(visitor) {} virtual void Run(Thread* self) { while (true) { @@ -1647,7 +1806,7 @@ class ParallelCompilationManager { if (UNLIKELY(index >= end_)) { break; } - callback_(manager_, index); + visitor_->Visit(index); self->AssertNoPendingException(); } } @@ -1659,7 +1818,7 @@ class ParallelCompilationManager { private: ParallelCompilationManager* const manager_; const size_t end_; - Callback* const callback_; + CompilationVisitor* const visitor_; }; AtomicInteger index_; @@ -1676,7 +1835,7 @@ class ParallelCompilationManager { // A fast version of SkipClass above if the class pointer is available // that avoids the expensive FindInClassPath search. 
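The ForAll/ForAllClosure machinery above hands out work through a shared atomic index rather than pre-partitioned ranges, so fast workers naturally take more items. A standalone sketch of the claim loop each worker runs (std::atomic stands in for the AtomicInteger member used in the patch):

#include <atomic>
#include <cstddef>

void WorkerClaimLoop(CompilationVisitor* visitor, std::atomic<size_t>* index, size_t end) {
  while (true) {
    const size_t i = index->fetch_add(1u);  // Claim the next unprocessed item.
    if (i >= end) {
      break;  // Range exhausted; other workers may still be finishing.
    }
    visitor->Visit(i);
  }
}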
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(klass != nullptr); const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile(); if (&dex_file != &original_dex_file) { @@ -1691,7 +1850,7 @@ static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Cla } static void CheckAndClearResolveException(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(self->IsExceptionPending()); mirror::Throwable* exception = self->GetException(); std::string temp; @@ -1717,134 +1876,148 @@ static void CheckAndClearResolveException(Thread* self) self->ClearException(); } -static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager, - size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - Thread* self = Thread::Current(); - jobject jclass_loader = manager->GetClassLoader(); - const DexFile& dex_file = *manager->GetDexFile(); - ClassLinker* class_linker = manager->GetClassLinker(); - - // If an instance field is final then we need to have a barrier on the return, static final - // fields are assigned within the lock held for class initialization. Conservatively assume - // constructor barriers are always required. - bool requires_constructor_barrier = true; - - // Method and Field are the worst. We can't resolve without either - // context from the code use (to disambiguate virtual vs direct - // method and instance vs static field) or from class - // definitions. While the compiler will resolve what it can as it - // needs it, here we try to resolve fields and methods used in class - // definitions, since many of them many never be referenced by - // generated code. - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - ScopedObjectAccess soa(self); - StackHandleScope<2> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); - // Resolve the class. - mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, - class_loader); - bool resolve_fields_and_methods; - if (klass == nullptr) { - // Class couldn't be resolved, for example, super-class is in a different dex file. Don't - // attempt to resolve methods and fields when there is no declaring class. - CheckAndClearResolveException(soa.Self()); - resolve_fields_and_methods = false; - } else { - // We successfully resolved a class, should we skip it? - if (SkipClass(jclass_loader, dex_file, klass)) { - return; - } - // We want to resolve the methods and fields eagerly. - resolve_fields_and_methods = true; - } - // Note the class_data pointer advances through the headers, - // static fields, instance fields, direct methods, and virtual - // methods. - const uint8_t* class_data = dex_file.GetClassData(class_def); - if (class_data == nullptr) { - // Empty class such as a marker interface. 
- requires_constructor_barrier = false; - } else { - ClassDataItemIterator it(dex_file, class_data); - while (it.HasNextStaticField()) { - if (resolve_fields_and_methods) { - ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), - dex_cache, class_loader, true); - if (field == nullptr) { - CheckAndClearResolveException(soa.Self()); - } - } - it.Next(); - } - // We require a constructor barrier if there are final instance fields. - requires_constructor_barrier = false; - while (it.HasNextInstanceField()) { - if (it.MemberIsFinal()) { - requires_constructor_barrier = true; +class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor { + public: + explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager) + : manager_(manager) {} + + void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) { + ATRACE_CALL(); + Thread* const self = Thread::Current(); + jobject jclass_loader = manager_->GetClassLoader(); + const DexFile& dex_file = *manager_->GetDexFile(); + ClassLinker* class_linker = manager_->GetClassLinker(); + + // If an instance field is final then we need to have a barrier on the return, static final + // fields are assigned within the lock held for class initialization. Conservatively assume + // constructor barriers are always required. + bool requires_constructor_barrier = true; + + // Method and Field are the worst. We can't resolve without either + // context from the code use (to disambiguate virtual vs direct + // method and instance vs static field) or from class + // definitions. While the compiler will resolve what it can as it + // needs it, here we try to resolve fields and methods used in class + // definitions, since many of them may never be referenced by + // generated code. + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + ScopedObjectAccess soa(self); + StackHandleScope<2> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + // Resolve the class. + mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, + class_loader); + bool resolve_fields_and_methods; + if (klass == nullptr) { + // Class couldn't be resolved, for example, super-class is in a different dex file. Don't + // attempt to resolve methods and fields when there is no declaring class. + CheckAndClearResolveException(soa.Self()); + resolve_fields_and_methods = false; + } else { + // We successfully resolved a class, should we skip it? + if (SkipClass(jclass_loader, dex_file, klass)) { + return; } - if (resolve_fields_and_methods) { - ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), - dex_cache, class_loader, false); - if (field == nullptr) { - CheckAndClearResolveException(soa.Self()); + // We want to resolve the methods and fields eagerly. + resolve_fields_and_methods = true; + } + // Note the class_data pointer advances through the headers, + // static fields, instance fields, direct methods, and virtual + // methods. + const uint8_t* class_data = dex_file.GetClassData(class_def); + if (class_data == nullptr) { + // Empty class such as a marker interface.
+ requires_constructor_barrier = false; + } else { + ClassDataItemIterator it(dex_file, class_data); + while (it.HasNextStaticField()) { + if (resolve_fields_and_methods) { + ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, true); + if (field == nullptr) { + CheckAndClearResolveException(soa.Self()); + } } + it.Next(); } - it.Next(); - } - if (resolve_fields_and_methods) { - while (it.HasNextDirectMethod()) { - ArtMethod* method = class_linker->ResolveMethod( - dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, - it.GetMethodInvokeType(class_def)); - if (method == nullptr) { - CheckAndClearResolveException(soa.Self()); + // We require a constructor barrier if there are final instance fields. + requires_constructor_barrier = false; + while (it.HasNextInstanceField()) { + if (it.MemberIsFinal()) { + requires_constructor_barrier = true; + } + if (resolve_fields_and_methods) { + ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, false); + if (field == nullptr) { + CheckAndClearResolveException(soa.Self()); + } } it.Next(); } - while (it.HasNextVirtualMethod()) { - ArtMethod* method = class_linker->ResolveMethod( - dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, - it.GetMethodInvokeType(class_def)); - if (method == nullptr) { - CheckAndClearResolveException(soa.Self()); + if (resolve_fields_and_methods) { + while (it.HasNextDirectMethod()) { + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); + if (method == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + it.Next(); } - it.Next(); + while (it.HasNextVirtualMethod()) { + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); + if (method == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + it.Next(); + } + DCHECK(!it.HasNext()); } - DCHECK(!it.HasNext()); + } + if (requires_constructor_barrier) { + manager_->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index); } } - if (requires_constructor_barrier) { - manager->GetCompiler()->AddRequiresConstructorBarrier(self, &dex_file, class_def_index); - } -} -static void ResolveType(const ParallelCompilationManager* manager, size_t type_idx) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - // Class derived values are more complicated, they require the linker and loader. - ScopedObjectAccess soa(Thread::Current()); - ClassLinker* class_linker = manager->GetClassLinker(); - const DexFile& dex_file = *manager->GetDexFile(); - StackHandleScope<2> hs(soa.Self()); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader()))); - mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + private: + const ParallelCompilationManager* const manager_; +}; - if (klass == nullptr) { - CHECK(soa.Self()->IsExceptionPending()); - mirror::Throwable* exception = soa.Self()->GetException(); - VLOG(compiler) << "Exception during type resolution: " << exception->Dump(); - if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) { - // There's little point continuing compilation if the heap is exhausted. 
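The constructor-barrier rule buried in the visitor above can be read separately: a class needs a barrier on constructor return exactly when it declares a final instance field, so other threads can never observe the field's pre-initialization value. A sketch over the iterator API used in the patch (assumes static fields have already been iterated past):

static bool RequiresConstructorBarrier(ClassDataItemIterator* it) {
  bool requires_barrier = false;
  while (it->HasNextInstanceField()) {
    if (it->MemberIsFinal()) {
      requires_barrier = true;  // A final field forces the barrier.
    }
    it->Next();
  }
  return requires_barrier;
}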
- LOG(FATAL) << "Out of memory during type resolution for compilation"; +class ResolveTypeVisitor : public CompilationVisitor { + public: + explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) { + } + virtual void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) { + // Class derived values are more complicated, they require the linker and loader. + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = manager_->GetClassLinker(); + const DexFile& dex_file = *manager_->GetDexFile(); + StackHandleScope<2> hs(soa.Self()); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader()))); + mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + + if (klass == nullptr) { + soa.Self()->AssertPendingException(); + mirror::Throwable* exception = soa.Self()->GetException(); + VLOG(compiler) << "Exception during type resolution: " << exception->Dump(); + if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) { + // There's little point continuing compilation if the heap is exhausted. + LOG(FATAL) << "Out of memory during type resolution for compilation"; + } + soa.Self()->ClearException(); } - soa.Self()->ClearException(); } -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -1860,17 +2033,18 @@ void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_fil // For images we resolve all types, such as array, whereas for applications just those with // classdefs are resolved by ResolveClassFieldsAndMethods. 
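Reading that comment next to the ForAll calls that follow, the resolution scope differs by compile kind. A paraphrase of the control flow (the IsImage() guard is inferred from the comment rather than shown verbatim in this hunk, and thread_count stands in for the driver's configured count):

if (driver->IsImage()) {
  // Boot image: eagerly resolve every TypeId, including arrays without a ClassDef.
  ResolveTypeVisitor type_visitor(&context);
  context.ForAll(0, dex_file.NumTypeIds(), &type_visitor, thread_count);
}
// Apps (and images too): resolve the fields and methods of defined classes.
ResolveClassFieldsAndMethodsVisitor class_visitor(&context);
context.ForAll(0, dex_file.NumClassDefs(), &class_visitor, thread_count);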
TimingLogger::ScopedTiming t("Resolve Types", timings); - context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_); + ResolveTypeVisitor visitor(&context); + context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count_); } TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings); - context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_); + ResolveClassFieldsAndMethodsVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) { - for (size_t i = 0; i != dex_files.size(); ++i) { - const DexFile* dex_file = dex_files[i]; + for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); SetVerifiedDexFile(class_loader, *dex_file, dex_files, thread_pool, timings); } @@ -1878,67 +2052,73 @@ void CompilerDriver::SetVerified(jobject class_loader, const std::vector<const D void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) { - for (size_t i = 0; i != dex_files.size(); ++i) { - const DexFile* dex_file = dex_files[i]; + for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); VerifyDexFile(class_loader, *dex_file, dex_files, thread_pool, timings); } } -static void VerifyClass(const ParallelCompilationManager* manager, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - ScopedObjectAccess soa(Thread::Current()); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - const char* descriptor = dex_file.GetClassDescriptor(class_def); - ClassLinker* class_linker = manager->GetClassLinker(); - jobject jclass_loader = manager->GetClassLoader(); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); - if (klass.Get() == nullptr) { - CHECK(soa.Self()->IsExceptionPending()); - soa.Self()->ClearException(); +class VerifyClassVisitor : public CompilationVisitor { + public: + explicit VerifyClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} - /* - * At compile time, we can still structurally verify the class even if FindClass fails. - * This is to ensure the class is structurally sound for compilation. An unsound class - * will be rejected by the verifier and later skipped during compilation in the compiler. - */ - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); - std::string error_msg; - if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader, - &class_def, true, &error_msg) == - verifier::MethodVerifier::kHardFailure) { - LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor) - << " because: " << error_msg; - manager->GetCompiler()->SetHadHardVerifierFailure(); - } - } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) { - CHECK(klass->IsResolved()) << PrettyClass(klass.Get()); - class_linker->VerifyClass(soa.Self(), klass); - - if (klass->IsErroneous()) { - // ClassLinker::VerifyClass throws, which isn't useful in the compiler. 
+ virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + ScopedObjectAccess soa(Thread::Current()); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + if (klass.Get() == nullptr) { CHECK(soa.Self()->IsExceptionPending()); soa.Self()->ClearException(); - manager->GetCompiler()->SetHadHardVerifierFailure(); - } - CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) - << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus(); + /* + * At compile time, we can still structurally verify the class even if FindClass fails. + * This is to ensure the class is structurally sound for compilation. An unsound class + * will be rejected by the verifier and later skipped during compilation in the compiler. + */ + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + std::string error_msg; + if (verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader, + &class_def, true, &error_msg) == + verifier::MethodVerifier::kHardFailure) { + LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor) + << " because: " << error_msg; + manager_->GetCompiler()->SetHadHardVerifierFailure(); + } + } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) { + CHECK(klass->IsResolved()) << PrettyClass(klass.Get()); + class_linker->VerifyClass(soa.Self(), klass); + + if (klass->IsErroneous()) { + // ClassLinker::VerifyClass throws, which isn't useful in the compiler. + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + manager_->GetCompiler()->SetHadHardVerifierFailure(); + } + + CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) + << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus(); - // It is *very* problematic if there are verification errors in the boot classpath. For example, - // we rely on things working OK without verification when the decryption dialog is brought up. - // So abort in a debug build if we find this violated. - DCHECK(!manager->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " << - PrettyClass(klass.Get()) << " failed to fully verify."; + // It is *very* problematic if there are verification errors in the boot classpath. For example, + // we rely on things working OK without verification when the decryption dialog is brought up. + // So abort in a debug build if we find this violated. 
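The FindClass-failure path above is the interesting one: the class is still verified structurally, straight from its dex data, so unsound input is rejected instead of silently skipped. A condensed sketch (the helper name is illustrative; the arguments mirror the VerifyClass call in the patch, with allow_soft_failures set to true so only a hard failure rejects the class):

static bool StructurallyVerifiesAtCompileTime(Thread* self,
                                              const DexFile* dex_file,
                                              Handle<mirror::DexCache> dex_cache,
                                              Handle<mirror::ClassLoader> class_loader,
                                              const DexFile::ClassDef* class_def,
                                              std::string* error_msg)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return verifier::MethodVerifier::VerifyClass(self, dex_file, dex_cache, class_loader,
                                               class_def, true, error_msg) !=
         verifier::MethodVerifier::kHardFailure;
}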
+ DCHECK(!manager_->GetCompiler()->IsImage() || klass->IsVerified()) << "Boot classpath class " + << PrettyClass(klass.Get()) << " failed to fully verify."; + } + soa.Self()->AssertNoPendingException(); } - soa.Self()->AssertNoPendingException(); -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -1947,48 +2127,56 @@ void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); - context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_); + VerifyClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } -static void SetVerifiedClass(const ParallelCompilationManager* manager, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - ScopedObjectAccess soa(Thread::Current()); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - const char* descriptor = dex_file.GetClassDescriptor(class_def); - ClassLinker* class_linker = manager->GetClassLinker(); - jobject jclass_loader = manager->GetClassLoader(); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); - // Class might have failed resolution. Then don't set it to verified. - if (klass.Get() != nullptr) { - // Only do this if the class is resolved. If even resolution fails, quickening will go very, - // very wrong. - if (klass->IsResolved()) { - if (klass->GetStatus() < mirror::Class::kStatusVerified) { - ObjectLock<mirror::Class> lock(soa.Self(), klass); - // Set class status to verified. - mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self()); - // Mark methods as pre-verified. If we don't do this, the interpreter will run with - // access checks. - klass->SetPreverifiedFlagOnAllMethods( - GetInstructionSetPointerSize(manager->GetCompiler()->GetInstructionSet())); - klass->SetPreverified(); +class SetVerifiedClassVisitor : public CompilationVisitor { + public: + explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} + + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + ScopedObjectAccess soa(Thread::Current()); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + // Class might have failed resolution. Then don't set it to verified. + if (klass.Get() != nullptr) { + // Only do this if the class is resolved. If even resolution fails, quickening will go very, + // very wrong. 
+ if (klass->IsResolved()) { + if (klass->GetStatus() < mirror::Class::kStatusVerified) { + ObjectLock<mirror::Class> lock(soa.Self(), klass); + // Set class status to verified. + mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self()); + // Mark methods as pre-verified. If we don't do this, the interpreter will run with + // access checks. + klass->SetPreverifiedFlagOnAllMethods( + GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet())); + klass->SetPreverified(); + } + // Record the final class status if necessary. + ClassReference ref(manager_->GetDexFile(), class_def_index); + manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); } - // Record the final class status if necessary. - ClassReference ref(manager->GetDexFile(), class_def_index); - manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); + } else { + Thread* self = soa.Self(); + DCHECK(self->IsExceptionPending()); + self->ClearException(); } - } else { - Thread* self = soa.Self(); - DCHECK(self->IsExceptionPending()); - self->ClearException(); } -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -1997,99 +2185,107 @@ void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); - context.ForAll(0, dex_file.NumClassDefs(), SetVerifiedClass, thread_count_); + SetVerifiedClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } -static void InitializeClass(const ParallelCompilationManager* manager, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_) { - ATRACE_CALL(); - jobject jclass_loader = manager->GetClassLoader(); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_); - const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); +class InitializeClassVisitor : public CompilationVisitor { + public: + explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} - ScopedObjectAccess soa(Thread::Current()); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(manager->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader))); - - if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) { - // Only try to initialize classes that were successfully verified. - if (klass->IsVerified()) { - // Attempt to initialize the class but bail if we either need to initialize the super-class - // or static fields. - manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false); - if (!klass->IsInitialized()) { - // We don't want non-trivial class initialization occurring on multiple threads due to - // deadlock problems. For example, a parent class is initialized (holding its lock) that - // refers to a sub-class in its static/class initializer causing it to try to acquire the - // sub-class' lock. 
While on a second thread the sub-class is initialized (holding its lock) - // after first initializing its parents, whose locks are acquired. This leads to a - // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock. - // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather - // than use a special Object for the purpose we use the Class of java.lang.Class. - Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass())); - ObjectLock<mirror::Class> lock(soa.Self(), h_klass); - // Attempt to initialize allowing initialization of parent classes but still not static - // fields. - manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true); + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + jobject jclass_loader = manager_->GetClassLoader(); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_); + const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); + + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader))); + + if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) { + // Only try to initialize classes that were successfully verified. + if (klass->IsVerified()) { + // Attempt to initialize the class but bail if we either need to initialize the super-class + // or static fields. + manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false); if (!klass->IsInitialized()) { - // We need to initialize static fields, we only do this for image classes that aren't - // marked with the $NoPreloadHolder (which implies this should not be initialized early). - bool can_init_static_fields = manager->GetCompiler()->IsImage() && - manager->GetCompiler()->IsImageClass(descriptor) && - !StringPiece(descriptor).ends_with("$NoPreloadHolder;"); - if (can_init_static_fields) { - VLOG(compiler) << "Initializing: " << descriptor; - // TODO multithreading support. We should ensure the current compilation thread has - // exclusive access to the runtime and the transaction. To achieve this, we could use - // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity - // checks in Thread::AssertThreadSuspensionIsAllowable. - Runtime* const runtime = Runtime::Current(); - Transaction transaction; - - // Run the class initializer in transaction mode. - runtime->EnterTransactionMode(&transaction); - const mirror::Class::Status old_status = klass->GetStatus(); - bool success = manager->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, - true); - // TODO we detach transaction from runtime to indicate we quit the transactional - // mode which prevents the GC from visiting objects modified during the transaction. - // Ensure GC is not run so don't access freed objects when aborting transaction. 
- - ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end"); - runtime->ExitTransactionMode(); - - if (!success) { - CHECK(soa.Self()->IsExceptionPending()); - mirror::Throwable* exception = soa.Self()->GetException(); - VLOG(compiler) << "Initialization of " << descriptor << " aborted because of " - << exception->Dump(); - std::ostream* file_log = manager->GetCompiler()-> - GetCompilerOptions().GetInitFailureOutput(); - if (file_log != nullptr) { - *file_log << descriptor << "\n"; - *file_log << exception->Dump() << "\n"; + // We don't want non-trivial class initialization occurring on multiple threads due to + // deadlock problems. For example, a parent class is initialized (holding its lock) that + // refers to a sub-class in its static/class initializer causing it to try to acquire the + // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock) + // after first initializing its parents, whose locks are acquired. This leads to a + // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock. + // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather + // than use a special Object for the purpose we use the Class of java.lang.Class. + Handle<mirror::Class> h_klass(hs.NewHandle(klass->GetClass())); + ObjectLock<mirror::Class> lock(soa.Self(), h_klass); + // Attempt to initialize allowing initialization of parent classes but still not static + // fields. + manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true); + if (!klass->IsInitialized()) { + // We need to initialize static fields, we only do this for image classes that aren't + // marked with the $NoPreloadHolder (which implies this should not be initialized early). + bool can_init_static_fields = manager_->GetCompiler()->IsImage() && + manager_->GetCompiler()->IsImageClass(descriptor) && + !StringPiece(descriptor).ends_with("$NoPreloadHolder;"); + if (can_init_static_fields) { + VLOG(compiler) << "Initializing: " << descriptor; + // TODO multithreading support. We should ensure the current compilation thread has + // exclusive access to the runtime and the transaction. To achieve this, we could use + // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity + // checks in Thread::AssertThreadSuspensionIsAllowable. + Runtime* const runtime = Runtime::Current(); + Transaction transaction; + + // Run the class initializer in transaction mode. + runtime->EnterTransactionMode(&transaction); + const mirror::Class::Status old_status = klass->GetStatus(); + bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, + true); + // TODO we detach transaction from runtime to indicate we quit the transactional + // mode which prevents the GC from visiting objects modified during the transaction. + // Ensure GC is not run so don't access freed objects when aborting transaction. 
+ + ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end"); + runtime->ExitTransactionMode(); + + if (!success) { + CHECK(soa.Self()->IsExceptionPending()); + mirror::Throwable* exception = soa.Self()->GetException(); + VLOG(compiler) << "Initialization of " << descriptor << " aborted because of " + << exception->Dump(); + std::ostream* file_log = manager_->GetCompiler()-> + GetCompilerOptions().GetInitFailureOutput(); + if (file_log != nullptr) { + *file_log << descriptor << "\n"; + *file_log << exception->Dump() << "\n"; + } + soa.Self()->ClearException(); + transaction.Rollback(); + CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored"; } - soa.Self()->ClearException(); - transaction.Rollback(); - CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored"; } } + soa.Self()->AssertNoPendingException(); } - soa.Self()->AssertNoPendingException(); } + // Record the final class status if necessary. + ClassReference ref(manager_->GetDexFile(), class_def_index); + manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); } - // Record the final class status if necessary. - ClassReference ref(manager->GetDexFile(), class_def_index); - manager->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); + // Clear any class not found or verification exceptions. + soa.Self()->ClearException(); } - // Clear any class not found or verification exceptions. - soa.Self()->ClearException(); -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -2105,7 +2301,8 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& } else { thread_count = thread_count_; } - context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count); + InitializeClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); } void CompilerDriver::InitializeClasses(jobject class_loader, @@ -2132,101 +2329,102 @@ void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFi VLOG(compiler) << "Compile: " << GetMemoryUsageString(false); } -void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, - size_t class_def_index) { - ATRACE_CALL(); - const DexFile& dex_file = *manager->GetDexFile(); - const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); - ClassLinker* class_linker = manager->GetClassLinker(); - jobject jclass_loader = manager->GetClassLoader(); - Thread* self = Thread::Current(); - { - // Use a scoped object access to perform to the quick SkipClass check. 
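The transactional <clinit> sequence above deserves an isolated sketch, using only the calls visible in the patch (logging, the $NoPreloadHolder gate, and the no-suspension assertion are elided; the helper name is hypothetical):

static bool InitializeInTransaction(Runtime* runtime,
                                    ClassLinker* class_linker,
                                    Thread* self,
                                    Handle<mirror::Class> klass)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  Transaction transaction;
  runtime->EnterTransactionMode(&transaction);
  const mirror::Class::Status old_status = klass->GetStatus();
  const bool success = class_linker->EnsureInitialized(
      self, klass, true /* can_init_fields */, true /* can_init_parents */);
  // Leave transactional mode before any rollback so the GC does not visit
  // objects modified (and possibly freed) during the transaction.
  runtime->ExitTransactionMode();
  if (!success) {
    self->ClearException();
    transaction.Rollback();
    CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
  }
  return success;
}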
- const char* descriptor = dex_file.GetClassDescriptor(class_def); - ScopedObjectAccess soa(self); - StackHandleScope<3> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - Handle<mirror::Class> klass( - hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); - if (klass.Get() == nullptr) { - CHECK(soa.Self()->IsExceptionPending()); - soa.Self()->ClearException(); - } else if (SkipClass(jclass_loader, dex_file, klass.Get())) { +class CompileClassVisitor : public CompilationVisitor { + public: + explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} + + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + Thread* self = Thread::Current(); + { + // Use a scoped object access to perform the quick SkipClass check. + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ScopedObjectAccess soa(self); + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + Handle<mirror::Class> klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + if (klass.Get() == nullptr) { + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + } else if (SkipClass(jclass_loader, dex_file, klass.Get())) { + return; + } + } + ClassReference ref(&dex_file, class_def_index); + // Skip compiling classes with generic verifier failures since they will still fail at runtime + if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) { + return; + } + const uint8_t* class_data = dex_file.GetClassData(class_def); + if (class_data == nullptr) { + // empty class, probably a marker interface + return; + } + CompilerDriver* const driver = manager_->GetCompiler(); - // Can we run DEX-to-DEX compiler on this class ? - DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; - { - ScopedObjectAccess soa(self); - StackHandleScope<1> hs(soa.Self()); - Handle<mirror::ClassLoader> class_loader( - hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel( - soa.Self(), class_loader, dex_file, class_def); - } - ClassDataItemIterator it(dex_file, class_data); - // Skip fields - while (it.HasNextStaticField()) { - it.Next(); - } - while (it.HasNextInstanceField()) { - it.Next(); - } + // Can we run DEX-to-DEX compiler on this class ?
+ optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level = + GetDexToDexCompilationLevel(self, *driver, jclass_loader, dex_file, class_def); + + ClassDataItemIterator it(dex_file, class_data); + // Skip fields + while (it.HasNextStaticField()) { + it.Next(); + } + while (it.HasNextInstanceField()) { + it.Next(); + } - bool compilation_enabled = driver->IsClassToCompile( - dex_file.StringByTypeIdx(class_def.class_idx_)); + bool compilation_enabled = driver->IsClassToCompile( + dex_file.StringByTypeIdx(class_def.class_idx_)); - // Compile direct methods - int64_t previous_direct_method_idx = -1; - while (it.HasNextDirectMethod()) { - uint32_t method_idx = it.GetMemberIndex(); - if (method_idx == previous_direct_method_idx) { - // smali can create dex files with two encoded_methods sharing the same method_idx - // http://code.google.com/p/smali/issues/detail?id=119 + // Compile direct methods + int64_t previous_direct_method_idx = -1; + while (it.HasNextDirectMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == previous_direct_method_idx) { + // smali can create dex files with two encoded_methods sharing the same method_idx + // http://code.google.com/p/smali/issues/detail?id=119 + it.Next(); + continue; + } + previous_direct_method_idx = method_idx; + CompileMethod(self, driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), + it.GetMethodInvokeType(class_def), class_def_index, + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, + compilation_enabled); it.Next(); - continue; - } - previous_direct_method_idx = method_idx; - driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), - it.GetMethodInvokeType(class_def), class_def_index, - method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, - compilation_enabled); - it.Next(); - } - // Compile virtual methods - int64_t previous_virtual_method_idx = -1; - while (it.HasNextVirtualMethod()) { - uint32_t method_idx = it.GetMemberIndex(); - if (method_idx == previous_virtual_method_idx) { - // smali can create dex files with two encoded_methods sharing the same method_idx - // http://code.google.com/p/smali/issues/detail?id=119 + } + // Compile virtual methods + int64_t previous_virtual_method_idx = -1; + while (it.HasNextVirtualMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == previous_virtual_method_idx) { + // smali can create dex files with two encoded_methods sharing the same method_idx + // http://code.google.com/p/smali/issues/detail?id=119 + it.Next(); + continue; + } + previous_virtual_method_idx = method_idx; + CompileMethod(self, driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), + it.GetMethodInvokeType(class_def), class_def_index, + method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, + compilation_enabled); it.Next(); - continue; } - previous_virtual_method_idx = method_idx; - driver->CompileMethod(self, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), - it.GetMethodInvokeType(class_def), class_def_index, - method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level, - compilation_enabled); - it.Next(); + DCHECK(!it.HasNext()); } - DCHECK(!it.HasNext()); -} + + private: + const ParallelCompilationManager* const manager_; +}; void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, @@ -2234,109 +2432,22 @@ void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_fil TimingLogger::ScopedTiming t("Compile 
Dex File", timings); ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this, &dex_file, dex_files, thread_pool); - context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_); -} - -// Does the runtime for the InstructionSet provide an implementation returned by -// GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler? -static bool InstructionSetHasGenericJniStub(InstructionSet isa) { - switch (isa) { - case kArm: - case kArm64: - case kThumb2: - case kMips: - case kMips64: - case kX86: - case kX86_64: return true; - default: return false; - } + CompileClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count_); } -void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_item, - uint32_t access_flags, InvokeType invoke_type, - uint16_t class_def_idx, uint32_t method_idx, - jobject class_loader, const DexFile& dex_file, - DexToDexCompilationLevel dex_to_dex_compilation_level, - bool compilation_enabled) { - CompiledMethod* compiled_method = nullptr; - uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0; - MethodReference method_ref(&dex_file, method_idx); - - if ((access_flags & kAccNative) != 0) { - // Are we interpreting only and have support for generic JNI down calls? - if (!compiler_options_->IsCompilationEnabled() && - InstructionSetHasGenericJniStub(instruction_set_)) { - // Leaving this empty will trigger the generic JNI version - } else { - compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file); - CHECK(compiled_method != nullptr); - } - } else if ((access_flags & kAccAbstract) != 0) { - // Abstract methods don't have code. - } else { - bool has_verified_method = verification_results_->GetVerifiedMethod(method_ref) != nullptr; - bool compile = compilation_enabled && - // Basic checks, e.g., not <clinit>. - verification_results_->IsCandidateForCompilation(method_ref, access_flags) && - // Did not fail to create VerifiedMethod metadata. - has_verified_method && - // Is eligible for compilation by methods-to-compile filter. - IsMethodToCompile(method_ref); - if (compile) { - // NOTE: if compiler declines to compile this method, it will return null. - compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx, - method_idx, class_loader, dex_file); - } - if (compiled_method == nullptr && dex_to_dex_compilation_level != kDontDexToDexCompile) { - // TODO: add a command-line option to disable DEX-to-DEX compilation? - // Do not optimize if a VerifiedMethod is missing. SafeCast elision, for example, relies on - // it. - (*dex_to_dex_compiler_)(*this, code_item, access_flags, - invoke_type, class_def_idx, - method_idx, class_loader, dex_file, - has_verified_method ? dex_to_dex_compilation_level : kRequired); - } - } - if (kTimeCompileMethod) { - uint64_t duration_ns = NanoTime() - start_ns; - if (duration_ns > MsToNs(compiler_->GetMaximumCompilationTimeBeforeWarning())) { - LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file) - << " took " << PrettyDuration(duration_ns); - } - } - - if (compiled_method != nullptr) { - // Count non-relative linker patches.
- size_t non_relative_linker_patch_count = 0u; - for (const LinkerPatch& patch : compiled_method->GetPatches()) { - if (!patch.IsPcRelative()) { - ++non_relative_linker_patch_count; - } - } - bool compile_pic = GetCompilerOptions().GetCompilePic(); // Off by default - // When compiling with PIC, there should be zero non-relative linker patches - CHECK(!compile_pic || non_relative_linker_patch_count == 0u); - - DCHECK(GetCompiledMethod(method_ref) == nullptr) << PrettyMethod(method_idx, dex_file); - { - MutexLock mu(self, compiled_methods_lock_); - compiled_methods_.Put(method_ref, compiled_method); - non_relative_linker_patch_count_ += non_relative_linker_patch_count; - } - DCHECK(GetCompiledMethod(method_ref) != nullptr) << PrettyMethod(method_idx, dex_file); - } - - // Done compiling, delete the verified method to reduce native memory usage. Do not delete in - // optimizing compiler, which may need the verified method again for inlining. - if (compiler_kind_ != Compiler::kOptimizing) { - verification_results_->RemoveVerifiedMethod(method_ref); - } - - if (self->IsExceptionPending()) { - ScopedObjectAccess soa(self); - LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n" - << self->GetException()->Dump(); +void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref, + CompiledMethod* const compiled_method, + size_t non_relative_linker_patch_count) { + DCHECK(GetCompiledMethod(method_ref) == nullptr) + << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file); + { + MutexLock mu(Thread::Current(), compiled_methods_lock_); + compiled_methods_.Put(method_ref, compiled_method); + non_relative_linker_patch_count_ += non_relative_linker_patch_count; } + DCHECK(GetCompiledMethod(method_ref) != nullptr) + << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file); } void CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) { @@ -2447,7 +2558,7 @@ bool CompilerDriver::WriteElf(const std::string& android_root, const std::vector<const art::DexFile*>& dex_files, OatWriter* oat_writer, art::File* file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kProduce64BitELFFiles && Is64BitInstructionSet(GetInstructionSet())) { return art::ElfWriterQuick64::Create(file, oat_writer, dex_files, android_root, is_host, *this); } else { diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 2d7ceaeea1..5718be9e89 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -80,13 +80,6 @@ enum EntryPointCallingConvention { kQuickAbi }; -enum DexToDexCompilationLevel { - kDontDexToDexCompile, // Only meaning wrt image time interpretation. - kRequired, // Dex-to-dex compilation required for correctness. - kOptimize // Perform required transformation and peep-hole optimizations. -}; -std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs); - static constexpr bool kUseMurmur3Hash = true; class CompilerDriver { @@ -96,32 +89,33 @@ class CompilerDriver { // enabled. "image_classes" lets the compiler know what classes it // can assume will be in the image, with null implying all available // classes. 
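With method compilation hoisted out of CompilerDriver, results are now recorded through the public AddCompiledMethod() shown above. A minimal sketch of a caller, assuming only names visible in this diff (RecordCompiledMethod itself is hypothetical):

static void RecordCompiledMethod(CompilerDriver* driver,
                                 const MethodReference& method_ref,
                                 CompiledMethod* compiled_method) {
  // Tally the non-relative linker patches, as the removed inline code did;
  // a PIC compile must end up with zero of them.
  size_t non_relative_linker_patch_count = 0u;
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (!patch.IsPcRelative()) {
      ++non_relative_linker_patch_count;
    }
  }
  driver->AddCompiledMethod(method_ref, compiled_method, non_relative_linker_patch_count);
}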
- explicit CompilerDriver(const CompilerOptions* compiler_options, - VerificationResults* verification_results, - DexFileToMethodInlinerMap* method_inliner_map, - Compiler::Kind compiler_kind, - InstructionSet instruction_set, - const InstructionSetFeatures* instruction_set_features, - bool image, std::unordered_set<std::string>* image_classes, - std::unordered_set<std::string>* compiled_classes, - std::unordered_set<std::string>* compiled_methods, - size_t thread_count, bool dump_stats, bool dump_passes, - const std::string& dump_cfg_file_name, - CumulativeLogger* timer, int swap_fd, - const std::string& profile_file); + CompilerDriver(const CompilerOptions* compiler_options, + VerificationResults* verification_results, + DexFileToMethodInlinerMap* method_inliner_map, + Compiler::Kind compiler_kind, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features, + bool image, std::unordered_set<std::string>* image_classes, + std::unordered_set<std::string>* compiled_classes, + std::unordered_set<std::string>* compiled_methods, + size_t thread_count, bool dump_stats, bool dump_passes, + const std::string& dump_cfg_file_name, + CumulativeLogger* timer, int swap_fd, + const std::string& profile_file); ~CompilerDriver(); void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); - CompiledMethod* CompileMethod(Thread* self, ArtMethod*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) WARN_UNUSED; + CompiledMethod* CompileArtMethod(Thread* self, ArtMethod*) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!compiled_methods_lock_) WARN_UNUSED; // Compile a single Method. void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_); VerificationResults* GetVerificationResults() const { return verification_results_; @@ -162,54 +156,61 @@ class CompilerDriver { // Generate the trampolines that are invoked by unresolved direct methods. 
const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateJniDlsymLookup() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); CompiledClass* GetCompiledClass(ClassReference ref) const - LOCKS_EXCLUDED(compiled_classes_lock_); + REQUIRES(!compiled_classes_lock_); CompiledMethod* GetCompiledMethod(MethodReference ref) const - LOCKS_EXCLUDED(compiled_methods_lock_); + REQUIRES(!compiled_methods_lock_); size_t GetNonRelativeLinkerPatchCount() const - LOCKS_EXCLUDED(compiled_methods_lock_); + REQUIRES(!compiled_methods_lock_); + // Add a compiled method. + void AddCompiledMethod(const MethodReference& method_ref, + CompiledMethod* const compiled_method, + size_t non_relative_linker_patch_count) + REQUIRES(!compiled_methods_lock_); // Remove and delete a compiled method. - void RemoveCompiledMethod(const MethodReference& method_ref); + void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_); void AddRequiresConstructorBarrier(Thread* self, const DexFile* dex_file, - uint16_t class_def_index); + uint16_t class_def_index) + REQUIRES(!freezing_constructor_lock_); bool RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, - uint16_t class_def_index) const; + uint16_t class_def_index) const + REQUIRES(!freezing_constructor_lock_); // Callbacks from compiler to see what runtime checks must be generated. bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx); bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Are runtime access checks necessary in the compiled code? bool CanAccessTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx, bool* type_known_final = nullptr, bool* type_known_abstract = nullptr, bool* equals_referrers_class = nullptr) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Are runtime access and instantiable checks necessary in the code? 
bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, const DexFile& dex_file, uint32_t type_idx) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx, bool* is_type_initialized, bool* use_direct_type_ptr, @@ -223,22 +224,22 @@ class CompilerDriver { // Get the DexCache for the compilation unit. mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve the compiling method's class. Returns null on failure. mirror::Class* ResolveCompilingMethodsClass( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* ResolveClass( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, uint16_t type_index, const DexCompilationUnit* mUnit) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a field. Returns null on failure, including incompatible class change. // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static. @@ -246,40 +247,40 @@ class CompilerDriver { const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t field_idx, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a field with a given dex file. ArtField* ResolveFieldWithDexFile( const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file, uint32_t field_idx, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get declaration location of a resolved field. void GetResolvedFieldDexFileLocation( ArtField* resolved_field, const DexFile** declaring_dex_file, uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool IsFieldVolatile(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - MemberOffset GetFieldOffset(ArtField* field) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_); + MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_); // Find a dex cache for a dex file. inline mirror::DexCache* FindDexCache(const DexFile* dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset. std::pair<bool, bool> IsFastInstanceField( mirror::DexCache* dex_cache, mirror::Class* referrer_class, ArtField* resolved_field, uint16_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index // of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField( mirror::DexCache* dex_cache, mirror::Class* referrer_class, ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return whether the declaring class of `resolved_method` is // available to `referrer_class`. If this is true, compute the type @@ -291,34 +292,34 @@ class CompilerDriver { ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is the static field in the referrer's class? bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is the static field's class initialized? bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class, ArtField* resolved_field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a method. Returns null on failure, including incompatible class change. ArtMethod* ResolveMethod( ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get declaration location of a resolved method. void GetResolvedMethodDexFileLocation( ArtMethod* resolved_method, const DexFile** declaring_dex_file, uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the index in the vtable of the method. uint16_t GetResolvedMethodVTableIndex( ArtMethod* resolved_method, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value // for ProcessedInvoke() and computes the necessary lowering info. @@ -328,13 +329,13 @@ class CompilerDriver { mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type, MethodReference* target_method, const MethodReference* devirt_target, uintptr_t* direct_code, uintptr_t* direct_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is the method's class initialized for an invoke? // For static invokes to determine whether we need to consider potential call to <clinit>(). // For non-static invokes, assuming a non-null reference, the class is always initialized. bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the // dex cache arrays don't have a fixed layout. @@ -349,18 +350,18 @@ class CompilerDriver { ArtField** resolved_field, mirror::Class** referrer_class, mirror::DexCache** dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, MemberOffset* field_offset, bool* is_volatile) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); ArtField* ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast-path static field access? Computes the field's offset, volatility and whether the @@ -369,7 +370,7 @@ class CompilerDriver { MemberOffset* field_offset, uint32_t* storage_index, bool* is_referrers_class, bool* is_volatile, bool* is_initialized, Primitive::Type* type) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Can we fast-path an interface, super class or virtual method call? Computes the method's vtable // index. @@ -377,7 +378,7 @@ class CompilerDriver { bool update_stats, bool enable_devirtualization, InvokeType* type, MethodReference* target_method, int* vtable_idx, uintptr_t* direct_code, uintptr_t* direct_method) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const; bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc); @@ -445,7 +446,7 @@ class CompilerDriver { bool IsMethodToCompile(const MethodReference& method_ref) const; void RecordClassStatus(ClassReference ref, mirror::Class::Status status) - LOCKS_EXCLUDED(compiled_classes_lock_); + REQUIRES(!compiled_classes_lock_); // Checks if the specified method has been verified without failures. Returns // false if the method is not in the verification results (GetVerificationResults). @@ -473,6 +474,10 @@ class CompilerDriver { had_hard_verifier_failure_ = true; } + Compiler::Kind GetCompilerKind() { + return compiler_kind_; + } + private: // Return whether the declaring class of `resolved_member` is // available to `referrer_class` for read or write access using two @@ -487,7 +492,7 @@ class CompilerDriver { ArtMember* resolved_member, uint16_t member_idx, uint32_t* storage_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can `referrer_class` access the resolved `member`? // Dispatch call to mirror::Class::CanAccessResolvedField or @@ -499,17 +504,17 @@ class CompilerDriver { ArtMember* member, mirror::DexCache* dex_cache, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we assume that the klass is initialized? bool CanAssumeClassIsInitialized(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we assume that the klass is loaded? bool CanAssumeClassIsLoaded(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics. // The only external contract is that unresolved method has flags 0 and resolved non-0.
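Most of the churn in this header is the move from the old lock annotations (SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED) to Clang's capability-based ones (SHARED_REQUIRES, REQUIRES(!lock)). A stand-alone sketch of the scheme, with simplified macro definitions in the spirit of the Clang thread-safety docs (ART's real definitions differ in detail):

#define CAPABILITY(x)        __attribute__((capability(x)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock()   __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

class Driver {
 public:
  // Caller must NOT hold lock_ on entry (the old LOCKS_EXCLUDED(lock_)).
  void Compile() REQUIRES(!lock_) {
    lock_.Lock();
    ++count_;
    lock_.Unlock();
  }
  // Caller must hold lock_ at least shared (the old SHARED_LOCKS_REQUIRED).
  int ReadCount() const SHARED_REQUIRES(lock_) { return count_; }

 private:
  mutable Mutex lock_;
  int count_ GUARDED_BY(lock_) = 0;
};

Compiled with -Wthread-safety (and -Wthread-safety-negative for the REQUIRES(!lock) form), the analysis rejects callers that hold or fail to hold the named lock.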
@@ -540,71 +545,58 @@ class CompilerDriver { /*out*/int* stats_flags, MethodReference* target_method, uintptr_t* direct_code, uintptr_t* direct_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - DexToDexCompilationLevel GetDexToDexCompilationlevel( - Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, - const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); - void LoadImageClasses(TimingLogger* timings); + void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings); void VerifyDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void SetVerified(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings); void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); void InitializeClasses(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_); + REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_); - void UpdateImageClasses(TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_); + void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); static void FindClinitImageClassesCallback(mirror::Object* object, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings); void CompileDexFile(jobject class_loader, const DexFile& dex_file, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) - LOCKS_EXCLUDED(Locks::mutator_lock_); - void CompileMethod(Thread* self, const DexFile::CodeItem* code_item, uint32_t access_flags, - InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, - jobject class_loader, const 
DexFile& dex_file, - DexToDexCompilationLevel dex_to_dex_compilation_level, - bool compilation_enabled) - LOCKS_EXCLUDED(compiled_methods_lock_); - - static void CompileClass(const ParallelCompilationManager* context, size_t class_def_index) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first // as other fields rely on this. @@ -634,8 +626,13 @@ class CompilerDriver { ClassTable compiled_classes_ GUARDED_BY(compiled_classes_lock_); typedef SafeMap<const MethodReference, CompiledMethod*, MethodReferenceComparator> MethodTable; - // All method references that this compiler has compiled. + + public: + // Lock is public so that non-members can have lock annotations. mutable Mutex compiled_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + + private: + // All method references that this compiler has compiled. MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_); // Number of non-relative patches in all compiled methods. These patches need space // in the .oat_patches ELF section if requested in the compiler options. @@ -675,14 +672,6 @@ class CompilerDriver { typedef void (*CompilerCallbackFn)(CompilerDriver& driver); typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver); - typedef void (*DexToDexCompilerFn)(CompilerDriver& driver, - const DexFile::CodeItem* code_item, - uint32_t access_flags, InvokeType invoke_type, - uint32_t class_dex_idx, uint32_t method_idx, - jobject class_loader, const DexFile& dex_file, - DexToDexCompilationLevel dex_to_dex_compilation_level); - DexToDexCompilerFn dex_to_dex_compiler_; - void* compiler_context_; bool support_boot_image_fixup_; @@ -775,6 +764,7 @@ class CompilerDriver { DedupeSet<ArrayRef<const uint8_t>, SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_; + friend class CompileClassVisitor; DISALLOW_COPY_AND_ASSIGN(CompilerDriver); }; diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index b358f4f396..e35d07da83 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -37,7 +37,7 @@ namespace art { class CompilerDriverTest : public CommonCompilerTest { protected: - void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) { + void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) { TimingLogger timings("CompilerDriverTest::CompileAll", false, false); TimingLogger::ScopedTiming t(__FUNCTION__, &timings); compiler_driver_->CompileAll(class_loader, @@ -49,7 +49,7 @@ class CompilerDriverTest : public CommonCompilerTest { void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, const char* signature, bool is_virtual) - LOCKS_EXCLUDED(Locks::mutator_lock_) { + REQUIRES(!Locks::mutator_lock_) { CompileAll(class_loader); Thread::Current()->TransitionFromSuspendedToRunnable(); bool started = runtime_->Start(); diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc index 226e6b7952..3f5a1eabb6 100644 --- a/compiler/driver/compiler_options.cc +++ b/compiler/driver/compiler_options.cc @@ -27,6 +27,8 @@ CompilerOptions::CompilerOptions() small_method_threshold_(kDefaultSmallMethodThreshold), tiny_method_threshold_(kDefaultTinyMethodThreshold), num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold), + inline_depth_limit_(kDefaultInlineDepthLimit), + inline_max_code_units_(kDefaultInlineMaxCodeUnits), 
include_patch_information_(kDefaultIncludePatchInformation), top_k_profile_threshold_(kDefaultTopKProfileThreshold), debuggable_(false), @@ -52,6 +54,8 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter, size_t small_method_threshold, size_t tiny_method_threshold, size_t num_dex_methods_threshold, + size_t inline_depth_limit, + size_t inline_max_code_units, bool include_patch_information, double top_k_profile_threshold, bool debuggable, @@ -71,6 +75,8 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter, small_method_threshold_(small_method_threshold), tiny_method_threshold_(tiny_method_threshold), num_dex_methods_threshold_(num_dex_methods_threshold), + inline_depth_limit_(inline_depth_limit), + inline_max_code_units_(inline_max_code_units), include_patch_information_(include_patch_information), top_k_profile_threshold_(top_k_profile_threshold), debuggable_(debuggable), diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index fe681e2a53..d2a90ec87f 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -51,6 +51,12 @@ class CompilerOptions FINAL { static constexpr double kDefaultTopKProfileThreshold = 90.0; static const bool kDefaultGenerateDebugInfo = kIsDebugBuild; static const bool kDefaultIncludePatchInformation = false; + static const size_t kDefaultInlineDepthLimit = 3; + static const size_t kDefaultInlineMaxCodeUnits = 18; + + // Default inlining settings when the space filter is used. + static constexpr size_t kSpaceFilterInlineDepthLimit = 3; + static constexpr size_t kSpaceFilterInlineMaxCodeUnits = 10; CompilerOptions(); ~CompilerOptions(); @@ -61,6 +67,8 @@ class CompilerOptions FINAL { size_t small_method_threshold, size_t tiny_method_threshold, size_t num_dex_methods_threshold, + size_t inline_depth_limit, + size_t inline_max_code_units, bool include_patch_information, double top_k_profile_threshold, bool debuggable, @@ -137,6 +145,14 @@ class CompilerOptions FINAL { return num_dex_methods_threshold_; } + size_t GetInlineDepthLimit() const { + return inline_depth_limit_; + } + + size_t GetInlineMaxCodeUnits() const { + return inline_max_code_units_; + } + double GetTopKProfileThreshold() const { return top_k_profile_threshold_; } @@ -202,6 +218,8 @@ class CompilerOptions FINAL { const size_t small_method_threshold_; const size_t tiny_method_threshold_; const size_t num_dex_methods_threshold_; + const size_t inline_depth_limit_; + const size_t inline_max_code_units_; const bool include_patch_information_; // When using a profile file only the top K% of the profiled samples will be compiled. const double top_k_profile_threshold_; diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc index 4d423d007f..a07d27c1d2 100644 --- a/compiler/dwarf/dwarf_test.cc +++ b/compiler/dwarf/dwarf_test.cc @@ -27,7 +27,7 @@ namespace art { namespace dwarf { // Run the tests only on host since we need objdump. 
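The new inline_depth_limit_ and inline_max_code_units_ options in the CompilerOptions hunk above default to 3 and 18, with tighter values (3 and 10) reserved for the space filter. A hypothetical guard showing how an inliner might consult them (ShouldInline is illustrative, not ART's API):

// Illustrative only: gate an inlining candidate on the two new limits.
bool ShouldInline(const CompilerOptions& options,
                  size_t current_inlining_depth,
                  size_t callee_code_units) {
  return current_inlining_depth < options.GetInlineDepthLimit() &&
         callee_code_units <= options.GetInlineMaxCodeUnits();
}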
-#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ constexpr CFIFormat kCFIFormat = DW_DEBUG_FRAME_FORMAT; @@ -336,7 +336,7 @@ TEST_F(DwarfTest, DebugInfo) { CheckObjdumpOutput(is64bit, "-W"); } -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace dwarf } // namespace art diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h index 8e13b51bbe..03f8ceb306 100644 --- a/compiler/elf_writer.h +++ b/compiler/elf_writer.h @@ -57,7 +57,7 @@ class ElfWriter { const std::vector<const DexFile*>& dex_files, const std::string& android_root, bool is_host) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; const CompilerDriver* const compiler_driver_; File* const elf_file_; diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc index c68bbc0655..c10ffebbbc 100644 --- a/compiler/elf_writer_debug.cc +++ b/compiler/elf_writer_debug.cc @@ -249,16 +249,16 @@ void WriteDebugSections(const CompilerDriver* compiler, // Find all addresses (low_pc) which contain deduped methods. // The first instance of a method is not marked deduped_, but the rest are. std::unordered_set<uint32_t> deduped_addresses; - for (auto it = method_infos.begin(); it != method_infos.end(); ++it) { - if (it->deduped_) { - deduped_addresses.insert(it->low_pc_); + for (const OatWriter::DebugInfo& mi : method_infos) { + if (mi.deduped_) { + deduped_addresses.insert(mi.low_pc_); } } // Group the methods into compilation units based on source file. std::vector<std::vector<const OatWriter::DebugInfo*>> compilation_units; const char* last_source_file = nullptr; - for (const auto& mi : method_infos) { + for (const OatWriter::DebugInfo& mi : method_infos) { // Attribute a given instruction range to only a single method. // Otherwise the debugger might get really confused.
if (!mi.deduped_) { diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h index fd202eeb5f..83781abeff 100644 --- a/compiler/elf_writer_quick.h +++ b/compiler/elf_writer_quick.h @@ -33,7 +33,7 @@ class ElfWriterQuick FINAL : public ElfWriter { const std::string& android_root, bool is_host, const CompilerDriver& driver) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void EncodeOatPatches(const std::vector<uintptr_t>& locations, std::vector<uint8_t>* buffer); @@ -44,7 +44,7 @@ class ElfWriterQuick FINAL : public ElfWriter { const std::string& android_root, bool is_host) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: ElfWriterQuick(const CompilerDriver& driver, File* elf_file) diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 73e121f1cd..93897aa228 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -73,7 +73,7 @@ static constexpr bool kBinObjects = true; static constexpr bool kComputeEagerResolvedStrings = false; static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Class* klass = obj->GetClass(); CHECK_NE(PrettyClass(klass), "com.android.dex.Dex"); } @@ -244,8 +244,8 @@ void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot DCHECK(object != nullptr); DCHECK_NE(image_objects_offset_begin_, 0u); - size_t previous_bin_sizes = bin_slot_previous_sizes_[bin_slot.GetBin()]; - size_t new_offset = image_objects_offset_begin_ + previous_bin_sizes + bin_slot.GetIndex(); + size_t bin_slot_offset = bin_slot_offsets_[bin_slot.GetBin()]; + size_t new_offset = bin_slot_offset + bin_slot.GetIndex(); DCHECK_ALIGNED(new_offset, kObjectAlignment); SetImageOffset(object, new_offset); @@ -539,16 +539,19 @@ bool ImageWriter::AllocMemory() { return true; } +class ComputeLazyFieldsForClassesVisitor : public ClassVisitor { + public: + bool Visit(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + StackHandleScope<1> hs(Thread::Current()); + mirror::Class::ComputeName(hs.NewHandle(c)); + return true; + } +}; + void ImageWriter::ComputeLazyFieldsForImageClasses() { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr); -} - -bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) { - Thread* self = Thread::Current(); - StackHandleScope<1> hs(self); - mirror::Class::ComputeName(hs.NewHandle(c)); - return true; + ComputeLazyFieldsForClassesVisitor visitor; + class_linker->VisitClassesWithoutClassesLock(&visitor); } void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) { @@ -592,9 +595,20 @@ bool ImageWriter::IsImageClass(Class* klass) { return compiler_driver_.IsImageClass(klass->GetDescriptor(&temp)); } -struct NonImageClasses { - ImageWriter* image_writer; - std::set<std::string>* non_image_classes; +class NonImageClassesVisitor : public ClassVisitor { + public: + explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {} + + bool Visit(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (!image_writer_->IsImageClass(klass)) { + std::string temp; + non_image_classes_.insert(klass->GetDescriptor(&temp)); + } + return true; + } + + std::set<std::string> non_image_classes_; + ImageWriter* const 
image_writer_; }; void ImageWriter::PruneNonImageClasses() { @@ -606,14 +620,11 @@ void ImageWriter::PruneNonImageClasses() { Thread* self = Thread::Current(); // Make a list of classes we would like to prune. - std::set<std::string> non_image_classes; - NonImageClasses context; - context.image_writer = this; - context.non_image_classes = &non_image_classes; - class_linker->VisitClasses(NonImageClassesVisitor, &context); + NonImageClassesVisitor visitor(this); + class_linker->VisitClasses(&visitor); // Remove the undesired classes from the class roots. - for (const std::string& it : non_image_classes) { + for (const std::string& it : visitor.non_image_classes_) { bool result = class_linker->RemoveClass(it.c_str(), nullptr); DCHECK(result); } @@ -669,15 +680,6 @@ void ImageWriter::PruneNonImageClasses() { class_linker->DropFindArrayClassCache(); } -bool ImageWriter::NonImageClassesVisitor(Class* klass, void* arg) { - NonImageClasses* context = reinterpret_cast<NonImageClasses*>(arg); - if (!context->image_writer->IsImageClass(klass)) { - std::string temp; - context->non_image_classes->insert(klass->GetDescriptor(&temp)); - } - return true; -} - void ImageWriter::CheckNonImageClassesRemoved() { if (compiler_driver_.GetImageClasses() != nullptr) { gc::Heap* heap = Runtime::Current()->GetHeap(); @@ -715,8 +717,10 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) { DCHECK_EQ(obj, obj->AsString()->Intern()); return; } - mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrong( - obj->AsString()->Intern()); + // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since + // we are guaranteed to not have GC during image writing. + mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString( + obj->AsString()); if (obj != interned) { if (!IsImageBinSlotAssigned(interned)) { // interned obj is after us, allocate its location early @@ -821,35 +825,72 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(mirror::HeapReference<mirror::Object>)); } - // Visit and assign offsets for fields. + // Visit and assign offsets for fields and field arrays. auto* as_klass = h_obj->AsClass(); - ArtField* fields[] = { as_klass->GetSFields(), as_klass->GetIFields() }; - size_t num_fields[] = { as_klass->NumStaticFields(), as_klass->NumInstanceFields() }; - for (size_t i = 0; i < 2; ++i) { - for (size_t j = 0; j < num_fields[i]; ++j) { - auto* field = fields[i] + j; - auto it = native_object_reloc_.find(field); - CHECK(it == native_object_reloc_.end()) << "Field at index " << i << ":" << j - << " already assigned " << PrettyField(field); - native_object_reloc_.emplace( - field, NativeObjectReloc { bin_slot_sizes_[kBinArtField], kBinArtField }); - bin_slot_sizes_[kBinArtField] += sizeof(ArtField); + LengthPrefixedArray<ArtField>* fields[] = { + as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(), + }; + for (LengthPrefixedArray<ArtField>* cur_fields : fields) { + // Total array length including header. + if (cur_fields != nullptr) { + const size_t header_size = LengthPrefixedArray<ArtField>::ComputeSize(0); + // Forward the entire array at once. 
+ auto it = native_object_relocations_.find(cur_fields); + CHECK(it == native_object_relocations_.end()) << "Field array " << cur_fields + << " already forwarded"; + size_t& offset = bin_slot_sizes_[kBinArtField]; + native_object_relocations_.emplace( + cur_fields, NativeObjectRelocation { + offset, kNativeObjectRelocationTypeArtFieldArray }); + offset += header_size; + // Forward individual fields so that we can quickly find where they belong. + for (size_t i = 0, count = cur_fields->Length(); i < count; ++i) { + // Need to forward arrays separately from fields. + ArtField* field = &cur_fields->At(i); + auto it2 = native_object_relocations_.find(field); + CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i + << " already assigned " << PrettyField(field) << " static=" << field->IsStatic(); + native_object_relocations_.emplace( + field, NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtField }); + offset += sizeof(ArtField); + } } } // Visit and assign offsets for methods. - IterationRange<StrideIterator<ArtMethod>> method_arrays[] = { - as_klass->GetDirectMethods(target_ptr_size_), - as_klass->GetVirtualMethods(target_ptr_size_) + LengthPrefixedArray<ArtMethod>* method_arrays[] = { + as_klass->GetDirectMethodsPtr(), as_klass->GetVirtualMethodsPtr(), }; - for (auto& array : method_arrays) { + for (LengthPrefixedArray<ArtMethod>* array : method_arrays) { + if (array == nullptr) { + continue; + } bool any_dirty = false; size_t count = 0; - for (auto& m : array) { + const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_); + const size_t method_size = ArtMethod::Size(target_ptr_size_); + auto iteration_range = + MakeIterationRangeFromLengthPrefixedArray(array, method_size, method_alignment); + for (auto& m : iteration_range) { any_dirty = any_dirty || WillMethodBeDirty(&m); ++count; } - for (auto& m : array) { - AssignMethodOffset(&m, any_dirty ? kBinArtMethodDirty : kBinArtMethodClean); + NativeObjectRelocationType type = any_dirty ? kNativeObjectRelocationTypeArtMethodDirty : + kNativeObjectRelocationTypeArtMethodClean; + Bin bin_type = BinTypeForNativeRelocationType(type); + // Forward the entire array at once, but header first. + const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0, + method_size, + method_alignment); + auto it = native_object_relocations_.find(array); + CHECK(it == native_object_relocations_.end()) << "Method array " << array + << " already forwarded"; + size_t& offset = bin_slot_sizes_[bin_type]; + native_object_relocations_.emplace(array, NativeObjectRelocation { offset, + any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty : + kNativeObjectRelocationTypeArtMethodArrayClean }); + offset += header_size; + for (auto& m : iteration_range) { + AssignMethodOffset(&m, type); } (any_dirty ? 
dirty_methods_ : clean_methods_) += count; } @@ -867,12 +908,13 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { } } -void ImageWriter::AssignMethodOffset(ArtMethod* method, Bin bin) { - auto it = native_object_reloc_.find(method); - CHECK(it == native_object_reloc_.end()) << "Method " << method << " already assigned " +void ImageWriter::AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type) { + auto it = native_object_relocations_.find(method); + CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned " << PrettyMethod(method); - native_object_reloc_.emplace(method, NativeObjectReloc { bin_slot_sizes_[bin], bin }); - bin_slot_sizes_[bin] += ArtMethod::ObjectSize(target_ptr_size_); + size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(type)]; + native_object_relocations_.emplace(method, NativeObjectRelocation { offset, type }); + offset += ArtMethod::Size(target_ptr_size_); } void ImageWriter::WalkFieldsCallback(mirror::Object* obj, void* arg) { @@ -926,19 +968,40 @@ void ImageWriter::CalculateNewObjectOffsets() { runtime->GetCalleeSaveMethod(Runtime::kRefsOnly); image_methods_[ImageHeader::kRefsAndArgsSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs); + + // Add room for fake length prefixed array. + const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean; + auto it = native_object_relocations_.find(&image_method_array_); + CHECK(it == native_object_relocations_.end()); + size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)]; + native_object_relocations_.emplace(&image_method_array_, + NativeObjectRelocation { offset, image_method_type }); + size_t method_alignment = ArtMethod::Alignment(target_ptr_size_); + const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize( + 0, ArtMethod::Size(target_ptr_size_), method_alignment); + CHECK_ALIGNED_PARAM(array_size, method_alignment); + offset += array_size; for (auto* m : image_methods_) { CHECK(m != nullptr); CHECK(m->IsRuntimeMethod()); - AssignMethodOffset(m, kBinArtMethodDirty); + AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean); } - // Calculate cumulative bin slot sizes. - size_t previous_sizes = 0u; + // Calculate bin slot offsets. + size_t bin_offset = image_objects_offset_begin_; for (size_t i = 0; i != kBinSize; ++i) { - bin_slot_previous_sizes_[i] = previous_sizes; - previous_sizes += bin_slot_sizes_[i]; + bin_slot_offsets_[i] = bin_offset; + bin_offset += bin_slot_sizes_[i]; + if (i == kBinArtField) { + static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields."); + static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4."); + DCHECK_ALIGNED(bin_offset, 4u); + DCHECK(method_alignment == 4u || method_alignment == 8u); + bin_offset = RoundUp(bin_offset, method_alignment); + } } - DCHECK_EQ(previous_sizes, GetBinSizeSum()); + // NOTE: There may be additional padding between the bin slots and the intern table. + DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_); // Transform each object's bin slot into an offset which will be used to do the final copy. @@ -949,10 +1012,10 @@ void ImageWriter::CalculateNewObjectOffsets() { image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get())); // Update the native relocations by adding their bin sums. 
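A worked example of the offset arithmetic above (numbers invented for illustration): with image_objects_offset_begin_ = 0x1000, bin_slot_sizes_[kBinArtField] = 0x0FFC and an 8-byte method alignment, bin_slot_offsets_[kBinArtField] = 0x1000 and the clean-method bin starts at RoundUp(0x1000 + 0x0FFC, 8) = 0x2000; a relocation recorded at offset 0x10 within that bin then resolves to 0x2010 once its bin offset is added in the loop that follows.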
- for (auto& pair : native_object_reloc_) { - auto& native_reloc = pair.second; - native_reloc.offset += image_objects_offset_begin_ + - bin_slot_previous_sizes_[native_reloc.bin_type]; + for (auto& pair : native_object_relocations_) { + NativeObjectRelocation& relocation = pair.second; + Bin bin_type = BinTypeForNativeRelocationType(relocation.type); + relocation.offset += bin_slot_offsets_[bin_type]; } // Calculate how big the intern table will be after being serialized. @@ -979,16 +1042,18 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) { // Add field section. auto* field_section = &sections[ImageHeader::kSectionArtFields]; *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]); - CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtField], - field_section->Offset()); + CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset()); cur_pos = field_section->End(); + // Round up to the alignment required by the method section. + cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_)); // Add method section. auto* methods_section = &sections[ImageHeader::kSectionArtMethods]; *methods_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]); - CHECK_EQ(image_objects_offset_begin_ + bin_slot_previous_sizes_[kBinArtMethodClean], - methods_section->Offset()); + CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset()); cur_pos = methods_section->End(); + // Round up to the alignment the string table expects. See HashSet::WriteToMemory. + cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); // Calculate the size of the interned strings. auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings]; *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_); @@ -1019,8 +1084,8 @@ void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) { } ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) { - auto it = native_object_reloc_.find(method); - CHECK(it != native_object_reloc_.end()) << PrettyMethod(method) << " @ " << method; + auto it = native_object_relocations_.find(method); + CHECK(it != native_object_relocations_.end()) << PrettyMethod(method) << " @ " << method; CHECK_GE(it->second.offset, image_end_) << "ArtMethods should be after Objects"; return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset); } @@ -1031,7 +1096,7 @@ class FixupRootVisitor : public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { *roots[i] = ImageAddress(*roots[i]); } @@ -1039,7 +1104,7 @@ class FixupRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr())); } @@ -1048,7 +1113,7 @@ class FixupRootVisitor : public RootVisitor { private: ImageWriter* const image_writer_; - mirror::Object* ImageAddress(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { const size_t offset = image_writer_->GetImageOffset(obj);
auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset); VLOG(compiler) << "Update root from " << obj << " to " << dest; @@ -1058,20 +1123,37 @@ class FixupRootVisitor : public RootVisitor { void ImageWriter::CopyAndFixupNativeData() { // Copy ArtFields and methods to their locations and update the array for convenience. - for (auto& pair : native_object_reloc_) { - auto& native_reloc = pair.second; - if (native_reloc.bin_type == kBinArtField) { - auto* dest = image_->Begin() + native_reloc.offset; - DCHECK_GE(dest, image_->Begin() + image_end_); - memcpy(dest, pair.first, sizeof(ArtField)); - reinterpret_cast<ArtField*>(dest)->SetDeclaringClass( - GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass())); - } else { - CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type; - auto* dest = image_->Begin() + native_reloc.offset; - DCHECK_GE(dest, image_->Begin() + image_end_); - CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first), - reinterpret_cast<ArtMethod*>(dest)); + for (auto& pair : native_object_relocations_) { + NativeObjectRelocation& relocation = pair.second; + auto* dest = image_->Begin() + relocation.offset; + DCHECK_GE(dest, image_->Begin() + image_end_); + switch (relocation.type) { + case kNativeObjectRelocationTypeArtField: { + memcpy(dest, pair.first, sizeof(ArtField)); + reinterpret_cast<ArtField*>(dest)->SetDeclaringClass( + GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass())); + break; + } + case kNativeObjectRelocationTypeArtMethodClean: + case kNativeObjectRelocationTypeArtMethodDirty: { + CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first), + reinterpret_cast<ArtMethod*>(dest)); + break; + } + // For arrays, copy just the header since the elements will get copied by their corresponding + // relocations. + case kNativeObjectRelocationTypeArtFieldArray: { + memcpy(dest, pair.first, LengthPrefixedArray<ArtField>::ComputeSize(0)); + break; + } + case kNativeObjectRelocationTypeArtMethodArrayClean: + case kNativeObjectRelocationTypeArtMethodArrayDirty: { + memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize( + 0, + ArtMethod::Size(target_ptr_size_), + ArtMethod::Alignment(target_ptr_size_))); + break; + } } } // Fixup the image method roots. 
@@ -1080,12 +1162,12 @@ void ImageWriter::CopyAndFixupNativeData() { for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) { auto* m = image_methods_[i]; CHECK(m != nullptr); - auto it = native_object_reloc_.find(m); - CHECK(it != native_object_reloc_.end()) << "No forwarding for " << PrettyMethod(m); - auto& native_reloc = it->second; - CHECK(methods_section.Contains(native_reloc.offset)) << native_reloc.offset << " not in " + auto it = native_object_relocations_.find(m); + CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(m); + NativeObjectRelocation& relocation = it->second; + CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in " << methods_section; - CHECK(IsArtMethodBin(native_reloc.bin_type)) << native_reloc.bin_type; + CHECK(relocation.IsArtMethodRelocation()) << relocation.type; auto* dest = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset); image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), dest); } @@ -1137,9 +1219,9 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* a for (size_t i = 0, count = num_elements; i < count; ++i) { auto* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_); if (elem != nullptr) { - auto it = native_object_reloc_.find(elem); - if (it == native_object_reloc_.end()) { - if (IsArtMethodBin(array_type)) { + auto it = native_object_relocations_.find(elem); + if (it == native_object_relocations_.end()) { + if (array_type == kBinArtMethodClean || array_type == kBinArtMethodDirty) { auto* method = reinterpret_cast<ArtMethod*>(elem); LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ " << method << " idx=" << i << "/" << num_elements << " with declaring class " @@ -1185,8 +1267,15 @@ class FixupVisitor { FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) { } + // Ignore class roots since we don't have a way to map them to the destination. These are handled + // with other logic. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + + void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset); // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the // image. @@ -1196,8 +1285,7 @@ class FixupVisitor { // java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>( mirror::Reference::ReferentOffset(), image_writer_->GetImageAddress(ref->GetReferent())); } @@ -1213,70 +1301,56 @@ class FixupClassVisitor FINAL : public FixupVisitor { } void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(obj->IsClass()); FixupVisitor::operator()(obj, offset, /*is_static*/false); } void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { LOG(FATAL) << "Reference not expected here."; } }; +void* ImageWriter::NativeLocationInImage(void* obj) { + if (obj == nullptr) { + return nullptr; + } + auto it = native_object_relocations_.find(obj); + CHECK(it != native_object_relocations_.end()) << obj; + const NativeObjectRelocation& relocation = it->second; + return reinterpret_cast<void*>(image_begin_ + relocation.offset); +} + void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) { - // Copy and fix up ArtFields in the class. - ArtField* fields[2] = { orig->GetSFields(), orig->GetIFields() }; - size_t num_fields[2] = { orig->NumStaticFields(), orig->NumInstanceFields() }; // Update the field arrays. - for (size_t i = 0; i < 2; ++i) { - if (num_fields[i] == 0) { - CHECK(fields[i] == nullptr); - continue; - } - auto it = native_object_reloc_.find(fields[i]); - CHECK(it != native_object_reloc_.end()) << PrettyClass(orig) << " : " << PrettyField(fields[i]); - auto* image_fields = reinterpret_cast<ArtField*>(image_begin_ + it->second.offset); - if (i == 0) { - copy->SetSFieldsUnchecked(image_fields); - } else { - copy->SetIFieldsUnchecked(image_fields); - } - } - // Update direct / virtual method arrays. - auto* direct_methods = orig->GetDirectMethodsPtr(); - if (direct_methods != nullptr) { - auto it = native_object_reloc_.find(direct_methods); - CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); - copy->SetDirectMethodsPtrUnchecked( - reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset)); - } - auto* virtual_methods = orig->GetVirtualMethodsPtr(); - if (virtual_methods != nullptr) { - auto it = native_object_reloc_.find(virtual_methods); - CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); - copy->SetVirtualMethodsPtr( - reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset)); - } + copy->SetSFieldsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtField>*>( + NativeLocationInImage(orig->GetSFieldsPtr()))); + copy->SetIFieldsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtField>*>( + NativeLocationInImage(orig->GetIFieldsPtr()))); + // Update direct and virtual method arrays. + copy->SetDirectMethodsPtrUnchecked(reinterpret_cast<LengthPrefixedArray<ArtMethod>*>( + NativeLocationInImage(orig->GetDirectMethodsPtr()))); + copy->SetVirtualMethodsPtr(reinterpret_cast<LengthPrefixedArray<ArtMethod>*>( + NativeLocationInImage(orig->GetVirtualMethodsPtr()))); // Fix up embedded tables. 
if (orig->ShouldHaveEmbeddedImtAndVTable()) { for (int32_t i = 0; i < orig->GetEmbeddedVTableLength(); ++i) { - auto it = native_object_reloc_.find(orig->GetEmbeddedVTableEntry(i, target_ptr_size_)); - CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); + auto it = native_object_relocations_.find(orig->GetEmbeddedVTableEntry(i, target_ptr_size_)); + CHECK(it != native_object_relocations_.end()) << PrettyClass(orig); copy->SetEmbeddedVTableEntryUnchecked( i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_); } for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { - auto it = native_object_reloc_.find(orig->GetEmbeddedImTableEntry(i, target_ptr_size_)); - CHECK(it != native_object_reloc_.end()) << PrettyClass(orig); + auto it = native_object_relocations_.find(orig->GetEmbeddedImTableEntry(i, target_ptr_size_)); + CHECK(it != native_object_relocations_.end()) << PrettyClass(orig); copy->SetEmbeddedImTableEntry( i, reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset), target_ptr_size_); } } FixupClassVisitor visitor(this, copy); - static_cast<mirror::Object*>(orig)->VisitReferences<true /*visit class*/>(visitor, visitor); + static_cast<mirror::Object*>(orig)->VisitReferences(visitor, visitor); } void ImageWriter::FixupObject(Object* orig, Object* copy) { @@ -1311,14 +1385,19 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) { auto* dest = down_cast<mirror::AbstractMethod*>(copy); auto* src = down_cast<mirror::AbstractMethod*>(orig); ArtMethod* src_method = src->GetArtMethod(); - auto it = native_object_reloc_.find(src_method); - CHECK(it != native_object_reloc_.end()) << "Missing relocation for AbstractMethod.artMethod " - << PrettyMethod(src_method); + auto it = native_object_relocations_.find(src_method); + CHECK(it != native_object_relocations_.end()) + << "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method); dest->SetArtMethod( reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset)); + } else if (!klass->IsArrayClass() && klass->IsSubClass(down_cast<mirror::Class*>( + Thread::Current()->DecodeJObject(WellKnownClasses::java_lang_ClassLoader)))) { + // If src is a ClassLoader, set the class table to null so that it gets recreated by the + // ClassLoader. 
+ down_cast<mirror::ClassLoader*>(copy)->SetClassTable(nullptr); } FixupVisitor visitor(this, copy); - orig->VisitReferences<true /*visit class*/>(visitor, visitor); + orig->VisitReferences(visitor, visitor); } } @@ -1381,7 +1460,7 @@ const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) { } void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) { - memcpy(copy, orig, ArtMethod::ObjectSize(target_ptr_size_)); + memcpy(copy, orig, ArtMethod::Size(target_ptr_size_)); copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked())); copy->SetDexCacheResolvedMethods(GetImageAddress(orig->GetDexCacheResolvedMethods())); @@ -1417,9 +1496,6 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) { if (UNLIKELY(orig->IsAbstract())) { copy->SetEntryPointFromQuickCompiledCodePtrSize( GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_); - copy->SetEntryPointFromInterpreterPtrSize( - reinterpret_cast<EntryPointFromInterpreter*>(const_cast<uint8_t*>( - GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_); } else { bool quick_is_interpreted; const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted); @@ -1432,16 +1508,6 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) { copy->SetEntryPointFromJniPtrSize( GetOatAddress(jni_dlsym_lookup_offset_), target_ptr_size_); } - - // Interpreter entrypoint: - // Set the interpreter entrypoint depending on whether there is compiled code or not. - uint32_t interpreter_code = (quick_is_interpreted) - ? interpreter_to_interpreter_bridge_offset_ - : interpreter_to_compiled_code_bridge_offset_; - EntryPointFromInterpreter* interpreter_entrypoint = - reinterpret_cast<EntryPointFromInterpreter*>( - const_cast<uint8_t*>(GetOatAddress(interpreter_code))); - copy->SetEntryPointFromInterpreterPtrSize(interpreter_entrypoint, target_ptr_size_); } } } @@ -1506,4 +1572,19 @@ uint8_t* ImageWriter::GetOatFileBegin() const { bin_slot_sizes_[kBinArtMethodClean] + intern_table_bytes_, kPageSize); } +ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) { + switch (type) { + case kNativeObjectRelocationTypeArtField: + case kNativeObjectRelocationTypeArtFieldArray: + return kBinArtField; + case kNativeObjectRelocationTypeArtMethodClean: + case kNativeObjectRelocationTypeArtMethodArrayClean: + return kBinArtMethodClean; + case kNativeObjectRelocationTypeArtMethodDirty: + case kNativeObjectRelocationTypeArtMethodArrayDirty: + return kBinArtMethodDirty; + } + UNREACHABLE(); +} + } // namespace art diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 9d45ce2bd4..c8aa82dc32 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -18,7 +18,7 @@ #define ART_COMPILER_IMAGE_WRITER_H_ #include <stdint.h> -#include <valgrind.h> +#include "base/memory_tool.h" #include <cstddef> #include <memory> @@ -30,6 +30,7 @@ #include "base/macros.h" #include "driver/compiler_driver.h" #include "gc/space/space.h" +#include "length_prefixed_array.h" #include "lock_word.h" #include "mem_map.h" #include "oat_file.h" @@ -53,8 +54,9 @@ class ImageWriter FINAL { quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0), quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic), target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), - bin_slot_sizes_(), bin_slot_previous_sizes_(), bin_slot_count_(), - intern_table_bytes_(0u), dirty_methods_(0u), 
clean_methods_(0u) { + bin_slot_sizes_(), bin_slot_offsets_(), bin_slot_count_(), + intern_table_bytes_(0u), image_method_array_(ImageHeader::kImageMethodsCount), + dirty_methods_(0u), clean_methods_(0u) { CHECK_NE(image_begin, 0U); std::fill(image_methods_, image_methods_ + arraysize(image_methods_), nullptr); } @@ -69,15 +71,15 @@ class ImageWriter FINAL { } template <typename T> - T* GetImageAddress(T* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) { return object == nullptr ? nullptr : reinterpret_cast<T*>(image_begin_ + GetImageOffset(object)); } - ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); mirror::HeapReference<mirror::Object>* GetDexCacheArrayElementImageAddress( - const DexFile* dex_file, uint32_t offset) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile* dex_file, uint32_t offset) const SHARED_REQUIRES(Locks::mutator_lock_) { auto it = dex_cache_array_starts_.find(dex_file); DCHECK(it != dex_cache_array_starts_.end()); return reinterpret_cast<mirror::HeapReference<mirror::Object>*>( @@ -88,7 +90,7 @@ class ImageWriter FINAL { bool Write(const std::string& image_filename, const std::string& oat_filename, const std::string& oat_location) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); uintptr_t GetOatDataBegin() { return reinterpret_cast<uintptr_t>(oat_data_begin_); @@ -98,7 +100,7 @@ class ImageWriter FINAL { bool AllocMemory(); // Mark the objects defined in this space in the given live bitmap. - void RecordImageAllocations() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_); // Classify different kinds of bins that objects end up getting packed into during image writing. enum Bin { @@ -129,9 +131,18 @@ class ImageWriter FINAL { // Number of bins which are for mirror objects. kBinMirrorCount = kBinArtField, }; - friend std::ostream& operator<<(std::ostream& stream, const Bin& bin); + enum NativeObjectRelocationType { + kNativeObjectRelocationTypeArtField, + kNativeObjectRelocationTypeArtFieldArray, + kNativeObjectRelocationTypeArtMethodClean, + kNativeObjectRelocationTypeArtMethodArrayClean, + kNativeObjectRelocationTypeArtMethodDirty, + kNativeObjectRelocationTypeArtMethodArrayDirty, + }; + friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type); + static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1); // uint32 = typeof(lockword_) // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK @@ -165,32 +176,32 @@ class ImageWriter FINAL { // We use the lock word to store the offset of the object in the image. 
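The lock-annotation renames running through this header (`SHARED_LOCKS_REQUIRED` → `SHARED_REQUIRES`, `EXCLUSIVE_LOCKS_REQUIRED` → `REQUIRES`, `LOCKS_EXCLUDED(x)` → `REQUIRES(!x)`) track Clang's `-Wthread-safety` "capability" vocabulary. A hedged sketch of how such macros are commonly defined; this is illustrative, not necessarily ART's exact `base/macros.h`:

```cpp
// Enable the attributes only under Clang; elsewhere they compile away.
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)
#endif

// Caller must hold the capability exclusively (old: EXCLUSIVE_LOCKS_REQUIRED).
#define REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))

// Caller must hold the capability at least shared (old: SHARED_LOCKS_REQUIRED).
#define SHARED_REQUIRES(...) \
  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
```

Note how `Write()` above turns `LOCKS_EXCLUDED(Locks::mutator_lock_)` into `REQUIRES(!Locks::mutator_lock_)`: "must not hold" is expressed as a negative capability, which Clang can check under `-Wthread-safety-negative`.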
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetImageOffset(mirror::Object* object, size_t offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsImageOffsetAssigned(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t GetImageOffset(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_); void UpdateImageOffset(mirror::Object* obj, uintptr_t offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void PrepareDexCacheArraySlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AssignImageBinSlot(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_); + void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsImageBinSlotAssigned(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_); - void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_); static void* GetImageAddressCallback(void* writer, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj); } mirror::Object* GetLocalAddress(mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t offset = GetImageOffset(object); uint8_t* dst = image_->Begin() + offset; return reinterpret_cast<mirror::Object*>(dst); @@ -199,84 +210,76 @@ class ImageWriter FINAL { const uint8_t* GetOatAddress(uint32_t offset) const { // With Quick, code is within the OatFile, as there are all in one // .o ELF object. - DCHECK_LT(offset, oat_file_->Size()); + DCHECK_LE(offset, oat_file_->Size()); DCHECK(oat_data_begin_ != nullptr); return offset == 0u ? nullptr : oat_data_begin_ + offset; } - static bool IsArtMethodBin(Bin bin) { - return bin == kBinArtMethodClean || bin == kBinArtMethodDirty; - } - // Returns true if the class was in the original requested image classes list. - bool IsImageClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); // Debug aid that list of requested image classes. void DumpImageClasses(); // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying. 
void ComputeLazyFieldsForImageClasses() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool ComputeLazyFieldsForClassesVisitor(mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Wire dex cache resolved strings to strings in the image to avoid runtime resolution. - void ComputeEagerResolvedStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ComputeEagerResolvedStrings() SHARED_REQUIRES(Locks::mutator_lock_); static void ComputeEagerResolvedStringsCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Remove unwanted classes from various roots. - void PruneNonImageClasses() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool NonImageClassesVisitor(mirror::Class* c, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_); // Verify unwanted classes removed. - void CheckNonImageClassesRemoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_); static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Lays out where the image objects will be at runtime. void CalculateNewObjectOffsets() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::ObjectArray<mirror::Object>* CreateImageRoots() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CalculateObjectBinSlots(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UnbinObjectsIntoOffset(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void WalkFieldsInOrder(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void WalkFieldsCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Creates the contiguous image in memory and adjusts pointers. 
- void CopyAndFixupNativeData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CopyAndFixupObjects() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CopyAndFixupNativeData() SHARED_REQUIRES(Locks::mutator_lock_); + void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_); static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CopyAndFixupObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupClass(mirror::Class* orig, mirror::Class* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupObject(mirror::Object* orig, mirror::Object* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass, - Bin array_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Bin array_type) SHARED_REQUIRES(Locks::mutator_lock_); // Get quick code for non-resolution/imt_conflict/abstract method. const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetQuickEntryPoint(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Patches references in OatFile to expect runtime addresses. void SetOatChecksumFromElfFile(File* elf_file); @@ -285,10 +288,15 @@ class ImageWriter FINAL { size_t GetBinSizeSum(Bin up_to = kBinSize) const; // Return true if a method is likely to be dirtied at runtime. - bool WillMethodBeDirty(ArtMethod* m) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_); // Assign the offset for an ArtMethod. - void AssignMethodOffset(ArtMethod* method, Bin bin) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type) + SHARED_REQUIRES(Locks::mutator_lock_); + + static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type); + + void* NativeLocationInImage(void* obj); const CompilerDriver& compiler_driver_; @@ -351,7 +359,7 @@ class ImageWriter FINAL { // Bin slot tracking for dirty object packing size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin - size_t bin_slot_previous_sizes_[kBinSize]; // Number of bytes in previous bins. + size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins. size_t bin_slot_count_[kBinSize]; // Number of objects in a bin // Cached size of the intern table for when we allocate memory. @@ -360,14 +368,22 @@ class ImageWriter FINAL { // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to // have one entry per art field for convenience. ArtFields are placed right after the end of the // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields. 
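On the `bin_slot_previous_sizes_` → `bin_slot_offsets_` rename just above: the comment still reads "Number of bytes in previous bins", and that sum is exactly where each bin starts, i.e. its offset. A minimal sketch of the prefix-sum relationship, under the assumption that this is all the rename reflects (illustrative helper, not ART code):

```cpp
#include <cstddef>

constexpr size_t kBinCount = 8;  // Placeholder; the real writer uses kBinSize.

// offsets[i] = sum of sizes[0..i-1]: bytes in all previous bins, which is
// the byte at which bin i begins in the packed image.
void ComputeBinOffsets(const size_t sizes[kBinCount], size_t offsets[kBinCount]) {
  size_t running = 0;
  for (size_t i = 0; i < kBinCount; ++i) {
    offsets[i] = running;
    running += sizes[i];
  }
}
```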
- struct NativeObjectReloc { + struct NativeObjectRelocation { uintptr_t offset; - Bin bin_type; + NativeObjectRelocationType type; + + bool IsArtMethodRelocation() const { + return type == kNativeObjectRelocationTypeArtMethodClean || + type == kNativeObjectRelocationTypeArtMethodDirty; + } }; - std::unordered_map<void*, NativeObjectReloc> native_object_reloc_; + std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_; // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image. ArtMethod* image_methods_[ImageHeader::kImageMethodsCount]; + // Fake length prefixed array for image methods. This array does not contain the actual + // ArtMethods. We only use it for the header and relocation addresses. + LengthPrefixedArray<ArtMethod> image_method_array_; // Counters for measurements, used for logging only. uint64_t dirty_methods_; @@ -376,6 +392,7 @@ class ImageWriter FINAL { friend class FixupClassVisitor; friend class FixupRootVisitor; friend class FixupVisitor; + friend class NonImageClassesVisitor; DISALLOW_COPY_AND_ASSIGN(ImageWriter); }; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index a1d8226f36..4215f3cdd3 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -55,7 +55,7 @@ extern "C" void jit_unload(void* handle) { } extern "C" bool jit_compile_method(void* handle, ArtMethod* method, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle); DCHECK(jit_compiler != nullptr); return jit_compiler->CompileMethod(self, method); @@ -71,18 +71,20 @@ JitCompiler::JitCompiler() : total_time_(0) { CompilerOptions::kDefaultSmallMethodThreshold, CompilerOptions::kDefaultTinyMethodThreshold, CompilerOptions::kDefaultNumDexMethodsThreshold, - false, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, + /* include_patch_information */ false, CompilerOptions::kDefaultTopKProfileThreshold, - false, // TODO: Think about debuggability of JIT-compiled code. + Runtime::Current()->IsDebuggable(), CompilerOptions::kDefaultGenerateDebugInfo, - false, - false, - false, - false, // pic - nullptr, + /* implicit_null_checks */ true, + /* implicit_so_checks */ true, + /* implicit_suspend_checks */ false, + /* pic */ true, // TODO: Support non-PIC in optimizing. 
+ /* verbose_methods */ nullptr, pass_manager_options, - nullptr, - false)); + /* init_failure_output */ nullptr, + /* abort_on_hard_verifier_failure */ false)); const InstructionSet instruction_set = kRuntimeISA; instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines()); cumulative_logger_.reset(new CumulativeLogger("jit times")); @@ -92,10 +94,23 @@ JitCompiler::JitCompiler() : total_time_(0) { method_inliner_map_.get(), CompilerCallbacks::CallbackMode::kCompileApp)); compiler_driver_.reset(new CompilerDriver( - compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(), - Compiler::kQuick, instruction_set, instruction_set_features_.get(), false, - nullptr, nullptr, nullptr, 1, false, true, - std::string(), cumulative_logger_.get(), -1, std::string())); + compiler_options_.get(), + verification_results_.get(), + method_inliner_map_.get(), + Compiler::kOptimizing, + instruction_set, + instruction_set_features_.get(), + /* image */ false, + /* image_classes */ nullptr, + /* compiled_classes */ nullptr, + /* compiled_methods */ nullptr, + /* thread_count */ 1, + /* dump_stats */ false, + /* dump_passes */ false, + /* dump_cfg_file_name */ "", + cumulative_logger_.get(), + /* swap_fd */ -1, + /* profile_file */ "")); // Disable dedupe so we can remove compiled methods. compiler_driver_->SetDedupeEnabled(false); compiler_driver_->SetSupportBootImageFixup(false); @@ -138,7 +153,7 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) { CompiledMethod* compiled_method = nullptr; { TimingLogger::ScopedTiming t2("Compiling", &logger); - compiled_method = compiler_driver_->CompileMethod(self, method); + compiled_method = compiler_driver_->CompileArtMethod(self, method); } { TimingLogger::ScopedTiming t2("TrimMaps", &logger); @@ -195,9 +210,14 @@ uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_me std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr); // After we are done writing we need to update the method header. // Write out the method header last. - method_header = new(method_header)OatQuickMethodHeader( - code_ptr - mapping_table, code_ptr - vmap_table, code_ptr - gc_map, frame_size_in_bytes, - core_spill_mask, fp_spill_mask, code_size); + method_header = new(method_header) OatQuickMethodHeader( + (mapping_table == nullptr) ? 0 : code_ptr - mapping_table, + (vmap_table == nullptr) ? 0 : code_ptr - vmap_table, + (gc_map == nullptr) ? 0 : code_ptr - gc_map, + frame_size_in_bytes, + core_spill_mask, + fp_spill_mask, + code_size); // Return the code ptr. return code_ptr; } @@ -216,23 +236,35 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil auto* const mapping_table = compiled_method->GetMappingTable(); auto* const vmap_table = compiled_method->GetVmapTable(); auto* const gc_map = compiled_method->GetGcMap(); - CHECK(gc_map != nullptr) << PrettyMethod(method); - // Write out pre-header stuff. - uint8_t* const mapping_table_ptr = code_cache->AddDataArray( - self, mapping_table->data(), mapping_table->data() + mapping_table->size()); - if (mapping_table_ptr == nullptr) { - return false; // Out of data cache. + uint8_t* mapping_table_ptr = nullptr; + uint8_t* vmap_table_ptr = nullptr; + uint8_t* gc_map_ptr = nullptr; + + if (mapping_table != nullptr) { + // Write out pre-header stuff. 
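The `AddToCodeCache` rework that follows makes every side table (mapping table, vmap table, GC map) optional, since JIT-compiled and dex2dex methods may lack some of them; the guarded header construction in `WriteMethodHeaderAndCode` above writes offset 0 for an absent table for the same reason. A hypothetical helper distilling the pattern, with a simplified stand-in for the code cache's `AddDataArray`:

```cpp
#include <cstdint>
#include <vector>

// Simplified stand-in: appends a byte range to the data cache and returns a
// pointer to the copy. (A real cache has a fixed budget and can return
// nullptr when full; vector reallocation is ignored here for brevity.)
uint8_t* AddDataArray(std::vector<uint8_t>& cache, const uint8_t* begin, const uint8_t* end) {
  size_t old_size = cache.size();
  cache.insert(cache.end(), begin, end);
  return cache.data() + old_size;
}

// The pattern this hunk adopts: write a table only if it exists, and fail
// only on a genuine out-of-cache condition.
bool WriteOptionalTable(std::vector<uint8_t>& cache,
                        const std::vector<uint8_t>* table,
                        uint8_t** out_ptr) {
  *out_ptr = nullptr;          // Absent table => offset 0 in the method header.
  if (table == nullptr) {
    return true;               // Nothing to write is not an error.
  }
  *out_ptr = AddDataArray(cache, table->data(), table->data() + table->size());
  return *out_ptr != nullptr;  // nullptr would mean the data cache is full.
}
```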
+ mapping_table_ptr = code_cache->AddDataArray( + self, mapping_table->data(), mapping_table->data() + mapping_table->size()); + if (mapping_table_ptr == nullptr) { + return false; // Out of data cache. + } } - uint8_t* const vmap_table_ptr = code_cache->AddDataArray( - self, vmap_table->data(), vmap_table->data() + vmap_table->size()); - if (vmap_table_ptr == nullptr) { - return false; // Out of data cache. + + if (vmap_table != nullptr) { + vmap_table_ptr = code_cache->AddDataArray( + self, vmap_table->data(), vmap_table->data() + vmap_table->size()); + if (vmap_table_ptr == nullptr) { + return false; // Out of data cache. + } } - uint8_t* const gc_map_ptr = code_cache->AddDataArray( - self, gc_map->data(), gc_map->data() + gc_map->size()); - if (gc_map_ptr == nullptr) { - return false; // Out of data cache. + + if (gc_map != nullptr) { + gc_map_ptr = code_cache->AddDataArray( + self, gc_map->data(), gc_map->data() + gc_map->size()); + if (gc_map_ptr == nullptr) { + return false; // Out of data cache. + } } + // Don't touch this until you protect / unprotect the code. const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32; uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size); diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h index b0010e0eb2..ef68caa5fa 100644 --- a/compiler/jit/jit_compiler.h +++ b/compiler/jit/jit_compiler.h @@ -38,11 +38,11 @@ class JitCompiler { static JitCompiler* Create(); virtual ~JitCompiler(); bool CompileMethod(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // This is in the compiler since the runtime doesn't have access to the compiled method // structures. bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method, - OatFile::OatMethod* out_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_); CompilerCallbacks* GetCompilerCallbacks() const; size_t GetTotalCompileTime() const { return total_time_; @@ -63,7 +63,7 @@ class JitCompiler { const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end, const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map); bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(JitCompiler); }; diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 016f28ef1e..0bfe8a276a 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -28,7 +28,7 @@ namespace art { // Run the tests only on host. 
-#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class JNICFITest : public CFITest { public: @@ -88,6 +88,6 @@ TEST_ISA(kX86_64) TEST_ISA(kMips) TEST_ISA(kMips64) -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h index dbecb8eb95..35b50937e9 100644 --- a/compiler/jni/quick/arm/calling_convention_arm.h +++ b/compiler/jni/quick/arm/calling_convention_arm.h @@ -48,7 +48,7 @@ class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon class ArmJniCallingConvention FINAL : public JniCallingConvention { public: - explicit ArmJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); + ArmJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); ~ArmJniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h index 9fd3265c86..37c92b2034 100644 --- a/compiler/jni/quick/arm64/calling_convention_arm64.h +++ b/compiler/jni/quick/arm64/calling_convention_arm64.h @@ -48,7 +48,7 @@ class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingC class Arm64JniCallingConvention FINAL : public JniCallingConvention { public: - explicit Arm64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); + Arm64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); ~Arm64JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc index bb8136bfe1..cef8c5d03d 100644 --- a/compiler/jni/quick/calling_convention.cc +++ b/compiler/jni/quick/calling_convention.cc @@ -17,12 +17,30 @@ #include "calling_convention.h" #include "base/logging.h" + +#ifdef ART_ENABLE_CODEGEN_arm #include "jni/quick/arm/calling_convention_arm.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 #include "jni/quick/arm64/calling_convention_arm64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips #include "jni/quick/mips/calling_convention_mips.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips64 #include "jni/quick/mips64/calling_convention_mips64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 #include "jni/quick/x86/calling_convention_x86.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 #include "jni/quick/x86_64/calling_convention_x86_64.h" +#endif namespace art { @@ -31,19 +49,31 @@ namespace art { ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create( bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) { switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_mips case kMips: return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 case kX86: return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef 
ART_ENABLE_CODEGEN_x86_64 case kX86_64: return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty); +#endif default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; return nullptr; @@ -106,19 +136,31 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch const char* shorty, InstructionSet instruction_set) { switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_mips case kMips: return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 case kX86: return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty); +#endif default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; return nullptr; diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h index c9b595aeea..243d124455 100644 --- a/compiler/jni/quick/calling_convention.h +++ b/compiler/jni/quick/calling_convention.h @@ -348,8 +348,8 @@ class JniCallingConvention : public CallingConvention { kObjectOrClass = 1 }; - explicit JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty, - size_t frame_pointer_size) + JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty, + size_t frame_pointer_size) : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {} // Number of stack slots for outgoing arguments, above which the handle scope is diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h index 8d82dceef4..dc45432410 100644 --- a/compiler/jni/quick/mips/calling_convention_mips.h +++ b/compiler/jni/quick/mips/calling_convention_mips.h @@ -48,7 +48,7 @@ class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCo class MipsJniCallingConvention FINAL : public JniCallingConvention { public: - explicit MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); + MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); ~MipsJniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h index dc9273b92a..3d6aab7399 100644 --- a/compiler/jni/quick/mips64/calling_convention_mips64.h +++ b/compiler/jni/quick/mips64/calling_convention_mips64.h @@ -48,7 +48,7 @@ class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCalling class Mips64JniCallingConvention FINAL : public JniCallingConvention { public: - explicit Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); + Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); ~Mips64JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; diff --git a/compiler/jni/quick/x86/calling_convention_x86.h 
b/compiler/jni/quick/x86/calling_convention_x86.h index b1b3598a8e..cdf0956c9a 100644 --- a/compiler/jni/quick/x86/calling_convention_x86.h +++ b/compiler/jni/quick/x86/calling_convention_x86.h @@ -26,8 +26,7 @@ constexpr size_t kFramePointerSize = 4; class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention { public: - explicit X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, - const char* shorty) + X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize), gpr_arg_count_(0) {} ~X86ManagedRuntimeCallingConvention() OVERRIDE {} @@ -51,7 +50,7 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon class X86JniCallingConvention FINAL : public JniCallingConvention { public: - explicit X86JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); + X86JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); ~X86JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h index 7a90c6e94e..6e47c9fae3 100644 --- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h +++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h @@ -26,8 +26,7 @@ constexpr size_t kFramePointerSize = 8; class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention { public: - explicit X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, - const char* shorty) + X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {} ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {} // Calling convention @@ -47,7 +46,7 @@ class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCalling class X86_64JniCallingConvention FINAL : public JniCallingConvention { public: - explicit X86_64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); + X86_64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty); ~X86_64JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc index a057a4cf16..13f67e6fd4 100644 --- a/compiler/linker/arm/relative_patcher_thumb2_test.cc +++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc @@ -50,7 +50,7 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest { // We want to put the method3 at a very precise offset. const uint32_t method3_offset = method1_offset + distance_without_thunks; - CHECK(IsAligned<kArmAlignment>(method3_offset - sizeof(OatQuickMethodHeader))); + CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment); // Calculate size of method2 so that we put method3 at the correct place. 
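A recurring cleanup in these calling-convention headers drops `explicit` from multi-parameter constructors. A short illustration of why the keyword buys little there, assuming the change is the usual style cleanup rather than a behavioral one:

```cpp
struct Convention {
  // Three parameters: there is no single-argument implicit conversion for
  // `explicit` to suppress, so the keyword is essentially noise here.
  Convention(bool is_static, bool is_synchronized, const char* shorty)
      : is_static_(is_static), is_synchronized_(is_synchronized), shorty_(shorty) {}

  bool is_static_;
  bool is_synchronized_;
  const char* shorty_;
};

// The only form `explicit` would additionally forbid is copy-list-
// initialization, e.g.:  Convention c = {true, false, "V"};
// which these call sites never use.
```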
const uint32_t method2_offset = @@ -242,8 +242,10 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarAfter) { }; constexpr uint32_t max_positive_disp = 16 * MB - 2u + 4u /* PC adjustment */; - bool thunk_in_gap = Create2MethodsWithGap(method1_code, method1_patches, - kNopCode, ArrayRef<const LinkerPatch>(), + bool thunk_in_gap = Create2MethodsWithGap(method1_code, + ArrayRef<const LinkerPatch>(method1_patches), + kNopCode, + ArrayRef<const LinkerPatch>(), bl_offset_in_method1 + max_positive_disp); ASSERT_FALSE(thunk_in_gap); // There should be no thunk. @@ -262,8 +264,10 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarBefore) { }; constexpr uint32_t just_over_max_negative_disp = 16 * MB - 4u /* PC adjustment */; - bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(), - method3_code, method3_patches, + bool thunk_in_gap = Create2MethodsWithGap(kNopCode, + ArrayRef<const LinkerPatch>(), + method3_code, + ArrayRef<const LinkerPatch>(method3_patches), just_over_max_negative_disp - bl_offset_in_method3); ASSERT_FALSE(thunk_in_gap); // There should be no thunk. @@ -282,8 +286,10 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) { }; constexpr uint32_t just_over_max_positive_disp = 16 * MB + 4u /* PC adjustment */; - bool thunk_in_gap = Create2MethodsWithGap(method1_code, method1_patches, - kNopCode, ArrayRef<const LinkerPatch>(), + bool thunk_in_gap = Create2MethodsWithGap(method1_code, + ArrayRef<const LinkerPatch>(method1_patches), + kNopCode, + ArrayRef<const LinkerPatch>(), bl_offset_in_method1 + just_over_max_positive_disp); ASSERT_TRUE(thunk_in_gap); @@ -311,8 +317,10 @@ TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) { }; constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */; - bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(), - method3_code, method3_patches, + bool thunk_in_gap = Create2MethodsWithGap(kNopCode, + ArrayRef<const LinkerPatch>(), + method3_code, + ArrayRef<const LinkerPatch>(method3_patches), just_over_max_negative_disp - bl_offset_in_method3); ASSERT_FALSE(thunk_in_gap); // There should be a thunk but it should be after the method2. 
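The test edits above stop passing raw patch arrays straight through and instead spell out `ArrayRef<const LinkerPatch>(...)` at each call. A minimal sketch of an `ArrayRef`-style view with an explicit array constructor, assuming that explicitness is what forces these call sites to change (hypothetical implementation, not ART's `ArrayRef`):

```cpp
#include <cstddef>

template <typename T>
class ArrayRef {
 public:
  ArrayRef() : data_(nullptr), size_(0) {}

  // Explicit: a C array no longer converts silently, so callers must write
  // the wrapper type out, as the updated tests do.
  template <size_t N>
  explicit ArrayRef(T (&array)[N]) : data_(array), size_(N) {}

  const T* data() const { return data_; }
  size_t size() const { return size_; }

 private:
  T* data_;
  size_t size_;
};

// Usage, mirroring the updated calls:
//   const LinkerPatch method1_patches[] = { ... };
//   Create2MethodsWithGap(code, ArrayRef<const LinkerPatch>(method1_patches), ...);
```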
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc index 29355d6968..6b9c530d7a 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.cc +++ b/compiler/linker/arm64/relative_patcher_arm64.cc @@ -108,7 +108,7 @@ uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) { if (!current_method_thunks_.empty()) { uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64); if (kIsDebugBuild) { - CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size())); + CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize); size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize; CHECK_LE(num_thunks, processed_adrp_thunks_); for (size_t i = 0u; i != num_thunks; ++i) { @@ -203,7 +203,7 @@ void Arm64RelativePatcher::PatchDexCacheReference(std::vector<uint8_t>* code, if ((adrp & 0x9f000000u) != 0x90000000u) { CHECK(fix_cortex_a53_843419_); CHECK_EQ(adrp & 0xfc000000u, 0x14000000u); // B <thunk> - CHECK(IsAligned<kAdrpThunkSize>(current_method_thunks_.size())); + CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize); size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize; CHECK_LE(num_thunks, processed_adrp_thunks_); uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset; diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc index 21f93672ad..b3af4c6a05 100644 --- a/compiler/linker/arm64/relative_patcher_arm64_test.cc +++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc @@ -66,7 +66,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest { // We want to put the method3 at a very precise offset. const uint32_t last_method_offset = method1_offset + distance_without_thunks; const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader); - CHECK(IsAligned<kArm64Alignment>(gap_end)); + CHECK_ALIGNED(gap_end, kArm64Alignment); // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB). 
// (This allows deduplicating the small chunks to avoid using 256MiB of memory for +-128MiB @@ -396,8 +396,10 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) { }; constexpr uint32_t max_positive_disp = 128 * MB - 4u; - uint32_t last_method_idx = Create2MethodsWithGap(method1_code, method1_patches, - kNopCode, ArrayRef<const LinkerPatch>(), + uint32_t last_method_idx = Create2MethodsWithGap(method1_code, + ArrayRef<const LinkerPatch>(method1_patches), + kNopCode, + ArrayRef<const LinkerPatch>(), bl_offset_in_method1 + max_positive_disp); ASSERT_EQ(expected_last_method_idx, last_method_idx); @@ -420,8 +422,10 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarBefore) { }; constexpr uint32_t max_negative_disp = 128 * MB; - uint32_t last_method_idx = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(), - last_method_code, last_method_patches, + uint32_t last_method_idx = Create2MethodsWithGap(kNopCode, + ArrayRef<const LinkerPatch>(), + last_method_code, + ArrayRef<const LinkerPatch>(last_method_patches), max_negative_disp - bl_offset_in_last_method); uint32_t method1_offset = GetMethodOffset(1u); uint32_t last_method_offset = GetMethodOffset(last_method_idx); @@ -445,7 +449,10 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) { constexpr uint32_t just_over_max_positive_disp = 128 * MB; uint32_t last_method_idx = Create2MethodsWithGap( - method1_code, method1_patches, kNopCode, ArrayRef<const LinkerPatch>(), + method1_code, + ArrayRef<const LinkerPatch>(method1_patches), + kNopCode, + ArrayRef<const LinkerPatch>(), bl_offset_in_method1 + just_over_max_positive_disp); ASSERT_EQ(expected_last_method_idx, last_method_idx); @@ -474,7 +481,8 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) { constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4; uint32_t last_method_idx = Create2MethodsWithGap( - kNopCode, ArrayRef<const LinkerPatch>(), last_method_code, last_method_patches, + kNopCode, ArrayRef<const LinkerPatch>(), last_method_code, + ArrayRef<const LinkerPatch>(last_method_patches), just_over_max_negative_disp - bl_offset_in_last_method); uint32_t method1_offset = GetMethodOffset(1u); uint32_t last_method_offset = GetMethodOffset(last_method_idx); diff --git a/compiler/linker/relative_patcher.cc b/compiler/linker/relative_patcher.cc index 89aed956aa..82702dcf25 100644 --- a/compiler/linker/relative_patcher.cc +++ b/compiler/linker/relative_patcher.cc @@ -16,10 +16,18 @@ #include "linker/relative_patcher.h" +#ifdef ART_ENABLE_CODEGEN_arm #include "linker/arm/relative_patcher_thumb2.h" +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 #include "linker/arm64/relative_patcher_arm64.h" +#endif +#ifdef ART_ENABLE_CODEGEN_x86 #include "linker/x86/relative_patcher_x86.h" +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 #include "linker/x86_64/relative_patcher_x86_64.h" +#endif #include "output_stream.h" namespace art { @@ -64,18 +72,28 @@ std::unique_ptr<RelativePatcher> RelativePatcher::Create( DISALLOW_COPY_AND_ASSIGN(RelativePatcherNone); }; + UNUSED(features); + UNUSED(provider); switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_x86 case kX86: return std::unique_ptr<RelativePatcher>(new X86RelativePatcher()); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return std::unique_ptr<RelativePatcher>(new X86_64RelativePatcher()); +#endif +#ifdef ART_ENABLE_CODEGEN_arm case kArm: // Fall through: we generate Thumb2 code for "arm". 
case kThumb2: return std::unique_ptr<RelativePatcher>(new Thumb2RelativePatcher(provider)); +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return std::unique_ptr<RelativePatcher>( new Arm64RelativePatcher(provider, features->AsArm64InstructionSetFeatures())); +#endif default: return std::unique_ptr<RelativePatcher>(new RelativePatcherNone); } diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 074775633f..88dc29e6ab 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -44,7 +44,7 @@ class OatTest : public CommonCompilerTest { void CheckMethod(ArtMethod* method, const OatFile::OatMethod& oat_method, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method->GetDexMethodIndex())); @@ -183,7 +183,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { EXPECT_EQ(72U, sizeof(OatHeader)); EXPECT_EQ(4U, sizeof(OatMethodOffsets)); EXPECT_EQ(28U, sizeof(OatQuickMethodHeader)); - EXPECT_EQ(112 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); + EXPECT_EQ(113 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints)); } TEST_F(OatTest, OatHeaderIsValid) { diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index a98a3046e5..64e748776d 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -365,7 +365,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); @@ -374,9 +374,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { uint32_t quick_code_offset = 0; const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode(); - CHECK(quick_code != nullptr); uint32_t code_size = quick_code->size() * sizeof(uint8_t); - CHECK_NE(code_size, 0U); uint32_t thumb_offset = compiled_method->CodeDelta(); // Deduplicate code arrays if we are not producing debuggable code. @@ -394,16 +392,18 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { } } - MethodReference method_ref(dex_file_, it.GetMemberIndex()); - auto method_lb = writer_->method_offset_map_.map.lower_bound(method_ref); - if (method_lb != writer_->method_offset_map_.map.end() && - !writer_->method_offset_map_.map.key_comp()(method_ref, method_lb->first)) { - // TODO: Should this be a hard failure? - LOG(WARNING) << "Multiple definitions of " - << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file) - << ((method_lb->second != quick_code_offset) ? "; OFFSET MISMATCH" : ""); - } else { - writer_->method_offset_map_.map.PutBefore(method_lb, method_ref, quick_code_offset); + if (code_size != 0) { + MethodReference method_ref(dex_file_, it.GetMemberIndex()); + auto method_lb = writer_->method_offset_map_.map.lower_bound(method_ref); + if (method_lb != writer_->method_offset_map_.map.end() && + !writer_->method_offset_map_.map.key_comp()(method_ref, method_lb->first)) { + // TODO: Should this be a hard failure? + LOG(WARNING) << "Multiple definitions of " + << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file) + << ((method_lb->second != quick_code_offset) ? 
"; OFFSET MISMATCH" : ""); + } else { + writer_->method_offset_map_.map.PutBefore(method_lb, method_ref, quick_code_offset); + } } // Update quick method header. @@ -411,21 +411,24 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_]; uint32_t mapping_table_offset = method_header->mapping_table_offset_; uint32_t vmap_table_offset = method_header->vmap_table_offset_; + // If we don't have quick code, then we must have a vmap, as that is how the dex2dex + // compiler records its transformations. + DCHECK(quick_code != nullptr || vmap_table_offset != 0); uint32_t gc_map_offset = method_header->gc_map_offset_; // The code offset was 0 when the mapping/vmap table offset was set, so it's set // to 0-offset and we need to adjust it by code_offset. uint32_t code_offset = quick_code_offset - thumb_offset; - if (mapping_table_offset != 0u) { + if (mapping_table_offset != 0u && code_offset != 0u) { mapping_table_offset += code_offset; - DCHECK_LT(mapping_table_offset, code_offset); + DCHECK_LT(mapping_table_offset, code_offset) << "Overflow in oat offsets"; } - if (vmap_table_offset != 0u) { + if (vmap_table_offset != 0u && code_offset != 0u) { vmap_table_offset += code_offset; - DCHECK_LT(vmap_table_offset, code_offset); + DCHECK_LT(vmap_table_offset, code_offset) << "Overflow in oat offsets"; } - if (gc_map_offset != 0u) { + if (gc_map_offset != 0u && code_offset != 0u) { gc_map_offset += code_offset; - DCHECK_LT(gc_map_offset, code_offset); + DCHECK_LT(gc_map_offset, code_offset) << "Overflow in oat offsets"; } uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes(); uint32_t core_spill_mask = compiled_method->GetCoreSpillMask(); @@ -534,7 +537,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { const ClassDataItemIterator& it, uint32_t thumb_offset) { offset_ = writer_->relative_patcher_->ReserveSpace( - offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex())); + offset_, compiled_method, MethodReference(dex_file_, it.GetMemberIndex())); offset_ = compiled_method->AlignCode(offset_); DCHECK_ALIGNED_PARAM(offset_, GetInstructionSetAlignment(compiled_method->GetInstructionSet())); @@ -557,7 +560,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); @@ -598,7 +601,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor { } bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OatClass* oat_class = writer_->oat_classes_[oat_class_index_]; CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); @@ -619,15 +622,19 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor { *dex_file_, it.GetMemberIndex(), dex_cache, NullHandle<mirror::ClassLoader>(), nullptr, invoke_type); if (method == nullptr) { - LOG(ERROR) << "Unexpected failure to resolve a method: " - << PrettyMethod(it.GetMemberIndex(), *dex_file_, true); + LOG(INTERNAL_FATAL) << "Unexpected failure to resolve a method: " + << 
                       PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
        soa.Self()->AssertPendingException();
        mirror::Throwable* exc = soa.Self()->GetException();
        std::string dump = exc->Dump();
        LOG(FATAL) << dump;
+       UNREACHABLE();
+     }
+
+     if (compiled_method != nullptr && compiled_method->GetQuickCode()->size() != 0) {
+       method->SetEntryPointFromQuickCompiledCodePtrSize(
+           reinterpret_cast<void*>(offsets.code_offset_), pointer_size_);
      }
-     method->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(offsets.code_offset_),
-                                                       pointer_size_);
      return true;
    }
 
@@ -658,7 +665,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   bool StartClass(const DexFile* dex_file, size_t class_def_index)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatDexMethodVisitor::StartClass(dex_file, class_def_index);
     if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
       dex_cache_ = class_linker_->FindDexCache(*dex_file);
@@ -666,7 +673,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
     return true;
   }
 
-  bool EndClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     bool result = OatDexMethodVisitor::EndClass();
     if (oat_class_index_ == writer_->oat_classes_.size()) {
       DCHECK(result);  // OatDexMethodVisitor::EndClass() never fails.
@@ -680,7 +687,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
@@ -689,85 +696,82 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
       OutputStream* out = out_;
 
       const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
-      if (quick_code != nullptr) {
-        // Need a wrapper if we create a copy for patching.
-        ArrayRef<const uint8_t> wrapped(*quick_code);
-        uint32_t code_size = quick_code->size() * sizeof(uint8_t);
-        CHECK_NE(code_size, 0U);
-
-        // Deduplicate code arrays.
-        const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
-        if (method_offsets.code_offset_ >= offset_) {
-          offset_ = writer_->relative_patcher_->WriteThunks(out, offset_);
-          if (offset_ == 0u) {
-            ReportWriteFailure("relative call thunk", it);
-            return false;
-          }
-          uint32_t aligned_offset = compiled_method->AlignCode(offset_);
-          uint32_t aligned_code_delta = aligned_offset - offset_;
-          if (aligned_code_delta != 0) {
-            if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
-              ReportWriteFailure("code alignment padding", it);
-              return false;
-            }
-            offset_ += aligned_code_delta;
-            DCHECK_OFFSET_();
-          }
-          DCHECK_ALIGNED_PARAM(offset_,
-                               GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
-          DCHECK_EQ(method_offsets.code_offset_,
-                    offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
-              << PrettyMethod(it.GetMemberIndex(), *dex_file_);
-          const OatQuickMethodHeader& method_header =
-              oat_class->method_headers_[method_offsets_index_];
-          writer_->oat_header_->UpdateChecksum(&method_header, sizeof(method_header));
-          if (!out->WriteFully(&method_header, sizeof(method_header))) {
-            ReportWriteFailure("method header", it);
+      // Need a wrapper if we create a copy for patching.
+      ArrayRef<const uint8_t> wrapped(*quick_code);
+      uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+
+      // Deduplicate code arrays.
+      const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
+      if (method_offsets.code_offset_ > offset_) {
+        offset_ = writer_->relative_patcher_->WriteThunks(out, offset_);
+        if (offset_ == 0u) {
+          ReportWriteFailure("relative call thunk", it);
+          return false;
+        }
+        uint32_t aligned_offset = compiled_method->AlignCode(offset_);
+        uint32_t aligned_code_delta = aligned_offset - offset_;
+        if (aligned_code_delta != 0) {
+          if (!writer_->WriteCodeAlignment(out, aligned_code_delta)) {
+            ReportWriteFailure("code alignment padding", it);
             return false;
           }
-          writer_->size_method_header_ += sizeof(method_header);
-          offset_ += sizeof(method_header);
+          offset_ += aligned_code_delta;
           DCHECK_OFFSET_();
+        }
+        DCHECK_ALIGNED_PARAM(offset_,
+                             GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
+        DCHECK_EQ(method_offsets.code_offset_,
+                  offset_ + sizeof(OatQuickMethodHeader) + compiled_method->CodeDelta())
+            << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+        const OatQuickMethodHeader& method_header =
+            oat_class->method_headers_[method_offsets_index_];
+        writer_->oat_header_->UpdateChecksum(&method_header, sizeof(method_header));
+        if (!out->WriteFully(&method_header, sizeof(method_header))) {
+          ReportWriteFailure("method header", it);
+          return false;
+        }
+        writer_->size_method_header_ += sizeof(method_header);
+        offset_ += sizeof(method_header);
+        DCHECK_OFFSET_();
 
-          if (!compiled_method->GetPatches().empty()) {
-            patched_code_.assign(quick_code->begin(), quick_code->end());
-            wrapped = ArrayRef<const uint8_t>(patched_code_);
-            for (const LinkerPatch& patch : compiled_method->GetPatches()) {
-              if (patch.Type() == kLinkerPatchCallRelative) {
-                // NOTE: Relative calls across oat files are not supported.
-                uint32_t target_offset = GetTargetOffset(patch);
-                uint32_t literal_offset = patch.LiteralOffset();
-                writer_->relative_patcher_->PatchCall(&patched_code_, literal_offset,
-                                                      offset_ + literal_offset, target_offset);
-              } else if (patch.Type() == kLinkerPatchDexCacheArray) {
-                uint32_t target_offset = GetDexCacheOffset(patch);
-                uint32_t literal_offset = patch.LiteralOffset();
-                writer_->relative_patcher_->PatchDexCacheReference(&patched_code_, patch,
-                                                                   offset_ + literal_offset,
-                                                                   target_offset);
-              } else if (patch.Type() == kLinkerPatchCall) {
-                uint32_t target_offset = GetTargetOffset(patch);
-                PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset);
-              } else if (patch.Type() == kLinkerPatchMethod) {
-                ArtMethod* method = GetTargetMethod(patch);
-                PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method);
-              } else if (patch.Type() == kLinkerPatchType) {
-                mirror::Class* type = GetTargetType(patch);
-                PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type);
-              }
+        if (!compiled_method->GetPatches().empty()) {
+          patched_code_.assign(quick_code->begin(), quick_code->end());
+          wrapped = ArrayRef<const uint8_t>(patched_code_);
+          for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+            if (patch.Type() == kLinkerPatchCallRelative) {
+              // NOTE: Relative calls across oat files are not supported.
+              uint32_t target_offset = GetTargetOffset(patch);
+              uint32_t literal_offset = patch.LiteralOffset();
+              writer_->relative_patcher_->PatchCall(&patched_code_, literal_offset,
+                                                    offset_ + literal_offset, target_offset);
+            } else if (patch.Type() == kLinkerPatchDexCacheArray) {
+              uint32_t target_offset = GetDexCacheOffset(patch);
+              uint32_t literal_offset = patch.LiteralOffset();
+              writer_->relative_patcher_->PatchDexCacheReference(&patched_code_, patch,
+                                                                 offset_ + literal_offset,
+                                                                 target_offset);
+            } else if (patch.Type() == kLinkerPatchCall) {
+              uint32_t target_offset = GetTargetOffset(patch);
+              PatchCodeAddress(&patched_code_, patch.LiteralOffset(), target_offset);
+            } else if (patch.Type() == kLinkerPatchMethod) {
+              ArtMethod* method = GetTargetMethod(patch);
+              PatchMethodAddress(&patched_code_, patch.LiteralOffset(), method);
+            } else if (patch.Type() == kLinkerPatchType) {
+              mirror::Class* type = GetTargetType(patch);
+              PatchObjectAddress(&patched_code_, patch.LiteralOffset(), type);
             }
           }
+        }
 
-            writer_->oat_header_->UpdateChecksum(wrapped.data(), code_size);
-            if (!out->WriteFully(wrapped.data(), code_size)) {
-              ReportWriteFailure("method code", it);
-              return false;
-            }
-            writer_->size_code_ += code_size;
-            offset_ += code_size;
+        writer_->oat_header_->UpdateChecksum(wrapped.data(), code_size);
+        if (!out->WriteFully(wrapped.data(), code_size)) {
+          ReportWriteFailure("method code", it);
+          return false;
         }
-        DCHECK_OFFSET_();
+        writer_->size_code_ += code_size;
+        offset_ += code_size;
       }
+      DCHECK_OFFSET_();
 
       ++method_offsets_index_;
     }
 
@@ -789,7 +793,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   ArtMethod* GetTargetMethod(const LinkerPatch& patch)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     MethodReference ref = patch.TargetMethod();
     mirror::DexCache* dex_cache = (dex_file_ == ref.dex_file) ?
        dex_cache_ : class_linker_->FindDexCache(*ref.dex_file);
@@ -799,7 +803,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
    return method;
  }
 
-  uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
    auto target_it = writer_->method_offset_map_.map.find(patch.TargetMethod());
    uint32_t target_offset =
        (target_it != writer_->method_offset_map_.map.end()) ? target_it->second : 0u;
@@ -824,7 +828,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   mirror::Class* GetTargetType(const LinkerPatch& patch)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile()) ?
         dex_cache_ : class_linker_->FindDexCache(*patch.TargetTypeDexFile());
     mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
@@ -832,7 +836,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
     return type;
   }
 
-  uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
     if (writer_->image_writer_ != nullptr) {
       auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress(
           patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -845,7 +849,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
     // type pointers across oat files do. (TODO: Investigate why.)
     if (writer_->image_writer_ != nullptr) {
@@ -861,7 +865,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
     // type pointers across oat files do. (TODO: Investigate why.)
     if (writer_->image_writer_ != nullptr) {
@@ -878,7 +882,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
   }
 
   void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+      SHARED_REQUIRES(Locks::mutator_lock_) {
     uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
         PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
                               writer_->oat_data_offset_ + target_offset);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 82b9377c07..760fb7c12c 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -165,9 +165,9 @@ class OatWriter {
   size_t InitOatClasses(size_t offset);
   size_t InitOatMaps(size_t offset);
   size_t InitOatCode(size_t offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
   size_t InitOatCodeDexFiles(size_t offset)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool WriteTables(OutputStream* out, const size_t file_offset);
   size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
@@ -178,7 +178,7 @@ class OatWriter {
 
   class OatDexFile {
    public:
-    explicit OatDexFile(size_t offset, const DexFile& dex_file);
+    OatDexFile(size_t offset, const DexFile& dex_file);
     size_t SizeOf() const;
     void UpdateChecksum(OatHeader* oat_header) const;
     bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
@@ -200,10 +200,10 @@ class OatWriter {
 
   class OatClass {
    public:
-    explicit OatClass(size_t offset,
-                      const std::vector<CompiledMethod*>& compiled_methods,
-                      uint32_t num_non_null_compiled_methods,
-                      mirror::Class::Status status);
+    OatClass(size_t offset,
+             const std::vector<CompiledMethod*>& compiled_methods,
+             uint32_t num_non_null_compiled_methods,
+             mirror::Class::Status status);
     ~OatClass();
     size_t GetOatMethodOffsetsOffsetFromOatHeader(size_t class_def_method_index_) const;
     size_t GetOatMethodOffsetsOffsetFromOatClass(size_t class_def_method_index_) const;
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index daf7d67746..84201c39a7 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -119,6 +119,14 @@ void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
   // Check if the selection negates/preserves the value of the condition and
   // if so, generate a suitable replacement instruction.
   HInstruction* if_condition = if_instruction->InputAt(0);
+
+  // Don't change FP compares. The definition of compares involving NaNs forces
+  // the compares to be done as written by the user.
+  if (if_condition->IsCondition() &&
+      Primitive::IsFloatingPointType(if_condition->InputAt(0)->GetType())) {
+    return;
+  }
+
   HInstruction* replacement;
   if (NegatesCondition(true_value, false_value)) {
     replacement = GetOppositeCondition(if_condition);
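(For context on the FP guard added above: negating a compare is only sound when the ordering is total. With a NaN input, both `a < b` and `a >= b` are false, so replacing a boolean selection with the opposite condition would change behavior. A minimal standalone illustration, independent of ART's HGraph types:)

    #include <cassert>
    #include <cmath>

    int main() {
      double a = std::nan("");
      double b = 1.0;
      // Both orderings are false for NaN, so '<' and '>=' are not opposites here.
      assert(!(a < b));
      assert(!(a >= b));
      return 0;
    }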
@@ -146,11 +154,6 @@ void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
   // entry block. Any following blocks would have had the join block
   // as a dominator, and `MergeWith` handles changing that to the
   // entry block.
-
-  // Remove the original condition if it is now unused.
-  if (!if_condition->HasUses()) {
-    if_condition->GetBlock()->RemoveInstructionOrPhi(if_condition);
-  }
 }
 
 void HBooleanSimplifier::Run() {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 54155dbef4..88414980b8 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -259,14 +259,20 @@ bool HGraphBuilder::SkipCompilation(const DexFile::CodeItem& code_item,
   return false;
 }
 
-bool HGraphBuilder::IsBlockInPcRange(HBasicBlock* block,
-                                     uint32_t dex_pc_start,
-                                     uint32_t dex_pc_end) {
-  uint32_t dex_pc = block->GetDexPc();
-  return block != entry_block_
-      && block != exit_block_
-      && dex_pc >= dex_pc_start
-      && dex_pc < dex_pc_end;
+static const DexFile::TryItem* GetTryItem(HBasicBlock* block,
+                                          const DexFile::CodeItem& code_item,
+                                          const ArenaBitVector& can_block_throw) {
+  DCHECK(!block->IsSingleTryBoundary());
+
+  // Block does not contain throwing instructions. Even if it is covered by
+  // a TryItem, we will consider it not in a try block.
+  if (!can_block_throw.IsBitSet(block->GetBlockId())) {
+    return nullptr;
+  }
+
+  // Instructions in the block may throw. Find a TryItem covering this block.
+  int32_t try_item_idx = DexFile::FindTryItem(code_item, block->GetDexPc());
+  return (try_item_idx == -1) ? nullptr : DexFile::GetTryItems(code_item, try_item_idx);
 }
 
 void HGraphBuilder::CreateBlocksForTryCatch(const DexFile::CodeItem& code_item) {
@@ -327,108 +333,130 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
     return;
   }
 
-  for (size_t idx = 0; idx < code_item.tries_size_; ++idx) {
-    const DexFile::TryItem* try_item = DexFile::GetTryItems(code_item, idx);
-    uint32_t try_start = try_item->start_addr_;
-    uint32_t try_end = try_start + try_item->insn_count_;
-
-    // Iterate over all blocks in the dex pc range of the TryItem and:
-    //   (a) split edges which enter/exit the try range,
-    //   (b) create TryBoundary instructions in the new blocks,
-    //   (c) link the new blocks to corresponding exception handlers.
-    for (uint32_t inner_pc = try_start; inner_pc < try_end; ++inner_pc) {
-      HBasicBlock* try_block = FindBlockStartingAt(inner_pc);
-      if (try_block == nullptr) {
-        continue;
+  // Bit vector stores information on which blocks contain throwing instructions.
+  // Must be expandable because catch blocks may be split into two.
+  ArenaBitVector can_block_throw(arena_, graph_->GetBlocks().Size(), /* expandable */ true);
+
+  // Scan blocks and mark those which contain throwing instructions.
+  for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
+    HBasicBlock* block = graph_->GetBlocks().Get(block_id);
+    bool can_throw = false;
+    for (HInstructionIterator insn(block->GetInstructions()); !insn.Done(); insn.Advance()) {
+      if (insn.Current()->CanThrow()) {
+        can_throw = true;
+        break;
       }
     }
 
-      if (try_block->IsCatchBlock()) {
+    if (can_throw) {
+      if (block->IsCatchBlock()) {
         // Catch blocks are always considered an entry point into the TryItem in
-        // order to avoid splitting exceptional edges (they might not have been
-        // created yet). We separate the move-exception (if present) from the
-        // rest of the block and insert a TryBoundary after it, creating a
-        // landing pad for the exceptional edges.
-        HInstruction* first_insn = try_block->GetFirstInstruction();
-        HInstruction* split_position = nullptr;
+        // order to avoid splitting exceptional edges. We split the block after
+        // the move-exception (if present) and mark the first part non-throwing.
+        // Later on, a TryBoundary will be inserted between the two blocks.
+        HInstruction* first_insn = block->GetFirstInstruction();
         if (first_insn->IsLoadException()) {
           // Catch block starts with a LoadException. Split the block after the
-          // StoreLocal that must come after the load.
+          // StoreLocal and ClearException which must come after the load.
           DCHECK(first_insn->GetNext()->IsStoreLocal());
-          split_position = first_insn->GetNext()->GetNext();
+          DCHECK(first_insn->GetNext()->GetNext()->IsClearException());
+          block = block->SplitBefore(first_insn->GetNext()->GetNext()->GetNext());
         } else {
-          // Catch block does not obtain the exception. Split at the beginning
-          // to create an empty catch block.
-          split_position = first_insn;
+          // Catch block does not load the exception. Split at the beginning to
+          // create an empty catch block.
+          block = block->SplitBefore(first_insn);
         }
-        DCHECK(split_position != nullptr);
-        HBasicBlock* catch_block = try_block;
-        try_block = catch_block->SplitBefore(split_position);
-        SplitTryBoundaryEdge(catch_block, try_block, HTryBoundary::kEntry, code_item, *try_item);
-      } else {
-        // For non-catch blocks, find predecessors which are not covered by the
-        // same TryItem range. Such edges enter the try block and will have
-        // a TryBoundary inserted.
-        for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
-          HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
-          if (predecessor->IsSingleTryBoundary()) {
-            // The edge was already split because of an exit from a neighbouring
-            // TryItem. We split it again and insert an entry point.
-            if (kIsDebugBuild) {
-              HTryBoundary* last_insn = predecessor->GetLastInstruction()->AsTryBoundary();
-              DCHECK(!last_insn->IsEntry());
-              DCHECK_EQ(last_insn->GetNormalFlowSuccessor(), try_block);
-              DCHECK(try_block->IsFirstIndexOfPredecessor(predecessor, i));
-              DCHECK(!IsBlockInPcRange(predecessor->GetSinglePredecessor(), try_start, try_end));
-            }
-          } else if (!IsBlockInPcRange(predecessor, try_start, try_end)) {
-            // This is an entry point into the TryItem and the edge has not been
-            // split yet. That means that `predecessor` is not in a TryItem, or
-            // it is in a different TryItem and we happened to iterate over this
-            // block first. We split the edge and insert an entry point.
-          } else {
-            // Not an edge on the boundary of the try block.
-            continue;
-          }
-          SplitTryBoundaryEdge(predecessor, try_block, HTryBoundary::kEntry, code_item, *try_item);
+      }
+      can_block_throw.SetBit(block->GetBlockId());
+    }
+  }
+
+  // Iterate over all blocks, find those covered by some TryItem and:
+  //   (a) split edges which enter/exit the try range,
+  //   (b) create TryBoundary instructions in the new blocks,
+  //   (c) link the new blocks to corresponding exception handlers.
+  // We cannot iterate only over blocks in `branch_targets_` because switch-case
+  // blocks share the same dex_pc.
+  for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
+    HBasicBlock* try_block = graph_->GetBlocks().Get(block_id);
+
+    // TryBoundary blocks are added at the end of the list and not iterated over.
+    DCHECK(!try_block->IsSingleTryBoundary());
+
+    // Find the TryItem for this block.
+    const DexFile::TryItem* try_item = GetTryItem(try_block, code_item, can_block_throw);
+    if (try_item == nullptr) {
+      continue;
+    }
+
+    // Catch blocks were split earlier and cannot throw.
+    DCHECK(!try_block->IsCatchBlock());
+
+    // Find predecessors which are not covered by the same TryItem range. Such
+    // edges enter the try block and will have a TryBoundary inserted.
+    for (size_t i = 0; i < try_block->GetPredecessors().Size(); ++i) {
+      HBasicBlock* predecessor = try_block->GetPredecessors().Get(i);
+      if (predecessor->IsSingleTryBoundary()) {
+        // The edge was already split because of an exit from a neighbouring
+        // TryItem. We split it again and insert an entry point.
+        if (kIsDebugBuild) {
+          HTryBoundary* last_insn = predecessor->GetLastInstruction()->AsTryBoundary();
+          const DexFile::TryItem* predecessor_try_item =
+              GetTryItem(predecessor->GetSinglePredecessor(), code_item, can_block_throw);
+          DCHECK(!last_insn->IsEntry());
+          DCHECK_EQ(last_insn->GetNormalFlowSuccessor(), try_block);
+          DCHECK(try_block->IsFirstIndexOfPredecessor(predecessor, i));
+          DCHECK_NE(try_item, predecessor_try_item);
         }
+      } else if (GetTryItem(predecessor, code_item, can_block_throw) != try_item) {
+        // This is an entry point into the TryItem and the edge has not been
+        // split yet. That means that `predecessor` is not in a TryItem, or
+        // it is in a different TryItem and we happened to iterate over this
+        // block first. We split the edge and insert an entry point.
+      } else {
+        // Not an edge on the boundary of the try block.
+        continue;
       }
-
-      // Find successors which are not covered by the same TryItem range. Such
-      // edges exit the try block and will have a TryBoundary inserted.
-      for (size_t i = 0; i < try_block->GetSuccessors().Size(); ++i) {
-        HBasicBlock* successor = try_block->GetSuccessors().Get(i);
-        if (successor->IsCatchBlock()) {
-          // A catch block is always considered an entry point into its TryItem.
-          // We therefore assume this is an exit point, regardless of whether
-          // the catch block is in a different TryItem or not.
-        } else if (successor->IsSingleTryBoundary()) {
-          // The edge was already split because of an entry into a neighbouring
-          // TryItem. We split it again and insert an exit.
-          if (kIsDebugBuild) {
-            HTryBoundary* last_insn = successor->GetLastInstruction()->AsTryBoundary();
-            DCHECK_EQ(try_block, successor->GetSinglePredecessor());
-            DCHECK(last_insn->IsEntry());
-            DCHECK(!IsBlockInPcRange(last_insn->GetNormalFlowSuccessor(), try_start, try_end));
-          }
-        } else if (!IsBlockInPcRange(successor, try_start, try_end)) {
-          // This is an exit out of the TryItem and the edge has not been split
-          // yet. That means that either `successor` is not in a TryItem, or it
-          // is in a different TryItem and we happened to iterate over this
-          // block first. We split the edge and insert an exit.
-          HInstruction* last_instruction = try_block->GetLastInstruction();
-          if (last_instruction->IsReturn() || last_instruction->IsReturnVoid()) {
-            DCHECK_EQ(successor, exit_block_);
-            // Control flow exits the try block with a Return(Void). Because
-            // splitting the edge would invalidate the invariant that Return
-            // always jumps to Exit, we move the Return outside the try block.
-            successor = try_block->SplitBefore(last_instruction);
-          }
-        } else {
-          // Not an edge on the boundary of the try block.
-          continue;
+      SplitTryBoundaryEdge(predecessor, try_block, HTryBoundary::kEntry, code_item, *try_item);
+    }
+
+    // Find successors which are not covered by the same TryItem range. Such
+    // edges exit the try block and will have a TryBoundary inserted.
+    for (size_t i = 0; i < try_block->GetSuccessors().Size(); ++i) {
+      HBasicBlock* successor = try_block->GetSuccessors().Get(i);
+      if (successor->IsCatchBlock()) {
+        // A catch block is always considered an entry point into its TryItem.
+        // We therefore assume this is an exit point, regardless of whether
+        // the catch block is in a different TryItem or not.
+      } else if (successor->IsSingleTryBoundary()) {
+        // The edge was already split because of an entry into a neighbouring
+        // TryItem. We split it again and insert an exit.
+        if (kIsDebugBuild) {
+          HTryBoundary* last_insn = successor->GetLastInstruction()->AsTryBoundary();
+          const DexFile::TryItem* successor_try_item =
+              GetTryItem(last_insn->GetNormalFlowSuccessor(), code_item, can_block_throw);
+          DCHECK_EQ(try_block, successor->GetSinglePredecessor());
+          DCHECK(last_insn->IsEntry());
+          DCHECK_NE(try_item, successor_try_item);
         }
+      } else if (GetTryItem(successor, code_item, can_block_throw) != try_item) {
+        // This is an exit out of the TryItem and the edge has not been split
+        // yet. That means that either `successor` is not in a TryItem, or it
+        // is in a different TryItem and we happened to iterate over this
+        // block first. We split the edge and insert an exit.
+        HInstruction* last_instruction = try_block->GetLastInstruction();
+        if (last_instruction->IsReturn() || last_instruction->IsReturnVoid()) {
+          DCHECK_EQ(successor, exit_block_);
+          // Control flow exits the try block with a Return(Void). Because
+          // splitting the edge would invalidate the invariant that Return
+          // always jumps to Exit, we move the Return outside the try block.
+          successor = try_block->SplitBefore(last_instruction);
        }
+      } else {
+        // Not an edge on the boundary of the try block.
+        continue;
       }
+      SplitTryBoundaryEdge(try_block, successor, HTryBoundary::kExit, code_item, *try_item);
     }
   }
 }
@@ -487,14 +515,14 @@ bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
   // Add the suspend check to the entry block.
   entry_block_->AddInstruction(new (arena_) HSuspendCheck(0));
   entry_block_->AddInstruction(new (arena_) HGoto());
+  // Add the exit block at the end.
+  graph_->AddBlock(exit_block_);
 
   // Iterate over blocks covered by TryItems and insert TryBoundaries at entry
   // and exit points. This requires all control-flow instructions and
   // non-exceptional edges to have been created.
   InsertTryBoundaryBlocks(code_item);
 
-  // Add the exit block at the end to give it the highest id.
-  graph_->AddBlock(exit_block_);
   return true;
 }
 
@@ -563,11 +591,10 @@ bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
       uint32_t target = dex_pc + table.GetEntryAt(i + offset);
       FindOrCreateBlockStartingAt(target);
 
-      // The next case gets its own block.
-      if (i < num_entries) {
-        block = new (arena_) HBasicBlock(graph_, target);
-        branch_targets_.Put(table.GetDexPcForIndex(i), block);
-      }
+      // Create a block for the switch-case logic. The block gets the dex_pc
+      // of the SWITCH instruction because it is part of its semantics.
+      block = new (arena_) HBasicBlock(graph_, dex_pc);
+      branch_targets_.Put(table.GetDexPcForIndex(i), block);
     }
 
     // Fall-through. Add a block if there is more code afterwards.
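(A rough standalone model of the block-to-TryItem mapping introduced by GetTryItem above — simplified stand-in types, and a linear scan where the real DexFile::FindTryItem does a binary search:)

    #include <cstdint>
    #include <vector>

    // Hypothetical, simplified stand-in for the dex TryItem structure.
    struct TryItem { uint32_t start_addr; uint32_t insn_count; };

    // A block is "in" a try range only if it can throw; a non-throwing block
    // covered by a TryItem is treated as if it were outside the range.
    const TryItem* FindTryItemFor(uint32_t dex_pc, bool block_can_throw,
                                  const std::vector<TryItem>& tries) {
      if (!block_can_throw) {
        return nullptr;
      }
      for (const TryItem& item : tries) {
        if (dex_pc >= item.start_addr && dex_pc < item.start_addr + item.insn_count) {
          return &item;
        }
      }
      return nullptr;
    }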
@@ -649,7 +676,7 @@ void HGraphBuilder::Binop_23x_shift(const Instruction& instruction,
 
 void HGraphBuilder::Binop_23x_cmp(const Instruction& instruction,
                                   Primitive::Type type,
-                                  HCompare::Bias bias,
+                                  ComparisonBias bias,
                                   uint32_t dex_pc) {
   HInstruction* first = LoadLocal(instruction.VRegB(), type);
   HInstruction* second = LoadLocal(instruction.VRegC(), type);
@@ -730,6 +757,35 @@ void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type
   current_block_ = nullptr;
 }
 
+void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register,
+                                                  uint32_t dex_pc,
+                                                  HInvoke* actual_string) {
+  if (!graph_->IsDebuggable()) {
+    // Notify that we cannot compile with baseline. The dex registers aliasing
+    // with `original_dex_register` will be handled when we optimize
+    // (see HInstructionSimplifier::VisitFakeString).
+    can_use_baseline_for_string_init_ = false;
+    return;
+  }
+  const VerifiedMethod* verified_method =
+      compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
+  if (verified_method != nullptr) {
+    UpdateLocal(original_dex_register, actual_string);
+    const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+        verified_method->GetStringInitPcRegMap();
+    auto map_it = string_init_map.find(dex_pc);
+    if (map_it != string_init_map.end()) {
+      std::set<uint32_t> reg_set = map_it->second;
+      for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+        HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot);
+        UpdateLocal(*set_it, load_local);
+      }
+    }
+  } else {
+    can_use_baseline_for_string_init_ = false;
+  }
+}
+
 bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
                                 uint32_t dex_pc,
                                 uint32_t method_idx,
@@ -749,7 +805,9 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
       invoke_type = kDirect;
       break;
     case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_QUICK:
     case Instruction::INVOKE_VIRTUAL_RANGE:
+    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
       invoke_type = kVirtual;
       break;
     case Instruction::INVOKE_INTERFACE:
@@ -971,34 +1029,23 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
   if (clinit_check_requirement == HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit) {
     // Add the class initialization check as last input of `invoke`.
     DCHECK(clinit_check != nullptr);
+    DCHECK(!is_string_init);
     invoke->SetArgumentAt(argument_index, clinit_check);
+    argument_index++;
   }
 
-  current_block_->AddInstruction(invoke);
-  latest_result_ = invoke;
-
   // Add move-result for StringFactory method.
   if (is_string_init) {
     uint32_t orig_this_reg = is_range ? register_index : args[0];
-    UpdateLocal(orig_this_reg, invoke);
-    const VerifiedMethod* verified_method =
-        compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
-    if (verified_method == nullptr) {
-      LOG(WARNING) << "No verified method for method calling String.<init>: "
-                   << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_);
-      return false;
-    }
-    const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
-        verified_method->GetStringInitPcRegMap();
-    auto map_it = string_init_map.find(dex_pc);
-    if (map_it != string_init_map.end()) {
-      std::set<uint32_t> reg_set = map_it->second;
-      for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
-        HInstruction* load_local = LoadLocal(orig_this_reg, Primitive::kPrimNot);
-        UpdateLocal(*set_it, load_local);
-      }
-    }
+    HInstruction* fake_string = LoadLocal(orig_this_reg, Primitive::kPrimNot);
+    invoke->SetArgumentAt(argument_index, fake_string);
+    current_block_->AddInstruction(invoke);
+    PotentiallySimplifyFakeString(orig_this_reg, dex_pc, invoke);
+  } else {
+    current_block_->AddInstruction(invoke);
   }
+  latest_result_ = invoke;
+
   return true;
 }
 
@@ -1007,7 +1054,15 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
                                              bool is_put) {
   uint32_t source_or_dest_reg = instruction.VRegA_22c();
   uint32_t obj_reg = instruction.VRegB_22c();
-  uint16_t field_index = instruction.VRegC_22c();
+  uint16_t field_index;
+  if (instruction.IsQuickened()) {
+    if (!CanDecodeQuickenedInfo()) {
+      return false;
+    }
+    field_index = LookupQuickenedInfo(dex_pc);
+  } else {
+    field_index = instruction.VRegC_22c();
+  }
 
   ScopedObjectAccess soa(Thread::Current());
   ArtField* resolved_field =
@@ -1516,6 +1571,17 @@ void HGraphBuilder::PotentiallyAddSuspendCheck(HBasicBlock* target, uint32_t dex
   }
 }
 
+bool HGraphBuilder::CanDecodeQuickenedInfo() const {
+  return interpreter_metadata_ != nullptr;
+}
+
+uint16_t HGraphBuilder::LookupQuickenedInfo(uint32_t dex_pc) {
+  DCHECK(interpreter_metadata_ != nullptr);
+  uint32_t dex_pc_in_map = DecodeUnsignedLeb128(&interpreter_metadata_);
+  DCHECK_EQ(dex_pc, dex_pc_in_map);
+  return DecodeUnsignedLeb128(&interpreter_metadata_);
+}
+
 bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
   if (current_block_ == nullptr) {
     return true;  // Dead code
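(LookupQuickenedInfo above appears to walk a stream of (dex_pc, index) pairs, each encoded as unsigned LEB128. A minimal standalone decoder for that encoding — ART's DecodeUnsignedLeb128 advances the cursor the same way, though its actual implementation differs in detail:)

    #include <cstdint>

    // Reads one unsigned LEB128 value and advances *data past it.
    // Each byte contributes 7 payload bits; the high bit flags continuation.
    uint32_t DecodeUleb128(const uint8_t** data) {
      const uint8_t* ptr = *data;
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *ptr++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      *data = ptr;
      return result;
    }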
@@ -1613,6 +1679,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
       break;
     }
 
+    case Instruction::RETURN_VOID_NO_BARRIER:
     case Instruction::RETURN_VOID: {
       BuildReturn(instruction, Primitive::kPrimVoid);
       break;
@@ -1661,8 +1728,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     case Instruction::INVOKE_INTERFACE:
     case Instruction::INVOKE_STATIC:
     case Instruction::INVOKE_SUPER:
-    case Instruction::INVOKE_VIRTUAL: {
-      uint32_t method_idx = instruction.VRegB_35c();
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_QUICK: {
+      uint16_t method_idx;
+      if (instruction.Opcode() == Instruction::INVOKE_VIRTUAL_QUICK) {
+        if (!CanDecodeQuickenedInfo()) {
+          return false;
+        }
+        method_idx = LookupQuickenedInfo(dex_pc);
+      } else {
+        method_idx = instruction.VRegB_35c();
+      }
       uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
       uint32_t args[5];
       instruction.GetVarArgs(args);
@@ -1677,8 +1753,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     case Instruction::INVOKE_INTERFACE_RANGE:
     case Instruction::INVOKE_STATIC_RANGE:
    case Instruction::INVOKE_SUPER_RANGE:
-    case Instruction::INVOKE_VIRTUAL_RANGE: {
-      uint32_t method_idx = instruction.VRegB_3rc();
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+      uint16_t method_idx;
+      if (instruction.Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK) {
+        if (!CanDecodeQuickenedInfo()) {
+          return false;
+        }
+        method_idx = LookupQuickenedInfo(dex_pc);
+      } else {
+        method_idx = instruction.VRegB_3rc();
+      }
       uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
       uint32_t register_index = instruction.VRegC();
       if (!BuildInvoke(instruction, dex_pc, method_idx,
@@ -2213,10 +2298,10 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     case Instruction::NEW_INSTANCE: {
       uint16_t type_index = instruction.VRegB_21c();
       if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
-        // Turn new-instance of string into a const 0.
         int32_t register_index = instruction.VRegA();
-        HNullConstant* constant = graph_->GetNullConstant();
-        UpdateLocal(register_index, constant);
+        HFakeString* fake_string = new (arena_) HFakeString();
+        current_block_->AddInstruction(fake_string);
+        UpdateLocal(register_index, fake_string);
       } else {
         QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
             ? kQuickAllocObjectWithAccessCheck
@@ -2303,27 +2388,27 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     }
 
     case Instruction::CMP_LONG: {
-      Binop_23x_cmp(instruction, Primitive::kPrimLong, HCompare::kNoBias, dex_pc);
+      Binop_23x_cmp(instruction, Primitive::kPrimLong, ComparisonBias::kNoBias, dex_pc);
       break;
     }
 
     case Instruction::CMPG_FLOAT: {
-      Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kGtBias, dex_pc);
+      Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kGtBias, dex_pc);
       break;
     }
 
     case Instruction::CMPG_DOUBLE: {
-      Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kGtBias, dex_pc);
+      Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kGtBias, dex_pc);
      break;
     }
 
     case Instruction::CMPL_FLOAT: {
-      Binop_23x_cmp(instruction, Primitive::kPrimFloat, HCompare::kLtBias, dex_pc);
+      Binop_23x_cmp(instruction, Primitive::kPrimFloat, ComparisonBias::kLtBias, dex_pc);
       break;
     }
 
     case Instruction::CMPL_DOUBLE: {
-      Binop_23x_cmp(instruction, Primitive::kPrimDouble, HCompare::kLtBias, dex_pc);
+      Binop_23x_cmp(instruction, Primitive::kPrimDouble, ComparisonBias::kLtBias, dex_pc);
       break;
     }
 
@@ -2331,12 +2416,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
       break;
 
     case Instruction::IGET:
+    case Instruction::IGET_QUICK:
     case Instruction::IGET_WIDE:
+    case Instruction::IGET_WIDE_QUICK:
     case Instruction::IGET_OBJECT:
+    case Instruction::IGET_OBJECT_QUICK:
     case Instruction::IGET_BOOLEAN:
+    case Instruction::IGET_BOOLEAN_QUICK:
     case Instruction::IGET_BYTE:
+    case Instruction::IGET_BYTE_QUICK:
     case Instruction::IGET_CHAR:
-    case Instruction::IGET_SHORT: {
+    case Instruction::IGET_CHAR_QUICK:
+    case Instruction::IGET_SHORT:
+    case Instruction::IGET_SHORT_QUICK: {
       if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
         return false;
       }
@@ -2344,12 +2436,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     }
 
     case Instruction::IPUT:
+    case Instruction::IPUT_QUICK:
     case Instruction::IPUT_WIDE:
+    case Instruction::IPUT_WIDE_QUICK:
     case Instruction::IPUT_OBJECT:
+    case Instruction::IPUT_OBJECT_QUICK:
     case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BOOLEAN_QUICK:
    case Instruction::IPUT_BYTE:
+    case Instruction::IPUT_BYTE_QUICK:
     case Instruction::IPUT_CHAR:
-    case Instruction::IPUT_SHORT: {
+    case Instruction::IPUT_CHAR_QUICK:
+    case Instruction::IPUT_SHORT:
+    case Instruction::IPUT_SHORT_QUICK: {
       if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
         return false;
       }
@@ -2454,6 +2553,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     case Instruction::MOVE_EXCEPTION: {
       current_block_->AddInstruction(new (arena_) HLoadException());
       UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
+      current_block_->AddInstruction(new (arena_) HClearException());
       break;
     }
 
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index cae762b49f..ad5d92345b 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -39,7 +39,8 @@ class HGraphBuilder : public ValueObject {
                 const DexCompilationUnit* const outer_compilation_unit,
                 const DexFile* dex_file,
                 CompilerDriver* driver,
-                OptimizingCompilerStats* compiler_stats)
+                OptimizingCompilerStats* compiler_stats,
+                const uint8_t* interpreter_metadata)
       : arena_(graph->GetArena()),
         branch_targets_(graph->GetArena(), 0),
         locals_(graph->GetArena(), 0),
@@ -54,7 +55,9 @@ class HGraphBuilder : public ValueObject {
         return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
         code_start_(nullptr),
         latest_result_(nullptr),
-        compilation_stats_(compiler_stats) {}
+        can_use_baseline_for_string_init_(true),
+        compilation_stats_(compiler_stats),
+        interpreter_metadata_(interpreter_metadata) {}
 
   // Only for unit testing.
   HGraphBuilder(HGraph* graph, Primitive::Type return_type = Primitive::kPrimInt)
@@ -72,10 +75,15 @@ class HGraphBuilder : public ValueObject {
         return_type_(return_type),
         code_start_(nullptr),
         latest_result_(nullptr),
+        can_use_baseline_for_string_init_(true),
         compilation_stats_(nullptr) {}
 
   bool BuildGraph(const DexFile::CodeItem& code);
 
+  bool CanUseBaselineForStringInit() const {
+    return can_use_baseline_for_string_init_;
+  }
+
   static constexpr const char* kBuilderPassName = "builder";
 
  private:
@@ -98,9 +106,6 @@ class HGraphBuilder : public ValueObject {
   HBasicBlock* FindBlockStartingAt(int32_t dex_pc) const;
   HBasicBlock* FindOrCreateBlockStartingAt(int32_t dex_pc);
 
-  // Returns whether the dex_pc of `block` lies within the given range.
-  bool IsBlockInPcRange(HBasicBlock* block, uint32_t dex_pc_start, uint32_t dex_pc_end);
-
   // Adds new blocks to `branch_targets_` starting at the limits of TryItems and
   // their exception handlers.
   void CreateBlocksForTryCatch(const DexFile::CodeItem& code_item);
@@ -117,6 +122,9 @@ class HGraphBuilder : public ValueObject {
                             const DexFile::CodeItem& code_item,
                             const DexFile::TryItem& try_item);
 
+  bool CanDecodeQuickenedInfo() const;
+  uint16_t LookupQuickenedInfo(uint32_t dex_pc);
+
   void InitializeLocals(uint16_t count);
   HLocal* GetLocalAt(int register_index) const;
   void UpdateLocal(int register_index, HInstruction* instruction) const;
@@ -139,7 +147,7 @@ class HGraphBuilder : public ValueObject {
 
   void Binop_23x_cmp(const Instruction& instruction,
                      Primitive::Type type,
-                     HCompare::Bias bias,
+                     ComparisonBias bias,
                      uint32_t dex_pc);
 
   template<typename T>
@@ -254,6 +262,10 @@ class HGraphBuilder : public ValueObject {
   // Returns whether `type_index` points to the outer-most compiling method's class.
   bool IsOutermostCompilingClass(uint16_t type_index) const;
 
+  void PotentiallySimplifyFakeString(uint16_t original_dex_register,
+                                     uint32_t dex_pc,
+                                     HInvoke* invoke);
+
   ArenaAllocator* const arena_;
 
   // A list of the size of the dex code holding block information for
@@ -293,8 +305,15 @@ class HGraphBuilder : public ValueObject {
   // used by move-result instructions.
   HInstruction* latest_result_;
 
+  // We need to know whether we have built a graph that has calls to StringFactory
+  // and hasn't gone through the verifier. If the following flag is `false`, then
+  // we cannot compile with baseline.
+  bool can_use_baseline_for_string_init_;
+
   OptimizingCompilerStats* compilation_stats_;
 
+  const uint8_t* interpreter_metadata_;
+
   DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
 };
 
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4607ebe548..7d82f185a6 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -16,11 +16,26 @@
 
 #include "code_generator.h"
 
+#ifdef ART_ENABLE_CODEGEN_arm
 #include "code_generator_arm.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
 #include "code_generator_arm64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
 #include "code_generator_x86.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
 #include "code_generator_x86_64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
 #include "code_generator_mips64.h"
+#endif
+
 #include "compiled_method.h"
 #include "dex/verified_method.h"
 #include "driver/dex_compilation_unit.h"
@@ -31,6 +46,7 @@
 #include "mirror/array-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object_reference.h"
+#include "parallel_move_resolver.h"
 #include "ssa_liveness_analysis.h"
 #include "utils/assembler.h"
 #include "verifier/dex_gc_map.h"
@@ -516,34 +532,49 @@ CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                      const InstructionSetFeatures& isa_features,
                                      const CompilerOptions& compiler_options) {
   switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
     case kArm:
     case kThumb2: {
       return new arm::CodeGeneratorARM(graph,
                                        *isa_features.AsArmInstructionSetFeatures(),
                                        compiler_options);
     }
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
     case kArm64: {
       return new arm64::CodeGeneratorARM64(graph,
                                            *isa_features.AsArm64InstructionSetFeatures(),
                                            compiler_options);
     }
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
     case kMips:
+      UNUSED(compiler_options);
+      UNUSED(graph);
+      UNUSED(isa_features);
       return nullptr;
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
     case kMips64: {
       return new mips64::CodeGeneratorMIPS64(graph,
                                              *isa_features.AsMips64InstructionSetFeatures(),
                                              compiler_options);
     }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
     case kX86: {
       return new x86::CodeGeneratorX86(graph,
                                        *isa_features.AsX86InstructionSetFeatures(),
                                        compiler_options);
     }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
     case kX86_64: {
       return new x86_64::CodeGeneratorX86_64(graph,
                                              *isa_features.AsX86_64InstructionSetFeatures(),
                                              compiler_options);
     }
+#endif
     default:
       return nullptr;
   }
@@ -1005,7 +1036,39 @@ void CodeGenerator::EmitParallelMoves(Location from1,
   GetMoveResolver()->EmitNativeCode(&parallel_move);
 }
 
-void SlowPathCode::RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc) {
+void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) {
+  // Ensure that the call kind indication given to the register allocator is
+  // coherent with the runtime call generated, and that the GC side effect is
+  // set when required.
+  if (slow_path == nullptr) {
+    DCHECK(instruction->GetLocations()->WillCall()) << instruction->DebugName();
+    DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
+        << instruction->DebugName() << instruction->GetSideEffects().ToString();
+  } else {
+    DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath() || slow_path->IsFatal())
+        << instruction->DebugName() << slow_path->GetDescription();
+    DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+           // Control flow would not come back into the code if a fatal slow
+           // path is taken, so we do not care if it triggers GC.
+           slow_path->IsFatal() ||
+           // HDeoptimize is a special case: we know we are not coming back from
+           // it into the code.
+           instruction->IsDeoptimize())
+        << instruction->DebugName() << instruction->GetSideEffects().ToString()
+        << slow_path->GetDescription();
+  }
+
+  // Check the coherency of leaf information.
+  DCHECK(instruction->IsSuspendCheck()
+         || ((slow_path != nullptr) && slow_path->IsFatal())
+         || instruction->GetLocations()->CanCall()
+         || !IsLeafMethod())
+      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
+}
+
+void SlowPathCode::RecordPcInfo(CodeGenerator* codegen,
+                                HInstruction* instruction,
+                                uint32_t dex_pc) {
   codegen->RecordPcInfo(instruction, dex_pc, this);
 }
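(ValidateInvokeRuntime above encodes a contract: a direct runtime call must come from a WillCall() location summary and declare a GC side effect, while slow-path calls may instead be fatal, i.e. throw and never return. A standalone model of that decision logic — hypothetical booleans stand in for the real LocationSummary and SideEffects queries:)

    #include <cassert>

    // Sketch of the ValidateInvokeRuntime() contract, not the real API.
    void ValidateInvokeRuntimeModel(bool has_slow_path, bool is_fatal,
                                    bool will_call, bool only_calls_on_slow_path,
                                    bool can_trigger_gc, bool is_deoptimize) {
      if (!has_slow_path) {
        assert(will_call);         // direct call: a "call" location summary
        assert(can_trigger_gc);    // and the GC side effect must be declared
      } else {
        assert(only_calls_on_slow_path || is_fatal);
        // Fatal paths and deoptimization never return, so GC declaration is waived.
        assert(can_trigger_gc || is_fatal || is_deoptimize);
      }
    }

    int main() {
      ValidateInvokeRuntimeModel(/*has_slow_path=*/true, /*is_fatal=*/true,
                                 false, false, false, false);  // a throwing slow path
      return 0;
    }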
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 4cecd61365..25824448c5 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -98,6 +98,8 @@ class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
     return saved_fpu_stack_offsets_[reg];
   }
 
+  virtual bool IsFatal() const { return false; }
+
   virtual const char* GetDescription() const = 0;
 
  protected:
@@ -290,10 +292,18 @@ class CodeGenerator {
     return type == Primitive::kPrimNot && !value->IsNullConstant();
   }
 
+  void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);
+
   void AddAllocatedRegister(Location location) {
     allocated_registers_.Add(location);
   }
 
+  bool HasAllocatedRegister(bool is_core, int reg) const {
+    return is_core
+        ? allocated_registers_.ContainsCoreRegister(reg)
+        : allocated_registers_.ContainsFloatingPointRegister(reg);
+  }
+
   void AllocateLocations(HInstruction* instruction);
 
   // Tells whether the stack frame of the compiled method is
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e3683ef0dd..1bd42160d7 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -69,6 +69,8 @@ class NullCheckSlowPathARM : public SlowPathCodeARM {
         QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
   }
 
+  bool IsFatal() const OVERRIDE { return true; }
+
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }
 
  private:
@@ -87,6 +89,8 @@ class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
         QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
   }
 
+  bool IsFatal() const OVERRIDE { return true; }
+
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }
 
  private:
@@ -161,6 +165,8 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
         QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
   }
 
+  bool IsFatal() const OVERRIDE { return true; }
+
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }
 
  private:
@@ -334,7 +340,7 @@ class DeoptimizationSlowPathARM : public SlowPathCodeARM {
 #undef __
 #define __ down_cast<ArmAssembler*>(GetAssembler())->
 
-inline Condition ARMCondition(IfCondition cond) {
+inline Condition ARMSignedOrFPCondition(IfCondition cond) {
   switch (cond) {
     case kCondEQ: return EQ;
     case kCondNE: return NE;
@@ -342,24 +348,22 @@ inline Condition ARMCondition(IfCondition cond) {
     case kCondLE: return LE;
     case kCondGT: return GT;
     case kCondGE: return GE;
-    default:
-      LOG(FATAL) << "Unknown if condition";
   }
-  return EQ;        // Unreachable.
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
 }
 
-inline Condition ARMOppositeCondition(IfCondition cond) {
+inline Condition ARMUnsignedCondition(IfCondition cond) {
   switch (cond) {
-    case kCondEQ: return NE;
-    case kCondNE: return EQ;
-    case kCondLT: return GE;
-    case kCondLE: return GT;
-    case kCondGT: return LE;
-    case kCondGE: return LT;
-    default:
-      LOG(FATAL) << "Unknown if condition";
+    case kCondEQ: return EQ;
+    case kCondNE: return NE;
+    case kCondLT: return LO;
+    case kCondLE: return LS;
+    case kCondGT: return HI;
+    case kCondGE: return HS;
   }
-  return EQ;        // Unreachable.
+  LOG(FATAL) << "Unreachable";
+  UNREACHABLE();
 }
 
 void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -949,15 +953,10 @@ void CodeGeneratorARM::InvokeRuntime(int32_t entry_point_offset,
                                      HInstruction* instruction,
                                      uint32_t dex_pc,
                                      SlowPathCode* slow_path) {
+  ValidateInvokeRuntime(instruction, slow_path);
   __ LoadFromOffset(kLoadWord, LR, TR, entry_point_offset);
   __ blx(LR);
   RecordPcInfo(instruction, dex_pc, slow_path);
-  DCHECK(instruction->IsSuspendCheck()
-      || instruction->IsBoundsCheck()
-      || instruction->IsNullCheck()
-      || instruction->IsDivZeroCheck()
-      || instruction->GetLocations()->CanCall()
-      || !IsLeafMethod());
 }
 
 void InstructionCodeGeneratorARM::HandleGoto(HInstruction* got, HBasicBlock* successor) {
@@ -1008,6 +1007,142 @@ void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
   UNUSED(exit);
 }
 
+void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
+  ShifterOperand operand;
+  if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, right, &operand)) {
+    __ cmp(left, operand);
+  } else {
+    Register temp = IP;
+    __ LoadImmediate(temp, right);
+    __ cmp(left, ShifterOperand(temp));
+  }
+}
+
+void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
+                                                  Label* true_label,
+                                                  Label* false_label) {
+  __ vmstat();  // transfer FP status register to ARM APSR.
+  if (cond->IsFPConditionTrueIfNaN()) {
+    __ b(true_label, VS);  // VS for unordered.
+  } else if (cond->IsFPConditionFalseIfNaN()) {
+    __ b(false_label, VS);  // VS for unordered.
+  }
+  __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
+}
+
+void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
+                                                               Label* true_label,
+                                                               Label* false_label) {
+  LocationSummary* locations = cond->GetLocations();
+  Location left = locations->InAt(0);
+  Location right = locations->InAt(1);
+  IfCondition if_cond = cond->GetCondition();
+
+  Register left_high = left.AsRegisterPairHigh<Register>();
+  Register left_low = left.AsRegisterPairLow<Register>();
+  IfCondition true_high_cond = if_cond;
+  IfCondition false_high_cond = cond->GetOppositeCondition();
+  Condition final_condition = ARMUnsignedCondition(if_cond);
+
+  // Set the conditions for the test, remembering that == needs to be
+  // decided using the low words.
+  switch (if_cond) {
+    case kCondEQ:
+    case kCondNE:
+      // Nothing to do.
+      break;
+    case kCondLT:
+      false_high_cond = kCondGT;
+      break;
+    case kCondLE:
+      true_high_cond = kCondLT;
+      break;
+    case kCondGT:
+      false_high_cond = kCondLT;
+      break;
+    case kCondGE:
+      true_high_cond = kCondGT;
+      break;
+  }
+  if (right.IsConstant()) {
+    int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+    int32_t val_low = Low32Bits(value);
+    int32_t val_high = High32Bits(value);
+
+    GenerateCompareWithImmediate(left_high, val_high);
+    if (if_cond == kCondNE) {
+      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+    } else if (if_cond == kCondEQ) {
+      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+    } else {
+      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+    }
+    // Must be equal high, so compare the lows.
+    GenerateCompareWithImmediate(left_low, val_low);
+  } else {
+    Register right_high = right.AsRegisterPairHigh<Register>();
+    Register right_low = right.AsRegisterPairLow<Register>();
+
+    __ cmp(left_high, ShifterOperand(right_high));
+    if (if_cond == kCondNE) {
+      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+    } else if (if_cond == kCondEQ) {
+      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+    } else {
+      __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+      __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+    }
+    // Must be equal high, so compare the lows.
+    __ cmp(left_low, ShifterOperand(right_low));
+  }
+  // The last comparison might be unsigned.
+  __ b(true_label, final_condition);
+}
+
+void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HIf* if_instr,
+                                                               HCondition* condition,
+                                                               Label* true_target,
+                                                               Label* false_target,
+                                                               Label* always_true_target) {
+  LocationSummary* locations = condition->GetLocations();
+  Location left = locations->InAt(0);
+  Location right = locations->InAt(1);
+
+  // We don't want true_target as a nullptr.
+  if (true_target == nullptr) {
+    true_target = always_true_target;
+  }
+  bool falls_through = (false_target == nullptr);
+
+  // FP compares don't like null false_targets.
+  if (false_target == nullptr) {
+    false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+  }
+
+  Primitive::Type type = condition->InputAt(0)->GetType();
+  switch (type) {
+    case Primitive::kPrimLong:
+      GenerateLongComparesAndJumps(condition, true_target, false_target);
+      break;
+    case Primitive::kPrimFloat:
+      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+      GenerateFPJumps(condition, true_target, false_target);
+      break;
+    case Primitive::kPrimDouble:
+      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+      GenerateFPJumps(condition, true_target, false_target);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected compare type " << type;
+  }
+
+  if (!falls_through) {
+    __ b(false_target);
+  }
+}
+
 void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instruction,
                                                         Label* true_target,
                                                         Label* false_target,
@@ -1033,25 +1168,27 @@ void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instructio
     } else {
       // Condition has not been materialized, use its inputs as the
       // comparison and its condition as the branch condition.
+      Primitive::Type type =
+          cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt;
+      // Is this a long or FP comparison that has been folded into the HCondition?
+      if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) {
+        // Generate the comparison directly.
+        GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(),
+                                     true_target, false_target, always_true_target);
+        return;
+      }
+
       LocationSummary* locations = cond->GetLocations();
       DCHECK(locations->InAt(0).IsRegister()) << locations->InAt(0);
       Register left = locations->InAt(0).AsRegister<Register>();
-      if (locations->InAt(1).IsRegister()) {
-        __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
+      Location right = locations->InAt(1);
+      if (right.IsRegister()) {
+        __ cmp(left, ShifterOperand(right.AsRegister<Register>()));
       } else {
-        DCHECK(locations->InAt(1).IsConstant());
-        HConstant* constant = locations->InAt(1).GetConstant();
-        int32_t value = CodeGenerator::GetInt32ValueOf(constant);
-        ShifterOperand operand;
-        if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
-          __ cmp(left, operand);
-        } else {
-          Register temp = IP;
-          __ LoadImmediate(temp, value);
-          __ cmp(left, ShifterOperand(temp));
-        }
+        DCHECK(right.IsConstant());
+        GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
       }
-      __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
+      __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
     }
   }
   if (false_target != nullptr) {
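(GenerateLongComparesAndJumps above follows the classic 64-bit-compare-on-32-bit-registers pattern: a signed compare on the high words decides when they differ, and an unsigned compare on the low words decides ties. A portable sketch of the same decision procedure in plain C++, not emitted assembly:)

    #include <cstdint>

    // Returns true iff a < b, decomposed the way a 32-bit backend does it.
    bool LessThan64(int64_t a, int64_t b) {
      int32_t a_hi = static_cast<int32_t>(a >> 32);
      int32_t b_hi = static_cast<int32_t>(b >> 32);
      if (a_hi != b_hi) {
        return a_hi < b_hi;  // signed: the sign bit lives in the high word
      }
      uint32_t a_lo = static_cast<uint32_t>(a);
      uint32_t b_lo = static_cast<uint32_t>(b);
      return a_lo < b_lo;    // unsigned: the low word carries no sign
    }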
@@ -1104,37 +1241,88 @@ void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
 
 void LocationsBuilderARM::VisitCondition(HCondition* cond) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
-  if (cond->NeedsMaterialization()) {
-    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+  // Handle the long/FP comparisons made in instruction simplification.
+  switch (cond->InputAt(0)->GetType()) {
+    case Primitive::kPrimLong:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+      if (cond->NeedsMaterialization()) {
+        locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+      }
+      break;
+
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      if (cond->NeedsMaterialization()) {
+        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      }
+      break;
+
+    default:
+      locations->SetInAt(0, Location::RequiresRegister());
+      locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+      if (cond->NeedsMaterialization()) {
+        locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+      }
   }
 }
 
 void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
-  if (!cond->NeedsMaterialization()) return;
+  if (!cond->NeedsMaterialization()) {
+    return;
+  }
+
   LocationSummary* locations = cond->GetLocations();
-  Register left = locations->InAt(0).AsRegister<Register>();
+  Location left = locations->InAt(0);
+  Location right = locations->InAt(1);
+  Register out = locations->Out().AsRegister<Register>();
+  Label true_label, false_label;
 
-  if (locations->InAt(1).IsRegister()) {
-    __ cmp(left, ShifterOperand(locations->InAt(1).AsRegister<Register>()));
-  } else {
-    DCHECK(locations->InAt(1).IsConstant());
-    int32_t value = CodeGenerator::GetInt32ValueOf(locations->InAt(1).GetConstant());
-    ShifterOperand operand;
-    if (GetAssembler()->ShifterOperandCanHold(R0, left, CMP, value, &operand)) {
-      __ cmp(left, operand);
-    } else {
-      Register temp = IP;
-      __ LoadImmediate(temp, value);
-      __ cmp(left, ShifterOperand(temp));
+  switch (cond->InputAt(0)->GetType()) {
+    default: {
+      // Integer case.
+      if (right.IsRegister()) {
+        __ cmp(left.AsRegister<Register>(), ShifterOperand(right.AsRegister<Register>()));
+      } else {
+        DCHECK(right.IsConstant());
+        GenerateCompareWithImmediate(left.AsRegister<Register>(),
+                                     CodeGenerator::GetInt32ValueOf(right.GetConstant()));
+      }
+      __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
+      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
+             ARMSignedOrFPCondition(cond->GetCondition()));
+      __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
+             ARMSignedOrFPCondition(cond->GetOppositeCondition()));
+      return;
     }
+    case Primitive::kPrimLong:
+      GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+      break;
+    case Primitive::kPrimFloat:
+      __ vcmps(left.AsFpuRegister<SRegister>(), right.AsFpuRegister<SRegister>());
+      GenerateFPJumps(cond, &true_label, &false_label);
+      break;
+    case Primitive::kPrimDouble:
+      __ vcmpd(FromLowSToD(left.AsFpuRegisterPairLow<SRegister>()),
+               FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
+      GenerateFPJumps(cond, &true_label, &false_label);
+      break;
   }
-  __ it(ARMCondition(cond->GetCondition()), kItElse);
-  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
-         ARMCondition(cond->GetCondition()));
-  __ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
-         ARMOppositeCondition(cond->GetCondition()));
+
+  // Convert the jumps into the result.
+  Label done_label;
+
+  // False case: result = 0.
+  __ Bind(&false_label);
+  __ LoadImmediate(out, 0);
+  __ b(&done_label);
+
+  // True case: result = 1.
+  __ Bind(&true_label);
+  __ LoadImmediate(out, 1);
+  __ Bind(&done_label);
 }
 
 void LocationsBuilderARM::VisitEqual(HEqual* comp) {
@@ -2588,6 +2776,9 @@ void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction)
   Location value = locations->InAt(0);
 
   switch (instruction->GetType()) {
+    case Primitive::kPrimByte:
+    case Primitive::kPrimChar:
+    case Primitive::kPrimShort:
     case Primitive::kPrimInt: {
       if (value.IsRegister()) {
         __ CompareAndBranchIfZero(value.AsRegister<Register>(), slow_path->GetEntryLabel());
@@ -2913,7 +3104,7 @@ void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
             ShifterOperand(right.AsRegisterPairHigh<Register>()));  // Signed compare.
       __ b(&less, LT);
       __ b(&greater, GT);
-      // Do LoadImmediate before any `cmp`, as LoadImmediate might affect the status flags.
+      // Do LoadImmediate before the last `cmp`, as LoadImmediate might affect the status flags.
       __ LoadImmediate(out, 0);
       __ cmp(left.AsRegisterPairLow<Register>(),
              ShifterOperand(right.AsRegisterPairLow<Register>()));  // Unsigned compare.
@@ -2936,7 +3127,7 @@ void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
       LOG(FATAL) << "Unexpected compare type " << type;
   }
   __ b(&done, EQ);
-  __ b(&less, CC);  // CC is for both: unsigned compare for longs and 'less than' for floats.
+  __ b(&less, LO);  // LO is for both: unsigned compare for longs and 'less than' for floats.
 
   __ Bind(&greater);
   __ LoadImmediate(out, 1);
@@ -3710,7 +3901,7 @@ void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
   Register length = locations->InAt(1).AsRegister<Register>();
 
   __ cmp(index, ShifterOperand(length));
-  __ b(slow_path->GetEntryLabel(), CS);
+  __ b(slow_path->GetEntryLabel(), HS);
 }
 
 void CodeGeneratorARM::MarkGCCard(Register temp,
@@ -4103,6 +4294,10 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
   __ Bind(slow_path->GetExitLabel());
 }
 
+static int32_t GetExceptionTlsOffset() {
+  return Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+}
+
 void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
@@ -4111,10 +4306,16 @@ void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
 
 void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
   Register out = load->GetLocations()->Out().AsRegister<Register>();
-  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
-  __ LoadFromOffset(kLoadWord, out, TR, offset);
+  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderARM::VisitClearException(HClearException* clear) {
+  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorARM::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
   __ LoadImmediate(IP, 0);
-  __ StoreToOffset(kStoreWord, IP, TR, offset);
+  __ StoreToOffset(kStoreWord, IP, TR, GetExceptionTlsOffset());
 }
 
 void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
@@ -4365,6 +4566,18 @@ void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
   LOG(FATAL) << "Unreachable";
 }
 
+void LocationsBuilderARM::VisitFakeString(HFakeString* instruction) {
+  DCHECK(codegen_->IsBaseline());
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+  DCHECK(codegen_->IsBaseline());
+  // Will be generated at use site.
+}
+
 #undef __
 #undef QUICK_ENTRY_POINT
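(The new HClearException splits what MOVE_EXCEPTION's VisitLoadException used to do in one step: first read the pending-exception slot off the thread register, then null it out. A conceptual model of the two operations on per-thread state — hypothetical struct, with the real offset coming from Thread::ExceptionOffset<kArmWordSize>():)

    // Conceptual stand-in for the Thread object's pending-exception slot.
    struct ThreadSketch {
      void* exception;
    };

    void* LoadException(ThreadSketch* self) {   // what HLoadException reads
      return self->exception;
    }

    void ClearException(ThreadSketch* self) {   // what HClearException stores
      self->exception = nullptr;
    }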
InstructionCodeGeneratorARM::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) { + DCHECK(codegen_->IsBaseline()); + // Will be generated at use site. +} + #undef __ #undef QUICK_ENTRY_POINT diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index 1d10293b58..53bd766dd4 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -207,6 +207,14 @@ class InstructionCodeGeneratorARM : public HGraphVisitor { Label* true_target, Label* false_target, Label* always_true_target); + void GenerateCompareWithImmediate(Register left, int32_t right); + void GenerateCompareTestAndBranch(HIf* if_instr, + HCondition* condition, + Label* true_target, + Label* false_target, + Label* always_true_target); + void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label); + void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label); void DivRemOneOrMinusOne(HBinaryOperation* instruction); void DivRemByPowerOfTwo(HBinaryOperation* instruction); void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction); diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index a9a95d3649..b8ac421935 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -77,10 +77,9 @@ inline Condition ARM64Condition(IfCondition cond) { case kCondLE: return le; case kCondGT: return gt; case kCondGE: return ge; - default: - LOG(FATAL) << "Unknown if condition"; } - return nv; // Unreachable. + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); } Location ARM64ReturnLocation(Primitive::Type return_type) { @@ -213,6 +212,8 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 { CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>(); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; } private: @@ -235,6 +236,8 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 { CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; } private: @@ -345,6 +348,8 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 { CheckEntrypointTypes<kQuickThrowNullPointer, void, void>(); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; } private: @@ -355,8 +360,7 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 { class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 { public: - explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction, - HBasicBlock* successor) + SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor) : instruction_(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { @@ -657,6 +661,13 @@ void CodeGeneratorARM64::Move(HInstruction* instruction, Primitive::Type type = instruction->GetType(); DCHECK_NE(type, Primitive::kPrimVoid); + if (instruction->IsFakeString()) { + // The fake string is an alias for null. 
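// A sketch of the contract behind that alias (inferred from this block and
// the VisitFakeString implementations in this patch): baseline pins the
// HFakeString output to the graph's null constant,
//   locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
// and the instruction itself emits nothing ("will be generated at use
// site"), so moving from a fake string is simply a move of null.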
+ DCHECK(IsBaseline()); + instruction = locations->Out().GetConstant(); + DCHECK(instruction->IsNullConstant()) << instruction->DebugName(); + } + if (instruction->IsCurrentMethod()) { MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset)); } else if (locations != nullptr && locations->Out().Equals(location)) { @@ -905,7 +916,7 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri (source.IsFpuRegister() == Primitive::IsFloatingPointType(type))); __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination)); } else if (source.IsConstant()) { - DCHECK(unspecified_type || CoherentConstantAndType(source, type)); + DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type; UseScratchRegisterScope temps(GetVIXLAssembler()); HConstant* src_cst = source.GetConstant(); CPURegister temp; @@ -1091,15 +1102,11 @@ void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { + ValidateInvokeRuntime(instruction, slow_path); BlockPoolsScope block_pools(GetVIXLAssembler()); __ Ldr(lr, MemOperand(tr, entry_point_offset)); __ Blr(lr); RecordPcInfo(instruction, dex_pc, slow_path); - DCHECK(instruction->IsSuspendCheck() - || instruction->IsBoundsCheck() - || instruction->IsNullCheck() - || instruction->IsDivZeroCheck() - || !IsLeafMethod()); } void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, @@ -1477,9 +1484,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { source = HeapOperand(obj, offset); } else { Register temp = temps.AcquireSameSizeAs(obj); - Register index_reg = RegisterFrom(index, Primitive::kPrimInt); - __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(type))); - source = HeapOperand(temp, offset); + __ Add(temp, obj, offset); + source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type)); } codegen_->Load(type, OutputCPURegister(instruction), source); @@ -1562,9 +1568,11 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { destination = HeapOperand(obj, offset); } else { Register temp = temps.AcquireSameSizeAs(obj); - Register index_reg = InputRegisterAt(instruction, 1); - __ Add(temp, obj, Operand(index_reg, LSL, Primitive::ComponentSizeShift(value_type))); - destination = HeapOperand(temp, offset); + __ Add(temp, obj, offset); + destination = HeapOperand(temp, + XRegisterFrom(index), + LSL, + Primitive::ComponentSizeShift(value_type)); } codegen_->Store(value_type, source, destination); @@ -1645,6 +1653,11 @@ void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) { GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0)); } +static bool IsFloatingPointZeroConstant(HInstruction* instruction) { + return (instruction->IsFloatConstant() && (instruction->AsFloatConstant()->GetValue() == 0.0f)) + || (instruction->IsDoubleConstant() && (instruction->AsDoubleConstant()->GetValue() == 0.0)); +} + void LocationsBuilderARM64::VisitCompare(HCompare* compare) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall); @@ -1659,13 +1672,10 @@ void LocationsBuilderARM64::VisitCompare(HCompare* compare) { case Primitive::kPrimFloat: case Primitive::kPrimDouble: { locations->SetInAt(0, Location::RequiresFpuRegister()); - HInstruction* right = compare->InputAt(1); - if ((right->IsFloatConstant() && 
(right->AsFloatConstant()->GetValue() == 0.0f)) || - (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0))) { - locations->SetInAt(1, Location::ConstantLocation(right->AsConstant())); - } else { - locations->SetInAt(1, Location::RequiresFpuRegister()); - } + locations->SetInAt(1, + IsFloatingPointZeroConstant(compare->InputAt(1)) + ? Location::ConstantLocation(compare->InputAt(1)->AsConstant()) + : Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); break; } @@ -1696,12 +1706,8 @@ void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { Register result = OutputRegister(compare); FPRegister left = InputFPRegisterAt(compare, 0); if (compare->GetLocations()->InAt(1).IsConstant()) { - if (kIsDebugBuild) { - HInstruction* right = compare->GetLocations()->InAt(1).GetConstant(); - DCHECK((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) || - (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0))); - } - // 0.0 is the only immediate that can be encoded directly in a FCMP instruction. + DCHECK(IsFloatingPointZeroConstant(compare->GetLocations()->InAt(1).GetConstant())); + // 0.0 is the only immediate that can be encoded directly in an FCMP instruction. __ Fcmp(left, 0.0); } else { __ Fcmp(left, InputFPRegisterAt(compare, 1)); @@ -1721,8 +1727,19 @@ void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) { void LocationsBuilderARM64::VisitCondition(HCondition* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction)); + + if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, + IsFloatingPointZeroConstant(instruction->InputAt(1)) + ? Location::ConstantLocation(instruction->InputAt(1)->AsConstant()) + : Location::RequiresFpuRegister()); + } else { + // Integer cases. + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction)); + } + if (instruction->NeedsMaterialization()) { locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } @@ -1734,13 +1751,34 @@ void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) { } LocationSummary* locations = instruction->GetLocations(); - Register lhs = InputRegisterAt(instruction, 0); - Operand rhs = InputOperandAt(instruction, 1); Register res = RegisterFrom(locations->Out(), instruction->GetType()); - Condition cond = ARM64Condition(instruction->GetCondition()); - - __ Cmp(lhs, rhs); - __ Cset(res, cond); + IfCondition if_cond = instruction->GetCondition(); + Condition arm64_cond = ARM64Condition(if_cond); + + if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) { + FPRegister lhs = InputFPRegisterAt(instruction, 0); + if (locations->InAt(1).IsConstant()) { + DCHECK(IsFloatingPointZeroConstant(locations->InAt(1).GetConstant())); + // 0.0 is the only immediate that can be encoded directly in an FCMP instruction. + __ Fcmp(lhs, 0.0); + } else { + __ Fcmp(lhs, InputFPRegisterAt(instruction, 1)); + } + __ Cset(res, arm64_cond); + if (instruction->IsFPConditionTrueIfNaN()) { + // res = IsUnordered(arm64_cond) ? 1 : res <=> res = IsNotUnordered(arm64_cond) ? 
res : 1 + __ Csel(res, res, Operand(1), vc); // VC for "not unordered". + } else if (instruction->IsFPConditionFalseIfNaN()) { + // res = IsUnordered(arm64_cond) ? 0 : res <=> res = IsNotUnordered(arm64_cond) ? res : 0 + __ Csel(res, res, Operand(0), vc); // VC for "not unordered". + } + } else { + // Integer cases. + Register lhs = InputRegisterAt(instruction, 0); + Operand rhs = InputOperandAt(instruction, 1); + __ Cmp(lhs, rhs); + __ Cset(res, arm64_cond); + } } #define FOR_EACH_CONDITION_INSTRUCTION(M) \ @@ -1961,8 +1999,8 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction Primitive::Type type = instruction->GetType(); - if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) { - LOG(FATAL) << "Unexpected type " << type << "for DivZeroCheck."; + if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) { + LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck."; return; } @@ -2072,33 +2110,58 @@ void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruct } else { // The condition instruction has not been materialized, use its inputs as // the comparison and its condition as the branch condition. - Register lhs = InputRegisterAt(condition, 0); - Operand rhs = InputOperandAt(condition, 1); - Condition arm64_cond = ARM64Condition(condition->GetCondition()); - if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) { - switch (arm64_cond) { - case eq: - __ Cbz(lhs, true_target); - break; - case ne: - __ Cbnz(lhs, true_target); - break; - case lt: - // Test the sign bit and branch accordingly. - __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target); - break; - case ge: - // Test the sign bit and branch accordingly. - __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target); - break; - default: - // Without the `static_cast` the compiler throws an error for - // `-Werror=sign-promo`. - LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond); + Primitive::Type type = + cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt; + + if (Primitive::IsFloatingPointType(type)) { + // FP compares don't like null false_targets. + if (false_target == nullptr) { + false_target = codegen_->GetLabelOf(instruction->AsIf()->IfFalseSuccessor()); + } + FPRegister lhs = InputFPRegisterAt(condition, 0); + if (condition->GetLocations()->InAt(1).IsConstant()) { + DCHECK(IsFloatingPointZeroConstant(condition->GetLocations()->InAt(1).GetConstant())); + // 0.0 is the only immediate that can be encoded directly in an FCMP instruction. + __ Fcmp(lhs, 0.0); + } else { + __ Fcmp(lhs, InputFPRegisterAt(condition, 1)); } + if (condition->IsFPConditionTrueIfNaN()) { + __ B(vs, true_target); // VS for unordered. + } else if (condition->IsFPConditionFalseIfNaN()) { + __ B(vs, false_target); // VS for unordered. + } + __ B(ARM64Condition(condition->GetCondition()), true_target); } else { - __ Cmp(lhs, rhs); - __ B(arm64_cond, true_target); + // Integer cases. + Register lhs = InputRegisterAt(condition, 0); + Operand rhs = InputOperandAt(condition, 1); + Condition arm64_cond = ARM64Condition(condition->GetCondition()); + if ((arm64_cond != gt && arm64_cond != le) && rhs.IsImmediate() && (rhs.immediate() == 0)) { + switch (arm64_cond) { + case eq: + __ Cbz(lhs, true_target); + break; + case ne: + __ Cbnz(lhs, true_target); + break; + case lt: + // Test the sign bit and branch accordingly. + __ Tbnz(lhs, (lhs.IsX() ? 
kXRegSize : kWRegSize) - 1, true_target); + break; + case ge: + // Test the sign bit and branch accordingly. + __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, true_target); + break; + default: + // Without the `static_cast` the compiler throws an error for + // `-Werror=sign-promo`. + LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond); + } + } else { + __ Cmp(lhs, rhs); + __ B(arm64_cond, true_target); + } } } if (false_target != nullptr) { @@ -2434,6 +2497,10 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) { } } +static MemOperand GetExceptionTlsAddress() { + return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value()); +} + void LocationsBuilderARM64::VisitLoadException(HLoadException* load) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); @@ -2441,9 +2508,15 @@ void LocationsBuilderARM64::VisitLoadException(HLoadException* load) { } void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) { - MemOperand exception = MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value()); - __ Ldr(OutputRegister(instruction), exception); - __ Str(wzr, exception); + __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress()); +} + +void LocationsBuilderARM64::VisitClearException(HClearException* clear) { + new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); +} + +void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { + __ Str(wzr, GetExceptionTlsAddress()); } void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) { @@ -3038,6 +3111,18 @@ void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) { LOG(FATAL) << "Unreachable"; } +void LocationsBuilderARM64::VisitFakeString(HFakeString* instruction) { + DCHECK(codegen_->IsBaseline()); + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant())); +} + +void InstructionCodeGeneratorARM64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) { + DCHECK(codegen_->IsBaseline()); + // Will be generated at use site. 
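// For reference, the compare-with-zero specializations at the top of this
// hunk replace a Cmp-plus-branch with a single instruction, assuming a
// signed condition on a W register (operands illustrative):
//   cbz  w0, true_target        // x == 0
//   cbnz w0, true_target        // x != 0
//   tbnz w0, #31, true_target   // x <  0: sign bit set
//   tbz  w0, #31, true_target   // x >= 0: sign bit clear
// kWRegSize - 1 (or kXRegSize - 1 for X registers) is that sign-bit index.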
+} + #undef __ #undef QUICK_ENTRY_POINT diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 2c610380ed..ac7ee10a5b 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -191,7 +191,7 @@ class InstructionCodeGeneratorARM64 : public HGraphVisitor { class LocationsBuilderARM64 : public HGraphVisitor { public: - explicit LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen) + LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen) : HGraphVisitor(graph), codegen_(codegen) {} #define DECLARE_VISIT_INSTRUCTION(name, super) \ diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index aa4fd26590..167e025383 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -138,6 +138,8 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>(); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; } private: @@ -162,6 +164,8 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; } private: @@ -278,6 +282,8 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { CheckEntrypointTypes<kQuickThrowNullPointer, void, void>(); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; } private: @@ -288,8 +294,7 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { public: - explicit SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, - HBasicBlock* successor) + SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor) : instruction_(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { @@ -971,15 +976,11 @@ void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { + ValidateInvokeRuntime(instruction, slow_path); // TODO: anything related to T9/GP/GOT/PIC/.so's? 
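// ValidateInvokeRuntime(instruction, slow_path) above centralizes the check
// that each backend used to repeat inline after the call (removed below):
//   DCHECK(instruction->IsSuspendCheck() || instruction->IsBoundsCheck() ||
//          instruction->IsNullCheck() || instruction->IsDivZeroCheck() ||
//          !IsLeafMethod());
// Its body is not part of this diff; only its call sites are.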
__ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset); __ Jalr(T9); RecordPcInfo(instruction, dex_pc, slow_path); - DCHECK(instruction->IsSuspendCheck() - || instruction->IsBoundsCheck() - || instruction->IsNullCheck() - || instruction->IsDivZeroCheck() - || !IsLeafMethod()); } void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, @@ -1894,8 +1895,9 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio Primitive::Type type = instruction->GetType(); - if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) { + if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) { LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck."; + return; } if (value.IsConstant()) { @@ -2544,6 +2546,10 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) { } } +static int32_t GetExceptionTlsOffset() { + return Thread::ExceptionOffset<kMips64WordSize>().Int32Value(); +} + void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); @@ -2552,8 +2558,15 @@ void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) { void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) { GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>(); - __ LoadFromOffset(kLoadUnsignedWord, out, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value()); - __ StoreToOffset(kStoreWord, ZERO, TR, Thread::ExceptionOffset<kMips64WordSize>().Int32Value()); + __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset()); +} + +void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) { + new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); +} + +void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { + __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset()); } void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) { @@ -3292,5 +3305,17 @@ void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual VisitCondition(comp); } +void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) { + DCHECK(codegen_->IsBaseline()); + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant())); +} + +void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) { + DCHECK(codegen_->IsBaseline()); + // Will be generated at use site. 
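// On the exception handling in this file (and in the ARM/ARM64 backends
// above): HLoadException used to both read and zero the thread-local
// exception slot; it is now read-only, with the store moved into the new
// HClearException. In rough C terms:
//   Object* e = self->exception_;   // HLoadException
//   self->exception_ = nullptr;     // HClearException
// (field name abbreviated; the real offset comes from Thread::ExceptionOffset),
// presumably so the builder can place the load and the clear independently.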
+} + } // namespace mips64 } // namespace art diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 262b234d2d..285cbb4ab6 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -45,17 +45,23 @@ static constexpr int kC2ConditionMask = 0x400; static constexpr int kFakeReturnRegister = Register(8); #define __ down_cast<X86Assembler*>(codegen->GetAssembler())-> +#define QUICK_ENTRY_POINT(x) Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, x)) class NullCheckSlowPathX86 : public SlowPathCodeX86 { public: explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer))); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer), + instruction_, + instruction_->GetDexPc(), + this); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; } private: @@ -68,11 +74,16 @@ class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 { explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero))); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero), + instruction_, + instruction_->GetDexPc(), + this); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; } private: @@ -82,7 +93,7 @@ class DivZeroCheckSlowPathX86 : public SlowPathCodeX86 { class DivRemMinusOneSlowPathX86 : public SlowPathCodeX86 { public: - explicit DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {} + DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { __ Bind(GetEntryLabel()); @@ -124,10 +135,14 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 { length_location_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds))); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds), + instruction_, + instruction_->GetDexPc(), + this); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; } private: @@ -147,8 +162,10 @@ class SuspendCheckSlowPathX86 : public SlowPathCodeX86 { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, instruction_->GetLocations()); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pTestSuspend))); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend), + instruction_, + instruction_->GetDexPc(), + this); 
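// The QUICK_ENTRY_POINT macro introduced at the top of this file expands,
// for example,
//   QUICK_ENTRY_POINT(pThrowNullPointer)
// to
//   Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer))
// i.e. the fs:-relative address of that entrypoint's slot in the
// thread-local entrypoint table, which is why each converted call site now
// names only the entrypoint.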
RestoreLiveRegisters(codegen, instruction_->GetLocations()); if (successor_ == nullptr) { __ jmp(GetReturnLabel()); @@ -190,8 +207,10 @@ class LoadStringSlowPathX86 : public SlowPathCodeX86 { InvokeRuntimeCallingConvention calling_convention; __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex())); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString))); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString), + instruction_, + instruction_->GetDexPc(), + this); x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); RestoreLiveRegisters(codegen, locations); @@ -224,10 +243,9 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 { InvokeRuntimeCallingConvention calling_convention; __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex())); - __ fs()->call(Address::Absolute(do_clinit_ - ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage) - : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType))); - RecordPcInfo(codegen, at_, dex_pc_); + x86_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage) + : QUICK_ENTRY_POINT(pInitializeType), + at_, dex_pc_, this); // Move the class to the desired location. Location out = locations->Out(); @@ -263,12 +281,10 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 { public: TypeCheckSlowPathX86(HInstruction* instruction, Location class_to_check, - Location object_class, - uint32_t dex_pc) + Location object_class) : instruction_(instruction), class_to_check_(class_to_check), - object_class_(object_class), - dex_pc_(dex_pc) {} + object_class_(object_class) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -291,14 +307,18 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 { Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, - pInstanceofNonTrivial))); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), + instruction_, + instruction_->GetDexPc(), + this); } else { DCHECK(instruction_->IsCheckCast()); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast))); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), + instruction_, + instruction_->GetDexPc(), + this); } - RecordPcInfo(codegen, instruction_, dex_pc_); if (instruction_->IsInstanceOf()) { x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX)); } @@ -313,7 +333,6 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 { HInstruction* const instruction_; const Location class_to_check_; const Location object_class_; - const uint32_t dex_pc_; DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86); }; @@ -324,14 +343,14 @@ class DeoptimizationSlowPathX86 : public SlowPathCodeX86 { : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + DCHECK(instruction_->IsDeoptimize()); + CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, instruction_->GetLocations()); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeoptimize))); - // No need to restore live registers. 
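// All of these slow-path conversions funnel into the helper added later in
// this file, which folds the old call + RecordPcInfo pair into one audited
// step:
//   void CodeGeneratorX86::InvokeRuntime(Address entry_point, HInstruction* instruction,
//                                        uint32_t dex_pc, SlowPathCode* slow_path) {
//     ValidateInvokeRuntime(instruction, slow_path);
//     __ fs()->call(entry_point);
//     RecordPcInfo(instruction, dex_pc, slow_path);
//   }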
- DCHECK(instruction_->IsDeoptimize()); - HDeoptimize* deoptimize = instruction_->AsDeoptimize(); - uint32_t dex_pc = deoptimize->GetDexPc(); - codegen->RecordPcInfo(instruction_, dex_pc, this); + x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), + instruction_, + instruction_->GetDexPc(), + this); } const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; } @@ -344,7 +363,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCodeX86 { #undef __ #define __ down_cast<X86Assembler*>(GetAssembler())-> -inline Condition X86Condition(IfCondition cond) { +inline Condition X86SignedCondition(IfCondition cond) { switch (cond) { case kCondEQ: return kEqual; case kCondNE: return kNotEqual; @@ -352,10 +371,22 @@ inline Condition X86Condition(IfCondition cond) { case kCondLE: return kLessEqual; case kCondGT: return kGreater; case kCondGE: return kGreaterEqual; - default: - LOG(FATAL) << "Unknown if condition"; } - return kEqual; + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +inline Condition X86UnsignedOrFPCondition(IfCondition cond) { + switch (cond) { + case kCondEQ: return kEqual; + case kCondNE: return kNotEqual; + case kCondLT: return kBelow; + case kCondLE: return kBelowEqual; + case kCondGT: return kAbove; + case kCondGE: return kAboveEqual; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); } void CodeGeneratorX86::DumpCoreRegister(std::ostream& stream, int reg) const { @@ -386,6 +417,15 @@ size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32 return GetFloatingPointSpillSlotSize(); } +void CodeGeneratorX86::InvokeRuntime(Address entry_point, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path) { + ValidateInvokeRuntime(instruction, slow_path); + __ fs()->call(entry_point); + RecordPcInfo(instruction, dex_pc, slow_path); +} + CodeGeneratorX86::CodeGeneratorX86(HGraph* graph, const X86InstructionSetFeatures& isa_features, const CompilerOptions& compiler_options) @@ -889,6 +929,138 @@ void InstructionCodeGeneratorX86::VisitExit(HExit* exit) { UNUSED(exit); } +void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond, + Label* true_label, + Label* false_label) { + if (cond->IsFPConditionTrueIfNaN()) { + __ j(kUnordered, true_label); + } else if (cond->IsFPConditionFalseIfNaN()) { + __ j(kUnordered, false_label); + } + __ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label); +} + +void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond, + Label* true_label, + Label* false_label) { + LocationSummary* locations = cond->GetLocations(); + Location left = locations->InAt(0); + Location right = locations->InAt(1); + IfCondition if_cond = cond->GetCondition(); + + Register left_high = left.AsRegisterPairHigh<Register>(); + Register left_low = left.AsRegisterPairLow<Register>(); + IfCondition true_high_cond = if_cond; + IfCondition false_high_cond = cond->GetOppositeCondition(); + Condition final_condition = X86UnsignedOrFPCondition(if_cond); + + // Set the conditions for the test, remembering that == needs to be + // decided using the low words. + switch (if_cond) { + case kCondEQ: + case kCondNE: + // Nothing to do. 
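// Worked example of what this function emits for a long `<` on x86-32
// (register pair names illustrative): the high words decide with signed
// branches, and equal high words fall through to an unsigned compare of the
// low words:
//   cmpl left_hi, right_hi
//   jl   true_label          # signed high-word decision
//   jg   false_label
//   cmpl left_lo, right_lo
//   jb   true_label          # low words compare unsigned
//   jmp  false_label
// Likewise in GenerateFPJumps above: after ucomiss/ucomisd the flags look
// like an unsigned compare, and a NaN operand makes every ordered predicate
// false, so the unordered outcome has to be routed by hand, e.g.
//   assert(!(nan < 1.0) && !(nan > 1.0) && !(nan == 1.0));  // all false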
+ break; + case kCondLT: + false_high_cond = kCondGT; + break; + case kCondLE: + true_high_cond = kCondLT; + break; + case kCondGT: + false_high_cond = kCondLT; + break; + case kCondGE: + true_high_cond = kCondGT; + break; + } + + if (right.IsConstant()) { + int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); + int32_t val_high = High32Bits(value); + int32_t val_low = Low32Bits(value); + + if (val_high == 0) { + __ testl(left_high, left_high); + } else { + __ cmpl(left_high, Immediate(val_high)); + } + if (if_cond == kCondNE) { + __ j(X86SignedCondition(true_high_cond), true_label); + } else if (if_cond == kCondEQ) { + __ j(X86SignedCondition(false_high_cond), false_label); + } else { + __ j(X86SignedCondition(true_high_cond), true_label); + __ j(X86SignedCondition(false_high_cond), false_label); + } + // Must be equal high, so compare the lows. + if (val_low == 0) { + __ testl(left_low, left_low); + } else { + __ cmpl(left_low, Immediate(val_low)); + } + } else { + Register right_high = right.AsRegisterPairHigh<Register>(); + Register right_low = right.AsRegisterPairLow<Register>(); + + __ cmpl(left_high, right_high); + if (if_cond == kCondNE) { + __ j(X86SignedCondition(true_high_cond), true_label); + } else if (if_cond == kCondEQ) { + __ j(X86SignedCondition(false_high_cond), false_label); + } else { + __ j(X86SignedCondition(true_high_cond), true_label); + __ j(X86SignedCondition(false_high_cond), false_label); + } + // Must be equal high, so compare the lows. + __ cmpl(left_low, right_low); + } + // The last comparison might be unsigned. + __ j(final_condition, true_label); +} + +void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HIf* if_instr, + HCondition* condition, + Label* true_target, + Label* false_target, + Label* always_true_target) { + LocationSummary* locations = condition->GetLocations(); + Location left = locations->InAt(0); + Location right = locations->InAt(1); + + // We don't want true_target as a nullptr. + if (true_target == nullptr) { + true_target = always_true_target; + } + bool falls_through = (false_target == nullptr); + + // FP compares don't like null false_targets. + if (false_target == nullptr) { + false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor()); + } + + Primitive::Type type = condition->InputAt(0)->GetType(); + switch (type) { + case Primitive::kPrimLong: + GenerateLongComparesAndJumps(condition, true_target, false_target); + break; + case Primitive::kPrimFloat: + __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); + GenerateFPJumps(condition, true_target, false_target); + break; + case Primitive::kPrimDouble: + __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); + GenerateFPJumps(condition, true_target, false_target); + break; + default: + LOG(FATAL) << "Unexpected compare type " << type; + } + + if (!falls_through) { + __ jmp(false_target); + } +} + void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instruction, Label* true_target, Label* false_target, @@ -906,14 +1078,17 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio DCHECK_EQ(cond_value, 0); } } else { - bool materialized = + bool is_materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization(); // Moves do not affect the eflags register, so if the condition is // evaluated just before the if, we don't need to evaluate it - // again. + // again. 
We can't use the eflags on long/FP conditions if they are + // materialized due to the complex branching. + Primitive::Type type = cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt; bool eflags_set = cond->IsCondition() - && cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction); - if (materialized) { + && cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction) + && (type != Primitive::kPrimLong && !Primitive::IsFloatingPointType(type)); + if (is_materialized) { if (!eflags_set) { // Materialized condition, compare against 0. Location lhs = instruction->GetLocations()->InAt(0); @@ -924,9 +1099,23 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio } __ j(kNotEqual, true_target); } else { - __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target); + __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target); } } else { + // Condition has not been materialized, use its inputs as the + // comparison and its condition as the branch condition. + + // Is this a long or FP comparison that has been folded into the HCondition? + if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) { + // Generate the comparison directly. + GenerateCompareTestAndBranch(instruction->AsIf(), + cond->AsCondition(), + true_target, + false_target, + always_true_target); + return; + } + Location lhs = cond->GetLocations()->InAt(0); Location rhs = cond->GetLocations()->InAt(1); // LHS is guaranteed to be in a register (see @@ -943,7 +1132,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio } else { __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex())); } - __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target); + __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target); } } if (false_target != nullptr) { @@ -1041,36 +1230,94 @@ void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) { void LocationsBuilderX86::VisitCondition(HCondition* cond) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::Any()); - if (cond->NeedsMaterialization()) { - // We need a byte register. - locations->SetOut(Location::RegisterLocation(ECX)); + // Handle the long/FP comparisons made in instruction simplification. + switch (cond->InputAt(0)->GetType()) { + case Primitive::kPrimLong: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1))); + if (cond->NeedsMaterialization()) { + locations->SetOut(Location::RequiresRegister()); + } + break; + } + case Primitive::kPrimFloat: + case Primitive::kPrimDouble: { + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::RequiresFpuRegister()); + if (cond->NeedsMaterialization()) { + locations->SetOut(Location::RequiresRegister()); + } + break; + } + default: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + if (cond->NeedsMaterialization()) { + // We need a byte register. 
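// On x86-32, setcc can only write a byte-addressable register (the low/high
// bytes of EAX, ECX, EDX, EBX; ESI/EDI/EBP/ESP have no 8-bit form), hence
// the fixed ECX. The output is widened by clearing it before the compare,
// since xorl itself clobbers the flags:
//   xorl %ecx, %ecx     # clear first: setl writes only CL
//   cmpl %edx, %eax
//   setl %cl            # ECX = (EAX < EDX) ? 1 : 0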
+ locations->SetOut(Location::RegisterLocation(ECX)); + } + break; } } void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) { - if (cond->NeedsMaterialization()) { - LocationSummary* locations = cond->GetLocations(); - Register reg = locations->Out().AsRegister<Register>(); - // Clear register: setcc only sets the low byte. - __ xorl(reg, reg); - Location lhs = locations->InAt(0); - Location rhs = locations->InAt(1); - if (rhs.IsRegister()) { - __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>()); - } else if (rhs.IsConstant()) { - int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); - if (constant == 0) { - __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>()); + if (!cond->NeedsMaterialization()) { + return; + } + + LocationSummary* locations = cond->GetLocations(); + Location lhs = locations->InAt(0); + Location rhs = locations->InAt(1); + Register reg = locations->Out().AsRegister<Register>(); + Label true_label, false_label; + + switch (cond->InputAt(0)->GetType()) { + default: { + // Integer case. + + // Clear output register: setcc only sets the low byte. + __ xorl(reg, reg); + + if (rhs.IsRegister()) { + __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>()); + } else if (rhs.IsConstant()) { + int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); + if (constant == 0) { + __ testl(lhs.AsRegister<Register>(), lhs.AsRegister<Register>()); + } else { + __ cmpl(lhs.AsRegister<Register>(), Immediate(constant)); + } } else { - __ cmpl(lhs.AsRegister<Register>(), Immediate(constant)); + __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex())); } - } else { - __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex())); + __ setb(X86SignedCondition(cond->GetCondition()), reg); + return; } - __ setb(X86Condition(cond->GetCondition()), reg); + case Primitive::kPrimLong: + GenerateLongComparesAndJumps(cond, &true_label, &false_label); + break; + case Primitive::kPrimFloat: + __ ucomiss(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>()); + GenerateFPJumps(cond, &true_label, &false_label); + break; + case Primitive::kPrimDouble: + __ ucomisd(lhs.AsFpuRegister<XmmRegister>(), rhs.AsFpuRegister<XmmRegister>()); + GenerateFPJumps(cond, &true_label, &false_label); + break; } + + // Convert the jumps into the result. + Label done_label; + + // False case: result = 0. + __ Bind(&false_label); + __ xorl(reg, reg); + __ jmp(&done_label); + + // True case: result = 1. + __ Bind(&true_label); + __ movl(reg, Immediate(1)); + __ Bind(&done_label); } void LocationsBuilderX86::VisitEqual(HEqual* comp) { @@ -1796,14 +2043,18 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio case Primitive::kPrimFloat: // Processing a Dex `float-to-long' instruction. - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pF2l))); - codegen_->RecordPcInfo(conversion, conversion->GetDexPc()); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pF2l), + conversion, + conversion->GetDexPc(), + nullptr); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-long' instruction. 
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pD2l))); - codegen_->RecordPcInfo(conversion, conversion->GetDexPc()); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pD2l), + conversion, + conversion->GetDexPc(), + nullptr); break; default: @@ -2129,7 +2380,12 @@ void LocationsBuilderX86::VisitMul(HMul* mul) { case Primitive::kPrimInt: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); - locations->SetOut(Location::SameAsFirstInput()); + if (mul->InputAt(1)->IsIntConstant()) { + // Can use 3 operand multiply. + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } else { + locations->SetOut(Location::SameAsFirstInput()); + } break; case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); @@ -2157,21 +2413,24 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) { LocationSummary* locations = mul->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); - DCHECK(first.Equals(locations->Out())); + Location out = locations->Out(); switch (mul->GetResultType()) { - case Primitive::kPrimInt: { - if (second.IsRegister()) { + case Primitive::kPrimInt: + // The constant may have ended up in a register, so test explicitly to avoid + // problems where the output may not be the same as the first operand. + if (mul->InputAt(1)->IsIntConstant()) { + Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); + __ imull(out.AsRegister<Register>(), first.AsRegister<Register>(), imm); + } else if (second.IsRegister()) { + DCHECK(first.Equals(out)); __ imull(first.AsRegister<Register>(), second.AsRegister<Register>()); - } else if (second.IsConstant()) { - Immediate imm(second.GetConstant()->AsIntConstant()->GetValue()); - __ imull(first.AsRegister<Register>(), imm); } else { DCHECK(second.IsStackSlot()); + DCHECK(first.Equals(out)); __ imull(first.AsRegister<Register>(), Address(ESP, second.GetStackIndex())); } break; - } case Primitive::kPrimLong: { Register in1_hi = first.AsRegisterPairHigh<Register>(); @@ -2552,15 +2811,16 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>()); if (is_div) { - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv))); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), + instruction, + instruction->GetDexPc(), + nullptr); } else { - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLmod))); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod), + instruction, + instruction->GetDexPc(), + nullptr); } - uint32_t dex_pc = is_div - ? 
instruction->AsDiv()->GetDexPc() - : instruction->AsRem()->GetDexPc(); - codegen_->RecordPcInfo(instruction, dex_pc); - break; } @@ -2710,6 +2970,9 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); switch (instruction->GetType()) { + case Primitive::kPrimByte: + case Primitive::kPrimChar: + case Primitive::kPrimShort: case Primitive::kPrimInt: { locations->SetInAt(0, Location::Any()); break; @@ -2737,6 +3000,9 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) Location value = locations->InAt(0); switch (instruction->GetType()) { + case Primitive::kPrimByte: + case Primitive::kPrimChar: + case Primitive::kPrimShort: case Primitive::kPrimInt: { if (value.IsRegister()) { __ testl(value.AsRegister<Register>(), value.AsRegister<Register>()); @@ -3006,9 +3272,11 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) { __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex())); // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. - __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint()))); - - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime( + Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())), + instruction, + instruction->GetDexPc(), + nullptr); DCHECK(!codegen_->IsLeafMethod()); } @@ -3028,9 +3296,11 @@ void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) { // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. - __ fs()->call(Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint()))); - - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime( + Address::Absolute(GetThreadOffset<kX86WordSize>(instruction->GetEntrypoint())), + instruction, + instruction->GetDexPc(), + nullptr); DCHECK(!codegen_->IsLeafMethod()); } @@ -3933,8 +4203,10 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { DCHECK(!codegen_->IsLeafMethod()); // Note: if heap poisoning is enabled, pAputObject takes cares // of poisoning the reference. - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAputObject))); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), + instruction, + instruction->GetDexPc(), + nullptr); } break; } @@ -4316,7 +4588,11 @@ void ParallelMoveResolverX86::EmitSwap(size_t index) { Location destination = move->GetDestination(); if (source.IsRegister() && destination.IsRegister()) { - __ xchgl(destination.AsRegister<Register>(), source.AsRegister<Register>()); + // Use XOR swap algorithm to avoid serializing XCHG instruction or using a temporary. 
+ DCHECK_NE(destination.AsRegister<Register>(), source.AsRegister<Register>()); + __ xorl(destination.AsRegister<Register>(), source.AsRegister<Register>()); + __ xorl(source.AsRegister<Register>(), destination.AsRegister<Register>()); + __ xorl(destination.AsRegister<Register>(), source.AsRegister<Register>()); } else if (source.IsRegister() && destination.IsStackSlot()) { Exchange(source.AsRegister<Register>(), destination.GetStackIndex()); } else if (source.IsStackSlot() && destination.IsRegister()) { @@ -4462,6 +4738,10 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) { __ Bind(slow_path->GetExitLabel()); } +static Address GetExceptionTlsAddress() { + return Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value()); +} + void LocationsBuilderX86::VisitLoadException(HLoadException* load) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); @@ -4469,9 +4749,15 @@ void LocationsBuilderX86::VisitLoadException(HLoadException* load) { } void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) { - Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value()); - __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), address); - __ fs()->movl(address, Immediate(0)); + __ fs()->movl(load->GetLocations()->Out().AsRegister<Register>(), GetExceptionTlsAddress()); +} + +void LocationsBuilderX86::VisitClearException(HClearException* clear) { + new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); +} + +void InstructionCodeGeneratorX86::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { + __ fs()->movl(GetExceptionTlsAddress(), Immediate(0)); } void LocationsBuilderX86::VisitThrow(HThrow* instruction) { @@ -4482,8 +4768,10 @@ void LocationsBuilderX86::VisitThrow(HThrow* instruction) { } void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException))); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException), + instruction, + instruction->GetDexPc(), + nullptr); } void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { @@ -4530,7 +4818,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { // If the classes are not equal, we go into a slow path. DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( - instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc()); + instruction, locations->InAt(1), locations->Out()); codegen_->AddSlowPath(slow_path); __ j(kNotEqual, slow_path->GetEntryLabel()); __ movl(out, Immediate(1)); @@ -4563,7 +4851,7 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { Register temp = locations->GetTemp(0).AsRegister<Register>(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86( - instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc()); + instruction, locations->InAt(1), locations->GetTemp(0)); codegen_->AddSlowPath(slow_path); // Avoid null check if we know obj is not null. 
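// Standalone equivalent of the register swap above; the three XORs need no
// scratch register, and (matching the DCHECK_NE) the identity only holds for
// two distinct registers, since x ^= x would zero both:
static void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;  // a = a0 ^ b0
  b ^= a;  // b = b0 ^ (a0 ^ b0) = a0
  a ^= b;  // a = (a0 ^ b0) ^ a0 = b0
}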
@@ -4594,10 +4882,11 @@ void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) } void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) { - __ fs()->call(Address::Absolute(instruction->IsEnter() - ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject) - : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject))); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject) + : QUICK_ENTRY_POINT(pUnlockObject), + instruction, + instruction->GetDexPc(), + nullptr); } void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); } @@ -4745,6 +5034,18 @@ void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) { LOG(FATAL) << "Unreachable"; } +void LocationsBuilderX86::VisitFakeString(HFakeString* instruction) { + DCHECK(codegen_->IsBaseline()); + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant())); +} + +void InstructionCodeGeneratorX86::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) { + DCHECK(codegen_->IsBaseline()); + // Will be generated at use site. +} + #undef __ } // namespace x86 diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index 623e83222d..2e3d4d4bf7 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -177,7 +177,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor { void DivRemOneOrMinusOne(HBinaryOperation* instruction); void DivByPowerOfTwo(HDiv* instruction); void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction); - void GenerateRemFP(HRem *rem); + void GenerateRemFP(HRem* rem); void HandleShift(HBinaryOperation* instruction); void GenerateShlLong(const Location& loc, Register shifter); void GenerateShrLong(const Location& loc, Register shifter); @@ -201,6 +201,13 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor { Label* true_target, Label* false_target, Label* always_true_target); + void GenerateCompareTestAndBranch(HIf* if_inst, + HCondition* condition, + Label* true_target, + Label* false_target, + Label* always_true_target); + void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label); + void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label); void HandleGoto(HInstruction* got, HBasicBlock* successor); X86Assembler* const assembler_; @@ -225,6 +232,12 @@ class CodeGeneratorX86 : public CodeGenerator { size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + // Generate code to invoke a runtime entry point. 
+ void InvokeRuntime(Address entry_point, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path); + size_t GetWordSize() const OVERRIDE { return kX86WordSize; } diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index c9d19c8f66..2c5cef3822 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -47,18 +47,23 @@ static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 static constexpr int kC2ConditionMask = 0x400; #define __ down_cast<X86_64Assembler*>(codegen->GetAssembler())-> +#define QUICK_ENTRY_POINT(x) Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, x), true) class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 { public: explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); - __ gs()->call( - Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true)); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer), + instruction_, + instruction_->GetDexPc(), + this); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; } private: @@ -71,12 +76,16 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 { explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); - __ gs()->call( - Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true)); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero), + instruction_, + instruction_->GetDexPc(), + this); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; } private: @@ -86,7 +95,7 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 { class DivRemMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 { public: - explicit DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div) + DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div) : cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { @@ -95,7 +104,7 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 { if (is_div_) { __ negl(cpu_reg_); } else { - __ movl(cpu_reg_, Immediate(0)); + __ xorl(cpu_reg_, cpu_reg_); } } else { @@ -120,15 +129,17 @@ class DivRemMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 { class SuspendCheckSlowPathX86_64 : public SlowPathCodeX86_64 { public: - explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor) + SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor) : instruction_(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, instruction_->GetLocations()); - __ 
gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pTestSuspend), true)); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend), + instruction_, + instruction_->GetDexPc(), + this); RestoreLiveRegisters(codegen, instruction_->GetLocations()); if (successor_ == nullptr) { __ jmp(GetReturnLabel()); @@ -166,6 +177,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 { length_location_(length_location) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. @@ -177,11 +189,12 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 { length_location_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt); - __ gs()->call(Address::Absolute( - QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true)); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds), + instruction_, instruction_->GetDexPc(), this); } + bool IsFatal() const OVERRIDE { return true; } + const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; } private: @@ -211,10 +224,9 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 { InvokeRuntimeCallingConvention calling_convention; __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex())); - __ gs()->call(Address::Absolute((do_clinit_ - ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage) - : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true)); - RecordPcInfo(codegen, at_, dex_pc_); + x64_codegen->InvokeRuntime(do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage) + : QUICK_ENTRY_POINT(pInitializeType), + at_, dex_pc_, this); Location out = locations->Out(); // Move the class to the desired location. 
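// Shape shared by the non-fatal slow paths converted in this file (suspend
// check, load class, load string): unlike the throwing paths, they return to
// compiled code, so live registers are saved and restored around the call:
//   slow_path_entry:
//     SaveLiveRegisters(...)
//     InvokeRuntime(<entry point>, instruction, dex_pc, this)
//     RestoreLiveRegisters(...)
//     jmp <return label or successor>
// which is consistent with only the throwing paths gaining IsFatal().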
@@ -261,9 +273,10 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 { InvokeRuntimeCallingConvention calling_convention; __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(instruction_->GetStringIndex())); - __ gs()->call(Address::Absolute( - QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true)); - RecordPcInfo(codegen, instruction_, instruction_->GetDexPc()); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString), + instruction_, + instruction_->GetDexPc(), + this); x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); RestoreLiveRegisters(codegen, locations); __ jmp(GetExitLabel()); @@ -309,14 +322,17 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 { Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { - __ gs()->call( - Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true)); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), + instruction_, + dex_pc_, + this); } else { DCHECK(instruction_->IsCheckCast()); - __ gs()->call( - Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true)); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), + instruction_, + dex_pc_, + this); } - RecordPcInfo(codegen, instruction_, dex_pc_); if (instruction_->IsInstanceOf()) { x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX)); @@ -343,14 +359,15 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCodeX86_64 { : instruction_(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, instruction_->GetLocations()); - __ gs()->call( - Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeoptimize), true)); DCHECK(instruction_->IsDeoptimize()); HDeoptimize* deoptimize = instruction_->AsDeoptimize(); - uint32_t dex_pc = deoptimize->GetDexPc(); - codegen->RecordPcInfo(instruction_, dex_pc, this); + x64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), + deoptimize, + deoptimize->GetDexPc(), + this); } const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; } @@ -363,7 +380,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCodeX86_64 { #undef __ #define __ down_cast<X86_64Assembler*>(GetAssembler())-> -inline Condition X86_64Condition(IfCondition cond) { +inline Condition X86_64IntegerCondition(IfCondition cond) { switch (cond) { case kCondEQ: return kEqual; case kCondNE: return kNotEqual; @@ -371,10 +388,22 @@ inline Condition X86_64Condition(IfCondition cond) { case kCondLE: return kLessEqual; case kCondGT: return kGreater; case kCondGE: return kGreaterEqual; - default: - LOG(FATAL) << "Unknown if condition"; } - return kEqual; + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); +} + +inline Condition X86_64FPCondition(IfCondition cond) { + switch (cond) { + case kCondEQ: return kEqual; + case kCondNE: return kNotEqual; + case kCondLT: return kBelow; + case kCondLE: return kBelowEqual; + case kCondGT: return kAbove; + case kCondGE: return kAboveEqual; + }; + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); } void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, @@ -451,6 +480,15 @@ size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uin return kX86_64WordSize; } +void CodeGeneratorX86_64::InvokeRuntime(Address entry_point, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path) 
{ + ValidateInvokeRuntime(instruction, slow_path); + __ gs()->call(entry_point); + RecordPcInfo(instruction, dex_pc, slow_path); +} + static constexpr int kNumberOfCpuRegisterPairs = 0; // Use a fake return address register to mimic Quick. static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1); @@ -699,8 +737,7 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) { DCHECK(constant->IsLongConstant()); value = constant->AsLongConstant()->GetValue(); } - Load64BitValue(CpuRegister(TMP), value); - __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP)); + Store64BitValueToStack(destination, value); } else { DCHECK(source.IsDoubleStackSlot()); __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex())); @@ -734,8 +771,7 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction, if (location.IsRegister()) { Load64BitValue(location.AsRegister<CpuRegister>(), value); } else if (location.IsDoubleStackSlot()) { - Load64BitValue(CpuRegister(TMP), value); - __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP)); + Store64BitValueToStack(location, value); } else { DCHECK(location.IsConstant()); DCHECK_EQ(location.GetConstant(), const_to_move); @@ -833,6 +869,100 @@ void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) { UNUSED(exit); } +void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond, + Label* true_label, + Label* false_label) { + if (cond->IsFPConditionTrueIfNaN()) { + __ j(kUnordered, true_label); + } else if (cond->IsFPConditionFalseIfNaN()) { + __ j(kUnordered, false_label); + } + __ j(X86_64FPCondition(cond->GetCondition()), true_label); +} + +void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HIf* if_instr, + HCondition* condition, + Label* true_target, + Label* false_target, + Label* always_true_target) { + LocationSummary* locations = condition->GetLocations(); + Location left = locations->InAt(0); + Location right = locations->InAt(1); + + // We don't want true_target as a nullptr. + if (true_target == nullptr) { + true_target = always_true_target; + } + bool falls_through = (false_target == nullptr); + + // FP compares don't like null false_targets. + if (false_target == nullptr) { + false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor()); + } + + Primitive::Type type = condition->InputAt(0)->GetType(); + switch (type) { + case Primitive::kPrimLong: { + CpuRegister left_reg = left.AsRegister<CpuRegister>(); + if (right.IsConstant()) { + int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); + if (IsInt<32>(value)) { + if (value == 0) { + __ testq(left_reg, left_reg); + } else { + __ cmpq(left_reg, Immediate(static_cast<int32_t>(value))); + } + } else { + // Value won't fit in a 32-bit integer. 
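[Annotation] x86-64 `cmp` has no 64-bit immediate form (immediates are sign-extended 32-bit values), so the code checks `IsInt<32>(value)` first; the fallback on the next line reads the constant from the method's constant area instead. The same dispatch recurs in `VisitCondition` and `VisitMul` below. As a hypothetical stand-alone helper:

void CompareLongWithConstant(X86_64Assembler* assembler,
                             CodeGeneratorX86_64* codegen,
                             CpuRegister reg, int64_t value) {
  if (IsInt<32>(value)) {
    if (value == 0) {
      assembler->testq(reg, reg);  // Shortest encoding; sets the same flags as cmp with 0.
    } else {
      assembler->cmpq(reg, Immediate(static_cast<int32_t>(value)));
    }
  } else {
    // No imm64 encoding exists: use a RIP-relative load from the constant area.
    assembler->cmpq(reg, codegen->LiteralInt64Address(value));
  }
}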
+ __ cmpq(left_reg, codegen_->LiteralInt64Address(value)); + } + } else if (right.IsDoubleStackSlot()) { + __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); + } else { + __ cmpq(left_reg, right.AsRegister<CpuRegister>()); + } + __ j(X86_64IntegerCondition(condition->GetCondition()), true_target); + break; + } + case Primitive::kPrimFloat: { + if (right.IsFpuRegister()) { + __ ucomiss(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); + } else if (right.IsConstant()) { + __ ucomiss(left.AsFpuRegister<XmmRegister>(), + codegen_->LiteralFloatAddress( + right.GetConstant()->AsFloatConstant()->GetValue())); + } else { + DCHECK(right.IsStackSlot()); + __ ucomiss(left.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), right.GetStackIndex())); + } + GenerateFPJumps(condition, true_target, false_target); + break; + } + case Primitive::kPrimDouble: { + if (right.IsFpuRegister()) { + __ ucomisd(left.AsFpuRegister<XmmRegister>(), right.AsFpuRegister<XmmRegister>()); + } else if (right.IsConstant()) { + __ ucomisd(left.AsFpuRegister<XmmRegister>(), + codegen_->LiteralDoubleAddress( + right.GetConstant()->AsDoubleConstant()->GetValue())); + } else { + DCHECK(right.IsDoubleStackSlot()); + __ ucomisd(left.AsFpuRegister<XmmRegister>(), + Address(CpuRegister(RSP), right.GetStackIndex())); + } + GenerateFPJumps(condition, true_target, false_target); + break; + } + default: + LOG(FATAL) << "Unexpected condition type " << type; + } + + if (!falls_through) { + __ jmp(false_target); + } +} + void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruction, Label* true_target, Label* false_target, @@ -850,14 +980,18 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc DCHECK_EQ(cond_value, 0); } } else { - bool materialized = + bool is_materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization(); // Moves do not affect the eflags register, so if the condition is // evaluated just before the if, we don't need to evaluate it - // again. + // again. We can't use the eflags on FP conditions if they are + // materialized due to the complex branching. + Primitive::Type type = cond->IsCondition() ? cond->InputAt(0)->GetType() : Primitive::kPrimInt; bool eflags_set = cond->IsCondition() - && cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction); - if (materialized) { + && cond->AsCondition()->IsBeforeWhenDisregardMoves(instruction) + && !Primitive::IsFloatingPointType(type); + + if (is_materialized) { if (!eflags_set) { // Materialized condition, compare against 0. Location lhs = instruction->GetLocations()->InAt(0); @@ -869,9 +1003,20 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc } __ j(kNotEqual, true_target); } else { - __ j(X86_64Condition(cond->AsCondition()->GetCondition()), true_target); + __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target); } } else { + // Condition has not been materialized, use its inputs as the + // comparison and its condition as the branch condition. + + // Is this a long or FP comparison that has been folded into the HCondition? + if (type == Primitive::kPrimLong || Primitive::IsFloatingPointType(type)) { + // Generate the comparison directly. 
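[Annotation] For long and FP conditions folded into the `HIf`, the comparison is now emitted directly at the branch site (the `GenerateCompareTestAndBranch` call just below). On the FP side, `ucomiss`/`ucomisd` set flags the way an unsigned integer compare would: CF/ZF encode the ordering and PF signals "unordered", i.e. a NaN operand. That is why `X86_64FPCondition` maps `<`, `<=`, `>`, `>=` to `kBelow`, `kBelowEqual`, `kAbove`, `kAboveEqual` rather than the signed codes, and why `GenerateFPJumps` must dispatch the unordered case before the ordered jump. Its emitted control flow, restated with comments:

// After a ucomiss/ucomisd of the two inputs:
if (cond->IsFPConditionTrueIfNaN()) {
  __ j(kUnordered, true_label);    // e.g. `a != b` is true when either input is NaN.
} else if (cond->IsFPConditionFalseIfNaN()) {
  __ j(kUnordered, false_label);   // e.g. `a == b` or `a < b` is false on NaN.
}
__ j(X86_64FPCondition(cond->GetCondition()), true_label);  // Ordered outcome.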
+ GenerateCompareTestAndBranch(instruction->AsIf(), cond->AsCondition(), + true_target, false_target, always_true_target); + return; + } + Location lhs = cond->GetLocations()->InAt(0); Location rhs = cond->GetLocations()->InAt(1); if (rhs.IsRegister()) { @@ -887,7 +1032,7 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex())); } - __ j(X86_64Condition(cond->AsCondition()->GetCondition()), true_target); + __ j(X86_64IntegerCondition(cond->AsCondition()->GetCondition()), true_target); } } if (false_target != nullptr) { @@ -985,35 +1130,122 @@ void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) { void LocationsBuilderX86_64::VisitCondition(HCondition* cond) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::Any()); + // Handle the long/FP comparisons made in instruction simplification. + switch (cond->InputAt(0)->GetType()) { + case Primitive::kPrimLong: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + break; + case Primitive::kPrimFloat: + case Primitive::kPrimDouble: + locations->SetInAt(0, Location::RequiresFpuRegister()); + locations->SetInAt(1, Location::Any()); + break; + default: + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::Any()); + break; + } if (cond->NeedsMaterialization()) { locations->SetOut(Location::RequiresRegister()); } } void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* cond) { - if (cond->NeedsMaterialization()) { - LocationSummary* locations = cond->GetLocations(); - CpuRegister reg = locations->Out().AsRegister<CpuRegister>(); - // Clear register: setcc only sets the low byte. - __ xorl(reg, reg); - Location lhs = locations->InAt(0); - Location rhs = locations->InAt(1); - if (rhs.IsRegister()) { - __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>()); - } else if (rhs.IsConstant()) { - int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); - if (constant == 0) { - __ testl(lhs.AsRegister<CpuRegister>(), lhs.AsRegister<CpuRegister>()); + if (!cond->NeedsMaterialization()) { + return; + } + + LocationSummary* locations = cond->GetLocations(); + Location lhs = locations->InAt(0); + Location rhs = locations->InAt(1); + CpuRegister reg = locations->Out().AsRegister<CpuRegister>(); + Label true_label, false_label; + + switch (cond->InputAt(0)->GetType()) { + default: + // Integer case. + + // Clear output register: setcc only sets the low byte. + __ xorl(reg, reg); + + if (rhs.IsRegister()) { + __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>()); + } else if (rhs.IsConstant()) { + int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); + if (constant == 0) { + __ testl(lhs.AsRegister<CpuRegister>(), lhs.AsRegister<CpuRegister>()); + } else { + __ cmpl(lhs.AsRegister<CpuRegister>(), Immediate(constant)); + } } else { - __ cmpl(lhs.AsRegister<CpuRegister>(), Immediate(constant)); + __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex())); } - } else { - __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex())); + __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); + return; + case Primitive::kPrimLong: + // Clear output register: setcc only sets the low byte. 
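[Annotation] `setcc` writes only the low byte of its destination, so the register must be fully zeroed first, and with `xorl` placed before the compare, since `xor` itself clobbers EFLAGS; that is exactly what the `xorl(reg, reg)` just below does. The minimal materialization pattern:

__ xorl(out, out);          // Clear all 32 bits; must come before the compare.
__ cmpl(lhs, rhs);          // Set EFLAGS from the comparison.
__ setcc(kEqual, out);      // Write 0 or 1 into the low byte only.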
+ __ xorl(reg, reg); + + if (rhs.IsRegister()) { + __ cmpq(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>()); + } else if (rhs.IsConstant()) { + int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue(); + if (IsInt<32>(value)) { + if (value == 0) { + __ testq(lhs.AsRegister<CpuRegister>(), lhs.AsRegister<CpuRegister>()); + } else { + __ cmpq(lhs.AsRegister<CpuRegister>(), Immediate(static_cast<int32_t>(value))); + } + } else { + // Value won't fit in an int. + __ cmpq(lhs.AsRegister<CpuRegister>(), codegen_->LiteralInt64Address(value)); + } + } else { + __ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex())); + } + __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); + return; + case Primitive::kPrimFloat: { + XmmRegister lhs_reg = lhs.AsFpuRegister<XmmRegister>(); + if (rhs.IsConstant()) { + float value = rhs.GetConstant()->AsFloatConstant()->GetValue(); + __ ucomiss(lhs_reg, codegen_->LiteralFloatAddress(value)); + } else if (rhs.IsStackSlot()) { + __ ucomiss(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex())); + } else { + __ ucomiss(lhs_reg, rhs.AsFpuRegister<XmmRegister>()); + } + GenerateFPJumps(cond, &true_label, &false_label); + break; + } + case Primitive::kPrimDouble: { + XmmRegister lhs_reg = lhs.AsFpuRegister<XmmRegister>(); + if (rhs.IsConstant()) { + double value = rhs.GetConstant()->AsDoubleConstant()->GetValue(); + __ ucomisd(lhs_reg, codegen_->LiteralDoubleAddress(value)); + } else if (rhs.IsDoubleStackSlot()) { + __ ucomisd(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex())); + } else { + __ ucomisd(lhs_reg, rhs.AsFpuRegister<XmmRegister>()); + } + GenerateFPJumps(cond, &true_label, &false_label); + break; } - __ setcc(X86_64Condition(cond->GetCondition()), reg); } + + // Convert the jumps into the result. + Label done_label; + + // False case: result = 0. + __ Bind(&false_label); + __ xorl(reg, reg); + __ jmp(&done_label); + + // True case: result = 1. + __ Bind(&true_label); + __ movl(reg, Immediate(1)); + __ Bind(&done_label); } void LocationsBuilderX86_64::VisitEqual(HEqual* comp) { @@ -1603,14 +1835,12 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { // Processing a Dex `float-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); - locations->AddTemp(Location::RequiresFpuRegister()); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-int' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); - locations->AddTemp(Location::RequiresFpuRegister()); break; default: @@ -1638,14 +1868,12 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) { // Processing a Dex `float-to-long' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); - locations->AddTemp(Location::RequiresFpuRegister()); break; case Primitive::kPrimDouble: // Processing a Dex `double-to-long' instruction. locations->SetInAt(0, Location::RequiresFpuRegister()); locations->SetOut(Location::RequiresRegister()); - locations->AddTemp(Location::RequiresFpuRegister()); break; default: @@ -1821,14 +2049,11 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver // Processing a Dex `float-to-int' instruction. 
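[Annotation] The `float-to-int`, `double-to-int` and `-to-long` hunks that follow drop the scratch FP register: rather than materializing the bound at run time with `cvtsi2ss`/`cvtsi2sd`, they compare against a pre-converted `(float)INT_MAX` (etc.) served from the constant area. The semantics being implemented are Java's saturating cast; a sketch in plain C++ (note that the hardware `cvttss2si`, not the C++ cast, is what guarantees the out-of-range behavior):

#include <cmath>
#include <cstdint>
#include <limits>

int32_t FloatToIntJavaSemantics(float input) {
  const int32_t kMax = std::numeric_limits<int32_t>::max();
  if (input >= static_cast<float>(kMax)) return kMax;  // comiss + j(kAboveEqual, &done)
  if (std::isnan(input)) return 0;                     // j(kUnordered, &nan); nan: xorl
  // cvttss2si truncates; a too-negative input yields 0x80000000 == INT32_MIN,
  // which already matches Java's low-side saturation, so only the high side
  // and NaN need explicit handling.
  return static_cast<int32_t>(input);
}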
XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); - XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); Label done, nan; __ movl(output, Immediate(kPrimIntMax)); - // temp = int-to-float(output) - __ cvtsi2ss(temp, output, false); - // if input >= temp goto done - __ comiss(input, temp); + // if input >= (float)INT_MAX goto done + __ comiss(input, codegen_->LiteralFloatAddress(kPrimIntMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); @@ -1846,14 +2071,11 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver // Processing a Dex `double-to-int' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); - XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); Label done, nan; __ movl(output, Immediate(kPrimIntMax)); - // temp = int-to-double(output) - __ cvtsi2sd(temp, output); - // if input >= temp goto done - __ comisd(input, temp); + // if input >= (double)INT_MAX goto done + __ comisd(input, codegen_->LiteralDoubleAddress(kPrimIntMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); @@ -1891,14 +2113,11 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver // Processing a Dex `float-to-long' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); - XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); Label done, nan; codegen_->Load64BitValue(output, kPrimLongMax); - // temp = long-to-float(output) - __ cvtsi2ss(temp, output, true); - // if input >= temp goto done - __ comiss(input, temp); + // if input >= (float)LONG_MAX goto done + __ comiss(input, codegen_->LiteralFloatAddress(kPrimLongMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); @@ -1916,14 +2135,11 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver // Processing a Dex `double-to-long' instruction. XmmRegister input = in.AsFpuRegister<XmmRegister>(); CpuRegister output = out.AsRegister<CpuRegister>(); - XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); Label done, nan; codegen_->Load64BitValue(output, kPrimLongMax); - // temp = long-to-double(output) - __ cvtsi2sd(temp, output, true); - // if input >= temp goto done - __ comisd(input, temp); + // if input >= (double)LONG_MAX goto done + __ comisd(input, codegen_->LiteralDoubleAddress(kPrimLongMax)); __ j(kAboveEqual, &done); // if input == NaN goto nan __ j(kUnordered, &nan); @@ -2327,13 +2543,19 @@ void LocationsBuilderX86_64::VisitMul(HMul* mul) { case Primitive::kPrimInt: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); - locations->SetOut(Location::SameAsFirstInput()); + if (mul->InputAt(1)->IsIntConstant()) { + // Can use 3 operand multiply. + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + } else { + locations->SetOut(Location::SameAsFirstInput()); + } break; } case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrInt32LongConstant(mul->InputAt(1))); - if (locations->InAt(1).IsConstant()) { + locations->SetInAt(1, Location::Any()); + if (mul->InputAt(1)->IsLongConstant() && + IsInt<32>(mul->InputAt(1)->AsLongConstant()->GetValue())) { // Can use 3 operand multiply. 
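[Annotation] `imul` has a three-operand form, `imul dst, src, imm32`, so when the multiplier is a suitable constant the destination no longer has to alias the first input, hence the `Location::RequiresRegister()` output with `kNoOutputOverlap` set just below instead of `SameAsFirstInput()`. A condensed sketch of the selection logic from `VisitMul`:

if (mul->InputAt(1)->IsIntConstant()) {
  // Three-operand form: out = first * imm; out may be any register.
  Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue());
  __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm);
} else {
  // Two-operand form: first *= second, so the output must alias the first input.
  DCHECK(first.Equals(out));
  __ imull(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
}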
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } else { @@ -2358,37 +2580,51 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { LocationSummary* locations = mul->GetLocations(); Location first = locations->InAt(0); Location second = locations->InAt(1); + Location out = locations->Out(); switch (mul->GetResultType()) { - case Primitive::kPrimInt: { - DCHECK(first.Equals(locations->Out())); - if (second.IsRegister()) { + case Primitive::kPrimInt: + // The constant may have ended up in a register, so test explicitly to avoid + // problems where the output may not be the same as the first operand. + if (mul->InputAt(1)->IsIntConstant()) { + Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); + __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm); + } else if (second.IsRegister()) { + DCHECK(first.Equals(out)); __ imull(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>()); - } else if (second.IsConstant()) { - Immediate imm(second.GetConstant()->AsIntConstant()->GetValue()); - __ imull(first.AsRegister<CpuRegister>(), imm); } else { + DCHECK(first.Equals(out)); DCHECK(second.IsStackSlot()); __ imull(first.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex())); } break; - } case Primitive::kPrimLong: { - if (second.IsConstant()) { - int64_t value = second.GetConstant()->AsLongConstant()->GetValue(); - DCHECK(IsInt<32>(value)); - __ imulq(locations->Out().AsRegister<CpuRegister>(), - first.AsRegister<CpuRegister>(), - Immediate(static_cast<int32_t>(value))); - } else { - DCHECK(first.Equals(locations->Out())); + // The constant may have ended up in a register, so test explicitly to avoid + // problems where the output may not be the same as the first operand. + if (mul->InputAt(1)->IsLongConstant()) { + int64_t value = mul->InputAt(1)->AsLongConstant()->GetValue(); + if (IsInt<32>(value)) { + __ imulq(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), + Immediate(static_cast<int32_t>(value))); + } else { + // Have to use the constant area. 
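[Annotation] `LiteralInt64Address`, `LiteralFloatAddress` and `LiteralDoubleAddress` return RIP-relative operands into a per-method constant area that `Finalize()` (further down in this file) emits alongside the code; the `imulq` on the next line uses it for a 64-bit multiplicand that cannot be a sign-extended imm32. Usage sketch, assuming the literal-pool API as it appears in this patch:

// 64-bit constant with no immediate encoding:
__ imulq(first.AsRegister<CpuRegister>(),
         codegen_->LiteralInt64Address(INT64_C(0x123456789ABCDEF0)));

// FP constants never have immediate forms, so they always go through the pool:
__ ucomiss(lhs_reg, codegen_->LiteralFloatAddress(2.5f));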
+ DCHECK(first.Equals(out)); + __ imulq(first.AsRegister<CpuRegister>(), codegen_->LiteralInt64Address(value)); + } + } else if (second.IsRegister()) { + DCHECK(first.Equals(out)); __ imulq(first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>()); + } else { + DCHECK(second.IsDoubleStackSlot()); + DCHECK(first.Equals(out)); + __ imulq(first.AsRegister<CpuRegister>(), + Address(CpuRegister(RSP), second.GetStackIndex())); } break; } case Primitive::kPrimFloat: { - DCHECK(first.Equals(locations->Out())); + DCHECK(first.Equals(out)); if (second.IsFpuRegister()) { __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -2403,7 +2639,7 @@ void InstructionCodeGeneratorX86_64::VisitMul(HMul* mul) { } case Primitive::kPrimDouble: { - DCHECK(first.Equals(locations->Out())); + DCHECK(first.Equals(out)); if (second.IsFpuRegister()) { __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>()); } else if (second.IsConstant()) { @@ -2913,6 +3149,9 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio Location value = locations->InAt(0); switch (instruction->GetType()) { + case Primitive::kPrimByte: + case Primitive::kPrimChar: + case Primitive::kPrimShort: case Primitive::kPrimInt: { if (value.IsRegister()) { __ testl(value.AsRegister<CpuRegister>(), value.AsRegister<CpuRegister>()); @@ -3064,11 +3303,14 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) instruction->GetTypeIndex()); // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. - __ gs()->call( - Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true)); + + codegen_->InvokeRuntime( + Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true), + instruction, + instruction->GetDexPc(), + nullptr); DCHECK(!codegen_->IsLeafMethod()); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) { @@ -3088,11 +3330,13 @@ void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) { // Note: if heap poisoning is enabled, the entry point takes cares // of poisoning the reference. - __ gs()->call( - Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true)); + codegen_->InvokeRuntime( + Address::Absolute(GetThreadOffset<kX86_64WordSize>(instruction->GetEntrypoint()), true), + instruction, + instruction->GetDexPc(), + nullptr); DCHECK(!codegen_->IsLeafMethod()); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) { @@ -3779,10 +4023,11 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { DCHECK_EQ(value_type, Primitive::kPrimNot); // Note: if heap poisoning is enabled, pAputObject takes cares // of poisoning the reference. 
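[Annotation] Runtime calls made on the fast path (the allocation entry points above, and the `pAputObject` reference store replaced just below) pass `nullptr` for the slow path, since there is no `SlowPathCode` object to associate; `ValidateInvokeRuntime` presumably uses that distinction in its debug-build checks, though its body is not part of this patch. Usage sketch:

codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                        instruction,
                        instruction->GetDexPc(),
                        /* slow_path */ nullptr);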
- __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAputObject), - true)); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject), + instruction, + instruction->GetDexPc(), + nullptr); DCHECK(!codegen_->IsLeafMethod()); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); } break; } @@ -4064,8 +4309,7 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) { codegen_->Load64BitValue(destination.AsRegister<CpuRegister>(), value); } else { DCHECK(destination.IsDoubleStackSlot()) << destination; - codegen_->Load64BitValue(CpuRegister(TMP), value); - __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP)); + codegen_->Store64BitValueToStack(destination, value); } } else if (constant->IsFloatConstant()) { float fp_value = constant->AsFloatConstant()->GetValue(); @@ -4096,8 +4340,7 @@ void ParallelMoveResolverX86_64::EmitMove(size_t index) { } } else { DCHECK(destination.IsDoubleStackSlot()) << destination; - codegen_->Load64BitValue(CpuRegister(TMP), value); - __ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP)); + codegen_->Store64BitValueToStack(destination, value); } } } else if (source.IsFpuRegister()) { @@ -4299,6 +4542,10 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) { __ Bind(slow_path->GetExitLabel()); } +static Address GetExceptionTlsAddress() { + return Address::Absolute(Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true); +} + void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall); @@ -4306,10 +4553,15 @@ void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) { } void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) { - Address address = Address::Absolute( - Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true); - __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), address); - __ gs()->movl(address, Immediate(0)); + __ gs()->movl(load->GetLocations()->Out().AsRegister<CpuRegister>(), GetExceptionTlsAddress()); +} + +void LocationsBuilderX86_64::VisitClearException(HClearException* clear) { + new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall); +} + +void InstructionCodeGeneratorX86_64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) { + __ gs()->movl(GetExceptionTlsAddress(), Immediate(0)); } void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) { @@ -4320,9 +4572,10 @@ void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) { } void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) { - __ gs()->call( - Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeliverException), true)); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException), + instruction, + instruction->GetDexPc(), + nullptr); } void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) { @@ -4432,11 +4685,11 @@ void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instructio } void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) { - __ gs()->call(Address::Absolute(instruction->IsEnter() - ? 
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject) - : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject), - true)); - codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); + codegen_->InvokeRuntime(instruction->IsEnter() ? QUICK_ENTRY_POINT(pLockObject) + : QUICK_ENTRY_POINT(pUnlockObject), + instruction, + instruction->GetDexPc(), + nullptr); } void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); } @@ -4566,6 +4819,18 @@ void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) { LOG(FATAL) << "Unreachable"; } +void LocationsBuilderX86_64::VisitFakeString(HFakeString* instruction) { + DCHECK(codegen_->IsBaseline()); + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant())); +} + +void InstructionCodeGeneratorX86_64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) { + DCHECK(codegen_->IsBaseline()); + // Will be generated at use site. +} + void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) { if (value == 0) { __ xorl(dest, dest); @@ -4577,6 +4842,18 @@ void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) { } } +void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) { + DCHECK(dest.IsDoubleStackSlot()); + if (IsInt<32>(value)) { + // Can move directly as an int32 constant. + __ movq(Address(CpuRegister(RSP), dest.GetStackIndex()), + Immediate(static_cast<int32_t>(value))); + } else { + Load64BitValue(CpuRegister(TMP), value); + __ movq(Address(CpuRegister(RSP), dest.GetStackIndex()), CpuRegister(TMP)); + } +} + void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) { // Generate the constant area if needed. X86_64Assembler* assembler = GetAssembler(); diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index c2aa56b63f..41bebac240 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -183,7 +183,7 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor { void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor); void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg); void HandleBitwiseOperation(HBinaryOperation* operation); - void GenerateRemFP(HRem *rem); + void GenerateRemFP(HRem* rem); void DivRemOneOrMinusOne(HBinaryOperation* instruction); void DivByPowerOfTwo(HDiv* instruction); void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction); @@ -202,6 +202,12 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor { Label* true_target, Label* false_target, Label* always_true_target); + void GenerateCompareTestAndBranch(HIf* if_inst, + HCondition* condition, + Label* true_target, + Label* false_target, + Label* always_true_target); + void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label); void HandleGoto(HInstruction* got, HBasicBlock* successor); X86_64Assembler* const assembler_; @@ -226,6 +232,12 @@ class CodeGeneratorX86_64 : public CodeGenerator { size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + // Generate code to invoke a runtime entry point. 
+ void InvokeRuntime(Address entry_point, + HInstruction* instruction, + uint32_t dex_pc, + SlowPathCode* slow_path); + size_t GetWordSize() const OVERRIDE { return kX86_64WordSize; } @@ -310,6 +322,9 @@ class CodeGeneratorX86_64 : public CodeGenerator { // Load a 64 bit value into a register in the most efficient manner. void Load64BitValue(CpuRegister dest, int64_t value); + // Store a 64 bit value into a DoubleStackSlot in the most efficient manner. + void Store64BitValueToStack(Location dest, int64_t value); + private: // Labels for each block that will be compiled. GrowableArray<Label> block_labels_; diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h index 246fff99ac..f54547534f 100644 --- a/compiler/optimizing/common_arm64.h +++ b/compiler/optimizing/common_arm64.h @@ -151,6 +151,15 @@ static inline vixl::MemOperand HeapOperand(const vixl::Register& base, size_t of return vixl::MemOperand(base.X(), offset); } +static inline vixl::MemOperand HeapOperand(const vixl::Register& base, + const vixl::Register& regoffset, + vixl::Shift shift = vixl::LSL, + unsigned shift_amount = 0) { + // A heap reference must be 32bit, so fit in a W register. + DCHECK(base.IsW()); + return vixl::MemOperand(base.X(), regoffset, shift, shift_amount); +} + static inline vixl::MemOperand HeapOperand(const vixl::Register& base, Offset offset) { return HeapOperand(base, offset.SizeValue()); } diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc index 11f6362294..10e4bc98a6 100644 --- a/compiler/optimizing/constant_folding_test.cc +++ b/compiler/optimizing/constant_folding_test.cc @@ -81,7 +81,7 @@ static void TestCode(const uint16_t* data, * offset * ------ * v0 <- 1 0. const/4 v0, #+1 - * v1 <- -v0 1. neg-int v0, v1 + * v1 <- -v0 1. neg-int v1, v0 * return v1 2. return v1 */ TEST(ConstantFolding, IntConstantFoldingNegation) { @@ -132,6 +132,69 @@ TEST(ConstantFolding, IntConstantFoldingNegation) { } /** + * Tiny three-register program exercising long constant folding on negation. + * + * 16-bit + * offset + * ------ + * (v0, v1) <- 4294967296 0. const-wide v0 #+4294967296 + * (v2, v3) <- -(v0, v1) 1. neg-long v2, v0 + * return (v2, v3) 2. return-wide v2 + */ +TEST(ConstantFolding, LongConstantFoldingNegation) { + const int64_t input = INT64_C(4294967296); // 2^32 + const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW. + const uint16_t word1 = High16Bits(Low32Bits(input)); + const uint16_t word2 = Low16Bits(High32Bits(input)); + const uint16_t word3 = High16Bits(High32Bits(input)); // MSW. + const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM( + Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, + Instruction::NEG_LONG | 2 << 8 | 0 << 12, + Instruction::RETURN_WIDE | 2 << 8); + + std::string expected_before = + "BasicBlock 0, succ: 1\n" + " 4: LongConstant [7]\n" + " 12: SuspendCheck\n" + " 13: Goto 1\n" + "BasicBlock 1, pred: 0, succ: 2\n" + " 7: Neg(4) [10]\n" + " 10: Return(7)\n" + "BasicBlock 2, pred: 1\n" + " 11: Exit\n"; + + // Expected difference after constant folding. + diff_t expected_cf_diff = { + { " 4: LongConstant [7]\n", " 4: LongConstant\n" }, + { " 12: SuspendCheck\n", " 12: SuspendCheck\n" + " 14: LongConstant [10]\n" }, + { " 7: Neg(4) [10]\n", removed }, + { " 10: Return(7)\n", " 10: Return(14)\n" } + }; + std::string expected_after_cf = Patch(expected_before, expected_cf_diff); + + // Check the value of the computed constant. 
+ auto check_after_cf = [](HGraph* graph) { + HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0); + ASSERT_TRUE(inst->IsLongConstant()); + ASSERT_EQ(inst->AsLongConstant()->GetValue(), INT64_C(-4294967296)); + }; + + // Expected difference after dead code elimination. + diff_t expected_dce_diff = { + { " 4: LongConstant\n", removed }, + }; + std::string expected_after_dce = Patch(expected_after_cf, expected_dce_diff); + + TestCode(data, + expected_before, + expected_after_cf, + expected_after_dce, + check_after_cf, + Primitive::kPrimLong); +} + +/** * Tiny three-register program exercising int constant folding on addition. * * 16-bit diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index 9679d0ab70..5406a0ccf4 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -89,6 +89,33 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) { block->GetBlockId())); } + // Ensure that only Return(Void) and Throw jump to Exit. An exiting + // TryBoundary may be between a Throw and the Exit if the Throw is in a try. + if (block->IsExitBlock()) { + for (size_t i = 0, e = block->GetPredecessors().Size(); i < e; ++i) { + HBasicBlock* predecessor = block->GetPredecessors().Get(i); + if (predecessor->IsSingleTryBoundary() + && !predecessor->GetLastInstruction()->AsTryBoundary()->IsEntry()) { + HBasicBlock* real_predecessor = predecessor->GetSinglePredecessor(); + HInstruction* last_instruction = real_predecessor->GetLastInstruction(); + if (!last_instruction->IsThrow()) { + AddError(StringPrintf("Unexpected TryBoundary between %s:%d and Exit.", + last_instruction->DebugName(), + last_instruction->GetId())); + } + } else { + HInstruction* last_instruction = predecessor->GetLastInstruction(); + if (!last_instruction->IsReturn() + && !last_instruction->IsReturnVoid() + && !last_instruction->IsThrow()) { + AddError(StringPrintf("Unexpected instruction %s:%d jumps into the exit block.", + last_instruction->DebugName(), + last_instruction->GetId())); + } + } + } + } + // Visit this block's list of phis. for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); @@ -136,6 +163,33 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) { VisitInstruction(check); } +void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) { + // Ensure that all exception handlers are catch blocks and that handlers + // are not listed multiple times. + // Note that a normal-flow successor may be a catch block before CFG + // simplification. We only test normal-flow successors in SsaChecker. 
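[Annotation] The new exit-block check in `VisitBasicBlock` above encodes the invariant that only `HReturn`, `HReturnVoid` and `HThrow` may flow into Exit, with one wrinkle: a `Throw` inside a try region reaches Exit through the exiting `TryBoundary`, so the checker looks through a single-`TryBoundary` predecessor first. Restated as a hypothetical predicate (the handler-list checks of `VisitTryBoundary` continue just below):

static bool IsValidExitPredecessor(HBasicBlock* predecessor) {
  if (predecessor->IsSingleTryBoundary() &&
      !predecessor->GetLastInstruction()->AsTryBoundary()->IsEntry()) {
    // Throw-in-try -> TryBoundary(exit) -> Exit.
    return predecessor->GetSinglePredecessor()->GetLastInstruction()->IsThrow();
  }
  HInstruction* last = predecessor->GetLastInstruction();
  return last->IsReturn() || last->IsReturnVoid() || last->IsThrow();
}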
+ for (HExceptionHandlerIterator it(*try_boundary); !it.Done(); it.Advance()) { + HBasicBlock* handler = it.Current(); + if (!handler->IsCatchBlock()) { + AddError(StringPrintf("Block %d with %s:%d has exceptional successor %d which " + "is not a catch block.", + current_block_->GetBlockId(), + try_boundary->DebugName(), + try_boundary->GetId(), + handler->GetBlockId())); + } + if (current_block_->GetSuccessors().Contains( + handler, /* start_from */ it.CurrentSuccessorIndex() + 1)) { + AddError(StringPrintf("Exception handler block %d of %s:%d is listed multiple times.", + handler->GetBlockId(), + try_boundary->DebugName(), + try_boundary->GetId())); + } + } + + VisitInstruction(try_boundary); +} + void GraphChecker::VisitInstruction(HInstruction* instruction) { if (seen_ids_.IsBitSet(instruction->GetId())) { AddError(StringPrintf("Instruction id %d is duplicate in graph.", @@ -301,11 +355,32 @@ void GraphChecker::VisitInstanceOf(HInstanceOf* instruction) { void SSAChecker::VisitBasicBlock(HBasicBlock* block) { super_type::VisitBasicBlock(block); + // Ensure that catch blocks are not normal successors, and normal blocks are + // never exceptional successors. + const size_t num_normal_successors = block->NumberOfNormalSuccessors(); + for (size_t j = 0; j < num_normal_successors; ++j) { + HBasicBlock* successor = block->GetSuccessors().Get(j); + if (successor->IsCatchBlock()) { + AddError(StringPrintf("Catch block %d is a normal successor of block %d.", + successor->GetBlockId(), + block->GetBlockId())); + } + } + for (size_t j = num_normal_successors, e = block->GetSuccessors().Size(); j < e; ++j) { + HBasicBlock* successor = block->GetSuccessors().Get(j); + if (!successor->IsCatchBlock()) { + AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.", + successor->GetBlockId(), + block->GetBlockId())); + } + } + // Ensure there is no critical edge (i.e., an edge connecting a // block with multiple successors to a block with multiple - // predecessors). - if (block->GetSuccessors().Size() > 1) { - for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) { + // predecessors). Exceptional edges are synthesized and hence + // not accounted for. + if (block->NumberOfNormalSuccessors() > 1) { + for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) { HBasicBlock* successor = block->GetSuccessors().Get(j); if (successor->GetPredecessors().Size() > 1) { AddError(StringPrintf("Critical edge between blocks %d and %d.", @@ -326,6 +401,54 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) { } } + // Ensure try membership information is consistent. 
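[Annotation] The SSA checks above rely on a successor-ordering convention: a block's successor list keeps all normal-flow successors first and exception handlers after them, so `NumberOfNormalSuccessors()` is the split point between the two ranges (the try-membership checks continue just below). Iterating each range under that assumption:

// Assumed layout: [0, NumberOfNormalSuccessors()) are normal edges,
// [NumberOfNormalSuccessors(), Size()) are exceptional edges.
size_t split = block->NumberOfNormalSuccessors();
for (size_t i = 0; i < split; ++i) {
  HBasicBlock* successor = block->GetSuccessors().Get(i);
  // Normal edge: must not target a catch block.
}
for (size_t i = split, e = block->GetSuccessors().Size(); i < e; ++i) {
  HBasicBlock* handler = block->GetSuccessors().Get(i);
  // Exceptional edge: must target a catch block.
}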
+ HTryBoundary* try_entry = block->GetTryEntry(); + if (block->IsCatchBlock()) { + if (try_entry != nullptr) { + AddError(StringPrintf("Catch blocks should not be try blocks but catch block %d " + "has try entry %s:%d.", + block->GetBlockId(), + try_entry->DebugName(), + try_entry->GetId())); + } + + if (block->IsLoopHeader()) { + AddError(StringPrintf("Catch blocks should not be loop headers but catch block %d is.", + block->GetBlockId())); + } + } else { + for (size_t i = 0; i < block->GetPredecessors().Size(); ++i) { + HBasicBlock* predecessor = block->GetPredecessors().Get(i); + HTryBoundary* incoming_try_entry = predecessor->ComputeTryEntryOfSuccessors(); + if (try_entry == nullptr) { + if (incoming_try_entry != nullptr) { + AddError(StringPrintf("Block %d has no try entry but try entry %s:%d follows " + "from predecessor %d.", + block->GetBlockId(), + incoming_try_entry->DebugName(), + incoming_try_entry->GetId(), + predecessor->GetBlockId())); + } + } else if (incoming_try_entry == nullptr) { + AddError(StringPrintf("Block %d has try entry %s:%d but no try entry follows " + "from predecessor %d.", + block->GetBlockId(), + try_entry->DebugName(), + try_entry->GetId(), + predecessor->GetBlockId())); + } else if (!incoming_try_entry->HasSameExceptionHandlersAs(*try_entry)) { + AddError(StringPrintf("Block %d has try entry %s:%d which is not consistent " + "with %s:%d that follows from predecessor %d.", + block->GetBlockId(), + try_entry->DebugName(), + try_entry->GetId(), + incoming_try_entry->DebugName(), + incoming_try_entry->GetId(), + predecessor->GetBlockId())); + } + } + } + if (block->IsLoopHeader()) { CheckLoop(block); } @@ -472,32 +595,6 @@ void SSAChecker::VisitPhi(HPhi* phi) { phi->GetBlock()->GetBlockId())); } - // Ensure the number of inputs of a phi is the same as the number of - // its predecessors. - const GrowableArray<HBasicBlock*>& predecessors = - phi->GetBlock()->GetPredecessors(); - if (phi->InputCount() != predecessors.Size()) { - AddError(StringPrintf( - "Phi %d in block %d has %zu inputs, " - "but block %d has %zu predecessors.", - phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(), - phi->GetBlock()->GetBlockId(), predecessors.Size())); - } else { - // Ensure phi input at index I either comes from the Ith - // predecessor or from a block that dominates this predecessor. - for (size_t i = 0, e = phi->InputCount(); i < e; ++i) { - HInstruction* input = phi->InputAt(i); - HBasicBlock* predecessor = predecessors.Get(i); - if (!(input->GetBlock() == predecessor - || input->GetBlock()->Dominates(predecessor))) { - AddError(StringPrintf( - "Input %d at index %zu of phi %d from block %d is not defined in " - "predecessor number %zu nor in a block dominating it.", - input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(), - i)); - } - } - } // Ensure that the inputs have the same primitive kind as the phi. for (size_t i = 0, e = phi->InputCount(); i < e; ++i) { HInstruction* input = phi->InputAt(i); @@ -516,6 +613,38 @@ void SSAChecker::VisitPhi(HPhi* phi) { phi->GetBlock()->GetBlockId(), Primitive::PrettyDescriptor(phi->GetType()))); } + + if (phi->IsCatchPhi()) { + // The number of inputs of a catch phi corresponds to the total number of + // throwing instructions caught by this catch block. + } else { + // Ensure the number of inputs of a non-catch phi is the same as the number + // of its predecessors. 
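[Annotation] A catch phi's inputs correspond to the throw sites covered by the catch block, not to CFG predecessors, so the one-input-per-predecessor check relocated here applies only in the non-catch branch that follows. In essence:

if (!phi->IsCatchPhi()) {
  // Only non-catch phis must have exactly one input per predecessor block.
  DCHECK_EQ(phi->InputCount(), phi->GetBlock()->GetPredecessors().Size());
}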
+ const GrowableArray<HBasicBlock*>& predecessors = + phi->GetBlock()->GetPredecessors(); + if (phi->InputCount() != predecessors.Size()) { + AddError(StringPrintf( + "Phi %d in block %d has %zu inputs, " + "but block %d has %zu predecessors.", + phi->GetId(), phi->GetBlock()->GetBlockId(), phi->InputCount(), + phi->GetBlock()->GetBlockId(), predecessors.Size())); + } else { + // Ensure phi input at index I either comes from the Ith + // predecessor or from a block that dominates this predecessor. + for (size_t i = 0, e = phi->InputCount(); i < e; ++i) { + HInstruction* input = phi->InputAt(i); + HBasicBlock* predecessor = predecessors.Get(i); + if (!(input->GetBlock() == predecessor + || input->GetBlock()->Dominates(predecessor))) { + AddError(StringPrintf( + "Input %d at index %zu of phi %d from block %d is not defined in " + "predecessor number %zu nor in a block dominating it.", + input->GetId(), i, phi->GetId(), phi->GetBlock()->GetBlockId(), + i)); + } + } + } + } } void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_index) { diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h index 7c72e23e2d..0e270dbe18 100644 --- a/compiler/optimizing/graph_checker.h +++ b/compiler/optimizing/graph_checker.h @@ -48,6 +48,9 @@ class GraphChecker : public HGraphDelegateVisitor { // Check that the HasBoundsChecks() flag is set for bounds checks. void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE; + // Check successors of blocks ending in TryBoundary. + void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE; + // Check that HCheckCast and HInstanceOf have HLoadClass as second input. void VisitCheckCast(HCheckCast* check) OVERRIDE; void VisitInstanceOf(HInstanceOf* check) OVERRIDE; diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc index eca0d9344f..0f6677519e 100644 --- a/compiler/optimizing/graph_checker_test.cc +++ b/compiler/optimizing/graph_checker_test.cc @@ -25,14 +25,14 @@ namespace art { * Create a simple control-flow graph composed of two blocks: * * BasicBlock 0, succ: 1 - * 0: Goto 1 + * 0: ReturnVoid 1 * BasicBlock 1, pred: 0 * 1: Exit */ HGraph* CreateSimpleCFG(ArenaAllocator* allocator) { HGraph* graph = CreateGraph(allocator); HBasicBlock* entry_block = new (allocator) HBasicBlock(graph); - entry_block->AddInstruction(new (allocator) HGoto()); + entry_block->AddInstruction(new (allocator) HReturnVoid()); graph->AddBlock(entry_block); graph->SetEntryBlock(entry_block); HBasicBlock* exit_block = new (allocator) HBasicBlock(graph); diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 504c141799..069a7a460b 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -158,12 +158,14 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { std::ostream& output, const char* pass_name, bool is_after_pass, + bool graph_in_bad_state, const CodeGenerator& codegen, const DisassemblyInformation* disasm_info = nullptr) : HGraphDelegateVisitor(graph), output_(output), pass_name_(pass_name), is_after_pass_(is_after_pass), + graph_in_bad_state_(graph_in_bad_state), codegen_(codegen), disasm_info_(disasm_info), disassembler_(disasm_info_ != nullptr @@ -251,11 +253,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { void PrintSuccessors(HBasicBlock* block) { AddIndent(); output_ << "successors"; - for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) { - if 
(!block->IsExceptionalSuccessor(i)) { - HBasicBlock* successor = block->GetSuccessors().Get(i); - output_ << " \"B" << successor->GetBlockId() << "\" "; - } + for (size_t i = 0; i < block->NumberOfNormalSuccessors(); ++i) { + HBasicBlock* successor = block->GetSuccessors().Get(i); + output_ << " \"B" << successor->GetBlockId() << "\" "; } output_<< std::endl; } @@ -263,11 +263,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { void PrintExceptionHandlers(HBasicBlock* block) { AddIndent(); output_ << "xhandlers"; - for (size_t i = 0, e = block->GetSuccessors().Size(); i < e; ++i) { - if (block->IsExceptionalSuccessor(i)) { - HBasicBlock* handler = block->GetSuccessors().Get(i); - output_ << " \"B" << handler->GetBlockId() << "\" "; - } + for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().Size(); ++i) { + HBasicBlock* handler = block->GetSuccessors().Get(i); + output_ << " \"B" << handler->GetBlockId() << "\" "; } if (block->IsExitBlock() && (disasm_info_ != nullptr) && @@ -351,12 +349,17 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { void VisitPhi(HPhi* phi) OVERRIDE { StartAttributeStream("reg") << phi->GetRegNumber(); + StartAttributeStream("is_catch_phi") << std::boolalpha << phi->IsCatchPhi() << std::noboolalpha; } void VisitMemoryBarrier(HMemoryBarrier* barrier) OVERRIDE { StartAttributeStream("kind") << barrier->GetBarrierKind(); } + void VisitMonitorOperation(HMonitorOperation* monitor) OVERRIDE { + StartAttributeStream("kind") << (monitor->IsEnter() ? "enter" : "exit"); + } + void VisitLoadClass(HLoadClass* load_class) OVERRIDE { StartAttributeStream("gen_clinit_check") << std::boolalpha << load_class->MustGenerateClinitCheck() << std::noboolalpha; @@ -383,6 +386,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { StartAttributeStream("recursive") << std::boolalpha << invoke->IsRecursive() << std::noboolalpha; + StartAttributeStream("intrinsic") << invoke->GetIntrinsic(); } void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE { @@ -393,6 +397,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { return strcmp(pass_name_, name) == 0; } + bool IsReferenceTypePropagationPass() { + return strstr(pass_name_, ReferenceTypePropagation::kReferenceTypePropagationPassName) + != nullptr; + } + void PrintInstruction(HInstruction* instruction) { output_ << instruction->DebugName(); if (instruction->InputCount() > 0) { @@ -456,27 +465,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } else { StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId(); } - } else if (IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName) - && is_after_pass_) { - if (instruction->GetType() == Primitive::kPrimNot) { - if (instruction->IsLoadClass()) { - ReferenceTypeInfo info = instruction->AsLoadClass()->GetLoadedClassRTI(); - ScopedObjectAccess soa(Thread::Current()); - if (info.GetTypeHandle().GetReference() != nullptr) { - StartAttributeStream("klass") << PrettyClass(info.GetTypeHandle().Get()); - } else { - StartAttributeStream("klass") << "unresolved"; - } - } else { - ReferenceTypeInfo info = instruction->GetReferenceTypeInfo(); - if (info.IsTop()) { - StartAttributeStream("klass") << "java.lang.Object"; - } else { - ScopedObjectAccess soa(Thread::Current()); - StartAttributeStream("klass") << PrettyClass(info.GetTypeHandle().Get()); - } - StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha; - } + } else if 
(IsReferenceTypePropagationPass() + && (instruction->GetType() == Primitive::kPrimNot)) { + ReferenceTypeInfo info = instruction->IsLoadClass() + ? instruction->AsLoadClass()->GetLoadedClassRTI() + : instruction->GetReferenceTypeInfo(); + ScopedObjectAccess soa(Thread::Current()); + if (info.IsValid()) { + StartAttributeStream("klass") << PrettyDescriptor(info.GetTypeHandle().Get()); + StartAttributeStream("can_be_null") + << std::boolalpha << instruction->CanBeNull() << std::noboolalpha; + StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha; + } else { + DCHECK(!is_after_pass_) << "Type info should be valid after reference type propagation"; } } if (disasm_info_ != nullptr) { @@ -578,7 +579,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { void Run() { StartTag("cfg"); - std::string pass_desc = std::string(pass_name_) + (is_after_pass_ ? " (after)" : " (before)"); + std::string pass_desc = std::string(pass_name_) + + " (" + + (is_after_pass_ ? "after" : "before") + + (graph_in_bad_state_ ? ", bad_state" : "") + + ")"; PrintProperty("name", pass_desc.c_str()); if (disasm_info_ != nullptr) { DumpDisassemblyBlockForFrameEntry(); @@ -647,6 +652,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { std::ostream& output_; const char* pass_name_; const bool is_after_pass_; + const bool graph_in_bad_state_; const CodeGenerator& codegen_; const DisassemblyInformation* disasm_info_; std::unique_ptr<HGraphVisualizerDisassembler> disassembler_; @@ -662,7 +668,7 @@ HGraphVisualizer::HGraphVisualizer(std::ostream* output, void HGraphVisualizer::PrintHeader(const char* method_name) const { DCHECK(output_ != nullptr); - HGraphVisualizerPrinter printer(graph_, *output_, "", true, codegen_); + HGraphVisualizerPrinter printer(graph_, *output_, "", true, false, codegen_); printer.StartTag("compilation"); printer.PrintProperty("name", method_name); printer.PrintProperty("method", method_name); @@ -670,10 +676,17 @@ void HGraphVisualizer::PrintHeader(const char* method_name) const { printer.EndTag("compilation"); } -void HGraphVisualizer::DumpGraph(const char* pass_name, bool is_after_pass) const { +void HGraphVisualizer::DumpGraph(const char* pass_name, + bool is_after_pass, + bool graph_in_bad_state) const { DCHECK(output_ != nullptr); if (!graph_->GetBlocks().IsEmpty()) { - HGraphVisualizerPrinter printer(graph_, *output_, pass_name, is_after_pass, codegen_); + HGraphVisualizerPrinter printer(graph_, + *output_, + pass_name, + is_after_pass, + graph_in_bad_state, + codegen_); printer.Run(); } } @@ -681,8 +694,13 @@ void HGraphVisualizer::DumpGraph(const char* pass_name, bool is_after_pass) cons void HGraphVisualizer::DumpGraphWithDisassembly() const { DCHECK(output_ != nullptr); if (!graph_->GetBlocks().IsEmpty()) { - HGraphVisualizerPrinter printer( - graph_, *output_, "disassembly", true, codegen_, codegen_.GetDisassemblyInformation()); + HGraphVisualizerPrinter printer(graph_, + *output_, + "disassembly", + /* is_after_pass */ true, + /* graph_in_bad_state */ false, + codegen_, + codegen_.GetDisassemblyInformation()); printer.Run(); } } diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h index b6b66df601..66588f6e36 100644 --- a/compiler/optimizing/graph_visualizer.h +++ b/compiler/optimizing/graph_visualizer.h @@ -104,7 +104,7 @@ class HGraphVisualizer : public ValueObject { const CodeGenerator& codegen); void PrintHeader(const char* method_name) const; - void DumpGraph(const char* pass_name, bool 
is_after_pass = true) const; + void DumpGraph(const char* pass_name, bool is_after_pass, bool graph_in_bad_state) const; void DumpGraphWithDisassembly() const; private: diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc index 708733e28c..833dfb00a1 100644 --- a/compiler/optimizing/gvn.cc +++ b/compiler/optimizing/gvn.cc @@ -120,7 +120,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> { // Removes all instructions in the set affected by the given side effects. void Kill(SideEffects side_effects) { DeleteAllImpureWhich([side_effects](Node* node) { - return node->GetInstruction()->GetSideEffects().DependsOn(side_effects); + return node->GetInstruction()->GetSideEffects().MayDependOn(side_effects); }); } diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc index d8a09ffc38..42ef3ff4a5 100644 --- a/compiler/optimizing/gvn_test.cc +++ b/compiler/optimizing/gvn_test.cc @@ -206,7 +206,7 @@ TEST(GVNTest, LoopFieldElimination) { // and the body to be GVN'ed. loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, parameter, - Primitive::kPrimNot, + Primitive::kPrimBoolean, MemberOffset(42), false, kUnknownFieldIndex, @@ -266,6 +266,8 @@ TEST(GVNTest, LoopSideEffects) { ArenaPool pool; ArenaAllocator allocator(&pool); + static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC(); + HGraph* graph = CreateGraph(&allocator); HBasicBlock* entry = new (&allocator) HBasicBlock(graph); graph->AddBlock(entry); @@ -309,7 +311,7 @@ TEST(GVNTest, LoopSideEffects) { ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn( *outer_loop_header->GetLoopInformation())); - // Check that the loops don't have side effects. + // Check that the only side effect of loops is to potentially trigger GC. { // Make one block with a side effect. entry->AddInstruction(new (&allocator) HInstanceFieldSet(parameter, @@ -323,9 +325,12 @@ TEST(GVNTest, LoopSideEffects) { SideEffectsAnalysis side_effects(graph); side_effects.Run(); - ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects()); - ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects()); - ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects()); + ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite()); + ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite()); + ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite()); + ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite()); + ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).Equals(kCanTriggerGC)); + ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).Equals(kCanTriggerGC)); } // Check that the side effects of the outer loop does not affect the inner loop. 
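[Annotation] The side-effects model is refined from a single `HasSideEffects()` bit into distinct categories (writes, reads, and the ability to trigger GC), so GVN (`MayDependOn` in the gvn.cc hunk above) and these tests can tell a loop that merely allocates apart from one that writes the heap. A small sketch using only the accessors exercised above:

SideEffects loop_effects = side_effects.GetLoopEffects(inner_loop_header);
if (!loop_effects.DoesAnyWrite()) {
  // No heap write inside the loop: cached field/array values stay valid
  // across it, even if the loop can trigger GC.
}
ASSERT_TRUE(loop_effects.Equals(SideEffects::CanTriggerGC()));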
@@ -343,10 +348,11 @@ TEST(GVNTest, LoopSideEffects) { SideEffectsAnalysis side_effects(graph); side_effects.Run(); - ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects()); - ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).HasSideEffects()); - ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects()); - ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects()); + ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite()); + ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite()); + ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite()); + ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite()); + ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).Equals(kCanTriggerGC)); } // Check that the side effects of the inner loop affect the outer loop. @@ -365,10 +371,10 @@ TEST(GVNTest, LoopSideEffects) { SideEffectsAnalysis side_effects(graph); side_effects.Run(); - ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects()); - ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).HasSideEffects()); - ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects()); - ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects()); + ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite()); + ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite()); + ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite()); + ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite()); } } } // namespace art diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 3efe7c77fa..4c746798be 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -22,8 +22,10 @@ #include "constant_folding.h" #include "dead_code_elimination.h" #include "driver/compiler_driver-inl.h" +#include "driver/compiler_options.h" #include "driver/dex_compilation_unit.h" #include "instruction_simplifier.h" +#include "intrinsics.h" #include "mirror/class_loader.h" #include "mirror/dex_cache.h" #include "nodes.h" @@ -38,9 +40,6 @@ namespace art { -static constexpr int kMaxInlineCodeUnits = 18; -static constexpr int kDepthLimit = 3; - void HInliner::Run() { if (graph_->IsDebuggable()) { // For simplicity, we currently never inline when the graph is debuggable. This avoids @@ -86,7 +85,7 @@ void HInliner::Run() { } static bool IsMethodOrDeclaringClassFinal(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return method->IsFinal() || method->GetDeclaringClass()->IsFinal(); } @@ -96,7 +95,7 @@ static bool IsMethodOrDeclaringClassFinal(ArtMethod* method) * Return nullptr if the runtime target cannot be proven. */ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (IsMethodOrDeclaringClassFinal(resolved_method)) { // No need to look up further, the resolved method will be the target. return resolved_method; @@ -109,10 +108,8 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol receiver = receiver->InputAt(0); } ReferenceTypeInfo info = receiver->GetReferenceTypeInfo(); - if (info.IsTop()) { - // We have no information on the receiver.
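The two hard-coded constants deleted above now come from the compiler options read further down (GetInlineMaxCodeUnits() and GetInlineDepthLimit()), so the limits can vary per compilation instead of being baked into the compiler. A condensed sketch of the two gates, with hypothetical helper names:

#include <cstddef>

struct InlineBudgets {
  std::size_t max_code_units;  // stands in for GetInlineMaxCodeUnits(), formerly 18
  int depth_limit;             // stands in for GetInlineDepthLimit(), formerly 3
};

// Reject callees that exceed the size budget ("too big to inline").
bool TooBigToInline(std::size_t callee_code_units, const InlineBudgets& b) {
  return callee_code_units > b.max_code_units;
}

// Decide whether a freshly inlined body may itself be searched for inlining.
bool MayInlineDeeper(int current_depth, const InlineBudgets& b) {
  return current_depth + 1 < b.depth_limit;
}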
- return nullptr; - } else if (!info.IsExact()) { + DCHECK(info.IsValid()) << "Invalid RTI for " << receiver->DebugName(); + if (!info.IsExact()) { // We currently only support inlining with known receivers. // TODO: Remove this check, we should be able to inline final methods // on unknown receivers. @@ -164,7 +161,7 @@ static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resol static uint32_t FindMethodIndexIn(ArtMethod* method, const DexFile& dex_file, uint32_t referrer_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (method->GetDexFile()->GetLocation().compare(dex_file.GetLocation()) == 0) { return method->GetDexMethodIndex(); } else { @@ -221,7 +218,8 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, uint32_t method_index) con return false; } - if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) { + size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits(); + if (code_item->insns_size_in_code_units_ > inline_max_code_units) { VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file) << " is too big to inline"; return false; @@ -273,17 +271,17 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, const DexFile::CodeItem* code_item = resolved_method->GetCodeItem(); const DexFile& callee_dex_file = *resolved_method->GetDexFile(); uint32_t method_index = resolved_method->GetDexMethodIndex(); - + ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker(); DexCompilationUnit dex_compilation_unit( nullptr, caller_compilation_unit_.GetClassLoader(), - caller_compilation_unit_.GetClassLinker(), - *resolved_method->GetDexFile(), + class_linker, + callee_dex_file, code_item, resolved_method->GetDeclaringClass()->GetDexClassDefIndex(), - resolved_method->GetDexMethodIndex(), + method_index, resolved_method->GetAccessFlags(), - nullptr); + compiler_driver_->GetVerifiedMethod(&callee_dex_file, method_index)); bool requires_ctor_barrier = false; @@ -326,7 +324,8 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, &outer_compilation_unit_, resolved_method->GetDexFile(), compiler_driver_, - &inline_stats); + &inline_stats, + resolved_method->GetQuickenedInfo()); if (!builder.BuildGraph(*code_item)) { VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file) @@ -357,8 +356,10 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, HConstantFolding fold(callee_graph); ReferenceTypePropagation type_propagation(callee_graph, handles_); InstructionSimplifier simplify(callee_graph, stats_); + IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_); HOptimization* optimizations[] = { + &intrinsics, &dce, &fold, &type_propagation, @@ -370,7 +371,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, optimization->Run(); } - if (depth_ + 1 < kDepthLimit) { + if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) { HInliner inliner(callee_graph, outer_compilation_unit_, dex_compilation_unit, @@ -449,7 +450,33 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, } } - callee_graph->InlineInto(graph_, invoke_instruction); + HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction); + + // When merging the graph we might create a new NullConstant in the caller graph which does + // not have the chance to be typed. We assign the correct type here so that we can keep the + // assertion that every reference has a valid type. 
This also simplifies checks along the way. + HNullConstant* null_constant = graph_->GetNullConstant(); + if (!null_constant->GetReferenceTypeInfo().IsValid()) { + ReferenceTypeInfo::TypeHandle obj_handle = + handles_->NewHandle(class_linker->GetClassRoot(ClassLinker::kJavaLangObject)); + null_constant->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(obj_handle, false /* is_exact */)); + } + + if ((return_replacement != nullptr) + && (return_replacement->GetType() == Primitive::kPrimNot)) { + if (!return_replacement->GetReferenceTypeInfo().IsValid()) { + // Make sure that we have a valid type for the return. We may get an invalid one when + // we inline invokes with multiple branches and create a Phi for the result. + // TODO: we could be more precise by merging the phi inputs but that requires + // some functionality from the reference type propagation. + DCHECK(return_replacement->IsPhi()); + ReferenceTypeInfo::TypeHandle return_handle = + handles_->NewHandle(resolved_method->GetReturnType()); + return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create( + return_handle, return_handle->IsFinal() /* is_exact */)); + } + } return true; } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 62f90c2f5e..df6e550b4a 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -54,6 +54,11 @@ class InstructionSimplifierVisitor : public HGraphVisitor { void VisitCheckCast(HCheckCast* instruction) OVERRIDE; void VisitAdd(HAdd* instruction) OVERRIDE; void VisitAnd(HAnd* instruction) OVERRIDE; + void VisitCondition(HCondition* instruction) OVERRIDE; + void VisitGreaterThan(HGreaterThan* condition) OVERRIDE; + void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE; + void VisitLessThan(HLessThan* condition) OVERRIDE; + void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE; void VisitDiv(HDiv* instruction) OVERRIDE; void VisitMul(HMul* instruction) OVERRIDE; void VisitNeg(HNeg* instruction) OVERRIDE; @@ -65,6 +70,7 @@ class InstructionSimplifierVisitor : public HGraphVisitor { void VisitUShr(HUShr* instruction) OVERRIDE; void VisitXor(HXor* instruction) OVERRIDE; void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE; + void VisitFakeString(HFakeString* fake_string) OVERRIDE; bool IsDominatedByInputNullCheck(HInstruction* instr); OptimizingCompilerStats* stats_; @@ -189,16 +195,17 @@ bool InstructionSimplifierVisitor::IsDominatedByInputNullCheck(HInstruction* ins // Returns whether doing a type test between the class of `object` against `klass` has // a statically known outcome. The result of the test is stored in `outcome`. static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bool* outcome) { - if (!klass->IsResolved()) { - // If the class couldn't be resolve it's not safe to compare against it. It's - // default type would be Top which might be wider that the actual class type - // and thus producing wrong results. + DCHECK(!object->IsNullConstant()) << "Null constants should be special cased"; + ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo(); + ScopedObjectAccess soa(Thread::Current()); + if (!obj_rti.IsValid()) { + // We run the simplifier before the reference type propagation so type info might not be + // available. 
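TypeCheckHasKnownOutcome, whose rewritten body continues just below, now treats reference type info as either absent (the simplifier may run before reference type propagation) or valid; when it is present, the decision reduces to this table (a simplification of the real logic, which also special-cases null constants):

enum class Outcome { kAlwaysTrue, kAlwaysFalse, kUnknown };

// `class_is_supertype`: the tested class is a supertype of the object's static type.
// `object_type_exact`: the object's dynamic type is known exactly.
Outcome StaticTypeCheck(bool class_is_supertype, bool object_type_exact) {
  if (class_is_supertype) return Outcome::kAlwaysTrue;  // checkcast/instanceof must succeed
  if (object_type_exact) return Outcome::kAlwaysFalse;  // exact type and still not a subtype
  return Outcome::kUnknown;                             // keep the runtime check
}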
return false; } - ReferenceTypeInfo obj_rti = object->GetReferenceTypeInfo(); ReferenceTypeInfo class_rti = klass->GetLoadedClassRTI(); - ScopedObjectAccess soa(Thread::Current()); + DCHECK(class_rti.IsValid() && class_rti.IsExact()); if (class_rti.IsSupertypeOf(obj_rti)) { *outcome = true; return true; @@ -231,12 +238,19 @@ void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) { } bool outcome; - if (TypeCheckHasKnownOutcome(check_cast->InputAt(1)->AsLoadClass(), object, &outcome)) { + HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass(); + if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) { if (outcome) { check_cast->GetBlock()->RemoveInstruction(check_cast); if (stats_ != nullptr) { stats_->RecordStat(MethodCompilationStat::kRemovedCheckedCast); } + if (!load_class->HasUses()) { + // We cannot rely on DCE to remove the class because the `HLoadClass` thinks it can throw. + // However, here we know that it cannot because the checkcast was successful, hence + // the class was already loaded. + load_class->GetBlock()->RemoveInstruction(load_class); + } } else { // Don't do anything for exceptional cases for now. Ideally we should remove // all instructions and blocks this instruction dominates. @@ -261,7 +275,8 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) { } bool outcome; - if (TypeCheckHasKnownOutcome(instruction->InputAt(1)->AsLoadClass(), object, &outcome)) { + HLoadClass* load_class = instruction->InputAt(1)->AsLoadClass(); + if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) { if (outcome && can_be_null) { // Type test will succeed, we just need a null test. HNotEqual* test = new (graph->GetArena()) HNotEqual(graph->GetNullConstant(), object); @@ -273,6 +288,12 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) { } RecordSimplification(); instruction->GetBlock()->RemoveInstruction(instruction); + if (outcome && !load_class->HasUses()) { + // We cannot rely on DCE to remove the class because the `HLoadClass` thinks it can throw. + // However, here we know that it cannot because the instanceof check was successful, hence + // the class was already loaded. + load_class->GetBlock()->RemoveInstruction(load_class); + } } } @@ -330,7 +351,11 @@ void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) { block->RemoveInstruction(equal); RecordSimplification(); } + } else { + VisitCondition(equal); } + } else { + VisitCondition(equal); } } @@ -358,7 +383,11 @@ void InstructionSimplifierVisitor::VisitNotEqual(HNotEqual* not_equal) { block->RemoveInstruction(not_equal); RecordSimplification(); } + } else { + VisitCondition(not_equal); } + } else { + VisitCondition(not_equal); } } @@ -423,9 +452,14 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) { // ADD dst, src, 0 // with // src - instruction->ReplaceWith(input_other); - instruction->GetBlock()->RemoveInstruction(instruction); - return; + // Note that we cannot optimize `x + 0.0` to `x` for floating-point. When + // `x` is `-0.0`, the former expression yields `0.0`, while the latter + // yields `-0.0`.
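The floating-point caveat just stated is easy to demonstrate: IEEE-754 defines the sum of two zeros with opposite signs to be +0.0, so folding `x + 0.0` into `x` would change the observable sign when `x` is `-0.0`. A two-line check:

#include <cstdio>

int main() {
  double x = -0.0;
  std::printf("%g\n", x);        // prints -0
  std::printf("%g\n", x + 0.0);  // prints 0: the addition normalizes the sign
  return 0;                      // hence the IsIntegralType guard right below
}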
+ if (Primitive::IsIntegralType(instruction->GetType())) { + instruction->ReplaceWith(input_other); + instruction->GetBlock()->RemoveInstruction(instruction); + return; + } } HInstruction* left = instruction->GetLeft(); @@ -485,6 +519,76 @@ void InstructionSimplifierVisitor::VisitAnd(HAnd* instruction) { } } +void InstructionSimplifierVisitor::VisitGreaterThan(HGreaterThan* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitLessThan(HLessThan* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitLessThanOrEqual(HLessThanOrEqual* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) { + // Try to fold an HCompare into this HCondition. + + // This simplification is currently supported on x86, x86_64, ARM and ARM64. + // TODO: Implement it for MIPS64. + InstructionSet instruction_set = GetGraph()->GetInstructionSet(); + if (instruction_set == kMips64) { + return; + } + + HInstruction* left = condition->GetLeft(); + HInstruction* right = condition->GetRight(); + // We can only replace an HCondition which compares a Compare to 0. + // Both 'dx' and 'jack' generate a compare to 0 when compiling a + // condition with a long, float or double comparison as input. + if (!left->IsCompare() || !right->IsConstant() || right->AsIntConstant()->GetValue() != 0) { + // Conversion is not possible. + return; + } + + // Is the Compare only used for this purpose? + if (!left->GetUses().HasOnlyOneUse()) { + // Someone else also wants the result of the compare. + return; + } + + if (!left->GetEnvUses().IsEmpty()) { + // There is a reference to the compare result in an environment. Do we really need it? + if (GetGraph()->IsDebuggable()) { + return; + } + + // We have to ensure that there are no deopt points in the sequence. + if (left->HasAnyEnvironmentUseBefore(condition)) { + return; + } + } + + // Clean up any environment uses from the HCompare. + left->RemoveEnvironmentUsers(); + + // We have decided to fold the HCompare into the HCondition. Transfer the information. + condition->SetBias(left->AsCompare()->GetBias()); + + // Replace the operands of the HCondition. + condition->ReplaceInput(left->InputAt(0), 0); + condition->ReplaceInput(left->InputAt(1), 1); + + // Remove the HCompare. + left->GetBlock()->RemoveInstruction(left); + + RecordSimplification(); +} + void InstructionSimplifierVisitor::VisitDiv(HDiv* instruction) { HConstant* input_cst = instruction->GetConstantRight(); HInstruction* input_other = instruction->GetLeastConstantLeft(); @@ -715,21 +819,24 @@ void InstructionSimplifierVisitor::VisitSub(HSub* instruction) { HConstant* input_cst = instruction->GetConstantRight(); HInstruction* input_other = instruction->GetLeastConstantLeft(); + Primitive::Type type = instruction->GetType(); + if (Primitive::IsFloatingPointType(type)) { + return; + } + if ((input_cst != nullptr) && input_cst->IsZero()) { // Replace code looking like // SUB dst, src, 0 // with // src + // Note that we cannot optimize `x - 0.0` to `x` for floating-point. When + // `x` is `-0.0`, the former expression yields `0.0`, while the latter + // yields `-0.0`.
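Returning to the HCompare folding in VisitCondition above: the rewrite is sound because testing a three-way compare result against zero answers the same question as comparing the operands directly. A small model of that equivalence, with a plain function standing in for HCompare:

#include <cassert>

int ThreeWayCompare(long a, long b) { return a < b ? -1 : (a > b ? 1 : 0); }

int main() {
  for (long a = -2; a <= 2; ++a) {
    for (long b = -2; b <= 2; ++b) {
      assert((ThreeWayCompare(a, b) < 0) == (a < b));    // LessThan(Compare(a, b), 0) == LessThan(a, b)
      assert((ThreeWayCompare(a, b) >= 0) == (a >= b));  // likewise for the other flavors
    }
  }
  return 0;
}

For float and double inputs the HCompare additionally carries a bias that fixes how NaN orders, which is why the fold transfers it with SetBias() rather than dropping it.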
instruction->ReplaceWith(input_other); instruction->GetBlock()->RemoveInstruction(instruction); return; } - Primitive::Type type = instruction->GetType(); - if (!Primitive::IsIntegralType(type)) { - return; - } - HBasicBlock* block = instruction->GetBlock(); ArenaAllocator* allocator = GetGraph()->GetArena(); @@ -820,4 +927,46 @@ void InstructionSimplifierVisitor::VisitXor(HXor* instruction) { } } +void InstructionSimplifierVisitor::VisitFakeString(HFakeString* instruction) { + HInstruction* actual_string = nullptr; + + // Find the string we need to replace this instruction with. The actual string is + // the return value of a StringFactory call. + for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) { + HInstruction* use = it.Current()->GetUser(); + if (use->IsInvokeStaticOrDirect() + && use->AsInvokeStaticOrDirect()->IsStringFactoryFor(instruction)) { + use->AsInvokeStaticOrDirect()->RemoveFakeStringArgumentAsLastInput(); + actual_string = use; + break; + } + } + + // Check that there is no other instruction that thinks it is the factory for that string. + if (kIsDebugBuild) { + CHECK(actual_string != nullptr); + for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) { + HInstruction* use = it.Current()->GetUser(); + if (use->IsInvokeStaticOrDirect()) { + CHECK(!use->AsInvokeStaticOrDirect()->IsStringFactoryFor(instruction)); + } + } + } + + // We need to reset to null any environment uses of the fake string that are + // not dominated by `actual_string`. + for (HUseIterator<HEnvironment*> it(instruction->GetEnvUses()); !it.Done(); it.Advance()) { + HEnvironment* environment = it.Current()->GetUser(); + if (!actual_string->StrictlyDominates(environment->GetHolder())) { + environment->RemoveAsUserOfInput(it.Current()->GetIndex()); + environment->SetRawEnvAt(it.Current()->GetIndex(), nullptr); + } + } + + // Only uses dominated by `actual_string` must remain. We can safely replace and remove + // `instruction`. + instruction->ReplaceWith(actual_string); + instruction->GetBlock()->RemoveInstruction(instruction); +} + } // namespace art diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h index faee2dd91e..cc4b6f6adc 100644 --- a/compiler/optimizing/instruction_simplifier.h +++ b/compiler/optimizing/instruction_simplifier.h @@ -31,7 +31,7 @@ class InstructionSimplifier : public HOptimization { InstructionSimplifier(HGraph* graph, OptimizingCompilerStats* stats = nullptr, const char* name = kInstructionSimplifierPassName) - : HOptimization(graph, name, stats) {} + : HOptimization(graph, name, stats) {} static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier"; diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 8ef13e125e..3db9816173 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -31,7 +31,7 @@ static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) { switch (i) { case Intrinsics::kNone: return kInterface; // Non-sensical for intrinsic. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ case Intrinsics::k ## Name: \ return IsStatic; #include "intrinsics_list.h" @@ -42,7 +42,21 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) return kInterface; } - +// Function that returns whether an intrinsic needs an environment or not.
+static inline IntrinsicNeedsEnvironment IntrinsicNeedsEnvironment(Intrinsics i) { + switch (i) { + case Intrinsics::kNone: + return kNeedsEnvironment; // Non-sensical for intrinsic. +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ + case Intrinsics::k ## Name: \ + return NeedsEnvironment; +#include "intrinsics_list.h" +INTRINSICS_LIST(OPTIMIZING_INTRINSICS) +#undef INTRINSICS_LIST +#undef OPTIMIZING_INTRINSICS + } + return kNeedsEnvironment; +} static Primitive::Type GetType(uint64_t data, bool is_op_size) { if (is_op_size) { @@ -70,7 +84,10 @@ static Primitive::Type GetType(uint64_t data, bool is_op_size) { } } -static Intrinsics GetIntrinsic(InlineMethod method) { +static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_set) { + if (instruction_set == kMips || instruction_set == kMips64) { + return Intrinsics::kNone; + } switch (method.opcode) { // Floating-point conversions. case kIntrinsicDoubleCvt: @@ -103,6 +120,16 @@ static Intrinsics GetIntrinsic(InlineMethod method) { LOG(FATAL) << "Unknown/unsupported op size " << method.d.data; UNREACHABLE(); } + case kIntrinsicNumberOfLeadingZeros: + switch (GetType(method.d.data, true)) { + case Primitive::kPrimInt: + return Intrinsics::kIntegerNumberOfLeadingZeros; + case Primitive::kPrimLong: + return Intrinsics::kLongNumberOfLeadingZeros; + default: + LOG(FATAL) << "Unknown/unsupported op size " << method.d.data; + UNREACHABLE(); + } // Abs. case kIntrinsicAbsDouble: @@ -187,6 +214,8 @@ static Intrinsics GetIntrinsic(InlineMethod method) { return Intrinsics::kStringCharAt; case kIntrinsicCompareTo: return Intrinsics::kStringCompareTo; + case kIntrinsicEquals: + return Intrinsics::kStringEquals; case kIntrinsicGetCharsNoCheck: return Intrinsics::kStringGetCharsNoCheck; case kIntrinsicIsEmptyOrLength: @@ -339,7 +368,7 @@ void IntrinsicsRecognizer::Run() { driver_->GetMethodInlinerMap()->GetMethodInliner(&invoke->GetDexFile()); DCHECK(inliner != nullptr); if (inliner->IsIntrinsic(invoke->GetDexMethodIndex(), &method)) { - Intrinsics intrinsic = GetIntrinsic(method); + Intrinsics intrinsic = GetIntrinsic(method, graph_->GetInstructionSet()); if (intrinsic != Intrinsics::kNone) { if (!CheckInvokeType(intrinsic, invoke)) { @@ -347,7 +376,7 @@ void IntrinsicsRecognizer::Run() { << intrinsic << " for " << PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile()); } else { - invoke->SetIntrinsic(intrinsic); + invoke->SetIntrinsic(intrinsic, IntrinsicNeedsEnvironment(intrinsic)); } } } @@ -359,9 +388,9 @@ void IntrinsicsRecognizer::Run() { std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) { switch (intrinsic) { case Intrinsics::kNone: - os << "No intrinsic."; + os << "None"; break; -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ case Intrinsics::k ## Name: \ os << # Name; \ break; diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 90449828ad..d1a17b6def 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -54,7 +54,7 @@ class IntrinsicVisitor : public ValueObject { switch (invoke->GetIntrinsic()) { case Intrinsics::kNone: return; -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ case Intrinsics::k ## Name: \ Visit ## Name(invoke); \ return; @@ -69,7 +69,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS) // Define visitor methods. 
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ virtual void Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ } #include "intrinsics_list.h" diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index b4dbf75f0a..1527a6aa23 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -224,6 +224,48 @@ static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } +static void GenNumberOfLeadingZeros(LocationSummary* locations, + Primitive::Type type, + ArmAssembler* assembler) { + Location in = locations->InAt(0); + Register out = locations->Out().AsRegister<Register>(); + + DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong)); + + if (type == Primitive::kPrimLong) { + Register in_reg_lo = in.AsRegisterPairLow<Register>(); + Register in_reg_hi = in.AsRegisterPairHigh<Register>(); + Label end; + __ clz(out, in_reg_hi); + __ CompareAndBranchIfNonZero(in_reg_hi, &end); + __ clz(out, in_reg_lo); + __ AddConstant(out, 32); + __ Bind(&end); + } else { + __ clz(out, in.AsRegister<Register>()); + } +} + +void IntrinsicLocationsBuilderARM::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler()); +} + +void IntrinsicLocationsBuilderARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); +} + +void IntrinsicCodeGeneratorARM::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler()); +} + static void MathAbsFP(LocationSummary* locations, bool is64bit, ArmAssembler* assembler) { Location in = locations->InAt(0); Location out = locations->Out(); @@ -1068,6 +1110,7 @@ UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure. UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) +UNIMPLEMENTED_INTRINSIC(StringEquals) #undef UNIMPLEMENTED_INTRINSIC diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h index 8bfb7d4686..f013bd6083 100644 --- a/compiler/optimizing/intrinsics_arm.h +++ b/compiler/optimizing/intrinsics_arm.h @@ -33,13 +33,12 @@ class CodeGeneratorARM; class IntrinsicLocationsBuilderARM FINAL : public IntrinsicVisitor { public: - explicit IntrinsicLocationsBuilderARM(ArenaAllocator* arena, - const ArmInstructionSetFeatures& features) + IntrinsicLocationsBuilderARM(ArenaAllocator* arena, const ArmInstructionSetFeatures& features) : arena_(arena), features_(features) {} // Define visitor methods. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) @@ -65,7 +64,7 @@ class IntrinsicCodeGeneratorARM FINAL : public IntrinsicVisitor { // Define visitor methods. 
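The ARM sequence above (clz on the high word, then, only when the high word is zero, clz on the low word plus 32) is the standard recipe for building a 64-bit count-leading-zeros out of a 32-bit one. In portable form:

#include <cassert>
#include <cstdint>

uint32_t Clz32(uint32_t x) {  // bit-by-bit reference version; returns 32 for x == 0
  uint32_t n = 0;
  for (uint32_t bit = 1u << 31; bit != 0 && (x & bit) == 0; bit >>= 1) ++n;
  return n;
}

uint32_t Clz64(uint64_t x) {
  uint32_t hi = static_cast<uint32_t>(x >> 32);
  uint32_t lo = static_cast<uint32_t>(x);
  return hi != 0 ? Clz32(hi) : 32 + Clz32(lo);  // mirrors the clz/cbnz/clz/add sequence
}

int main() {
  assert(Clz64(0) == 64);
  assert(Clz64(1) == 63);
  assert(Clz64(uint64_t{1} << 32) == 31);
  return 0;
}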
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 78ac167a87..a5332ea794 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -260,6 +260,33 @@ void IntrinsicCodeGeneratorARM64::VisitShortReverseBytes(HInvoke* invoke) { GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetVIXLAssembler()); } +static void GenNumberOfLeadingZeros(LocationSummary* locations, + Primitive::Type type, + vixl::MacroAssembler* masm) { + DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + + Location in = locations->InAt(0); + Location out = locations->Out(); + + __ Clz(RegisterFrom(out, type), RegisterFrom(in, type)); +} + +void IntrinsicLocationsBuilderARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimInt, GetVIXLAssembler()); +} + +void IntrinsicLocationsBuilderARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + GenNumberOfLeadingZeros(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler()); +} + static void GenReverse(LocationSummary* locations, Primitive::Type type, vixl::MacroAssembler* masm) { @@ -1028,6 +1055,102 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + // Temporary registers to store lengths of strings and for calculations. + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); +} + +void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) { + vixl::MacroAssembler* masm = GetVIXLAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + Register str = WRegisterFrom(locations->InAt(0)); + Register arg = WRegisterFrom(locations->InAt(1)); + Register out = XRegisterFrom(locations->Out()); + + UseScratchRegisterScope scratch_scope(masm); + Register temp = scratch_scope.AcquireW(); + Register temp1 = WRegisterFrom(locations->GetTemp(0)); + Register temp2 = WRegisterFrom(locations->GetTemp(1)); + + vixl::Label loop; + vixl::Label end; + vixl::Label return_true; + vixl::Label return_false; + + // Get offsets of count, value, and class fields within a string object. + const int32_t count_offset = mirror::String::CountOffset().Int32Value(); + const int32_t value_offset = mirror::String::ValueOffset().Int32Value(); + const int32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + + // Note that the null check must have been done earlier. + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + // Check if input is null, return false if it is. 
+ __ Cbz(arg, &return_false); + + // Reference equality check, return true if same reference. + __ Cmp(str, arg); + __ B(&return_true, eq); + + // Instanceof check for the argument by comparing class fields. + // All string objects must have the same type since String cannot be subclassed. + // Receiver must be a string object, so its class field is equal to all strings' class fields. + // If the argument is a string object, its class field must be equal to receiver's class field. + __ Ldr(temp, MemOperand(str.X(), class_offset)); + __ Ldr(temp1, MemOperand(arg.X(), class_offset)); + __ Cmp(temp, temp1); + __ B(&return_false, ne); + + // Load lengths of this and argument strings. + __ Ldr(temp, MemOperand(str.X(), count_offset)); + __ Ldr(temp1, MemOperand(arg.X(), count_offset)); + // Check if lengths are equal, return false if they're not. + __ Cmp(temp, temp1); + __ B(&return_false, ne); + // Store offset of string value in preparation for comparison loop + __ Mov(temp1, value_offset); + // Return true if both strings are empty. + __ Cbz(temp, &return_true); + + // Assertions that must hold in order to compare strings 4 characters at a time. + DCHECK_ALIGNED(value_offset, 8); + static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded"); + + temp1 = temp1.X(); + temp2 = temp2.X(); + + // Loop to compare strings 4 characters at a time starting at the beginning of the string. + // Ok to do this because strings are zero-padded to be 8-byte aligned. + __ Bind(&loop); + __ Ldr(out, MemOperand(str.X(), temp1)); + __ Ldr(temp2, MemOperand(arg.X(), temp1)); + __ Add(temp1, temp1, Operand(sizeof(uint64_t))); + __ Cmp(out, temp2); + __ B(&return_false, ne); + __ Sub(temp, temp, Operand(4), SetFlags); + __ B(&loop, gt); + + // Return true and exit the function. + // If loop does not result in returning false, we return true. + __ Bind(&return_true); + __ Mov(out, 1); + __ B(&end); + + // Return false and exit the function. + __ Bind(&return_false); + __ Mov(out, 0); + __ Bind(&end); +} + static void GenerateVisitStringIndexOf(HInvoke* invoke, vixl::MacroAssembler* masm, CodeGeneratorARM64* codegen, diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h index ba21889839..ebaf5e5952 100644 --- a/compiler/optimizing/intrinsics_arm64.h +++ b/compiler/optimizing/intrinsics_arm64.h @@ -41,7 +41,7 @@ class IntrinsicLocationsBuilderARM64 FINAL : public IntrinsicVisitor { // Define visitor methods. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) @@ -65,7 +65,7 @@ class IntrinsicCodeGeneratorARM64 FINAL : public IntrinsicVisitor { // Define visitor methods. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h index 2c9248f52c..15ee5d4d12 100644 --- a/compiler/optimizing/intrinsics_list.h +++ b/compiler/optimizing/intrinsics_list.h @@ -18,73 +18,77 @@ #define ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_ // All intrinsics supported by the optimizing compiler. Format is name, then whether it is expected -// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual). 
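The ARM64 comparison loop just shown leans on the two invariants its assertions spell out: the value array starts 8-byte aligned and heap objects are zero padded, so equal-length strings can be compared one 64-bit word (four UTF-16 chars) at a time even when the length is odd. A portable model, padding the buffers by hand since C++ offers no such guarantee:

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumes both buffers are zero padded up to a multiple of four chars.
bool EqualCharsWordwise(const uint16_t* a, const uint16_t* b, int32_t length) {
  for (int32_t i = 0; i < length; i += 4) {
    uint64_t wa, wb;
    std::memcpy(&wa, a + i, sizeof(wa));
    std::memcpy(&wb, b + i, sizeof(wb));
    if (wa != wb) return false;  // a difference anywhere in the word ends the loop
  }
  return true;
}

int main() {
  uint16_t a[4] = {'a', 'b', 'c', 0};  // length 3, padded to 4 chars
  uint16_t b[4] = {'a', 'b', 'c', 0};
  uint16_t c[4] = {'a', 'b', 'd', 0};
  assert(EqualCharsWordwise(a, b, 3));
  assert(!EqualCharsWordwise(a, c, 3));
  return 0;
}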
+// to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an +// environment. #define INTRINSICS_LIST(V) \ - V(DoubleDoubleToRawLongBits, kStatic) \ - V(DoubleLongBitsToDouble, kStatic) \ - V(FloatFloatToRawIntBits, kStatic) \ - V(FloatIntBitsToFloat, kStatic) \ - V(IntegerReverse, kStatic) \ - V(IntegerReverseBytes, kStatic) \ - V(LongReverse, kStatic) \ - V(LongReverseBytes, kStatic) \ - V(ShortReverseBytes, kStatic) \ - V(MathAbsDouble, kStatic) \ - V(MathAbsFloat, kStatic) \ - V(MathAbsLong, kStatic) \ - V(MathAbsInt, kStatic) \ - V(MathMinDoubleDouble, kStatic) \ - V(MathMinFloatFloat, kStatic) \ - V(MathMinLongLong, kStatic) \ - V(MathMinIntInt, kStatic) \ - V(MathMaxDoubleDouble, kStatic) \ - V(MathMaxFloatFloat, kStatic) \ - V(MathMaxLongLong, kStatic) \ - V(MathMaxIntInt, kStatic) \ - V(MathSqrt, kStatic) \ - V(MathCeil, kStatic) \ - V(MathFloor, kStatic) \ - V(MathRint, kStatic) \ - V(MathRoundDouble, kStatic) \ - V(MathRoundFloat, kStatic) \ - V(SystemArrayCopyChar, kStatic) \ - V(ThreadCurrentThread, kStatic) \ - V(MemoryPeekByte, kStatic) \ - V(MemoryPeekIntNative, kStatic) \ - V(MemoryPeekLongNative, kStatic) \ - V(MemoryPeekShortNative, kStatic) \ - V(MemoryPokeByte, kStatic) \ - V(MemoryPokeIntNative, kStatic) \ - V(MemoryPokeLongNative, kStatic) \ - V(MemoryPokeShortNative, kStatic) \ - V(StringCharAt, kDirect) \ - V(StringCompareTo, kDirect) \ - V(StringGetCharsNoCheck, kDirect) \ - V(StringIndexOf, kDirect) \ - V(StringIndexOfAfter, kDirect) \ - V(StringNewStringFromBytes, kStatic) \ - V(StringNewStringFromChars, kStatic) \ - V(StringNewStringFromString, kStatic) \ - V(UnsafeCASInt, kDirect) \ - V(UnsafeCASLong, kDirect) \ - V(UnsafeCASObject, kDirect) \ - V(UnsafeGet, kDirect) \ - V(UnsafeGetVolatile, kDirect) \ - V(UnsafeGetObject, kDirect) \ - V(UnsafeGetObjectVolatile, kDirect) \ - V(UnsafeGetLong, kDirect) \ - V(UnsafeGetLongVolatile, kDirect) \ - V(UnsafePut, kDirect) \ - V(UnsafePutOrdered, kDirect) \ - V(UnsafePutVolatile, kDirect) \ - V(UnsafePutObject, kDirect) \ - V(UnsafePutObjectOrdered, kDirect) \ - V(UnsafePutObjectVolatile, kDirect) \ - V(UnsafePutLong, kDirect) \ - V(UnsafePutLongOrdered, kDirect) \ - V(UnsafePutLongVolatile, kDirect) \ - V(ReferenceGetReferent, kDirect) + V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironment) \ + V(DoubleLongBitsToDouble, kStatic, kNeedsEnvironment) \ + V(FloatFloatToRawIntBits, kStatic, kNeedsEnvironment) \ + V(FloatIntBitsToFloat, kStatic, kNeedsEnvironment) \ + V(IntegerReverse, kStatic, kNeedsEnvironment) \ + V(IntegerReverseBytes, kStatic, kNeedsEnvironment) \ + V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironment) \ + V(LongReverse, kStatic, kNeedsEnvironment) \ + V(LongReverseBytes, kStatic, kNeedsEnvironment) \ + V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironment) \ + V(ShortReverseBytes, kStatic, kNeedsEnvironment) \ + V(MathAbsDouble, kStatic, kNeedsEnvironment) \ + V(MathAbsFloat, kStatic, kNeedsEnvironment) \ + V(MathAbsLong, kStatic, kNeedsEnvironment) \ + V(MathAbsInt, kStatic, kNeedsEnvironment) \ + V(MathMinDoubleDouble, kStatic, kNeedsEnvironment) \ + V(MathMinFloatFloat, kStatic, kNeedsEnvironment) \ + V(MathMinLongLong, kStatic, kNeedsEnvironment) \ + V(MathMinIntInt, kStatic, kNeedsEnvironment) \ + V(MathMaxDoubleDouble, kStatic, kNeedsEnvironment) \ + V(MathMaxFloatFloat, kStatic, kNeedsEnvironment) \ + V(MathMaxLongLong, kStatic, kNeedsEnvironment) \ + V(MathMaxIntInt, kStatic, kNeedsEnvironment) \ + V(MathSqrt, kStatic, kNeedsEnvironment) \ + 
V(MathCeil, kStatic, kNeedsEnvironment) \ + V(MathFloor, kStatic, kNeedsEnvironment) \ + V(MathRint, kStatic, kNeedsEnvironment) \ + V(MathRoundDouble, kStatic, kNeedsEnvironment) \ + V(MathRoundFloat, kStatic, kNeedsEnvironment) \ + V(SystemArrayCopyChar, kStatic, kNeedsEnvironment) \ + V(ThreadCurrentThread, kStatic, kNeedsEnvironment) \ + V(MemoryPeekByte, kStatic, kNeedsEnvironment) \ + V(MemoryPeekIntNative, kStatic, kNeedsEnvironment) \ + V(MemoryPeekLongNative, kStatic, kNeedsEnvironment) \ + V(MemoryPeekShortNative, kStatic, kNeedsEnvironment) \ + V(MemoryPokeByte, kStatic, kNeedsEnvironment) \ + V(MemoryPokeIntNative, kStatic, kNeedsEnvironment) \ + V(MemoryPokeLongNative, kStatic, kNeedsEnvironment) \ + V(MemoryPokeShortNative, kStatic, kNeedsEnvironment) \ + V(StringCharAt, kDirect, kNeedsEnvironment) \ + V(StringCompareTo, kDirect, kNeedsEnvironment) \ + V(StringEquals, kDirect, kNeedsEnvironment) \ + V(StringGetCharsNoCheck, kDirect, kNeedsEnvironment) \ + V(StringIndexOf, kDirect, kNeedsEnvironment) \ + V(StringIndexOfAfter, kDirect, kNeedsEnvironment) \ + V(StringNewStringFromBytes, kStatic, kNeedsEnvironment) \ + V(StringNewStringFromChars, kStatic, kNeedsEnvironment) \ + V(StringNewStringFromString, kStatic, kNeedsEnvironment) \ + V(UnsafeCASInt, kDirect, kNeedsEnvironment) \ + V(UnsafeCASLong, kDirect, kNeedsEnvironment) \ + V(UnsafeCASObject, kDirect, kNeedsEnvironment) \ + V(UnsafeGet, kDirect, kNeedsEnvironment) \ + V(UnsafeGetVolatile, kDirect, kNeedsEnvironment) \ + V(UnsafeGetObject, kDirect, kNeedsEnvironment) \ + V(UnsafeGetObjectVolatile, kDirect, kNeedsEnvironment) \ + V(UnsafeGetLong, kDirect, kNeedsEnvironment) \ + V(UnsafeGetLongVolatile, kDirect, kNeedsEnvironment) \ + V(UnsafePut, kDirect, kNeedsEnvironment) \ + V(UnsafePutOrdered, kDirect, kNeedsEnvironment) \ + V(UnsafePutVolatile, kDirect, kNeedsEnvironment) \ + V(UnsafePutObject, kDirect, kNeedsEnvironment) \ + V(UnsafePutObjectOrdered, kDirect, kNeedsEnvironment) \ + V(UnsafePutObjectVolatile, kDirect, kNeedsEnvironment) \ + V(UnsafePutLong, kDirect, kNeedsEnvironment) \ + V(UnsafePutLongOrdered, kDirect, kNeedsEnvironment) \ + V(UnsafePutLongVolatile, kDirect, kNeedsEnvironment) \ + V(ReferenceGetReferent, kDirect, kNeedsEnvironment) #endif // ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_ #undef ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_ // #define is only for lint. diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 0d6ca09f31..b7126b24e3 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -20,6 +20,7 @@ #include "arch/x86/instruction_set_features_x86.h" #include "art_method.h" +#include "base/bit_utils.h" #include "code_generator_x86.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" @@ -945,6 +946,97 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + + // Request temporary registers, ECX and EDI needed for repe_cmpsl instruction. + locations->AddTemp(Location::RegisterLocation(ECX)); + locations->AddTemp(Location::RegisterLocation(EDI)); + + // Set output, ESI needed for repe_cmpsl instruction anyway.
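The three-column V(...) table completed above is a classic X macro: the list is defined once and re-expanded with a different OPTIMIZING_INTRINSICS body wherever an enum value, a switch case, or a visitor declaration is needed, which is why adding the NeedsEnvironment column touches every expansion site in this change. Reduced to a self-contained toy:

#include <iostream>

#define TOY_INTRINSICS_LIST(V)                     \
  V(FloatToRawIntBits, kStatic, kNeedsEnvironment) \
  V(StringCharAt, kDirect, kNeedsEnvironment)

enum class Intrinsics {
  kNone,
#define AS_ENUM(Name, IsStatic, NeedsEnv) k##Name,
  TOY_INTRINSICS_LIST(AS_ENUM)
#undef AS_ENUM
};

const char* ToString(Intrinsics i) {
  switch (i) {
    case Intrinsics::kNone: return "None";
#define AS_CASE(Name, IsStatic, NeedsEnv) case Intrinsics::k##Name: return #Name;
    TOY_INTRINSICS_LIST(AS_CASE)
#undef AS_CASE
  }
  return "None";
}

int main() { std::cout << ToString(Intrinsics::kStringCharAt) << "\n"; return 0; }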
+ locations->SetOut(Location::RegisterLocation(ESI), Location::kOutputOverlap); +} + +void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) { + X86Assembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + Register str = locations->InAt(0).AsRegister<Register>(); + Register arg = locations->InAt(1).AsRegister<Register>(); + Register ecx = locations->GetTemp(0).AsRegister<Register>(); + Register edi = locations->GetTemp(1).AsRegister<Register>(); + Register esi = locations->Out().AsRegister<Register>(); + + Label end; + Label return_true; + Label return_false; + + // Get offsets of count, value, and class fields within a string object. + const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value(); + const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value(); + + // Note that the null check must have been done earlier. + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + // Check if input is null, return false if it is. + __ testl(arg, arg); + __ j(kEqual, &return_false); + + // Instanceof check for the argument by comparing class fields. + // All string objects must have the same type since String cannot be subclassed. + // Receiver must be a string object, so its class field is equal to all strings' class fields. + // If the argument is a string object, its class field must be equal to receiver's class field. + __ movl(ecx, Address(str, class_offset)); + __ cmpl(ecx, Address(arg, class_offset)); + __ j(kNotEqual, &return_false); + + // Reference equality check, return true if same reference. + __ cmpl(str, arg); + __ j(kEqual, &return_true); + + // Load length of receiver string. + __ movl(ecx, Address(str, count_offset)); + // Check if lengths are equal, return false if they're not. + __ cmpl(ecx, Address(arg, count_offset)); + __ j(kNotEqual, &return_false); + // Return true if both strings are empty. + __ testl(ecx, ecx); + __ j(kEqual, &return_true); + + // Load starting addresses of string values into ESI/EDI as required for repe_cmpsl instruction. + __ leal(esi, Address(str, value_offset)); + __ leal(edi, Address(arg, value_offset)); + + // Divide string length by 2 to compare characters 2 at a time and adjust for odd lengths. + __ addl(ecx, Immediate(1)); + __ shrl(ecx, Immediate(1)); + + // Assertions that must hold in order to compare strings 2 characters at a time. + DCHECK_ALIGNED(value_offset, 4); + static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded"); + + // Loop to compare strings two characters at a time starting at the beginning of the string. + __ repe_cmpsl(); + // If strings are not equal, zero flag will be cleared. + __ j(kNotEqual, &return_false); + + // Return true and exit the function. + // If loop does not result in returning false, we return true. + __ Bind(&return_true); + __ movl(esi, Immediate(1)); + __ jmp(&end); + + // Return false and exit the function. 
+ __ Bind(&return_false); + __ xorl(esi, esi); + __ Bind(&end); +} + static void CreateStringIndexOfLocations(HInvoke* invoke, ArenaAllocator* allocator, bool start_at_zero) { @@ -1744,6 +1836,115 @@ void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) { SwapBits(reg_high, temp, 4, 0x0f0f0f0f, assembler); } +static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke, bool is_long) { + LocationSummary* locations = new (arena) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + if (is_long) { + locations->SetInAt(0, Location::RequiresRegister()); + } else { + locations->SetInAt(0, Location::Any()); + } + locations->SetOut(Location::RequiresRegister()); +} + +static void GenLeadingZeros(X86Assembler* assembler, HInvoke* invoke, bool is_long) { + LocationSummary* locations = invoke->GetLocations(); + Location src = locations->InAt(0); + Register out = locations->Out().AsRegister<Register>(); + + if (invoke->InputAt(0)->IsConstant()) { + // Evaluate this at compile time. + int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant()); + if (value == 0) { + value = is_long ? 64 : 32; + } else { + value = is_long ? CLZ(static_cast<uint64_t>(value)) : CLZ(static_cast<uint32_t>(value)); + } + if (value == 0) { + __ xorl(out, out); + } else { + __ movl(out, Immediate(value)); + } + return; + } + + // Handle the non-constant cases. + if (!is_long) { + if (src.IsRegister()) { + __ bsrl(out, src.AsRegister<Register>()); + } else { + DCHECK(src.IsStackSlot()); + __ bsrl(out, Address(ESP, src.GetStackIndex())); + } + + // BSR sets ZF if the input was zero, and the output is undefined. + Label all_zeroes, done; + __ j(kEqual, &all_zeroes); + + // Correct the result from BSR to get the final CLZ result. + __ xorl(out, Immediate(31)); + __ jmp(&done); + + // Fix the zero case with the expected result. + __ Bind(&all_zeroes); + __ movl(out, Immediate(32)); + + __ Bind(&done); + return; + } + + // 64 bit case needs to worry about both parts of the register. + DCHECK(src.IsRegisterPair()); + Register src_lo = src.AsRegisterPairLow<Register>(); + Register src_hi = src.AsRegisterPairHigh<Register>(); + Label handle_low, done, all_zeroes; + + // Is the high word zero? + __ testl(src_hi, src_hi); + __ j(kEqual, &handle_low); + + // High word is not zero. We know that the BSR result is defined in this case. + __ bsrl(out, src_hi); + + // Correct the result from BSR to get the final CLZ result. + __ xorl(out, Immediate(31)); + __ jmp(&done); + + // High word was zero. We have to compute the low word count and add 32. + __ Bind(&handle_low); + __ bsrl(out, src_lo); + __ j(kEqual, &all_zeroes); + + // We had a valid result. Use an XOR to both correct the result and add 32. + __ xorl(out, Immediate(63)); + __ jmp(&done); + + // All zero case. 
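The xorl against 31 above (and against 63 on the long path) is subtraction in disguise: BSR returns the index b of the highest set bit, CLZ wants 31 - b, and for 0 <= b <= 31 subtracting from an all-ones pattern cannot borrow, so 31 - b == 31 ^ b. The same trick folds the +32 adjustment into a single XOR on the long path, since (31 - b) + 32 == 63 ^ b. Checking both identities:

#include <cassert>
#include <cstdint>

int BitScanReverse(uint32_t x) {  // portable stand-in for BSR; x must be non-zero
  int index = 0;
  while (x >>= 1) ++index;
  return index;
}

int main() {
  for (int b = 0; b <= 31; ++b) {
    assert((31 - b) == (31 ^ b));         // the 32-bit fixup
    assert(((31 - b) + 32) == (63 ^ b));  // the 64-bit fixup with +32 folded in
  }
  assert((31 ^ BitScanReverse(1u)) == 31);          // CLZ(1) == 31
  assert((31 ^ BitScanReverse(0x80000000u)) == 0);  // CLZ(top bit) == 0
  return 0;
}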
+ __ Bind(&all_zeroes); + __ movl(out, Immediate(64)); + + __ Bind(&done); +} + +void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + CreateLeadingZeroLocations(arena_, invoke, /* is_long */ false); +} + +void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler()); + GenLeadingZeros(assembler, invoke, /* is_long */ false); +} + +void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + CreateLeadingZeroLocations(arena_, invoke, /* is_long */ true); +} + +void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler()); + GenLeadingZeros(assembler, invoke, /* is_long */ true); +} + // Unimplemented intrinsics. #define UNIMPLEMENTED_INTRINSIC(Name) \ diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h index 4292ec7b99..ac68f39904 100644 --- a/compiler/optimizing/intrinsics_x86.h +++ b/compiler/optimizing/intrinsics_x86.h @@ -36,7 +36,7 @@ class IntrinsicLocationsBuilderX86 FINAL : public IntrinsicVisitor { // Define visitor methods. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) @@ -61,7 +61,7 @@ class IntrinsicCodeGeneratorX86 FINAL : public IntrinsicVisitor { // Define visitor methods. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index ea342e9382..15fbac1c63 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -20,6 +20,7 @@ #include "arch/x86_64/instruction_set_features_x86_64.h" #include "art_method-inl.h" +#include "base/bit_utils.h" #include "code_generator_x86_64.h" #include "entrypoints/quick/quick_entrypoints.h" #include "intrinsics.h" @@ -282,8 +283,6 @@ static void CreateFloatToFloatPlusTemps(ArenaAllocator* arena, HInvoke* invoke) LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); - // TODO: Allow x86 to work with memory. This requires assembler support, see below. - // locations->SetInAt(0, Location::Any()); // X86 can work on memory directly. locations->SetOut(Location::SameAsFirstInput()); locations->AddTemp(Location::RequiresFpuRegister()); // FP reg to hold mask. } @@ -294,34 +293,18 @@ static void MathAbsFP(LocationSummary* locations, CodeGeneratorX86_64* codegen) { Location output = locations->Out(); - if (output.IsFpuRegister()) { - // In-register - XmmRegister xmm_temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); + DCHECK(output.IsFpuRegister()); + XmmRegister xmm_temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>(); - // TODO: Can mask directly with constant area using pand if we can guarantee - // that the literal is aligned on a 16 byte boundary. This will avoid a - // temporary. 
- if (is64bit) { - __ movsd(xmm_temp, codegen->LiteralInt64Address(INT64_C(0x7FFFFFFFFFFFFFFF))); - __ andpd(output.AsFpuRegister<XmmRegister>(), xmm_temp); - } else { - __ movss(xmm_temp, codegen->LiteralInt32Address(INT32_C(0x7FFFFFFF))); - __ andps(output.AsFpuRegister<XmmRegister>(), xmm_temp); - } + // TODO: Can mask directly with constant area using pand if we can guarantee + // that the literal is aligned on a 16 byte boundary. This will avoid a + // temporary. + if (is64bit) { + __ movsd(xmm_temp, codegen->LiteralInt64Address(INT64_C(0x7FFFFFFFFFFFFFFF))); + __ andpd(output.AsFpuRegister<XmmRegister>(), xmm_temp); } else { - // TODO: update when assember support is available. - UNIMPLEMENTED(FATAL) << "Needs assembler support."; -// Once assembler support is available, in-memory operations look like this: -// if (is64bit) { -// DCHECK(output.IsDoubleStackSlot()); -// // No 64b and with literal. -// __ movq(cpu_temp, Immediate(INT64_C(0x7FFFFFFFFFFFFFFF))); -// __ andq(Address(CpuRegister(RSP), output.GetStackIndex()), cpu_temp); -// } else { -// DCHECK(output.IsStackSlot()); -// // Can use and with a literal directly. -// __ andl(Address(CpuRegister(RSP), output.GetStackIndex()), Immediate(INT64_C(0x7FFFFFFF))); -// } + __ movss(xmm_temp, codegen->LiteralInt32Address(INT32_C(0x7FFFFFFF))); + __ andps(output.AsFpuRegister<XmmRegister>(), xmm_temp); } } @@ -690,7 +673,7 @@ static void CreateSSE41FPToIntLocations(ArenaAllocator* arena, LocationSummary::kNoCall, kIntrinsified); locations->SetInAt(0, Location::RequiresFpuRegister()); - locations->SetOut(Location::RequiresFpuRegister()); + locations->SetOut(Location::RequiresRegister()); locations->AddTemp(Location::RequiresFpuRegister()); return; } @@ -732,7 +715,11 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) { // And truncate to an integer. __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1)); + // Load maxInt into out. + codegen_->Load64BitValue(out, kPrimIntMax); + // if inPlusPointFive >= maxInt goto done + __ movl(out, Immediate(kPrimIntMax)); __ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax))); __ j(kAboveEqual, &done); @@ -776,7 +763,11 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { // And truncate to an integer. __ roundsd(inPlusPointFive, inPlusPointFive, Immediate(1)); + // Load maxLong into out. + codegen_->Load64BitValue(out, kPrimLongMax); + // if inPlusPointFive >= maxLong goto done + __ movq(out, Immediate(kPrimLongMax)); __ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax))); __ j(kAboveEqual, &done); @@ -864,6 +855,97 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + + // Request temporary registers, RCX and RDI needed for repe_cmpsq instruction. + locations->AddTemp(Location::RegisterLocation(RCX)); + locations->AddTemp(Location::RegisterLocation(RDI)); + + // Set output, RSI needed for repe_cmpsq instruction anyway.
+ locations->SetOut(Location::RegisterLocation(RSI), Location::kOutputOverlap); +} + +void IntrinsicCodeGeneratorX86_64::VisitStringEquals(HInvoke* invoke) { + X86_64Assembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + CpuRegister str = locations->InAt(0).AsRegister<CpuRegister>(); + CpuRegister arg = locations->InAt(1).AsRegister<CpuRegister>(); + CpuRegister rcx = locations->GetTemp(0).AsRegister<CpuRegister>(); + CpuRegister rdi = locations->GetTemp(1).AsRegister<CpuRegister>(); + CpuRegister rsi = locations->Out().AsRegister<CpuRegister>(); + + Label end; + Label return_true; + Label return_false; + + // Get offsets of count, value, and class fields within a string object. + const uint32_t count_offset = mirror::String::CountOffset().Uint32Value(); + const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value(); + const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value(); + + // Note that the null check must have been done earlier. + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + // Check if input is null, return false if it is. + __ testl(arg, arg); + __ j(kEqual, &return_false); + + // Instanceof check for the argument by comparing class fields. + // All string objects must have the same type since String cannot be subclassed. + // Receiver must be a string object, so its class field is equal to all strings' class fields. + // If the argument is a string object, its class field must be equal to receiver's class field. + __ movl(rcx, Address(str, class_offset)); + __ cmpl(rcx, Address(arg, class_offset)); + __ j(kNotEqual, &return_false); + + // Reference equality check, return true if same reference. + __ cmpl(str, arg); + __ j(kEqual, &return_true); + + // Load length of receiver string. + __ movl(rcx, Address(str, count_offset)); + // Check if lengths are equal, return false if they're not. + __ cmpl(rcx, Address(arg, count_offset)); + __ j(kNotEqual, &return_false); + // Return true if both strings are empty. + __ testl(rcx, rcx); + __ j(kEqual, &return_true); + + // Load starting addresses of string values into RSI/RDI as required for repe_cmpsq instruction. + __ leal(rsi, Address(str, value_offset)); + __ leal(rdi, Address(arg, value_offset)); + + // Divide string length by 4 and adjust for lengths not divisible by 4. + __ addl(rcx, Immediate(3)); + __ shrl(rcx, Immediate(2)); + + // Assertions that must hold in order to compare strings 4 characters at a time. + DCHECK_ALIGNED(value_offset, 8); + static_assert(IsAligned<8>(kObjectAlignment), "String is not zero padded"); + + // Loop to compare strings four characters at a time starting at the beginning of the string. + __ repe_cmpsq(); + // If strings are not equal, zero flag will be cleared. + __ j(kNotEqual, &return_false); + + // Return true and exit the function. + // If loop does not result in returning false, we return true. + __ Bind(&return_true); + __ movl(rsi, Immediate(1)); + __ jmp(&end); + + // Return false and exit the function. 
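The addl(3)/shrl(2) pair above computes ceil(length / 4), the number of 8-byte groups repe_cmpsq must scan (the 32-bit path earlier used (length + 1) >> 1 for its 4-byte groups); zero padding again covers the partial final group. As plain arithmetic:

#include <cassert>
#include <cstdint>

// Number of 4-char (8-byte) groups to compare: round the char count up.
uint32_t QuadWordGroups(uint32_t char_count) { return (char_count + 3) >> 2; }

int main() {
  assert(QuadWordGroups(0) == 0);
  assert(QuadWordGroups(1) == 1);  // a single padded group
  assert(QuadWordGroups(4) == 1);
  assert(QuadWordGroups(5) == 2);
  return 0;
}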
+ __ Bind(&return_false); + __ xorl(rsi, rsi); + __ Bind(&end); +} + static void CreateStringIndexOfLocations(HInvoke* invoke, ArenaAllocator* allocator, bool start_at_zero) { @@ -1604,6 +1686,84 @@ void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) { SwapBits64(reg, temp1, temp2, 4, INT64_C(0x0f0f0f0f0f0f0f0f), assembler); } +static void CreateLeadingZeroLocations(ArenaAllocator* arena, HInvoke* invoke) { + LocationSummary* locations = new (arena) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::Any()); + locations->SetOut(Location::RequiresRegister()); +} + +static void GenLeadingZeros(X86_64Assembler* assembler, HInvoke* invoke, bool is_long) { + LocationSummary* locations = invoke->GetLocations(); + Location src = locations->InAt(0); + CpuRegister out = locations->Out().AsRegister<CpuRegister>(); + + int zero_value_result = is_long ? 64 : 32; + if (invoke->InputAt(0)->IsConstant()) { + // Evaluate this at compile time. + int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant()); + if (value == 0) { + value = zero_value_result; + } else { + value = is_long ? CLZ(static_cast<uint64_t>(value)) : CLZ(static_cast<uint32_t>(value)); + } + if (value == 0) { + __ xorl(out, out); + } else { + __ movl(out, Immediate(value)); + } + return; + } + + // Handle the non-constant cases. + if (src.IsRegister()) { + if (is_long) { + __ bsrq(out, src.AsRegister<CpuRegister>()); + } else { + __ bsrl(out, src.AsRegister<CpuRegister>()); + } + } else if (is_long) { + DCHECK(src.IsDoubleStackSlot()); + __ bsrq(out, Address(CpuRegister(RSP), src.GetStackIndex())); + } else { + DCHECK(src.IsStackSlot()); + __ bsrl(out, Address(CpuRegister(RSP), src.GetStackIndex())); + } + + // BSR sets ZF if the input was zero, and the output is undefined. + Label is_zero, done; + __ j(kEqual, &is_zero); + + // Correct the result from BSR to get the CLZ result. + __ xorl(out, Immediate(zero_value_result - 1)); + __ jmp(&done); + + // Fix the zero case with the expected result. + __ Bind(&is_zero); + __ movl(out, Immediate(zero_value_result)); + + __ Bind(&done); +} + +void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + CreateLeadingZeroLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { + X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler()); + GenLeadingZeros(assembler, invoke, /* is_long */ false); +} + +void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + CreateLeadingZeroLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { + X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler()); + GenLeadingZeros(assembler, invoke, /* is_long */ true); +} + // Unimplemented intrinsics. #define UNIMPLEMENTED_INTRINSIC(Name) \ diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h index 0e0e72c1fc..17293af4d2 100644 --- a/compiler/optimizing/intrinsics_x86_64.h +++ b/compiler/optimizing/intrinsics_x86_64.h @@ -36,7 +36,7 @@ class IntrinsicLocationsBuilderX86_64 FINAL : public IntrinsicVisitor { // Define visitor methods. 
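// For reference, not in the diff itself: BSR returns the index of the highest
// set bit, so for non-zero x, clz(x) == (width - 1) - bsr(x); and because
// bsr(x) never exceeds width - 1, that subtraction is exactly the single XOR
// with zero_value_result - 1 (31 or 63) emitted above. A portable sketch with
// hypothetical helper names:
#include <cassert>
#include <cstdint>

int Bsr32(uint32_t x) {   // index of the most significant set bit
  assert(x != 0);         // like hardware BSR, meaningless for zero
  int index = 31;
  while ((x & (UINT32_C(1) << index)) == 0) {
    --index;
  }
  return index;
}

int Clz32(uint32_t x) {
  if (x == 0) {
    return 32;            // the is_zero special case handled above
  }
  return Bsr32(x) ^ 31;   // same value as 31 - Bsr32(x)
}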
-#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) @@ -61,7 +61,7 @@ class IntrinsicCodeGeneratorX86_64 FINAL : public IntrinsicVisitor { // Define visitor methods. -#define OPTIMIZING_INTRINSICS(Name, IsStatic) \ +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \ void Visit ## Name(HInvoke* invoke) OVERRIDE; #include "intrinsics_list.h" INTRINSICS_LIST(OPTIMIZING_INTRINSICS) diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc index 2535ea274a..5b89b4ec74 100644 --- a/compiler/optimizing/licm.cc +++ b/compiler/optimizing/licm.cc @@ -115,7 +115,7 @@ void LICM::Run() { HInstruction* instruction = inst_it.Current(); if (instruction->CanBeMoved() && (!instruction->CanThrow() || !found_first_non_hoisted_throwing_instruction_in_loop) - && !instruction->GetSideEffects().DependsOn(loop_effects) + && !instruction->GetSideEffects().MayDependOn(loop_effects) && InputsAreDefinedBeforeLoop(instruction)) { // We need to update the environment if the instruction has a loop header // phi in it. diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc new file mode 100644 index 0000000000..2fc66e6de4 --- /dev/null +++ b/compiler/optimizing/licm_test.cc @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/arena_allocator.h" +#include "builder.h" +#include "gtest/gtest.h" +#include "licm.h" +#include "nodes.h" +#include "optimizing_unit_test.h" +#include "side_effects_analysis.h" + +namespace art { + +/** + * Fixture class for the LICM tests. + */ +class LICMTest : public testing::Test { + public: + LICMTest() : pool_(), allocator_(&pool_) { + graph_ = CreateGraph(&allocator_); + } + + ~LICMTest() { } + + // Builds a singly-nested loop structure in CFG. Tests can further populate + // the basic blocks with instructions to set up interesting scenarios. + void BuildLoop() { + entry_ = new (&allocator_) HBasicBlock(graph_); + loop_preheader_ = new (&allocator_) HBasicBlock(graph_); + loop_header_ = new (&allocator_) HBasicBlock(graph_); + loop_body_ = new (&allocator_) HBasicBlock(graph_); + exit_ = new (&allocator_) HBasicBlock(graph_); + + graph_->AddBlock(entry_); + graph_->AddBlock(loop_preheader_); + graph_->AddBlock(loop_header_); + graph_->AddBlock(loop_body_); + graph_->AddBlock(exit_); + + graph_->SetEntryBlock(entry_); + graph_->SetExitBlock(exit_); + + // Set up loop flow in CFG. + entry_->AddSuccessor(loop_preheader_); + loop_preheader_->AddSuccessor(loop_header_); + loop_header_->AddSuccessor(loop_body_); + loop_header_->AddSuccessor(exit_); + loop_body_->AddSuccessor(loop_header_); + + // Provide boiler-plate instructions. 
+ parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot); + entry_->AddInstruction(parameter_); + constant_ = new (&allocator_) HConstant(Primitive::kPrimInt); + loop_preheader_->AddInstruction(constant_); + loop_header_->AddInstruction(new (&allocator_) HIf(parameter_)); + loop_body_->AddInstruction(new (&allocator_) HGoto()); + exit_->AddInstruction(new (&allocator_) HExit()); + } + + // Performs LICM optimizations (after proper set up). + void PerformLICM() { + ASSERT_TRUE(graph_->TryBuildingSsa()); + SideEffectsAnalysis side_effects(graph_); + side_effects.Run(); + LICM licm(graph_, side_effects); + licm.Run(); + } + + // General building fields. + ArenaPool pool_; + ArenaAllocator allocator_; + HGraph* graph_; + + // Specific basic blocks. + HBasicBlock* entry_; + HBasicBlock* loop_preheader_; + HBasicBlock* loop_header_; + HBasicBlock* loop_body_; + HBasicBlock* exit_; + + HInstruction* parameter_; // "this" + HInstruction* constant_; +}; + +// +// The actual LICM tests. +// + +TEST_F(LICMTest, ConstantHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set array to constant. + HInstruction* constant = new (&allocator_) HConstant(Primitive::kPrimDouble); + loop_body_->InsertInstructionBefore(constant, loop_body_->GetLastInstruction()); + HInstruction* set_array = new (&allocator_) HArraySet( + parameter_, constant_, constant, Primitive::kPrimDouble, 0); + loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); + + EXPECT_EQ(constant->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(constant->GetBlock(), loop_preheader_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, FieldHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get field with different types. + HInstruction* get_field = new (&allocator_) HInstanceFieldGet( + parameter_, Primitive::kPrimLong, MemberOffset(10), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); + HInstruction* set_field = new (&allocator_) HInstanceFieldSet( + parameter_, constant_, Primitive::kPrimInt, MemberOffset(20), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_field->GetBlock(), loop_body_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_field->GetBlock(), loop_preheader_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, NoFieldHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get field with same types. 
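// A toy model, ours rather than ART's: the FieldHoisting test above and the
// NoFieldHoisting test below hinge on type-disambiguated dependence. With a
// two-type universe, the check reduces to aligning the 'depend' bits with the
// 'change' bits and intersecting (the real 38-bit encoding appears later in
// the nodes.h changes):
#include <cstdint>

enum ToyType { kToyInt = 0, kToyLong = 1 };

constexpr uint32_t FieldWrite(ToyType t) { return UINT32_C(1) << t; }        // 'change' bits
constexpr uint32_t FieldRead(ToyType t) { return UINT32_C(1) << (2 + t); }   // 'depend' bits

constexpr bool ToyMayDependOn(uint32_t self, uint32_t other) {
  return ((self >> 2) & other) != 0;  // shift 'depend' half onto 'change' half
}

static_assert(ToyMayDependOn(FieldRead(kToyLong), FieldWrite(kToyLong)),
              "NoFieldHoisting: a long get may depend on a long set");
static_assert(!ToyMayDependOn(FieldRead(kToyLong), FieldWrite(kToyInt)),
              "FieldHoisting: a long get is independent of an int set");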
+ HInstruction* get_field = new (&allocator_) HInstanceFieldGet( + parameter_, Primitive::kPrimLong, MemberOffset(10), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); + HInstruction* set_field = new (&allocator_) HInstanceFieldSet( + parameter_, get_field, Primitive::kPrimLong, MemberOffset(10), + false, kUnknownFieldIndex, graph_->GetDexFile()); + loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_field->GetBlock(), loop_body_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_field->GetBlock(), loop_body_); + EXPECT_EQ(set_field->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, ArrayHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get array with different types. + HInstruction* get_array = new (&allocator_) HArrayGet( + parameter_, constant_, Primitive::kPrimLong); + loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); + HInstruction* set_array = new (&allocator_) HArraySet( + parameter_, constant_, constant_, Primitive::kPrimInt, 0); + loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_array->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_array->GetBlock(), loop_preheader_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); +} + +TEST_F(LICMTest, NoArrayHoisting) { + BuildLoop(); + + // Populate the loop with instructions: set/get array with same types. + HInstruction* get_array = new (&allocator_) HArrayGet( + parameter_, constant_, Primitive::kPrimLong); + loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); + HInstruction* set_array = new (&allocator_) HArraySet( + parameter_, get_array, constant_, Primitive::kPrimLong, 0); + loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); + + EXPECT_EQ(get_array->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); + PerformLICM(); + EXPECT_EQ(get_array->GetBlock(), loop_body_); + EXPECT_EQ(set_array->GetBlock(), loop_body_); +} + +} // namespace art diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h index f41a782fe6..4b250465aa 100644 --- a/compiler/optimizing/locations.h +++ b/compiler/optimizing/locations.h @@ -427,11 +427,11 @@ class RegisterSet : public ValueObject { } } - bool ContainsCoreRegister(uint32_t id) { + bool ContainsCoreRegister(uint32_t id) const { return Contains(core_registers_, id); } - bool ContainsFloatingPointRegister(uint32_t id) { + bool ContainsFloatingPointRegister(uint32_t id) const { return Contains(floating_point_registers_, id); } diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index b82e37cb4e..f2b63ae678 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -98,26 +98,31 @@ void HGraph::VisitBlockForBackEdges(HBasicBlock* block, } void HGraph::BuildDominatorTree() { + // (1) Simplify the CFG so that catch blocks have only exceptional incoming + // edges. This invariant simplifies building SSA form because Phis cannot + // collect both normal- and exceptional-flow values at the same time. + SimplifyCatchBlocks(); + ArenaBitVector visited(arena_, blocks_.Size(), false); - // (1) Find the back edges in the graph doing a DFS traversal. + // (2) Find the back edges in the graph doing a DFS traversal. 
FindBackEdges(&visited); - // (2) Remove instructions and phis from blocks not visited during + // (3) Remove instructions and phis from blocks not visited during // the initial DFS as users from other instructions, so that // users can be safely removed before uses later. RemoveInstructionsAsUsersFromDeadBlocks(visited); - // (3) Remove blocks not visited during the initial DFS. + // (4) Remove blocks not visited during the initial DFS. // Step (4) requires dead blocks to be removed from the // predecessors list of live blocks. RemoveDeadBlocks(visited); - // (4) Simplify the CFG now, so that we don't need to recompute + // (5) Simplify the CFG now, so that we don't need to recompute // dominators and the reverse post order. SimplifyCFG(); - // (5) Compute the dominance information and the reverse post order. + // (6) Compute the dominance information and the reverse post order. ComputeDominanceInformation(); } @@ -261,6 +266,83 @@ void HGraph::SimplifyLoop(HBasicBlock* header) { info->SetSuspendCheck(first_instruction->AsSuspendCheck()); } +static bool CheckIfPredecessorAtIsExceptional(const HBasicBlock& block, size_t pred_idx) { + HBasicBlock* predecessor = block.GetPredecessors().Get(pred_idx); + if (!predecessor->EndsWithTryBoundary()) { + // Only edges from HTryBoundary can be exceptional. + return false; + } + HTryBoundary* try_boundary = predecessor->GetLastInstruction()->AsTryBoundary(); + if (try_boundary->GetNormalFlowSuccessor() == &block) { + // This block is the normal-flow successor of `try_boundary`, but it could + // also be one of its exception handlers if catch blocks have not been + // simplified yet. Predecessors are unordered, so we will consider the first + // occurrence to be the normal edge and a possible second occurrence to be + // the exceptional edge. + return !block.IsFirstIndexOfPredecessor(predecessor, pred_idx); + } else { + // This is not the normal-flow successor of `try_boundary`, hence it must be + // one of its exception handlers. + DCHECK(try_boundary->HasExceptionHandler(block)); + return true; + } +} + +void HGraph::SimplifyCatchBlocks() { + for (size_t i = 0; i < blocks_.Size(); ++i) { + HBasicBlock* catch_block = blocks_.Get(i); + if (!catch_block->IsCatchBlock()) { + continue; + } + + bool exceptional_predecessors_only = true; + for (size_t j = 0; j < catch_block->GetPredecessors().Size(); ++j) { + if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) { + exceptional_predecessors_only = false; + break; + } + } + + if (!exceptional_predecessors_only) { + // Catch block has normal-flow predecessors and needs to be simplified. + // Splitting the block before its first instruction moves all its + // instructions into `normal_block` and links the two blocks with a Goto. + // Afterwards, incoming normal-flow edges are re-linked to `normal_block`, + // leaving `catch_block` with the exceptional edges only. + // Note that catch blocks with normal-flow predecessors cannot begin with + // a MOVE_EXCEPTION instruction, as guaranteed by the verifier. 
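// A toy rendering, ours, of the simplification above, reduced to edges only
// (the real code also moves the catch block's instructions into the new
// block via SplitBefore):
#include <algorithm>
#include <vector>

struct ToyBlock {
  std::vector<ToyBlock*> preds;
  std::vector<ToyBlock*> succs;
};

// Redirects every normal-flow predecessor of `catch_block` to `normal_block`,
// which then falls through into `catch_block`, so that only exceptional edges
// reach the catch block itself.
template <typename IsExceptionalEdge>
void RelinkNormalPredecessors(ToyBlock* catch_block,
                              ToyBlock* normal_block,
                              IsExceptionalEdge is_exceptional) {
  for (size_t i = 0; i < catch_block->preds.size();) {
    ToyBlock* pred = catch_block->preds[i];
    if (is_exceptional(*catch_block, i)) {
      ++i;
      continue;
    }
    std::replace(pred->succs.begin(), pred->succs.end(), catch_block, normal_block);
    normal_block->preds.push_back(pred);
    catch_block->preds.erase(catch_block->preds.begin() + i);  // do not advance i
  }
  normal_block->succs.push_back(catch_block);
  catch_block->preds.push_back(normal_block);
}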
+ DCHECK(!catch_block->GetFirstInstruction()->IsLoadException()); + HBasicBlock* normal_block = catch_block->SplitBefore(catch_block->GetFirstInstruction()); + for (size_t j = 0; j < catch_block->GetPredecessors().Size(); ++j) { + if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) { + catch_block->GetPredecessors().Get(j)->ReplaceSuccessor(catch_block, normal_block); + --j; + } + } + } + } +} + +void HGraph::ComputeTryBlockInformation() { + // Iterate in reverse post order to propagate try membership information from + // predecessors to their successors. + for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { + HBasicBlock* block = it.Current(); + if (block->IsEntryBlock() || block->IsCatchBlock()) { + // Catch blocks after simplification have only exceptional predecessors + // and hence are never in tries. + continue; + } + + // Infer try membership from the first predecessor. Having simplified loops, + // the first predecessor can never be a back edge and therefore it must have + // been visited already and had its try membership set. + HBasicBlock* first_predecessor = block->GetPredecessors().Get(0); + DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor)); + block->SetTryEntry(first_predecessor->ComputeTryEntryOfSuccessors()); + } +} + void HGraph::SimplifyCFG() { // Simplify the CFG for future analysis, and code generation: // (1): Split critical edges. @@ -268,9 +350,10 @@ void HGraph::SimplifyCFG() { for (size_t i = 0; i < blocks_.Size(); ++i) { HBasicBlock* block = blocks_.Get(i); if (block == nullptr) continue; - if (block->GetSuccessors().Size() > 1) { + if (block->NumberOfNormalSuccessors() > 1) { for (size_t j = 0; j < block->GetSuccessors().Size(); ++j) { HBasicBlock* successor = block->GetSuccessors().Get(j); + DCHECK(!successor->IsCatchBlock()); if (successor->GetPredecessors().Size() > 1) { SplitCriticalEdge(block, successor); --j; @@ -288,6 +371,11 @@ bool HGraph::AnalyzeNaturalLoops() const { for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); if (block->IsLoopHeader()) { + if (block->IsCatchBlock()) { + // TODO: Dealing with exceptional back edges could be tricky because + // they only approximate the real control flow. Bail out for now. + return false; + } HLoopInformation* info = block->GetLoopInformation(); if (!info->Populate()) { // Abort if the loop is non natural. We currently bailout in such cases. @@ -917,32 +1005,25 @@ HConstant* HTypeConversion::TryStaticEvaluation() const { HConstant* HUnaryOperation::TryStaticEvaluation() const { if (GetInput()->IsIntConstant()) { - int32_t value = Evaluate(GetInput()->AsIntConstant()->GetValue()); - return GetBlock()->GetGraph()->GetIntConstant(value); + return Evaluate(GetInput()->AsIntConstant()); } else if (GetInput()->IsLongConstant()) { - // TODO: Implement static evaluation of long unary operations. - // - // Do not exit with a fatal condition here. Instead, simply - // return `null' to notify the caller that this instruction - // cannot (yet) be statically evaluated. 
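// An illustrative sketch, ours: reverse post order guarantees that when
// ComputeTryBlockInformation above visits a block, its first predecessor has
// already been assigned its try membership. The bare propagation, minus the
// TryBoundary entry/exit adjustment done by ComputeTryEntryOfSuccessors:
#include <vector>

struct TryToyBlock {
  std::vector<size_t> preds;  // predecessor indices; preds[0] is never a back edge
  bool is_catch = false;
  int try_entry = -1;         // -1 means "not covered by any try"
};

void PropagateTryMembership(std::vector<TryToyBlock>& rpo) {
  for (size_t i = 1; i < rpo.size(); ++i) {  // rpo[0] is the entry block
    TryToyBlock& block = rpo[i];
    if (block.is_catch) {
      continue;  // simplified catch blocks are never inside a try
    }
    block.try_entry = rpo[block.preds.front()].try_entry;
  }
}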
- return nullptr; + return Evaluate(GetInput()->AsLongConstant()); } return nullptr; } HConstant* HBinaryOperation::TryStaticEvaluation() const { - if (GetLeft()->IsIntConstant() && GetRight()->IsIntConstant()) { - int32_t value = Evaluate(GetLeft()->AsIntConstant()->GetValue(), - GetRight()->AsIntConstant()->GetValue()); - return GetBlock()->GetGraph()->GetIntConstant(value); - } else if (GetLeft()->IsLongConstant() && GetRight()->IsLongConstant()) { - int64_t value = Evaluate(GetLeft()->AsLongConstant()->GetValue(), - GetRight()->AsLongConstant()->GetValue()); - if (GetResultType() == Primitive::kPrimLong) { - return GetBlock()->GetGraph()->GetLongConstant(value); - } else { - DCHECK_EQ(GetResultType(), Primitive::kPrimInt); - return GetBlock()->GetGraph()->GetIntConstant(static_cast<int32_t>(value)); + if (GetLeft()->IsIntConstant()) { + if (GetRight()->IsIntConstant()) { + return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsIntConstant()); + } else if (GetRight()->IsLongConstant()) { + return Evaluate(GetLeft()->AsIntConstant(), GetRight()->AsLongConstant()); + } + } else if (GetLeft()->IsLongConstant()) { + if (GetRight()->IsIntConstant()) { + return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsIntConstant()); + } else if (GetRight()->IsLongConstant()) { + return Evaluate(GetLeft()->AsLongConstant(), GetRight()->AsLongConstant()); } } return nullptr; @@ -1083,10 +1164,20 @@ HBasicBlock* HBasicBlock::SplitAfter(HInstruction* cursor) { return new_block; } -bool HBasicBlock::IsExceptionalSuccessor(size_t idx) const { - return !GetInstructions().IsEmpty() - && GetLastInstruction()->IsTryBoundary() - && GetLastInstruction()->AsTryBoundary()->IsExceptionalSuccessor(idx); +HTryBoundary* HBasicBlock::ComputeTryEntryOfSuccessors() const { + if (EndsWithTryBoundary()) { + HTryBoundary* try_boundary = GetLastInstruction()->AsTryBoundary(); + if (try_boundary->IsEntry()) { + DCHECK(try_entry_ == nullptr); + return try_boundary; + } else { + DCHECK(try_entry_ != nullptr); + DCHECK(try_entry_->HasSameExceptionHandlersAs(*try_boundary)); + return nullptr; + } + } else { + return try_entry_; + } } static bool HasOnlyOneInstruction(const HBasicBlock& block) { @@ -1111,10 +1202,31 @@ bool HBasicBlock::EndsWithIf() const { return !GetInstructions().IsEmpty() && GetLastInstruction()->IsIf(); } +bool HBasicBlock::EndsWithTryBoundary() const { + return !GetInstructions().IsEmpty() && GetLastInstruction()->IsTryBoundary(); +} + bool HBasicBlock::HasSinglePhi() const { return !GetPhis().IsEmpty() && GetFirstPhi()->GetNext() == nullptr; } +bool HTryBoundary::HasSameExceptionHandlersAs(const HTryBoundary& other) const { + if (GetBlock()->GetSuccessors().Size() != other.GetBlock()->GetSuccessors().Size()) { + return false; + } + + // Exception handlers need to be stored in the same order. + for (HExceptionHandlerIterator it1(*this), it2(other); + !it1.Done(); + it1.Advance(), it2.Advance()) { + DCHECK(!it2.Done()); + if (it1.Current() != it2.Current()) { + return false; + } + } + return true; +} + size_t HInstructionList::CountSize() const { size_t size = 0; HInstruction* current = first_instruction_; @@ -1365,7 +1477,7 @@ void HGraph::DeleteDeadBlock(HBasicBlock* block) { blocks_.Put(block->GetBlockId(), nullptr); } -void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { +HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { DCHECK(HasExitBlock()) << "Unimplemented scenario"; // Update the environments in this graph to have the invoke's environment // as parent. 
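// A toy restatement, not part of the diff: the rewritten dispatch folds all
// four constant combinations, and the new mixed (int, long) and (long, int)
// overloads decline by default; an operation opts in where the mix makes
// sense, e.g. a shift of a long value by an int distance. Hypothetical names,
// C++17:
#include <cstdint>
#include <optional>

std::optional<int64_t> TryFoldShlLongByInt(std::optional<int64_t> value,
                                           std::optional<int32_t> distance) {
  if (!value.has_value() || !distance.has_value()) {
    return std::nullopt;  // not both constant: leave the node for runtime
  }
  // Java masks the distance of a long shift to its low six bits.
  return static_cast<int64_t>(static_cast<uint64_t>(*value) << (*distance & 63));
}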
@@ -1390,6 +1502,7 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { outer_graph->SetHasBoundsChecks(true); } + HInstruction* return_value = nullptr; if (GetBlocks().Size() == 3) { // Simple case of an entry block, a body block, and an exit block. // Put the body block's instruction into `invoke`'s block. @@ -1404,7 +1517,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Replace the invoke with the return value of the inlined graph. if (last->IsReturn()) { - invoke->ReplaceWith(last->InputAt(0)); + return_value = last->InputAt(0); + invoke->ReplaceWith(return_value); } else { DCHECK(last->IsReturnVoid()); } @@ -1426,7 +1540,6 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Update all predecessors of the exit block (now the `to` block) // to not `HReturn` but `HGoto` instead. - HInstruction* return_value = nullptr; bool returns_void = to->GetPredecessors().Get(0)->GetLastInstruction()->IsReturnVoid(); if (to->GetPredecessors().Size() == 1) { HBasicBlock* predecessor = to->GetPredecessors().Get(0); @@ -1560,6 +1673,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { // Finally remove the invoke from the caller. invoke->GetBlock()->RemoveInstruction(invoke); + + return return_value; } /* @@ -1637,14 +1752,76 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) { } } +void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) { + if (kIsDebugBuild) { + DCHECK_EQ(GetType(), Primitive::kPrimNot); + ScopedObjectAccess soa(Thread::Current()); + DCHECK(rti.IsValid()) << "Invalid RTI for " << DebugName(); + if (IsBoundType()) { + // Having the test here spares us from making the method virtual just for + // the sake of a DCHECK. + ReferenceTypeInfo upper_bound_rti = AsBoundType()->GetUpperBound(); + DCHECK(upper_bound_rti.IsSupertypeOf(rti)) + << " upper_bound_rti: " << upper_bound_rti + << " rti: " << rti; + DCHECK(!upper_bound_rti.GetTypeHandle()->IsFinal() || rti.IsExact()); + } + } + reference_type_info_ = rti; +} + +ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {} + +ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact) + : type_handle_(type_handle), is_exact_(is_exact) { + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); + DCHECK(IsValidHandle(type_handle)); + } +} + std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs) { ScopedObjectAccess soa(Thread::Current()); os << "[" - << " is_top=" << rhs.IsTop() - << " type=" << (rhs.IsTop() ? "?" : PrettyClass(rhs.GetTypeHandle().Get())) + << " is_valid=" << rhs.IsValid() + << " type=" << (!rhs.IsValid() ? "?" : PrettyClass(rhs.GetTypeHandle().Get())) << " is_exact=" << rhs.IsExact() << " ]"; return os; } +bool HInstruction::HasAnyEnvironmentUseBefore(HInstruction* other) { + // For now, assume that instructions in different blocks may use the + // environment. + // TODO: Use the control flow to decide if this is true. + if (GetBlock() != other->GetBlock()) { + return true; + } + + // We know that we are in the same block. Walk from 'this' to 'other', + // checking to see if there is any instruction with an environment. + HInstruction* current = this; + for (; current != other && current != nullptr; current = current->GetNext()) { + // This is a conservative check, as the instruction result may not be in + // the referenced environment. 
+ if (current->HasEnvironment()) { + return true; + } + } + + // We should have been called with 'this' before 'other' in the block. + // Just confirm this. + DCHECK(current != nullptr); + return false; +} + +void HInstruction::RemoveEnvironmentUsers() { + for (HUseIterator<HEnvironment*> use_it(GetEnvUses()); !use_it.Done(); use_it.Advance()) { + HUseListNode<HEnvironment*>* user_node = use_it.Current(); + HEnvironment* user = user_node->GetUser(); + user->SetRawEnvAt(user_node->GetIndex(), nullptr); + } + env_uses_.Clear(); +} + } // namespace art diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 04c3963675..f09e958d29 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -17,6 +17,8 @@ #ifndef ART_COMPILER_OPTIMIZING_NODES_H_ #define ART_COMPILER_OPTIMIZING_NODES_H_ +#include <type_traits> + #include "base/arena_containers.h" #include "base/arena_object.h" #include "dex/compiler_enums.h" @@ -38,6 +40,7 @@ class HBasicBlock; class HCurrentMethod; class HDoubleConstant; class HEnvironment; +class HFakeString; class HFloatConstant; class HGraphBuilder; class HGraphVisitor; @@ -48,6 +51,7 @@ class HLongConstant; class HNullConstant; class HPhi; class HSuspendCheck; +class HTryBoundary; class LiveInterval; class LocationSummary; class SlowPathCode; @@ -56,6 +60,7 @@ class SsaBuilder; static const int kDefaultNumberOfBlocks = 8; static const int kDefaultNumberOfSuccessors = 2; static const int kDefaultNumberOfPredecessors = 2; +static const int kDefaultNumberOfExceptionalPredecessors = 0; static const int kDefaultNumberOfDominatedBlocks = 1; static const int kDefaultNumberOfBackEdges = 1; @@ -181,6 +186,10 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { // visit for eliminating dead phis: a dead phi can only have loop header phi // users remaining when being visited. if (!AnalyzeNaturalLoops()) return false; + // Precompute per-block try membership before entering the SSA builder, + // which needs the information to build catch block phis from values of + // locals at throwing instructions inside try blocks. + ComputeTryBlockInformation(); TransformToSsa(); in_ssa_form_ = true; return true; @@ -192,14 +201,21 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { void BuildDominatorTree(); void TransformToSsa(); void SimplifyCFG(); + void SimplifyCatchBlocks(); // Analyze all natural loops in this graph. Returns false if one // loop is not natural, that is the header does not dominate the // back edge. bool AnalyzeNaturalLoops() const; + // Iterate over blocks to compute try block membership. Needs reverse post + // order and loop information. + void ComputeTryBlockInformation(); + // Inline this graph in `outer_graph`, replacing the given `invoke` instruction. - void InlineInto(HGraph* outer_graph, HInvoke* invoke); + // Returns the instruction used to replace the invoke expression or null if the + // invoke is for a void method. + HInstruction* InlineInto(HGraph* outer_graph, HInvoke* invoke); // Need to add a couple of blocks to test if the loop body is entered and // put deoptimization instructions, etc. @@ -295,7 +311,12 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { // already, it is created and inserted into the graph. This method is only for // integral types. HConstant* GetConstant(Primitive::Type type, int64_t value); + + // TODO: This is problematic for the consistency of reference type propagation + // because it can be created anytime after the pass and thus it will be left + // with an invalid type. 
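// A sketch with toy containers, not ART code: RemoveEnvironmentUsers above
// first nulls out every environment slot that points back at the instruction,
// then clears the use list in one step (ART's real use lists are intrusive):
#include <cstddef>
#include <vector>

struct ToyEnvironment {
  std::vector<const void*> vregs;  // stand-ins for HInstruction* slots
};

struct ToyEnvUse {
  ToyEnvironment* user;
  size_t index;  // the slot of `user` that refers to the instruction
};

void RemoveEnvUsers(std::vector<ToyEnvUse>& env_uses) {
  for (ToyEnvUse& use : env_uses) {
    use.user->vregs[use.index] = nullptr;  // drop the back-reference
  }
  env_uses.clear();                        // then forget all the uses at once
}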
HNullConstant* GetNullConstant(); + HIntConstant* GetIntConstant(int32_t value) { return CreateConstant(value, &cached_int_constants_); } @@ -325,6 +346,10 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { return invoke_type_; } + InstructionSet GetInstructionSet() const { + return instruction_set_; + } + private: void VisitBlockForDominatorTree(HBasicBlock* block, HBasicBlock* predecessor, @@ -725,8 +750,11 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { return GetPredecessorIndexOf(predecessor) == idx; } - // Returns whether successor at index `idx` is an exception handler. - bool IsExceptionalSuccessor(size_t idx) const; + // Returns the number of non-exceptional successors. SsaChecker ensures that + // these are stored at the beginning of the successor list. + size_t NumberOfNormalSuccessors() const { + return EndsWithTryBoundary() ? 1 : GetSuccessors().Size(); + } // Split the block into two blocks just before `cursor`. Returns the newly // created, latter block. Note that this method will add the block to the @@ -825,6 +853,15 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { bool IsInLoop() const { return loop_information_ != nullptr; } + HTryBoundary* GetTryEntry() const { return try_entry_; } + void SetTryEntry(HTryBoundary* try_entry) { try_entry_ = try_entry; } + bool IsInTry() const { return try_entry_ != nullptr; } + + // Returns the try entry that this block's successors should have. They will + // be in the same try, unless the block ends in a try boundary. In that case, + // the appropriate try entry will be returned. + HTryBoundary* ComputeTryEntryOfSuccessors() const; + // Returns whether this block dominates the blocked passed as parameter. bool Dominates(HBasicBlock* block) const; @@ -841,6 +878,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { bool EndsWithControlFlowInstruction() const; bool EndsWithIf() const; + bool EndsWithTryBoundary() const; bool HasSinglePhi() const; private: @@ -859,6 +897,10 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> { size_t lifetime_end_; bool is_catch_block_; + // If this block is in a try block, `try_entry_` stores one of, possibly + // several, TryBoundary instructions entering it. + HTryBoundary* try_entry_; + friend class HGraph; friend class HInstruction; @@ -900,6 +942,7 @@ class HLoopInformationOutwardIterator : public ValueObject { M(BoundsCheck, Instruction) \ M(BoundType, Instruction) \ M(CheckCast, Instruction) \ + M(ClearException, Instruction) \ M(ClinitCheck, Instruction) \ M(Compare, BinaryOperation) \ M(Condition, BinaryOperation) \ @@ -910,6 +953,7 @@ class HLoopInformationOutwardIterator : public ValueObject { M(DoubleConstant, Constant) \ M(Equal, Condition) \ M(Exit, Instruction) \ + M(FakeString, Instruction) \ M(FloatConstant, Constant) \ M(Goto, Instruction) \ M(GreaterThan, Condition) \ @@ -1149,13 +1193,35 @@ class HUserRecord : public ValueObject { HUseListNode<T>* use_node_; }; -// TODO: Add better documentation to this class and maybe refactor with more suggestive names. -// - Has(All)SideEffects suggests that all the side effects are present but only ChangesSomething -// flag is consider. -// - DependsOn suggests that there is a real dependency between side effects but it only -// checks DependendsOnSomething flag. -// -// Represents the side effects an instruction may have. +/** + * Side-effects representation. + * + * For write/read dependences on fields/arrays, the dependence analysis uses + * type disambiguation (e.g. 
a float field write cannot modify the value of an + * integer field read) and the access type (e.g. a reference array write cannot + * modify the value of a reference field read [although it may modify the + * reference fetch prior to reading the field, which is represented by its own + * write/read dependence]). The analysis makes conservative points-to + * assumptions on reference types (e.g. two same typed arrays are assumed to be + * the same, and any reference read depends on any reference read without + * further regard of its type). + * + * The internal representation uses 38-bit and is described in the table below. + * The first line indicates the side effect, and for field/array accesses the + * second line indicates the type of the access (in the order of the + * Primitive::Type enum). + * The two numbered lines below indicate the bit position in the bitfield (read + * vertically). + * + * |Depends on GC|ARRAY-R |FIELD-R |Can trigger GC|ARRAY-W |FIELD-W | + * +-------------+---------+---------+--------------+---------+---------+ + * | |DFJISCBZL|DFJISCBZL| |DFJISCBZL|DFJISCBZL| + * | 3 |333333322|222222221| 1 |111111110|000000000| + * | 7 |654321098|765432109| 8 |765432109|876543210| + * + * Note that, to ease the implementation, 'changes' bits are least significant + * bits, while 'dependency' bits are most significant bits. + */ class SideEffects : public ValueObject { public: SideEffects() : flags_(0) {} @@ -1165,57 +1231,204 @@ class SideEffects : public ValueObject { } static SideEffects All() { - return SideEffects(ChangesSomething().flags_ | DependsOnSomething().flags_); + return SideEffects(kAllChangeBits | kAllDependOnBits); + } + + static SideEffects AllChanges() { + return SideEffects(kAllChangeBits); + } + + static SideEffects AllDependencies() { + return SideEffects(kAllDependOnBits); + } + + static SideEffects AllExceptGCDependency() { + return AllWritesAndReads().Union(SideEffects::CanTriggerGC()); + } + + static SideEffects AllWritesAndReads() { + return SideEffects(kAllWrites | kAllReads); + } + + static SideEffects AllWrites() { + return SideEffects(kAllWrites); + } + + static SideEffects AllReads() { + return SideEffects(kAllReads); } - static SideEffects ChangesSomething() { - return SideEffects((1 << kFlagChangesCount) - 1); + static SideEffects FieldWriteOfType(Primitive::Type type, bool is_volatile) { + return is_volatile + ? AllWritesAndReads() + : SideEffects(TypeFlagWithAlias(type, kFieldWriteOffset)); } - static SideEffects DependsOnSomething() { - int count = kFlagDependsOnCount - kFlagChangesCount; - return SideEffects(((1 << count) - 1) << kFlagChangesCount); + static SideEffects ArrayWriteOfType(Primitive::Type type) { + return SideEffects(TypeFlagWithAlias(type, kArrayWriteOffset)); } + static SideEffects FieldReadOfType(Primitive::Type type, bool is_volatile) { + return is_volatile + ? AllWritesAndReads() + : SideEffects(TypeFlagWithAlias(type, kFieldReadOffset)); + } + + static SideEffects ArrayReadOfType(Primitive::Type type) { + return SideEffects(TypeFlagWithAlias(type, kArrayReadOffset)); + } + + static SideEffects CanTriggerGC() { + return SideEffects(1ULL << kCanTriggerGCBit); + } + + static SideEffects DependsOnGC() { + return SideEffects(1ULL << kDependsOnGCBit); + } + + // Combines the side-effects of this and the other. 
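// Restating the documented layout with standalone constants (ours, mirroring
// the private constants further down): the nineteen 'change' bits, two 9-bit
// type groups plus "can trigger GC", occupy the low half, and the nineteen
// 'depend' bits the high half, so the dependence test is one shift and one
// mask:
#include <cstdint>

constexpr int kTypeBits = 9;  // one bit per type: L Z B C S I J F D
constexpr int kFieldWrite = 0;
constexpr int kArrayWrite = kFieldWrite + kTypeBits;
constexpr int kCanTriggerGC = kArrayWrite + kTypeBits;  // bit 18
constexpr int kChangeBitCount = kCanTriggerGC + 1;      // 19 'change' bits
constexpr int kFieldRead = kChangeBitCount;
constexpr int kArrayRead = kFieldRead + kTypeBits;
constexpr int kDependsOnGC = kArrayRead + kTypeBits;    // bit 37, the last

static_assert(kDependsOnGC + 1 == 38, "the encoding uses 38 bits in total");

// A read may depend on a write iff shifting the 'depend' half down onto the
// 'change' half yields an overlap; MayDependOn below does exactly this.
constexpr bool ToyMayDependOn(uint64_t self, uint64_t other) {
  return ((self >> kChangeBitCount) & other) != 0;
}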
SideEffects Union(SideEffects other) const { return SideEffects(flags_ | other.flags_); } - bool HasSideEffects() const { - size_t all_bits_set = (1 << kFlagChangesCount) - 1; - return (flags_ & all_bits_set) != 0; + SideEffects Exclusion(SideEffects other) const { + return SideEffects(flags_ & ~other.flags_); } - bool HasAllSideEffects() const { - size_t all_bits_set = (1 << kFlagChangesCount) - 1; - return all_bits_set == (flags_ & all_bits_set); + bool Includes(SideEffects other) const { + return (other.flags_ & flags_) == other.flags_; } - bool DependsOn(SideEffects other) const { - size_t depends_flags = other.ComputeDependsFlags(); - return (flags_ & depends_flags) != 0; + bool HasSideEffects() const { + return (flags_ & kAllChangeBits); } bool HasDependencies() const { - int count = kFlagDependsOnCount - kFlagChangesCount; - size_t all_bits_set = (1 << count) - 1; - return ((flags_ >> kFlagChangesCount) & all_bits_set) != 0; + return (flags_ & kAllDependOnBits); + } + + // Returns true if there are no side effects or dependencies. + bool DoesNothing() const { + return flags_ == 0; + } + + // Returns true if something is written. + bool DoesAnyWrite() const { + return (flags_ & kAllWrites); } + // Returns true if something is read. + bool DoesAnyRead() const { + return (flags_ & kAllReads); + } + + // Returns true if potentially everything is written and read + // (every type and every kind of access). + bool DoesAllReadWrite() const { + return (flags_ & (kAllWrites | kAllReads)) == (kAllWrites | kAllReads); + } + + bool DoesAll() const { + return flags_ == (kAllChangeBits | kAllDependOnBits); + } + + // Returns true if this may read something written by other. + bool MayDependOn(SideEffects other) const { + const uint64_t depends_on_flags = (flags_ & kAllDependOnBits) >> kChangeBits; + return (other.flags_ & depends_on_flags); + } + + // Returns string representation of flags (for debugging only). + // Format: |x|DFJISCBZL|DFJISCBZL|y|DFJISCBZL|DFJISCBZL| + std::string ToString() const { + std::string flags = "|"; + for (int s = kLastBit; s >= 0; s--) { + bool current_bit_is_set = ((flags_ >> s) & 1) != 0; + if ((s == kDependsOnGCBit) || (s == kCanTriggerGCBit)) { + // This is a bit for the GC side effect. + if (current_bit_is_set) { + flags += "GC"; + } + flags += "|"; + } else { + // This is a bit for the array/field analysis. + // The underscore character stands for the 'can trigger GC' bit. 
+ static const char *kDebug = "LZBCSIJFDLZBCSIJFD_LZBCSIJFDLZBCSIJFD"; + if (current_bit_is_set) { + flags += kDebug[s]; + } + if ((s == kFieldWriteOffset) || (s == kArrayWriteOffset) || + (s == kFieldReadOffset) || (s == kArrayReadOffset)) { + flags += "|"; + } + } + } + return flags; + } + + bool Equals(const SideEffects& other) const { return flags_ == other.flags_; } + private: - static constexpr int kFlagChangesSomething = 0; - static constexpr int kFlagChangesCount = kFlagChangesSomething + 1; + static constexpr int kFieldArrayAnalysisBits = 9; + + static constexpr int kFieldWriteOffset = 0; + static constexpr int kArrayWriteOffset = kFieldWriteOffset + kFieldArrayAnalysisBits; + static constexpr int kLastBitForWrites = kArrayWriteOffset + kFieldArrayAnalysisBits - 1; + static constexpr int kCanTriggerGCBit = kLastBitForWrites + 1; + + static constexpr int kChangeBits = kCanTriggerGCBit + 1; + + static constexpr int kFieldReadOffset = kCanTriggerGCBit + 1; + static constexpr int kArrayReadOffset = kFieldReadOffset + kFieldArrayAnalysisBits; + static constexpr int kLastBitForReads = kArrayReadOffset + kFieldArrayAnalysisBits - 1; + static constexpr int kDependsOnGCBit = kLastBitForReads + 1; + + static constexpr int kLastBit = kDependsOnGCBit; + static constexpr int kDependOnBits = kLastBit + 1 - kChangeBits; - static constexpr int kFlagDependsOnSomething = kFlagChangesCount; - static constexpr int kFlagDependsOnCount = kFlagDependsOnSomething + 1; + // Aliases. - explicit SideEffects(size_t flags) : flags_(flags) {} + static_assert(kChangeBits == kDependOnBits, + "the 'change' bits should match the 'depend on' bits."); - size_t ComputeDependsFlags() const { - return flags_ << kFlagChangesCount; + static constexpr uint64_t kAllChangeBits = ((1ULL << kChangeBits) - 1); + static constexpr uint64_t kAllDependOnBits = ((1ULL << kDependOnBits) - 1) << kChangeBits; + static constexpr uint64_t kAllWrites = + ((1ULL << (kLastBitForWrites + 1 - kFieldWriteOffset)) - 1) << kFieldWriteOffset; + static constexpr uint64_t kAllReads = + ((1ULL << (kLastBitForReads + 1 - kFieldReadOffset)) - 1) << kFieldReadOffset; + + // Work around the fact that HIR aliases I/F and J/D. + // TODO: remove this interceptor once HIR types are clean + static uint64_t TypeFlagWithAlias(Primitive::Type type, int offset) { + switch (type) { + case Primitive::kPrimInt: + case Primitive::kPrimFloat: + return TypeFlag(Primitive::kPrimInt, offset) | + TypeFlag(Primitive::kPrimFloat, offset); + case Primitive::kPrimLong: + case Primitive::kPrimDouble: + return TypeFlag(Primitive::kPrimLong, offset) | + TypeFlag(Primitive::kPrimDouble, offset); + default: + return TypeFlag(type, offset); + } } - size_t flags_; + // Translates type to bit flag. + static uint64_t TypeFlag(Primitive::Type type, int offset) { + CHECK_NE(type, Primitive::kPrimVoid); + const uint64_t one = 1; + const int shift = type; // 0-based consecutive enum + DCHECK_LE(kFieldWriteOffset, shift); + DCHECK_LT(shift, kArrayWriteOffset); + return one << (type + offset); + } + + // Private constructor on direct flags value. + explicit SideEffects(uint64_t flags) : flags_(flags) {} + + uint64_t flags_; }; // A HEnvironment object contains the values of virtual registers at a given location. @@ -1335,8 +1548,7 @@ class HEnvironment : public ArenaObject<kArenaAllocMisc> { const uint32_t dex_pc_; const InvokeType invoke_type_; - // The instruction that holds this environment. Only used in debug mode - // to ensure the graph is consistent. 
+ // The instruction that holds this environment. HInstruction* const holder_; friend class HInstruction; @@ -1348,79 +1560,64 @@ class ReferenceTypeInfo : ValueObject { public: typedef Handle<mirror::Class> TypeHandle; - static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (type_handle->IsObjectClass()) { - // Override the type handle to be consistent with the case when we get to - // Top but don't have the Object class available. It avoids having to guess - // what value the type_handle has when it's Top. - return ReferenceTypeInfo(TypeHandle(), is_exact, true); - } else { - return ReferenceTypeInfo(type_handle, is_exact, false); - } + static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) { + // The constructor will check that the type_handle is valid. + return ReferenceTypeInfo(type_handle, is_exact); } - static ReferenceTypeInfo CreateTop(bool is_exact) { - return ReferenceTypeInfo(TypeHandle(), is_exact, true); + static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); } + + static bool IsValidHandle(TypeHandle handle) SHARED_REQUIRES(Locks::mutator_lock_) { + return handle.GetReference() != nullptr; } + bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) { + return IsValidHandle(type_handle_); + } bool IsExact() const { return is_exact_; } - bool IsTop() const { return is_top_; } - bool IsInterface() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return !IsTop() && GetTypeHandle()->IsInterface(); + + bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(IsValid()); + return GetTypeHandle()->IsObjectClass(); + } + bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(IsValid()); + return GetTypeHandle()->IsInterface(); } Handle<mirror::Class> GetTypeHandle() const { return type_handle_; } - bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsTop()) { - // Top (equivalent for java.lang.Object) is supertype of anything. - return true; - } - if (rti.IsTop()) { - // If we get here `this` is not Top() so it can't be a supertype. - return false; - } + bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(IsValid()); + DCHECK(rti.IsValid()); return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get()); } // Returns true if the type information provide the same amount of details. // Note that it does not mean that the instructions have the same actual type - // (e.g. tops are equal but they can be the result of a merge). - bool IsEqual(ReferenceTypeInfo rti) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsExact() != rti.IsExact()) { - return false; - } - if (IsTop() && rti.IsTop()) { - // `Top` means java.lang.Object, so the types are equivalent. + // (because the type can be the result of a merge). + bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) { + if (!IsValid() && !rti.IsValid()) { + // Invalid types are equal. return true; } - if (IsTop() || rti.IsTop()) { - // If only one is top or object than they are not equivalent. - // NB: We need this extra check because the type_handle of `Top` is invalid - // and we cannot inspect its reference. + if (!IsValid() || !rti.IsValid()) { + // One is valid, the other not. return false; } - - // Finally check the types. 
- return GetTypeHandle().Get() == rti.GetTypeHandle().Get(); + return IsExact() == rti.IsExact() + && GetTypeHandle().Get() == rti.GetTypeHandle().Get(); } private: - ReferenceTypeInfo() : ReferenceTypeInfo(TypeHandle(), false, true) {} - ReferenceTypeInfo(TypeHandle type_handle, bool is_exact, bool is_top) - : type_handle_(type_handle), is_exact_(is_exact), is_top_(is_top) {} + ReferenceTypeInfo(); + ReferenceTypeInfo(TypeHandle type_handle, bool is_exact); // The class of the object. TypeHandle type_handle_; // Whether or not the type is exact or a superclass of the actual type. // Whether or not we have any information about this type. bool is_exact_; - // A true value here means that the object type should be java.lang.Object. - // We don't have access to the corresponding mirror object every time so this - // flag acts as a substitute. When true, the TypeHandle refers to a null - // pointer and should not be used. - bool is_top_; }; std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs); @@ -1438,7 +1635,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { live_interval_(nullptr), lifetime_position_(kNoLifetime), side_effects_(side_effects), - reference_type_info_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {} + reference_type_info_(ReferenceTypeInfo::CreateInvalid()) {} virtual ~HInstruction() {} @@ -1455,6 +1652,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { HInstruction* GetPreviousDisregardingMoves() const; HBasicBlock* GetBlock() const { return block_; } + ArenaAllocator* GetArena() const { return block_->GetGraph()->GetArena(); } void SetBlock(HBasicBlock* block) { block_ = block; } bool IsInBlock() const { return block_ != nullptr; } bool IsInLoop() const { return block_->IsInLoop(); } @@ -1479,10 +1677,13 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { } virtual bool IsControlFlow() const { return false; } virtual bool CanThrow() const { return false; } + bool HasSideEffects() const { return side_effects_.HasSideEffects(); } + bool DoesAnyWrite() const { return side_effects_.DoesAnyWrite(); } // Does not apply for all instructions, but having this at top level greatly // simplifies the null check elimination. + // TODO: Consider merging can_be_null into ReferenceTypeInfo. virtual bool CanBeNull() const { DCHECK_EQ(GetType(), Primitive::kPrimNot) << "CanBeNull only applies to reference types"; return true; @@ -1493,10 +1694,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { return false; } - void SetReferenceTypeInfo(ReferenceTypeInfo reference_type_info) { - DCHECK_EQ(GetType(), Primitive::kPrimNot); - reference_type_info_ = reference_type_info; - } + void SetReferenceTypeInfo(ReferenceTypeInfo rti); ReferenceTypeInfo GetReferenceTypeInfo() const { DCHECK_EQ(GetType(), Primitive::kPrimNot); @@ -1659,6 +1857,14 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> { virtual bool NeedsDexCache() const { return false; } + // Does this instruction have any use in an environment before + // control flow hits 'other'? + bool HasAnyEnvironmentUseBefore(HInstruction* other); + + // Remove all references to environment uses of this instruction. + // The caller must ensure that this is safe to do. 
+ void RemoveEnvironmentUsers(); + protected: virtual const HUserRecord<HInstruction*> InputRecordAt(size_t i) const = 0; virtual void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) = 0; @@ -1914,6 +2120,95 @@ class HGoto : public HTemplateInstruction<0> { DISALLOW_COPY_AND_ASSIGN(HGoto); }; +class HConstant : public HExpression<0> { + public: + explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {} + + bool CanBeMoved() const OVERRIDE { return true; } + + virtual bool IsMinusOne() const { return false; } + virtual bool IsZero() const { return false; } + virtual bool IsOne() const { return false; } + + DECLARE_INSTRUCTION(Constant); + + private: + DISALLOW_COPY_AND_ASSIGN(HConstant); +}; + +class HNullConstant : public HConstant { + public: + bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { + return true; + } + + size_t ComputeHashCode() const OVERRIDE { return 0; } + + DECLARE_INSTRUCTION(NullConstant); + + private: + HNullConstant() : HConstant(Primitive::kPrimNot) {} + + friend class HGraph; + DISALLOW_COPY_AND_ASSIGN(HNullConstant); +}; + +// Constants of the type int. Those can be from Dex instructions, or +// synthesized (for example with the if-eqz instruction). +class HIntConstant : public HConstant { + public: + int32_t GetValue() const { return value_; } + + bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + DCHECK(other->IsIntConstant()); + return other->AsIntConstant()->value_ == value_; + } + + size_t ComputeHashCode() const OVERRIDE { return GetValue(); } + + bool IsMinusOne() const OVERRIDE { return GetValue() == -1; } + bool IsZero() const OVERRIDE { return GetValue() == 0; } + bool IsOne() const OVERRIDE { return GetValue() == 1; } + + DECLARE_INSTRUCTION(IntConstant); + + private: + explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {} + explicit HIntConstant(bool value) : HConstant(Primitive::kPrimInt), value_(value ? 1 : 0) {} + + const int32_t value_; + + friend class HGraph; + ART_FRIEND_TEST(GraphTest, InsertInstructionBefore); + ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast); + DISALLOW_COPY_AND_ASSIGN(HIntConstant); +}; + +class HLongConstant : public HConstant { + public: + int64_t GetValue() const { return value_; } + + bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + DCHECK(other->IsLongConstant()); + return other->AsLongConstant()->value_ == value_; + } + + size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); } + + bool IsMinusOne() const OVERRIDE { return GetValue() == -1; } + bool IsZero() const OVERRIDE { return GetValue() == 0; } + bool IsOne() const OVERRIDE { return GetValue() == 1; } + + DECLARE_INSTRUCTION(LongConstant); + + private: + explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {} + + const int64_t value_; + + friend class HGraph; + DISALLOW_COPY_AND_ASSIGN(HLongConstant); +}; // Conditional branch. A block ending with an HIf instruction must have // two successors. @@ -1962,29 +2257,24 @@ class HTryBoundary : public HTemplateInstruction<0> { // Returns whether `handler` is among its exception handlers (non-zero index // successors). - bool HasExceptionHandler(HBasicBlock* handler) const { - DCHECK(handler->IsCatchBlock()); - return GetBlock()->GetSuccessors().Contains(handler, /* start_from */ 1); - } - - // Returns whether successor at index `idx` is an exception handler. 
- bool IsExceptionalSuccessor(size_t idx) const { - DCHECK_LT(idx, GetBlock()->GetSuccessors().Size()); - bool is_handler = (idx != 0); - DCHECK(!is_handler || GetBlock()->GetSuccessors().Get(idx)->IsCatchBlock()); - return is_handler; + bool HasExceptionHandler(const HBasicBlock& handler) const { + DCHECK(handler.IsCatchBlock()); + return GetBlock()->GetSuccessors().Contains( + const_cast<HBasicBlock*>(&handler), /* start_from */ 1); } // If not present already, adds `handler` to its block's list of exception // handlers. void AddExceptionHandler(HBasicBlock* handler) { - if (!HasExceptionHandler(handler)) { + if (!HasExceptionHandler(*handler)) { GetBlock()->AddSuccessor(handler); } } bool IsEntry() const { return kind_ == BoundaryKind::kEntry; } + bool HasSameExceptionHandlersAs(const HTryBoundary& other) const; + DECLARE_INSTRUCTION(TryBoundary); private: @@ -1993,6 +2283,24 @@ class HTryBoundary : public HTemplateInstruction<0> { DISALLOW_COPY_AND_ASSIGN(HTryBoundary); }; +// Iterator over exception handlers of a given HTryBoundary, i.e. over +// exceptional successors of its basic block. +class HExceptionHandlerIterator : public ValueObject { + public: + explicit HExceptionHandlerIterator(const HTryBoundary& try_boundary) + : block_(*try_boundary.GetBlock()), index_(block_.NumberOfNormalSuccessors()) {} + + bool Done() const { return index_ == block_.GetSuccessors().Size(); } + HBasicBlock* Current() const { return block_.GetSuccessors().Get(index_); } + size_t CurrentSuccessorIndex() const { return index_; } + void Advance() { ++index_; } + + private: + const HBasicBlock& block_; + size_t index_; + + DISALLOW_COPY_AND_ASSIGN(HExceptionHandlerIterator); +}; // Deoptimize to interpreter, upon checking a condition. class HDeoptimize : public HTemplateInstruction<1> { @@ -2050,8 +2358,8 @@ class HUnaryOperation : public HExpression<1> { HConstant* TryStaticEvaluation() const; // Apply this operation to `x`. - virtual int32_t Evaluate(int32_t x) const = 0; - virtual int64_t Evaluate(int64_t x) const = 0; + virtual HConstant* Evaluate(HIntConstant* x) const = 0; + virtual HConstant* Evaluate(HLongConstant* x) const = 0; DECLARE_INSTRUCTION(UnaryOperation); @@ -2063,7 +2371,9 @@ class HBinaryOperation : public HExpression<2> { public: HBinaryOperation(Primitive::Type result_type, HInstruction* left, - HInstruction* right) : HExpression(result_type, SideEffects::None()) { + HInstruction* right, + SideEffects side_effects = SideEffects::None()) + : HExpression(result_type, side_effects) { SetRawInputAt(0, left); SetRawInputAt(1, right); } @@ -2118,8 +2428,18 @@ class HBinaryOperation : public HExpression<2> { HConstant* TryStaticEvaluation() const; // Apply this operation to `x` and `y`. - virtual int32_t Evaluate(int32_t x, int32_t y) const = 0; - virtual int64_t Evaluate(int64_t x, int64_t y) const = 0; + virtual HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const = 0; + virtual HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const = 0; + virtual HConstant* Evaluate(HIntConstant* x ATTRIBUTE_UNUSED, + HLongConstant* y ATTRIBUTE_UNUSED) const { + VLOG(compiler) << DebugName() << " is not defined for the (int, long) case."; + return nullptr; + } + virtual HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED, + HIntConstant* y ATTRIBUTE_UNUSED) const { + VLOG(compiler) << DebugName() << " is not defined for the (long, int) case."; + return nullptr; + } // Returns an input that can legally be used as the right input and is // constant, or null. 
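// A reference sketch, ours, for the next hunk: ComparisonBias captures the
// semantics of Java's fcmpg/fcmpl comparisons, which differ only in how NaN
// is reported. Hypothetical helper name:
#include <cmath>

int CompareFloat(float x, float y, bool gt_bias) {
  if (std::isnan(x) || std::isnan(y)) {
    return gt_bias ? 1 : -1;  // kGtBias: fcmpg behavior; kLtBias: fcmpl
  }
  return x == y ? 0 : (x > y ? 1 : -1);
}
// javac pairs "<"/"<=" with the gt-bias compare and ">"/">=" with the lt-bias
// compare so that any ordered comparison involving NaN evaluates to false;
// the IsFPConditionTrueIfNaN/IsFPConditionFalseIfNaN helpers added below give
// the code generator the same answer once a HCompare is merged into a
// HCondition.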
@@ -2135,11 +2455,20 @@ class HBinaryOperation : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HBinaryOperation); }; +// The comparison bias applies for floating point operations and indicates how NaN +// comparisons are treated: +enum class ComparisonBias { + kNoBias, // bias is not applicable (i.e. for long operation) + kGtBias, // return 1 for NaN comparisons + kLtBias, // return -1 for NaN comparisons +}; + class HCondition : public HBinaryOperation { public: HCondition(HInstruction* first, HInstruction* second) : HBinaryOperation(Primitive::kPrimBoolean, first, second), - needs_materialization_(true) {} + needs_materialization_(true), + bias_(ComparisonBias::kNoBias) {} bool NeedsMaterialization() const { return needs_materialization_; } void ClearNeedsMaterialization() { needs_materialization_ = false; } @@ -2152,11 +2481,36 @@ class HCondition : public HBinaryOperation { virtual IfCondition GetCondition() const = 0; + virtual IfCondition GetOppositeCondition() const = 0; + + bool IsGtBias() const { return bias_ == ComparisonBias::kGtBias; } + + void SetBias(ComparisonBias bias) { bias_ = bias; } + + bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + return bias_ == other->AsCondition()->bias_; + } + + bool IsFPConditionTrueIfNaN() const { + DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())); + IfCondition if_cond = GetCondition(); + return IsGtBias() ? ((if_cond == kCondGT) || (if_cond == kCondGE)) : (if_cond == kCondNE); + } + + bool IsFPConditionFalseIfNaN() const { + DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())); + IfCondition if_cond = GetCondition(); + return IsGtBias() ? ((if_cond == kCondLT) || (if_cond == kCondLE)) : (if_cond == kCondEQ); + } + private: // For register allocation purposes, returns whether this instruction needs to be // materialized (that is, not just be in the processor flags). bool needs_materialization_; + // Needed if we merge a HCompare into a HCondition. + ComparisonBias bias_; + DISALLOW_COPY_AND_ASSIGN(HCondition); }; @@ -2168,11 +2522,13 @@ class HEqual : public HCondition { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x == y ? 1 : 0; + template <typename T> bool Compute(T x, T y) const { return x == y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x == y ? 1 : 0; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(Equal); @@ -2181,6 +2537,10 @@ class HEqual : public HCondition { return kCondEQ; } + IfCondition GetOppositeCondition() const OVERRIDE { + return kCondNE; + } + private: DISALLOW_COPY_AND_ASSIGN(HEqual); }; @@ -2192,11 +2552,13 @@ class HNotEqual : public HCondition { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x != y ? 1 : 0; + template <typename T> bool Compute(T x, T y) const { return x != y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x != y ? 
1 : 0; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(NotEqual); @@ -2205,6 +2567,10 @@ class HNotEqual : public HCondition { return kCondNE; } + IfCondition GetOppositeCondition() const OVERRIDE { + return kCondEQ; + } + private: DISALLOW_COPY_AND_ASSIGN(HNotEqual); }; @@ -2214,11 +2580,13 @@ class HLessThan : public HCondition { HLessThan(HInstruction* first, HInstruction* second) : HCondition(first, second) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x < y ? 1 : 0; + template <typename T> bool Compute(T x, T y) const { return x < y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x < y ? 1 : 0; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(LessThan); @@ -2227,6 +2595,10 @@ class HLessThan : public HCondition { return kCondLT; } + IfCondition GetOppositeCondition() const OVERRIDE { + return kCondGE; + } + private: DISALLOW_COPY_AND_ASSIGN(HLessThan); }; @@ -2236,11 +2608,13 @@ class HLessThanOrEqual : public HCondition { HLessThanOrEqual(HInstruction* first, HInstruction* second) : HCondition(first, second) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x <= y ? 1 : 0; + template <typename T> bool Compute(T x, T y) const { return x <= y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x <= y ? 1 : 0; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(LessThanOrEqual); @@ -2249,6 +2623,10 @@ class HLessThanOrEqual : public HCondition { return kCondLE; } + IfCondition GetOppositeCondition() const OVERRIDE { + return kCondGT; + } + private: DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual); }; @@ -2258,11 +2636,13 @@ class HGreaterThan : public HCondition { HGreaterThan(HInstruction* first, HInstruction* second) : HCondition(first, second) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x > y ? 1 : 0; + template <typename T> bool Compute(T x, T y) const { return x > y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x > y ? 1 : 0; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(GreaterThan); @@ -2271,6 +2651,10 @@ class HGreaterThan : public HCondition { return kCondGT; } + IfCondition GetOppositeCondition() const OVERRIDE { + return kCondLE; + } + private: DISALLOW_COPY_AND_ASSIGN(HGreaterThan); }; @@ -2280,11 +2664,13 @@ class HGreaterThanOrEqual : public HCondition { HGreaterThanOrEqual(HInstruction* first, HInstruction* second) : HCondition(first, second) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x >= y ? 
1 : 0; + template <typename T> bool Compute(T x, T y) const { return x >= y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x >= y ? 1 : 0; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(GreaterThanOrEqual); @@ -2293,6 +2679,10 @@ class HGreaterThanOrEqual : public HCondition { return kCondGE; } + IfCondition GetOppositeCondition() const OVERRIDE { + return kCondLT; + } + private: DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual); }; @@ -2302,50 +2692,47 @@ class HGreaterThanOrEqual : public HCondition { // Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1. class HCompare : public HBinaryOperation { public: - // The bias applies for floating point operations and indicates how NaN - // comparisons are treated: - enum Bias { - kNoBias, // bias is not applicable (i.e. for long operation) - kGtBias, // return 1 for NaN comparisons - kLtBias, // return -1 for NaN comparisons - }; - HCompare(Primitive::Type type, HInstruction* first, HInstruction* second, - Bias bias, + ComparisonBias bias, uint32_t dex_pc) - : HBinaryOperation(Primitive::kPrimInt, first, second), bias_(bias), dex_pc_(dex_pc) { + : HBinaryOperation(Primitive::kPrimInt, first, second, SideEffectsForArchRuntimeCalls(type)), + bias_(bias), + dex_pc_(dex_pc) { DCHECK_EQ(type, first->GetType()); DCHECK_EQ(type, second->GetType()); } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return - x == y ? 0 : - x > y ? 1 : - -1; - } + template <typename T> + int32_t Compute(T x, T y) const { return x == y ? 0 : x > y ? 1 : -1; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return - x == y ? 0 : - x > y ? 1 : - -1; + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } bool InstructionDataEquals(HInstruction* other) const OVERRIDE { return bias_ == other->AsCompare()->bias_; } - bool IsGtBias() { return bias_ == kGtBias; } + ComparisonBias GetBias() const { return bias_; } - uint32_t GetDexPc() const { return dex_pc_; } + bool IsGtBias() { return bias_ == ComparisonBias::kGtBias; } + + uint32_t GetDexPc() const OVERRIDE { return dex_pc_; } + + static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type type) { + // MIPS64 uses a runtime call for FP comparisons. + return Primitive::IsFloatingPointType(type) ? 
SideEffects::CanTriggerGC() : SideEffects::None(); + } DECLARE_INSTRUCTION(Compare); private: - const Bias bias_; + const ComparisonBias bias_; const uint32_t dex_pc_; DISALLOW_COPY_AND_ASSIGN(HCompare); @@ -2401,27 +2788,12 @@ class HStoreLocal : public HTemplateInstruction<2> { DISALLOW_COPY_AND_ASSIGN(HStoreLocal); }; -class HConstant : public HExpression<0> { - public: - explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {} - - bool CanBeMoved() const OVERRIDE { return true; } - - virtual bool IsMinusOne() const { return false; } - virtual bool IsZero() const { return false; } - virtual bool IsOne() const { return false; } - - DECLARE_INSTRUCTION(Constant); - - private: - DISALLOW_COPY_AND_ASSIGN(HConstant); -}; - class HFloatConstant : public HConstant { public: float GetValue() const { return value_; } bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + DCHECK(other->IsFloatConstant()); return bit_cast<uint32_t, float>(other->AsFloatConstant()->value_) == bit_cast<uint32_t, float>(value_); } @@ -2461,6 +2833,7 @@ class HDoubleConstant : public HConstant { double GetValue() const { return value_; } bool InstructionDataEquals(HInstruction* other) const OVERRIDE { + DCHECK(other->IsDoubleConstant()); return bit_cast<uint64_t, double>(other->AsDoubleConstant()->value_) == bit_cast<uint64_t, double>(value_); } @@ -2495,79 +2868,8 @@ class HDoubleConstant : public HConstant { DISALLOW_COPY_AND_ASSIGN(HDoubleConstant); }; -class HNullConstant : public HConstant { - public: - bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { - return true; - } - - size_t ComputeHashCode() const OVERRIDE { return 0; } - - DECLARE_INSTRUCTION(NullConstant); - - private: - HNullConstant() : HConstant(Primitive::kPrimNot) {} - - friend class HGraph; - DISALLOW_COPY_AND_ASSIGN(HNullConstant); -}; - -// Constants of the type int. Those can be from Dex instructions, or -// synthesized (for example with the if-eqz instruction). 
-class HIntConstant : public HConstant { - public: - int32_t GetValue() const { return value_; } - - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - return other->AsIntConstant()->value_ == value_; - } - - size_t ComputeHashCode() const OVERRIDE { return GetValue(); } - - bool IsMinusOne() const OVERRIDE { return GetValue() == -1; } - bool IsZero() const OVERRIDE { return GetValue() == 0; } - bool IsOne() const OVERRIDE { return GetValue() == 1; } - - DECLARE_INSTRUCTION(IntConstant); - - private: - explicit HIntConstant(int32_t value) : HConstant(Primitive::kPrimInt), value_(value) {} - - const int32_t value_; - - friend class HGraph; - ART_FRIEND_TEST(GraphTest, InsertInstructionBefore); - ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast); - DISALLOW_COPY_AND_ASSIGN(HIntConstant); -}; - -class HLongConstant : public HConstant { - public: - int64_t GetValue() const { return value_; } - - bool InstructionDataEquals(HInstruction* other) const OVERRIDE { - return other->AsLongConstant()->value_ == value_; - } - - size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); } - - bool IsMinusOne() const OVERRIDE { return GetValue() == -1; } - bool IsZero() const OVERRIDE { return GetValue() == 0; } - bool IsOne() const OVERRIDE { return GetValue() == 1; } - - DECLARE_INSTRUCTION(LongConstant); - - private: - explicit HLongConstant(int64_t value) : HConstant(Primitive::kPrimLong), value_(value) {} - - const int64_t value_; - - friend class HGraph; - DISALLOW_COPY_AND_ASSIGN(HLongConstant); -}; - enum class Intrinsics { -#define OPTIMIZING_INTRINSICS(Name, IsStatic) k ## Name, +#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) k ## Name, #include "intrinsics_list.h" kNone, INTRINSICS_LIST(OPTIMIZING_INTRINSICS) @@ -2576,13 +2878,18 @@ enum class Intrinsics { }; std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic); +enum IntrinsicNeedsEnvironment { + kNoEnvironment, // Intrinsic does not require an environment. + kNeedsEnvironment // Intrinsic requires an environment. +}; + class HInvoke : public HInstruction { public: size_t InputCount() const OVERRIDE { return inputs_.Size(); } // Runtime needs to walk the stack, so Dex -> Dex calls need to // know their environment. - bool NeedsEnvironment() const OVERRIDE { return true; } + bool NeedsEnvironment() const OVERRIDE { return needs_environment_ == kNeedsEnvironment; } void SetArgumentAt(size_t index, HInstruction* argument) { SetRawInputAt(index, argument); @@ -2607,8 +2914,9 @@ class HInvoke : public HInstruction { return intrinsic_; } - void SetIntrinsic(Intrinsics intrinsic) { + void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironment needs_environment) { intrinsic_ = intrinsic; + needs_environment_ = needs_environment; } bool IsFromInlinedInvoke() const { @@ -2627,14 +2935,16 @@ class HInvoke : public HInstruction { uint32_t dex_pc, uint32_t dex_method_index, InvokeType original_invoke_type) - : HInstruction(SideEffects::All()), + : HInstruction( + SideEffects::AllExceptGCDependency()), // Assume write/read on all fields/arrays. 
number_of_arguments_(number_of_arguments), inputs_(arena, number_of_arguments), return_type_(return_type), dex_pc_(dex_pc), dex_method_index_(dex_method_index), original_invoke_type_(original_invoke_type), - intrinsic_(Intrinsics::kNone) { + intrinsic_(Intrinsics::kNone), + needs_environment_(kNeedsEnvironment) { uint32_t number_of_inputs = number_of_arguments + number_of_other_inputs; inputs_.SetSize(number_of_inputs); } @@ -2651,6 +2961,7 @@ class HInvoke : public HInstruction { const uint32_t dex_method_index_; const InvokeType original_invoke_type_; Intrinsics intrinsic_; + IntrinsicNeedsEnvironment needs_environment_; private: DISALLOW_COPY_AND_ASSIGN(HInvoke); @@ -2678,9 +2989,11 @@ class HInvokeStaticOrDirect : public HInvoke { ClinitCheckRequirement clinit_check_requirement) : HInvoke(arena, number_of_arguments, - // There is one extra argument for the HCurrentMethod node, and - // potentially one other if the clinit check is explicit. - clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 2u : 1u, + // There is one extra argument for the HCurrentMethod node, and + // potentially one other if the clinit check is explicit, and one other + // if the method is a string factory. + 1u + (clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u) + + (string_init_offset ? 1u : 0u), return_type, dex_pc, dex_method_index, @@ -2697,6 +3010,10 @@ class HInvokeStaticOrDirect : public HInvoke { return false; } + bool CanBeNull() const OVERRIDE { + return return_type_ == Primitive::kPrimNot && !IsStringInit(); + } + InvokeType GetInvokeType() const { return invoke_type_; } bool IsRecursive() const { return is_recursive_; } bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); } @@ -2725,6 +3042,23 @@ class HInvokeStaticOrDirect : public HInvoke { DCHECK(IsStaticWithImplicitClinitCheck()); } + bool IsStringFactoryFor(HFakeString* str) const { + if (!IsStringInit()) return false; + // +1 for the current method. + if (InputCount() == (number_of_arguments_ + 1)) return false; + return InputAt(InputCount() - 1)->AsFakeString() == str; + } + + void RemoveFakeStringArgumentAsLastInput() { + DCHECK(IsStringInit()); + size_t last_input_index = InputCount() - 1; + HInstruction* last_input = InputAt(last_input_index); + DCHECK(last_input != nullptr); + DCHECK(last_input->IsFakeString()) << last_input->DebugName(); + RemoveAsUserOfInput(last_input_index); + inputs_.DeleteAt(last_input_index); + } + // Is this a call to a static method whose declaring class has an // explicit initialization check in the graph? 
bool IsStaticWithExplicitClinitCheck() const { @@ -2825,7 +3159,7 @@ class HNewInstance : public HExpression<1> { uint16_t type_index, const DexFile& dex_file, QuickEntrypointEnum entrypoint) - : HExpression(Primitive::kPrimNot, SideEffects::None()), + : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC()), dex_pc_(dex_pc), type_index_(type_index), dex_file_(dex_file), @@ -2862,11 +3196,17 @@ class HNewInstance : public HExpression<1> { class HNeg : public HUnaryOperation { public: - explicit HNeg(Primitive::Type result_type, HInstruction* input) + HNeg(Primitive::Type result_type, HInstruction* input) : HUnaryOperation(result_type, input) {} - int32_t Evaluate(int32_t x) const OVERRIDE { return -x; } - int64_t Evaluate(int64_t x) const OVERRIDE { return -x; } + template <typename T> T Compute(T x) const { return -x; } + + HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue())); + } + HConstant* Evaluate(HLongConstant* x) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue())); + } DECLARE_INSTRUCTION(Neg); @@ -2882,7 +3222,7 @@ class HNewArray : public HExpression<2> { uint16_t type_index, const DexFile& dex_file, QuickEntrypointEnum entrypoint) - : HExpression(Primitive::kPrimNot, SideEffects::None()), + : HExpression(Primitive::kPrimNot, SideEffects::CanTriggerGC()), dex_pc_(dex_pc), type_index_(type_index), dex_file_(dex_file), @@ -2923,11 +3263,13 @@ class HAdd : public HBinaryOperation { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x + y; + template <typename T> T Compute(T x, T y) const { return x + y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x + y; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(Add); @@ -2941,11 +3283,13 @@ class HSub : public HBinaryOperation { HSub(Primitive::Type result_type, HInstruction* left, HInstruction* right) : HBinaryOperation(result_type, left, right) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - return x - y; + template <typename T> T Compute(T x, T y) const { return x - y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - return x - y; + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); } DECLARE_INSTRUCTION(Sub); @@ -2961,8 +3305,14 @@ class HMul : public HBinaryOperation { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x * y; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x * y; } + template <typename T> T Compute(T x, T y) const { return x * y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return 
GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } DECLARE_INSTRUCTION(Mul); @@ -2973,23 +3323,32 @@ class HMul : public HBinaryOperation { class HDiv : public HBinaryOperation { public: HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc) - : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {} + : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls()), + dex_pc_(dex_pc) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - // Our graph structure ensures we never have 0 for `y` during constant folding. + template <typename T> + T Compute(T x, T y) const { + // Our graph structure ensures we never have 0 for `y` during + // constant folding. DCHECK_NE(y, 0); // Special case -1 to avoid getting a SIGFPE on x86(_64). return (y == -1) ? -x : x / y; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - DCHECK_NE(y, 0); - // Special case -1 to avoid getting a SIGFPE on x86(_64). - return (y == -1) ? -x : x / y; + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); } uint32_t GetDexPc() const OVERRIDE { return dex_pc_; } + static SideEffects SideEffectsForArchRuntimeCalls() { + // The generated code can use a runtime call. + return SideEffects::CanTriggerGC(); + } + DECLARE_INSTRUCTION(Div); private: @@ -3001,22 +3360,31 @@ class HDiv : public HBinaryOperation { class HRem : public HBinaryOperation { public: HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc) - : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {} + : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls()), + dex_pc_(dex_pc) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { + template <typename T> + T Compute(T x, T y) const { + // Our graph structure ensures we never have 0 for `y` during + // constant folding. DCHECK_NE(y, 0); // Special case -1 to avoid getting a SIGFPE on x86(_64). return (y == -1) ? 0 : x % y; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - DCHECK_NE(y, 0); - // Special case -1 to avoid getting a SIGFPE on x86(_64). - return (y == -1) ? 
0 : x % y; + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); } uint32_t GetDexPc() const OVERRIDE { return dex_pc_; } + static SideEffects SideEffectsForArchRuntimeCalls() { + return SideEffects::CanTriggerGC(); + } + DECLARE_INSTRUCTION(Rem); private: @@ -3032,6 +3400,8 @@ class HDivZeroCheck : public HExpression<1> { SetRawInputAt(0, value); } + Primitive::Type GetType() const OVERRIDE { return InputAt(0)->GetType(); } + bool CanBeMoved() const OVERRIDE { return true; } bool InstructionDataEquals(HInstruction* other) const OVERRIDE { @@ -3057,8 +3427,27 @@ class HShl : public HBinaryOperation { HShl(Primitive::Type result_type, HInstruction* left, HInstruction* right) : HBinaryOperation(result_type, left, right) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x << (y & kMaxIntShiftValue); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x << (y & kMaxLongShiftValue); } + template <typename T, typename U, typename V> + T Compute(T x, U y, V max_shift_value) const { + static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value, + "V is not the unsigned integer type corresponding to T"); + return x << (y & max_shift_value); + } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant( + Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue)); + } + // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this + // case is handled as `x << static_cast<int>(y)`. + HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue)); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue)); + } DECLARE_INSTRUCTION(Shl); @@ -3071,8 +3460,27 @@ class HShr : public HBinaryOperation { HShr(Primitive::Type result_type, HInstruction* left, HInstruction* right) : HBinaryOperation(result_type, left, right) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x >> (y & kMaxIntShiftValue); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x >> (y & kMaxLongShiftValue); } + template <typename T, typename U, typename V> + T Compute(T x, U y, V max_shift_value) const { + static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value, + "V is not the unsigned integer type corresponding to T"); + return x >> (y & max_shift_value); + } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant( + Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue)); + } + // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this + // case is handled as `x >> static_cast<int>(y)`. 
+ HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue)); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue)); + } DECLARE_INSTRUCTION(Shr); @@ -3085,16 +3493,27 @@ class HUShr : public HBinaryOperation { HUShr(Primitive::Type result_type, HInstruction* left, HInstruction* right) : HBinaryOperation(result_type, left, right) {} - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { - uint32_t ux = static_cast<uint32_t>(x); - uint32_t uy = static_cast<uint32_t>(y) & kMaxIntShiftValue; - return static_cast<int32_t>(ux >> uy); + template <typename T, typename U, typename V> + T Compute(T x, U y, V max_shift_value) const { + static_assert(std::is_same<V, typename std::make_unsigned<T>::type>::value, + "V is not the unsigned integer type corresponding to T"); + V ux = static_cast<V>(x); + return static_cast<T>(ux >> (y & max_shift_value)); } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { - uint64_t ux = static_cast<uint64_t>(x); - uint64_t uy = static_cast<uint64_t>(y) & kMaxLongShiftValue; - return static_cast<int64_t>(ux >> uy); + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant( + Compute(x->GetValue(), y->GetValue(), kMaxIntShiftValue)); + } + // There is no `Evaluate(HIntConstant* x, HLongConstant* y)`, as this + // case is handled as `x >>> static_cast<int>(y)`. + HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue)); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue)); } DECLARE_INSTRUCTION(UShr); @@ -3110,8 +3529,21 @@ class HAnd : public HBinaryOperation { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x & y; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x & y; } + template <typename T, typename U> + auto Compute(T x, U y) const -> decltype(x & y) { return x & y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } DECLARE_INSTRUCTION(And); @@ -3126,8 +3558,21 @@ class HOr : public HBinaryOperation { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x | y; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x | y; } + template <typename T, typename U> + auto Compute(T x, U y) const -> decltype(x | y) { return x | y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return 
GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } DECLARE_INSTRUCTION(Or); @@ -3142,8 +3587,21 @@ class HXor : public HBinaryOperation { bool IsCommutative() const OVERRIDE { return true; } - int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x ^ y; } - int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x ^ y; } + template <typename T, typename U> + auto Compute(T x, U y) const -> decltype(x ^ y) { return x ^ y; } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HIntConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue(), y->GetValue())); + } DECLARE_INSTRUCTION(Xor); @@ -3179,7 +3637,7 @@ class HParameterValue : public HExpression<0> { class HNot : public HUnaryOperation { public: - explicit HNot(Primitive::Type result_type, HInstruction* input) + HNot(Primitive::Type result_type, HInstruction* input) : HUnaryOperation(result_type, input) {} bool CanBeMoved() const OVERRIDE { return true; } @@ -3188,8 +3646,14 @@ class HNot : public HUnaryOperation { return true; } - int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; } - int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; } + template <typename T> T Compute(T x) const { return ~x; } + + HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue())); + } + HConstant* Evaluate(HLongConstant* x) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue())); + } DECLARE_INSTRUCTION(Not); @@ -3208,13 +3672,16 @@ class HBooleanNot : public HUnaryOperation { return true; } - int32_t Evaluate(int32_t x) const OVERRIDE { + template <typename T> bool Compute(T x) const { DCHECK(IsUint<1>(x)); return !x; } - int64_t Evaluate(int64_t x ATTRIBUTE_UNUSED) const OVERRIDE { - LOG(FATAL) << DebugName() << " cannot be used with 64-bit values"; + HConstant* Evaluate(HIntConstant* x) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue())); + } + HConstant* Evaluate(HLongConstant* x ATTRIBUTE_UNUSED) const OVERRIDE { + LOG(FATAL) << DebugName() << " is not defined for long values"; UNREACHABLE(); } @@ -3228,7 +3695,8 @@ class HTypeConversion : public HExpression<1> { public: // Instantiate a type conversion of `input` to `result_type`. 
HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc) - : HExpression(result_type, SideEffects::None()), dex_pc_(dex_pc) { + : HExpression(result_type, SideEffectsForArchRuntimeCalls(input->GetType(), result_type)), + dex_pc_(dex_pc) { SetRawInputAt(0, input); DCHECK_NE(input->GetType(), result_type); } @@ -3248,6 +3716,18 @@ class HTypeConversion : public HExpression<1> { // containing the result. If the input cannot be converted, return nullptr. HConstant* TryStaticEvaluation() const; + static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type input_type, + Primitive::Type result_type) { + // Some architectures may not require the 'GC' side effects, but at this point + // in the compilation process we do not know what architecture we will + // generate code for, so we must be conservative. + if ((Primitive::IsFloatingPointType(input_type) && Primitive::IsIntegralType(result_type)) + || (input_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(result_type))) { + return SideEffects::CanTriggerGC(); + } + return SideEffects::None(); + } + DECLARE_INSTRUCTION(TypeConversion); private: @@ -3283,6 +3763,8 @@ class HPhi : public HInstruction { } } + bool IsCatchPhi() const { return GetBlock()->IsCatchBlock(); } + size_t InputCount() const OVERRIDE { return inputs_.Size(); } void AddInput(HInstruction* input); @@ -3398,7 +3880,9 @@ class HInstanceFieldGet : public HExpression<1> { bool is_volatile, uint32_t field_idx, const DexFile& dex_file) - : HExpression(field_type, SideEffects::DependsOnSomething()), + : HExpression( + field_type, + SideEffects::FieldReadOfType(field_type, is_volatile)), field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) { SetRawInputAt(0, value); } @@ -3440,7 +3924,8 @@ class HInstanceFieldSet : public HTemplateInstruction<2> { bool is_volatile, uint32_t field_idx, const DexFile& dex_file) - : HTemplateInstruction(SideEffects::ChangesSomething()), + : HTemplateInstruction( + SideEffects::FieldWriteOfType(field_type, is_volatile)), field_info_(field_offset, field_type, is_volatile, field_idx, dex_file), value_can_be_null_(true) { SetRawInputAt(0, object); @@ -3471,7 +3956,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> { class HArrayGet : public HExpression<2> { public: HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type) - : HExpression(type, SideEffects::DependsOnSomething()) { + : HExpression(type, SideEffects::ArrayReadOfType(type)) { SetRawInputAt(0, array); SetRawInputAt(1, index); } @@ -3509,7 +3994,9 @@ class HArraySet : public HTemplateInstruction<3> { HInstruction* value, Primitive::Type expected_component_type, uint32_t dex_pc) - : HTemplateInstruction(SideEffects::ChangesSomething()), + : HTemplateInstruction( + SideEffects::ArrayWriteOfType(expected_component_type).Union( + SideEffectsForArchRuntimeCalls(value->GetType()))), dex_pc_(dex_pc), expected_component_type_(expected_component_type), needs_type_check_(value->GetType() == Primitive::kPrimNot), @@ -3562,6 +4049,10 @@ class HArraySet : public HTemplateInstruction<3> { : expected_component_type_; } + static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) { + return (value_type == Primitive::kPrimNot) ? 
SideEffects::CanTriggerGC() : SideEffects::None(); + } + DECLARE_INSTRUCTION(ArraySet); private: @@ -3656,7 +4147,7 @@ class HTemporary : public HTemplateInstruction<0> { class HSuspendCheck : public HTemplateInstruction<0> { public: explicit HSuspendCheck(uint32_t dex_pc) - : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc), slow_path_(nullptr) {} + : HTemplateInstruction(SideEffects::CanTriggerGC()), dex_pc_(dex_pc), slow_path_(nullptr) {} bool NeedsEnvironment() const OVERRIDE { return true; @@ -3688,13 +4179,13 @@ class HLoadClass : public HExpression<1> { const DexFile& dex_file, bool is_referrers_class, uint32_t dex_pc) - : HExpression(Primitive::kPrimNot, SideEffects::None()), + : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls()), type_index_(type_index), dex_file_(dex_file), is_referrers_class_(is_referrers_class), dex_pc_(dex_pc), generate_clinit_check_(false), - loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) { + loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) { SetRawInputAt(0, current_method); } @@ -3709,6 +4200,7 @@ class HLoadClass : public HExpression<1> { uint32_t GetDexPc() const OVERRIDE { return dex_pc_; } uint16_t GetTypeIndex() const { return type_index_; } bool IsReferrersClass() const { return is_referrers_class_; } + bool CanBeNull() const OVERRIDE { return false; } bool NeedsEnvironment() const OVERRIDE { // Will call runtime and load the class if the class is not loaded yet. @@ -3744,14 +4236,14 @@ class HLoadClass : public HExpression<1> { loaded_class_rti_ = rti; } - bool IsResolved() { - return loaded_class_rti_.IsExact(); - } - const DexFile& GetDexFile() { return dex_file_; } bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; } + static SideEffects SideEffectsForArchRuntimeCalls() { + return SideEffects::CanTriggerGC(); + } + DECLARE_INSTRUCTION(LoadClass); private: @@ -3771,7 +4263,7 @@ class HLoadClass : public HExpression<1> { class HLoadString : public HExpression<1> { public: HLoadString(HCurrentMethod* current_method, uint32_t string_index, uint32_t dex_pc) - : HExpression(Primitive::kPrimNot, SideEffects::None()), + : HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls()), string_index_(string_index), dex_pc_(dex_pc) { SetRawInputAt(0, current_method); @@ -3792,6 +4284,10 @@ class HLoadString : public HExpression<1> { bool NeedsEnvironment() const OVERRIDE { return false; } bool NeedsDexCache() const OVERRIDE { return true; } + static SideEffects SideEffectsForArchRuntimeCalls() { + return SideEffects::CanTriggerGC(); + } + DECLARE_INSTRUCTION(LoadString); private: @@ -3806,8 +4302,10 @@ class HLoadString : public HExpression<1> { */ class HClinitCheck : public HExpression<1> { public: - explicit HClinitCheck(HLoadClass* constant, uint32_t dex_pc) - : HExpression(Primitive::kPrimNot, SideEffects::ChangesSomething()), + HClinitCheck(HLoadClass* constant, uint32_t dex_pc) + : HExpression( + Primitive::kPrimNot, + SideEffects::AllChanges()), // Assume write/read on all fields/arrays. 
dex_pc_(dex_pc) { SetRawInputAt(0, constant); } @@ -3843,7 +4341,9 @@ class HStaticFieldGet : public HExpression<1> { bool is_volatile, uint32_t field_idx, const DexFile& dex_file) - : HExpression(field_type, SideEffects::DependsOnSomething()), + : HExpression( + field_type, + SideEffects::FieldReadOfType(field_type, is_volatile)), field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) { SetRawInputAt(0, cls); } @@ -3882,7 +4382,8 @@ class HStaticFieldSet : public HTemplateInstruction<2> { bool is_volatile, uint32_t field_idx, const DexFile& dex_file) - : HTemplateInstruction(SideEffects::ChangesSomething()), + : HTemplateInstruction( + SideEffects::FieldWriteOfType(field_type, is_volatile)), field_info_(field_offset, field_type, is_volatile, field_idx, dex_file), value_can_be_null_(true) { SetRawInputAt(0, cls); @@ -3918,10 +4419,22 @@ class HLoadException : public HExpression<0> { DISALLOW_COPY_AND_ASSIGN(HLoadException); }; +// Implicit part of move-exception which clears thread-local exception storage. +// Must not be removed because the runtime expects the TLS to get cleared. +class HClearException : public HTemplateInstruction<0> { + public: + HClearException() : HTemplateInstruction(SideEffects::AllWrites()) {} + + DECLARE_INSTRUCTION(ClearException); + + private: + DISALLOW_COPY_AND_ASSIGN(HClearException); +}; + class HThrow : public HTemplateInstruction<1> { public: HThrow(HInstruction* exception, uint32_t dex_pc) - : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) { + : HTemplateInstruction(SideEffects::CanTriggerGC()), dex_pc_(dex_pc) { SetRawInputAt(0, exception); } @@ -3947,7 +4460,7 @@ class HInstanceOf : public HExpression<2> { HLoadClass* constant, bool class_is_final, uint32_t dex_pc) - : HExpression(Primitive::kPrimBoolean, SideEffects::None()), + : HExpression(Primitive::kPrimBoolean, SideEffectsForArchRuntimeCalls(class_is_final)), class_is_final_(class_is_final), must_do_null_check_(true), dex_pc_(dex_pc) { @@ -3973,6 +4486,10 @@ class HInstanceOf : public HExpression<2> { bool MustDoNullCheck() const { return must_do_null_check_; } void ClearMustDoNullCheck() { must_do_null_check_ = false; } + static SideEffects SideEffectsForArchRuntimeCalls(bool class_is_final) { + return class_is_final ? SideEffects::None() : SideEffects::CanTriggerGC(); + } + DECLARE_INSTRUCTION(InstanceOf); private: @@ -3985,27 +4502,43 @@ class HInstanceOf : public HExpression<2> { class HBoundType : public HExpression<1> { public: - HBoundType(HInstruction* input, ReferenceTypeInfo bound_type) + // Constructs an HBoundType with the given upper_bound. + // Ensures that the upper_bound is valid. + HBoundType(HInstruction* input, ReferenceTypeInfo upper_bound, bool upper_can_be_null) : HExpression(Primitive::kPrimNot, SideEffects::None()), - bound_type_(bound_type) { + upper_bound_(upper_bound), + upper_can_be_null_(upper_can_be_null), + can_be_null_(upper_can_be_null) { DCHECK_EQ(input->GetType(), Primitive::kPrimNot); SetRawInputAt(0, input); + SetReferenceTypeInfo(upper_bound_); } - const ReferenceTypeInfo& GetBoundType() const { return bound_type_; } + // GetUpper* should only be used in reference type propagation. + const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; } + bool GetUpperCanBeNull() const { return upper_can_be_null_; } - bool CanBeNull() const OVERRIDE { - // `null instanceof ClassX` always return false so we can't be null. 
- return false; + void SetCanBeNull(bool can_be_null) { + DCHECK(upper_can_be_null_ || !can_be_null); + can_be_null_ = can_be_null; } + bool CanBeNull() const OVERRIDE { return can_be_null_; } + DECLARE_INSTRUCTION(BoundType); private: // Encodes the uppermost class that this instruction can have. In other words - // it is always the case that GetBoundType().IsSupertypeOf(GetReferenceType()). - // It is used to bound the type in cases like `if (x instanceof ClassX) {}` - const ReferenceTypeInfo bound_type_; + // it is always the case that GetUpperBound().IsSupertypeOf(GetReferenceType()). + // It is used to bound the type in cases like: + // if (x instanceof ClassX) { + // // upper_bound_ will be ClassX + // } + const ReferenceTypeInfo upper_bound_; + // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this + // is false then can_be_null_ cannot be true). + const bool upper_can_be_null_; + bool can_be_null_; DISALLOW_COPY_AND_ASSIGN(HBoundType); }; @@ -4016,7 +4549,7 @@ class HCheckCast : public HTemplateInstruction<2> { HLoadClass* constant, bool class_is_final, uint32_t dex_pc) - : HTemplateInstruction(SideEffects::None()), + : HTemplateInstruction(SideEffects::CanTriggerGC()), class_is_final_(class_is_final), must_do_null_check_(true), dex_pc_(dex_pc) { @@ -4057,7 +4590,8 @@ class HCheckCast : public HTemplateInstruction<2> { class HMemoryBarrier : public HTemplateInstruction<0> { public: explicit HMemoryBarrier(MemBarrierKind barrier_kind) - : HTemplateInstruction(SideEffects::None()), + : HTemplateInstruction( + SideEffects::AllWritesAndReads()), // Assume write/read on all fields/arrays. barrier_kind_(barrier_kind) {} MemBarrierKind GetBarrierKind() { return barrier_kind_; } @@ -4078,13 +4612,21 @@ class HMonitorOperation : public HTemplateInstruction<1> { }; HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc) - : HTemplateInstruction(SideEffects::None()), kind_(kind), dex_pc_(dex_pc) { + : HTemplateInstruction( + SideEffects::AllExceptGCDependency()), // Assume write/read on all fields/arrays. + kind_(kind), dex_pc_(dex_pc) { SetRawInputAt(0, object); } // Instruction may throw a Java exception, so we need an environment. - bool NeedsEnvironment() const OVERRIDE { return true; } - bool CanThrow() const OVERRIDE { return true; } + bool NeedsEnvironment() const OVERRIDE { return CanThrow(); } + + bool CanThrow() const OVERRIDE { + // Verifier guarantees that monitor-exit cannot throw. + // This is important because it allows the HGraphBuilder to remove + // a dead throw-catch loop generated for `synchronized` blocks/methods. + return IsEnter(); + } uint32_t GetDexPc() const OVERRIDE { return dex_pc_; } @@ -4100,6 +4642,25 @@ class HMonitorOperation : public HTemplateInstruction<1> { DISALLOW_COPY_AND_ASSIGN(HMonitorOperation); }; +/** + * An HInstruction used as a marker for the replacement of new + <init> + * of a String with a call to a StringFactory. Only baseline will see + * the node at code generation, where it will be treated as null. + * When compiling non-baseline, `HFakeString` instructions are removed + * by the instruction simplifier. 
+ */ +class HFakeString : public HTemplateInstruction<0> { + public: + HFakeString() : HTemplateInstruction(SideEffects::None()) {} + + Primitive::Type GetType() const OVERRIDE { return Primitive::kPrimNot; } + + DECLARE_INSTRUCTION(FakeString); + + private: + DISALLOW_COPY_AND_ASSIGN(HFakeString); +}; + class MoveOperands : public ArenaObject<kArenaAllocMisc> { public: MoveOperands(Location source, diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h index bc565468b2..f793a65bf3 100644 --- a/compiler/optimizing/optimization.h +++ b/compiler/optimizing/optimization.h @@ -40,7 +40,7 @@ class HOptimization : public ArenaObject<kArenaAllocMisc> { // Return the name of the pass. const char* GetPassName() const { return pass_name_; } - // Peform the analysis itself. + // Perform the analysis itself. virtual void Run() = 0; protected: diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc index fe3bb1a2b4..f455571636 100644 --- a/compiler/optimizing/optimizing_cfi_test.cc +++ b/compiler/optimizing/optimizing_cfi_test.cc @@ -29,7 +29,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class OptimizingCFITest : public CFITest { public: @@ -125,6 +125,6 @@ TEST_ISA(kArm64) TEST_ISA(kX86) TEST_ISA(kX86_64) -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 1e515307b4..6a50b7d4a4 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -35,6 +35,7 @@ #include "dex/verified_method.h" #include "dex/verification_results.h" #include "driver/compiler_driver.h" +#include "driver/compiler_driver-inl.h" #include "driver/compiler_options.h" #include "driver/dex_compilation_unit.h" #include "elf_writer_quick.h" @@ -85,7 +86,7 @@ class CodeVectorAllocator FINAL : public CodeAllocator { * Filter to apply to the visualizer. Methods whose names contain that filter will * be dumped. */ -static const char* kStringFilter = ""; +static constexpr const char kStringFilter[] = ""; class PassScope; @@ -104,12 +105,14 @@ class PassObserver : public ValueObject { visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()), visualizer_(visualizer_output, graph, *codegen), graph_in_bad_state_(false) { - if (strstr(method_name, kStringFilter) == nullptr) { - timing_logger_enabled_ = visualizer_enabled_ = false; - } - if (visualizer_enabled_) { - visualizer_.PrintHeader(method_name_); - codegen->SetDisassemblyInformation(&disasm_info_); + if (timing_logger_enabled_ || visualizer_enabled_) { + if (!IsVerboseMethod(compiler_driver, method_name)) { + timing_logger_enabled_ = visualizer_enabled_ = false; + } + if (visualizer_enabled_) { + visualizer_.PrintHeader(method_name_); + codegen->SetDisassemblyInformation(&disasm_info_); + } } } @@ -132,7 +135,7 @@ class PassObserver : public ValueObject { void StartPass(const char* pass_name) { // Dump graph first, then start timer. 
if (visualizer_enabled_) { - visualizer_.DumpGraph(pass_name, /* is_after_pass */ false); + visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_); } if (timing_logger_enabled_) { timing_logger_.StartTiming(pass_name); @@ -145,7 +148,7 @@ class PassObserver : public ValueObject { timing_logger_.EndTiming(); } if (visualizer_enabled_) { - visualizer_.DumpGraph(pass_name, /* is_after_pass */ true); + visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_); } // Validate the HGraph if running in debug mode. @@ -168,6 +171,23 @@ class PassObserver : public ValueObject { } } + static bool IsVerboseMethod(CompilerDriver* compiler_driver, const char* method_name) { + // Test an exact match to --verbose-methods. If verbose-methods is set, this overrides an + // empty kStringFilter matching all methods. + if (compiler_driver->GetCompilerOptions().HasVerboseMethods()) { + return compiler_driver->GetCompilerOptions().IsVerboseMethod(method_name); + } + + // Test the kStringFilter sub-string. constexpr helper variable to silence unreachable-code + // warning when the string is empty. + constexpr bool kStringFilterEmpty = arraysize(kStringFilter) <= 1; + if (kStringFilterEmpty || strstr(method_name, kStringFilter) != nullptr) { + return true; + } + + return false; + } + HGraph* const graph_; const char* method_name_; @@ -236,7 +256,7 @@ class OptimizingCompiler FINAL : public Compiler { } uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet()))); } @@ -349,6 +369,36 @@ static void RunOptimizations(HOptimization* optimizations[], } } +static void MaybeRunInliner(HGraph* graph, + CompilerDriver* driver, + OptimizingCompilerStats* stats, + const DexCompilationUnit& dex_compilation_unit, + PassObserver* pass_observer, + StackHandleScopeCollection* handles) { + const CompilerOptions& compiler_options = driver->GetCompilerOptions(); + bool should_inline = (compiler_options.GetInlineDepthLimit() > 0) + && (compiler_options.GetInlineMaxCodeUnits() > 0); + if (!should_inline) { + return; + } + + ArenaAllocator* arena = graph->GetArena(); + HInliner* inliner = new (arena) HInliner( + graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats); + ReferenceTypePropagation* type_propagation = + new (arena) ReferenceTypePropagation(graph, handles, + "reference_type_propagation_after_inlining"); + + HOptimization* optimizations[] = { + inliner, + // Run another type propagation phase: inlining will open up more opportunities + // to remove checkcast/instanceof and null checks. 
+ type_propagation, + }; + + RunOptimizations(optimizations, arraysize(optimizations), pass_observer); +} + static void RunOptimizations(HGraph* graph, CompilerDriver* driver, OptimizingCompilerStats* stats, @@ -363,10 +413,6 @@ static void RunOptimizations(HGraph* graph, HConstantFolding* fold1 = new (arena) HConstantFolding(graph); InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats); HBooleanSimplifier* boolean_simplify = new (arena) HBooleanSimplifier(graph); - - HInliner* inliner = new (arena) HInliner( - graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats); - HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining"); SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph); GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects); @@ -378,28 +424,29 @@ static void RunOptimizations(HGraph* graph, graph, stats, "instruction_simplifier_after_types"); InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier( graph, stats, "instruction_simplifier_after_bce"); - ReferenceTypePropagation* type_propagation2 = - new (arena) ReferenceTypePropagation(graph, handles); InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier( graph, stats, "instruction_simplifier_before_codegen"); IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver); - HOptimization* optimizations[] = { + HOptimization* optimizations1[] = { intrinsics, fold1, simplify1, type_propagation, dce1, - simplify2, - inliner, - // Run another type propagation phase: inlining will open up more opprotunities - // to remove checkast/instanceof and null checks. - type_propagation2, + simplify2 + }; + + RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer); + + MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles); + + HOptimization* optimizations2[] = { // BooleanSimplifier depends on the InstructionSimplifier removing redundant // suspend checks to recognize empty blocks. boolean_simplify, - fold2, + fold2, // TODO: if we don't inline we can also skip fold2. side_effects, gvn, licm, @@ -412,7 +459,7 @@ static void RunOptimizations(HGraph* graph, simplify4, }; - RunOptimizations(optimizations, arraysize(optimizations), pass_observer); + RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer); } // The stack map we generate must be 4-byte aligned on ARM. Since existing @@ -555,8 +602,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite } // Implementation of the space filter: do not compile a code item whose size in - // code units is bigger than 256. - static constexpr size_t kSpaceFilterOptimizingThreshold = 256; + // code units is bigger than 128. 
+ static constexpr size_t kSpaceFilterOptimizingThreshold = 128; const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions(); if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace) && (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) { @@ -565,7 +612,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite } DexCompilationUnit dex_compilation_unit( - nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item, + nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item, class_def_idx, method_idx, access_flags, compiler_driver->GetVerifiedMethod(&dex_file, method_idx)); @@ -602,12 +649,29 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite visualizer_output_.get(), compiler_driver); + const uint8_t* interpreter_metadata = nullptr; + { + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<4> hs(soa.Self()); + ClassLinker* class_linker = dex_compilation_unit.GetClassLinker(); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file))); + Handle<mirror::ClassLoader> loader(hs.NewHandle( + soa.Decode<mirror::ClassLoader*>(class_loader))); + ArtMethod* art_method = compiler_driver->ResolveMethod( + soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type); + // We may not get a method, for example if its class is erroneous. + // TODO: Clean this up, the compiler driver should just pass the ArtMethod to compile. + if (art_method != nullptr) { + interpreter_metadata = art_method->GetQuickenedInfo(); + } + } HGraphBuilder builder(graph, &dex_compilation_unit, &dex_compilation_unit, &dex_file, compiler_driver, - compilation_stats_.get()); + compilation_stats_.get(), + interpreter_metadata); VLOG(compiler) << "Building " << method_name; @@ -627,8 +691,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite // `run_optimizations_` is set explicitly (either through a compiler filter // or the debuggable flag). If it is set, we can run baseline. Otherwise, we fall back // to Quick. - bool can_use_baseline = !run_optimizations_; - if (run_optimizations_ && can_optimize && can_allocate_registers) { + bool can_use_baseline = !run_optimizations_ && builder.CanUseBaselineForStringInit(); + if (run_optimizations_ && can_allocate_registers) { VLOG(compiler) << "Optimizing " << method_name; { @@ -637,16 +701,21 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite // We could not transform the graph to SSA, bailout. 
LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop"; MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA); + pass_observer.SetGraphInBadState(); return nullptr; } } - return CompileOptimized(graph, - codegen.get(), - compiler_driver, - dex_compilation_unit, - &pass_observer); - } else if (shouldOptimize && can_allocate_registers) { + if (can_optimize) { + return CompileOptimized(graph, + codegen.get(), + compiler_driver, + dex_compilation_unit, + &pass_observer); + } + } + + if (shouldOptimize && can_allocate_registers) { LOG(FATAL) << "Could not allocate registers in optimizing compiler"; UNREACHABLE(); } else if (can_use_baseline) { diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc index 54ea6f19d4..f9d812f6a6 100644 --- a/compiler/optimizing/parallel_move_resolver.cc +++ b/compiler/optimizing/parallel_move_resolver.cc @@ -38,6 +38,20 @@ void ParallelMoveResolverWithSwap::EmitNativeCode(HParallelMove* parallel_move) // Build up a worklist of moves. BuildInitialMoveList(parallel_move); + // Move stack/stack slot to take advantage of a free register on constrained machines. + for (size_t i = 0; i < moves_.Size(); ++i) { + const MoveOperands& move = *moves_.Get(i); + // Ignore constants and moves already eliminated. + if (move.IsEliminated() || move.GetSource().IsConstant()) { + continue; + } + + if ((move.GetSource().IsStackSlot() || move.GetSource().IsDoubleStackSlot()) && + (move.GetDestination().IsStackSlot() || move.GetDestination().IsDoubleStackSlot())) { + PerformMove(i); + } + } + for (size_t i = 0; i < moves_.Size(); ++i) { const MoveOperands& move = *moves_.Get(i); // Skip constants to perform them last. They don't block other moves diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 3d6606b8dc..1349df9b16 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -25,19 +25,35 @@ namespace art { class RTPVisitor : public HGraphDelegateVisitor { public: - RTPVisitor(HGraph* graph, StackHandleScopeCollection* handles) + RTPVisitor(HGraph* graph, + StackHandleScopeCollection* handles, + GrowableArray<HInstruction*>* worklist, + ReferenceTypeInfo::TypeHandle object_class_handle, + ReferenceTypeInfo::TypeHandle class_class_handle, + ReferenceTypeInfo::TypeHandle string_class_handle) : HGraphDelegateVisitor(graph), - handles_(handles) {} + handles_(handles), + object_class_handle_(object_class_handle), + class_class_handle_(class_class_handle), + string_class_handle_(string_class_handle), + worklist_(worklist) {} + void VisitNullConstant(HNullConstant* null_constant) OVERRIDE; void VisitNewInstance(HNewInstance* new_instance) OVERRIDE; void VisitLoadClass(HLoadClass* load_class) OVERRIDE; + void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE; + void VisitLoadString(HLoadString* instr) OVERRIDE; void VisitNewArray(HNewArray* instr) OVERRIDE; + void VisitParameterValue(HParameterValue* instr) OVERRIDE; void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info); void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact); void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE; void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE; void VisitInvoke(HInvoke* instr) OVERRIDE; void VisitArrayGet(HArrayGet* instr) OVERRIDE; + void VisitCheckCast(HCheckCast* instr) OVERRIDE; + void 
VisitNullCheck(HNullCheck* instr) OVERRIDE; + void VisitFakeString(HFakeString* instr) OVERRIDE; void UpdateReferenceTypeInfo(HInstruction* instr, uint16_t type_idx, const DexFile& dex_file, @@ -45,8 +61,33 @@ class RTPVisitor : public HGraphDelegateVisitor { private: StackHandleScopeCollection* handles_; + ReferenceTypeInfo::TypeHandle object_class_handle_; + ReferenceTypeInfo::TypeHandle class_class_handle_; + ReferenceTypeInfo::TypeHandle string_class_handle_; + GrowableArray<HInstruction*>* worklist_; + + static constexpr size_t kDefaultWorklistSize = 8; }; +ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph, + StackHandleScopeCollection* handles, + const char* name) + : HOptimization(graph, name), + handles_(handles), + worklist_(graph->GetArena(), kDefaultWorklistSize) { + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + object_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject)); + string_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangString)); + class_class_handle_ = handles_->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangClass)); + + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); + DCHECK(ReferenceTypeInfo::IsValidHandle(object_class_handle_)); + DCHECK(ReferenceTypeInfo::IsValidHandle(class_class_handle_)); + DCHECK(ReferenceTypeInfo::IsValidHandle(string_class_handle_)); + } +} + void ReferenceTypePropagation::Run() { // To properly propagate type info we need to visit in the dominator-based order. // Reverse post order guarantees a node's dominators are visited first. @@ -55,29 +96,125 @@ void ReferenceTypePropagation::Run() { VisitBasicBlock(it.Current()); } ProcessWorklist(); + + if (kIsDebugBuild) { + // TODO: move this to the graph checker. + ScopedObjectAccess soa(Thread::Current()); + for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { + HBasicBlock* block = it.Current(); + for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) { + HInstruction* instr = iti.Current(); + if (instr->GetType() == Primitive::kPrimNot) { + DCHECK(instr->GetReferenceTypeInfo().IsValid()) + << "Invalid RTI for instruction: " << instr->DebugName(); + if (instr->IsBoundType()) { + DCHECK(instr->AsBoundType()->GetUpperBound().IsValid()); + } else if (instr->IsLoadClass()) { + DCHECK(instr->AsLoadClass()->GetReferenceTypeInfo().IsExact()); + DCHECK(instr->AsLoadClass()->GetLoadedClassRTI().IsValid()); + } else if (instr->IsNullCheck()) { + DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo())) + << "NullCheck " << instr->GetReferenceTypeInfo() + << "Input(0) " << instr->InputAt(0)->GetReferenceTypeInfo(); + } + } + } + } + } } void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { - // TODO: handle other instructions that give type info - // (array accesses) + RTPVisitor visitor(graph_, + handles_, + &worklist_, + object_class_handle_, + class_class_handle_, + string_class_handle_); + // Handle Phis first as there might be instructions in the same block that depend on them. + for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { + VisitPhi(it.Current()->AsPhi()); + } - RTPVisitor visitor(graph_, handles_); - // Initialize exact types first for faster convergence. + // Handle instructions. for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* instr = it.Current(); instr->Accept(&visitor); } - // Handle Phis. 
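// The Run() loop above relies on reverse post order: a DFS post-order,
// reversed, so every block is visited after the blocks that reach it and
// hence (in a reducible CFG) after its dominators. A hedged sketch of that
// ordering on a toy CFG; this is not ART's HReversePostOrderIterator.
#include <vector>

static void PostOrder(size_t node, const std::vector<std::vector<size_t>>& succ,
                      std::vector<bool>* seen, std::vector<size_t>* out) {
  (*seen)[node] = true;
  for (size_t s : succ[node]) {
    if (!(*seen)[s]) PostOrder(s, succ, seen, out);
  }
  out->push_back(node);  // Emitted only after all reachable successors.
}

static std::vector<size_t> ReversePostOrder(const std::vector<std::vector<size_t>>& succ) {
  std::vector<bool> seen(succ.size(), false);
  std::vector<size_t> order;
  PostOrder(0, succ, &seen, &order);  // Block 0 is the entry, by convention here.
  return std::vector<size_t>(order.rbegin(), order.rend());
}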
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - VisitPhi(it.Current()->AsPhi()); - } - // Add extra nodes to bound types. BoundTypeForIfNotNull(block); BoundTypeForIfInstanceOf(block); } +// Create a bound type for the given object, narrowing the type as much as possible. +// The bound type's upper bound and can_be_null value are taken from +// load_class.GetLoadedClassRTI() and upper_can_be_null respectively. +static HBoundType* CreateBoundType(ArenaAllocator* arena, + HInstruction* obj, + HLoadClass* load_class, + bool upper_can_be_null) + SHARED_REQUIRES(Locks::mutator_lock_) { + ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo(); + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null); + // Narrow the type as much as possible. + if (class_rti.GetTypeHandle()->IsFinal()) { + bound_type->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true)); + } else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) { + bound_type->SetReferenceTypeInfo(obj_rti); + } else { + bound_type->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false)); + } + if (upper_can_be_null) { + bound_type->SetCanBeNull(obj->CanBeNull()); + } + return bound_type; +} + +// Check if we should create a bound type for the given object at the specified +// position. Because of inlining, and because we run RTP more than once, we +// might already have an HBoundType. If we do, we should not create a new one. +// In this case we also assert that there are no other uses of the object (except +// the bound type) dominated by the specified dominator_instr or dominator_block. +static bool ShouldCreateBoundType(HInstruction* position, + HInstruction* obj, + ReferenceTypeInfo upper_bound, + HInstruction* dominator_instr, + HBasicBlock* dominator_block) + SHARED_REQUIRES(Locks::mutator_lock_) { + // If the position where we should insert the bound type is not already a + // bound type then we need to create one. + if (position == nullptr || !position->IsBoundType()) { + return true; + } + + HBoundType* existing_bound_type = position->AsBoundType(); + if (existing_bound_type->GetUpperBound().IsSupertypeOf(upper_bound)) { + if (kIsDebugBuild) { + // Check that the existing HBoundType dominates all the uses. + for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) { + HInstruction* user = it.Current()->GetUser(); + if (dominator_instr != nullptr) { + DCHECK(!dominator_instr->StrictlyDominates(user) + || user == existing_bound_type + || existing_bound_type->StrictlyDominates(user)); + } else if (dominator_block != nullptr) { + DCHECK(!dominator_block->Dominates(user->GetBlock()) + || user == existing_bound_type + || existing_bound_type->StrictlyDominates(user)); + } + } + } + } else { + // TODO: if the current bound type is a refinement we could update the + // existing_bound_type with a new upper limit. However, we also need to + // update its users and have access to the work list. + } + return false; +} + void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { HIf* ifInstruction = block->GetLastInstruction()->AsIf(); if (ifInstruction == nullptr) { @@ -99,6 +236,12 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { return; } + if (!obj->CanBeNull() || obj->IsNullConstant()) { + // Null check is dead code and will be removed by DCE. 
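// The narrowing rules in CreateBoundType above, restated on a toy type
// system so they can be read in isolation (Type, Rti and Narrow are
// illustrative stand-ins, not mirror::Class or ART's ReferenceTypeInfo):
struct Type {
  const Type* super;  // Single-inheritance chain ending at the root.
  bool is_final;
};

static bool IsSupertype(const Type* a, const Type* b) {
  for (const Type* t = b; t != nullptr; t = t->super) {
    if (t == a) return true;
  }
  return false;
}

struct Rti { const Type* type; bool is_exact; bool valid; };

static Rti Narrow(Rti obj, const Type* bound) {
  if (bound->is_final) return {bound, /* is_exact */ true, true};  // A final class is exact.
  if (obj.valid && IsSupertype(bound, obj.type)) return obj;       // Keep the tighter info.
  return {bound, /* is_exact */ false, true};                      // Otherwise use the bound.
}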
+ return; + } + DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions"; + // We only need to bound the type if we have uses in the relevant block. // So start with null and create the HBoundType lazily, only if it's needed. HBoundType* bound_type = nullptr; @@ -110,8 +253,23 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { HInstruction* user = it.Current()->GetUser(); if (notNullBlock->Dominates(user->GetBlock())) { if (bound_type == nullptr) { - bound_type = new (graph_->GetArena()) HBoundType(obj, ReferenceTypeInfo::CreateTop(false)); - notNullBlock->InsertInstructionBefore(bound_type, notNullBlock->GetFirstInstruction()); + ScopedObjectAccess soa(Thread::Current()); + HInstruction* insert_point = notNullBlock->GetFirstInstruction(); + ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( + object_class_handle_, /* is_exact */ true); + if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) { + bound_type = new (graph_->GetArena()) HBoundType( + obj, object_rti, /* bound_can_be_null */ false); + if (obj->GetReferenceTypeInfo().IsValid()) { + bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo()); + } + notNullBlock->InsertInstructionBefore(bound_type, insert_point); + } else { + // We already have a bound type on the position we would need to insert + // the new one. The existing bound type should dominate all the users + // (dchecked) so there's no need to continue. + break; + } } user->ReplaceInput(bound_type, it.Current()->GetIndex()); } @@ -160,29 +318,28 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { // input. return; } + DCHECK(!obj->IsLoadClass()) << "We should not replace HLoadClass instructions"; for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) { HInstruction* user = it.Current()->GetUser(); if (instanceOfTrueBlock->Dominates(user->GetBlock())) { if (bound_type == nullptr) { + ScopedObjectAccess soa(Thread::Current()); HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass(); - - ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo(); ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); - bound_type = new (graph_->GetArena()) HBoundType(obj, class_rti); - - // Narrow the type as much as possible. - { - ScopedObjectAccess soa(Thread::Current()); - if (!load_class->IsResolved() || class_rti.IsSupertypeOf(obj_rti)) { - bound_type->SetReferenceTypeInfo(obj_rti); - } else { - bound_type->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false)); - } + HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction(); + if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) { + bound_type = CreateBoundType( + graph_->GetArena(), + obj, + load_class, + false /* InstanceOf ensures the object is not null. */); + instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point); + } else { + // We already have a bound type on the position we would need to insert + // the new one. The existing bound type should dominate all the users + // (dchecked) so there's no need to continue. 
+ break; } - - instanceOfTrueBlock->InsertInstructionBefore( - bound_type, instanceOfTrueBlock->GetFirstInstruction()); } user->ReplaceInput(bound_type, it.Current()->GetIndex()); } @@ -192,11 +349,32 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { void RTPVisitor::SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact) { - if (klass != nullptr) { + if (instr->IsInvokeStaticOrDirect() && instr->AsInvokeStaticOrDirect()->IsStringInit()) { + // Calls to String.<init> are replaced with a StringFactory. + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* cl = Runtime::Current()->GetClassLinker(); + mirror::DexCache* dex_cache = cl->FindDexCache(instr->AsInvoke()->GetDexFile()); + ArtMethod* method = dex_cache->GetResolvedMethod( + instr->AsInvoke()->GetDexMethodIndex(), cl->GetImagePointerSize()); + DCHECK(method != nullptr); + mirror::Class* declaring_class = method->GetDeclaringClass(); + DCHECK(declaring_class != nullptr); + DCHECK(declaring_class->IsStringClass()) + << "Expected String class: " << PrettyDescriptor(declaring_class); + DCHECK(method->IsConstructor()) + << "Expected String.<init>: " << PrettyMethod(method); + } + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true)); + } else if (klass != nullptr) { ScopedObjectAccess soa(Thread::Current()); - MutableHandle<mirror::Class> handle = handles_->NewHandle(klass); + ReferenceTypeInfo::TypeHandle handle = handles_->NewHandle(klass); is_exact = is_exact || klass->IsFinal(); instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact)); + } else { + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false)); } } @@ -212,6 +390,13 @@ void RTPVisitor::UpdateReferenceTypeInfo(HInstruction* instr, SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact); } +void RTPVisitor::VisitNullConstant(HNullConstant* instr) { + // TODO: The null constant could be bound contextually (e.g. based on return statements) + // to a more precise type. + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false)); +} + void RTPVisitor::VisitNewInstance(HNewInstance* instr) { UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true); } @@ -220,6 +405,13 @@ void RTPVisitor::VisitNewArray(HNewArray* instr) { UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true); } +void RTPVisitor::VisitParameterValue(HParameterValue* instr) { + if (instr->GetType() == Primitive::kPrimNot) { + // TODO: parse the signature and add precise types for the parameters. + SetClassAsTypeInfo(instr, nullptr, /* is_exact */ false); + } +} + void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info) { // The field index is unknown only during tests. @@ -231,10 +423,10 @@ void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr, ClassLinker* cl = Runtime::Current()->GetClassLinker(); mirror::DexCache* dex_cache = cl->FindDexCache(info.GetDexFile()); ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), dex_cache); - if (field != nullptr) { - mirror::Class* klass = field->GetType<false>(); - SetClassAsTypeInfo(instr, klass, /* is_exact */ false); - } + // TODO: There are certain cases where we can't resolve the field. + // b/21914925 is open to keep track of a repro case for this issue. + mirror::Class* klass = (field == nullptr) ? 
nullptr : field->GetType<false>(); + SetClassAsTypeInfo(instr, klass, /* is_exact */ false); } void RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) { @@ -251,12 +443,64 @@ void RTPVisitor::VisitLoadClass(HLoadClass* instr) { Runtime::Current()->GetClassLinker()->FindDexCache(instr->GetDexFile()); // Get type from dex cache assuming it was populated by the verifier. mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex()); - if (resolved_class != nullptr) { - Handle<mirror::Class> handle = handles_->NewHandle(resolved_class); - instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true)); + // TODO: investigating why we are still getting unresolved classes: b/22821472. + ReferenceTypeInfo::TypeHandle handle = (resolved_class != nullptr) + ? handles_->NewHandle(resolved_class) + : object_class_handle_; + instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true)); + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true)); +} + +void RTPVisitor::VisitClinitCheck(HClinitCheck* instr) { + instr->SetReferenceTypeInfo(instr->InputAt(0)->GetReferenceTypeInfo()); +} + +void RTPVisitor::VisitLoadString(HLoadString* instr) { + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true)); +} + +void RTPVisitor::VisitNullCheck(HNullCheck* instr) { + ScopedObjectAccess soa(Thread::Current()); + ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo(); + DCHECK(parent_rti.IsValid()); + instr->SetReferenceTypeInfo(parent_rti); +} + +void RTPVisitor::VisitFakeString(HFakeString* instr) { + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true)); +} + +void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) { + HInstruction* obj = check_cast->InputAt(0); + HBoundType* bound_type = nullptr; + for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) { + HInstruction* user = it.Current()->GetUser(); + if (check_cast->StrictlyDominates(user)) { + if (bound_type == nullptr) { + ScopedObjectAccess soa(Thread::Current()); + HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass(); + ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); + if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) { + bound_type = CreateBoundType( + GetGraph()->GetArena(), + obj, + load_class, + true /* CheckCast succeeds for nulls. */); + check_cast->GetBlock()->InsertInstructionAfter(bound_type, check_cast); + } else { + // Update nullability of the existing bound type, which may not have known + // that its input was not null when it was being created. + bound_type = check_cast->GetNext()->AsBoundType(); + bound_type->SetCanBeNull(obj->CanBeNull()); + // We already have a bound type on the position we would need to insert + // the new one. The existing bound type should dominate all the users + // (dchecked) so there's no need to continue. 
+ break; + } + } + user->ReplaceInput(bound_type, it.Current()->GetIndex()); + } } - Handle<mirror::Class> class_handle = handles_->NewHandle(mirror::Class::GetJavaLangClass()); - instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_handle, /* is_exact */ true)); } void ReferenceTypePropagation::VisitPhi(HPhi* phi) { @@ -283,29 +527,54 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) { ReferenceTypeInfo ReferenceTypePropagation::MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b) { + if (!b.IsValid()) { + return a; + } + if (!a.IsValid()) { + return b; + } + bool is_exact = a.IsExact() && b.IsExact(); - bool is_top = a.IsTop() || b.IsTop(); Handle<mirror::Class> type_handle; - if (!is_top) { - if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) { - type_handle = a.GetTypeHandle(); - } else if (a.IsSupertypeOf(b)) { - type_handle = a.GetTypeHandle(); - is_exact = false; - } else if (b.IsSupertypeOf(a)) { - type_handle = b.GetTypeHandle(); - is_exact = false; - } else { - // TODO: Find a common super class. - is_top = true; - is_exact = false; - } + if (a.GetTypeHandle().Get() == b.GetTypeHandle().Get()) { + type_handle = a.GetTypeHandle(); + } else if (a.IsSupertypeOf(b)) { + type_handle = a.GetTypeHandle(); + is_exact = false; + } else if (b.IsSupertypeOf(a)) { + type_handle = b.GetTypeHandle(); + is_exact = false; + } else { + // TODO: Find the first common super class. + type_handle = object_class_handle_; + is_exact = false; + } + + return ReferenceTypeInfo::Create(type_handle, is_exact); +} + +static void UpdateArrayGet(HArrayGet* instr, + StackHandleScopeCollection* handles, + ReferenceTypeInfo::TypeHandle object_class_handle) + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK_EQ(Primitive::kPrimNot, instr->GetType()); + + ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo(); + DCHECK(parent_rti.IsValid()); + + Handle<mirror::Class> handle = parent_rti.GetTypeHandle(); + if (handle->IsObjectArrayClass()) { + ReferenceTypeInfo::TypeHandle component_handle = handles->NewHandle(handle->GetComponentType()); + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(component_handle, /* is_exact */ false)); + } else { + // We don't know what the parent actually is, so we fall back to Object. + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(object_class_handle, /* is_exact */ false)); } - return is_top - ? ReferenceTypeInfo::CreateTop(is_exact) - : ReferenceTypeInfo::Create(type_handle, is_exact); + return; } bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) { @@ -316,6 +585,15 @@ bool ReferenceTypePropagation::UpdateReferenceTypeInfo(HInstruction* instr) { UpdateBoundType(instr->AsBoundType()); } else if (instr->IsPhi()) { UpdatePhi(instr->AsPhi()); + } else if (instr->IsNullCheck()) { + ReferenceTypeInfo parent_rti = instr->InputAt(0)->GetReferenceTypeInfo(); + if (parent_rti.IsValid()) { + instr->SetReferenceTypeInfo(parent_rti); + } + } else if (instr->IsArrayGet()) { + // TODO: consider if it's worth "looking back" and bounding the input object + // to an array type. 
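// The TODO in MergeTypes above ("find the first common super class") has a
// standard single-inheritance solution: equalize depths, then climb in
// lockstep. A hedged sketch, not ART's implementation; Klass is a stand-in.
struct Klass { const Klass* super; };  // super == nullptr at the root (Object).

static int Depth(const Klass* k) {
  int depth = 0;
  for (; k->super != nullptr; k = k->super) ++depth;
  return depth;
}

static const Klass* FirstCommonSuperClass(const Klass* a, const Klass* b) {
  int da = Depth(a);
  int db = Depth(b);
  for (; da > db; --da) a = a->super;
  for (; db > da; --db) b = b->super;
  while (a != b) {  // Terminates at the shared root at the latest.
    a = a->super;
    b = b->super;
  }
  return a;
}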
+ UpdateArrayGet(instr->AsArrayGet(), handles_, object_class_handle_); } else { LOG(FATAL) << "Invalid instruction (should not get here)"; } @@ -333,45 +611,45 @@ void RTPVisitor::VisitInvoke(HInvoke* instr) { mirror::DexCache* dex_cache = cl->FindDexCache(instr->GetDexFile()); ArtMethod* method = dex_cache->GetResolvedMethod( instr->GetDexMethodIndex(), cl->GetImagePointerSize()); - if (method != nullptr) { - mirror::Class* klass = method->GetReturnType(false); - SetClassAsTypeInfo(instr, klass, /* is_exact */ false); - } + mirror::Class* klass = (method == nullptr) ? nullptr : method->GetReturnType(false); + SetClassAsTypeInfo(instr, klass, /* is_exact */ false); } void RTPVisitor::VisitArrayGet(HArrayGet* instr) { if (instr->GetType() != Primitive::kPrimNot) { return; } - - HInstruction* parent = instr->InputAt(0); ScopedObjectAccess soa(Thread::Current()); - Handle<mirror::Class> handle = parent->GetReferenceTypeInfo().GetTypeHandle(); - if (handle.GetReference() != nullptr && handle->IsObjectArrayClass()) { - SetClassAsTypeInfo(instr, handle->GetComponentType(), /* is_exact */ false); + UpdateArrayGet(instr, handles_, object_class_handle_); + if (!instr->GetReferenceTypeInfo().IsValid()) { + worklist_->Add(instr); } } void ReferenceTypePropagation::UpdateBoundType(HBoundType* instr) { ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo(); - // Be sure that we don't go over the bounded type. - ReferenceTypeInfo bound_rti = instr->GetBoundType(); - if (!bound_rti.IsSupertypeOf(new_rti)) { - new_rti = bound_rti; + if (!new_rti.IsValid()) { + return; // No new info yet. + } + + // Make sure that we don't go over the bounded type. + ReferenceTypeInfo upper_bound_rti = instr->GetUpperBound(); + if (!upper_bound_rti.IsSupertypeOf(new_rti)) { + new_rti = upper_bound_rti; } instr->SetReferenceTypeInfo(new_rti); } void ReferenceTypePropagation::UpdatePhi(HPhi* instr) { ReferenceTypeInfo new_rti = instr->InputAt(0)->GetReferenceTypeInfo(); - if (new_rti.IsTop() && !new_rti.IsExact()) { - // Early return if we are Top and inexact. + if (new_rti.IsValid() && new_rti.IsObjectClass() && !new_rti.IsExact()) { + // Early return if we are Object and inexact. instr->SetReferenceTypeInfo(new_rti); return; } for (size_t i = 1; i < instr->InputCount(); i++) { new_rti = MergeTypes(new_rti, instr->InputAt(i)->GetReferenceTypeInfo()); - if (new_rti.IsTop()) { + if (new_rti.IsValid() && new_rti.IsObjectClass()) { if (!new_rti.IsExact()) { break; } else { @@ -385,21 +663,31 @@ void ReferenceTypePropagation::UpdatePhi(HPhi* instr) { // Re-computes and updates the nullability of the instruction. Returns whether or // not the nullability was changed. 
bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) { - DCHECK(instr->IsPhi() || instr->IsBoundType()); + DCHECK(instr->IsPhi() + || instr->IsBoundType() + || instr->IsNullCheck() + || instr->IsArrayGet()); - if (!instr->IsPhi()) { + if (!instr->IsPhi() && !instr->IsBoundType()) { return false; } - HPhi* phi = instr->AsPhi(); - bool existing_can_be_null = phi->CanBeNull(); - bool new_can_be_null = false; - for (size_t i = 0; i < phi->InputCount(); i++) { - new_can_be_null |= phi->InputAt(i)->CanBeNull(); + bool existing_can_be_null = instr->CanBeNull(); + if (instr->IsPhi()) { + HPhi* phi = instr->AsPhi(); + bool new_can_be_null = false; + for (size_t i = 0; i < phi->InputCount(); i++) { + if (phi->InputAt(i)->CanBeNull()) { + new_can_be_null = true; + break; + } + } + phi->SetCanBeNull(new_can_be_null); + } else if (instr->IsBoundType()) { + HBoundType* bound_type = instr->AsBoundType(); + bound_type->SetCanBeNull(instr->InputAt(0)->CanBeNull() && bound_type->GetUpperCanBeNull()); } - phi->SetCanBeNull(new_can_be_null); - - return existing_can_be_null != new_can_be_null; + return existing_can_be_null != instr->CanBeNull(); } void ReferenceTypePropagation::ProcessWorklist() { @@ -412,14 +700,18 @@ void ReferenceTypePropagation::ProcessWorklist() { } void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) { - DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot) << instruction->GetType(); + DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot) + << instruction->DebugName() << ":" << instruction->GetType(); worklist_.Add(instruction); } void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) { for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) { HInstruction* user = it.Current()->GetUser(); - if (user->IsPhi() || user->IsBoundType()) { + if (user->IsPhi() + || user->IsBoundType() + || user->IsNullCheck() + || (user->IsArrayGet() && (user->GetType() == Primitive::kPrimNot))) { AddToWorklist(user); } } diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h index 17cfed45d5..14d4a82e9b 100644 --- a/compiler/optimizing/reference_type_propagation.h +++ b/compiler/optimizing/reference_type_propagation.h @@ -30,10 +30,9 @@ namespace art { */ class ReferenceTypePropagation : public HOptimization { public: - ReferenceTypePropagation(HGraph* graph, StackHandleScopeCollection* handles) - : HOptimization(graph, kReferenceTypePropagationPassName), - handles_(handles), - worklist_(graph->GetArena(), kDefaultWorklistSize) {} + ReferenceTypePropagation(HGraph* graph, + StackHandleScopeCollection* handles, + const char* name = kReferenceTypePropagationPassName); void Run() OVERRIDE; @@ -42,8 +41,8 @@ class ReferenceTypePropagation : public HOptimization { private: void VisitPhi(HPhi* phi); void VisitBasicBlock(HBasicBlock* block); - void UpdateBoundType(HBoundType* bound_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UpdatePhi(HPhi* phi) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_); + void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_); void BoundTypeForIfNotNull(HBasicBlock* block); void BoundTypeForIfInstanceOf(HBasicBlock* block); void ProcessWorklist(); @@ -54,12 +53,16 @@ class ReferenceTypePropagation : public HOptimization { bool UpdateReferenceTypeInfo(HInstruction* instr); ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, 
const ReferenceTypeInfo& b) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); StackHandleScopeCollection* handles_; GrowableArray<HInstruction*> worklist_; + ReferenceTypeInfo::TypeHandle object_class_handle_; + ReferenceTypeInfo::TypeHandle class_class_handle_; + ReferenceTypeInfo::TypeHandle string_class_handle_; + static constexpr size_t kDefaultWorklistSize = 8; DISALLOW_COPY_AND_ASSIGN(ReferenceTypePropagation); diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index 7b23d020c2..9f32a9eaf8 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -209,6 +209,8 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) { Location temp = locations->GetTemp(i); if (temp.IsRegister() || temp.IsFpuRegister()) { BlockRegister(temp, position, position + 1); + // Ensure that an explicit temporary register is marked as being allocated. + codegen_->AddAllocatedRegister(temp); } else { DCHECK(temp.IsUnallocated()); switch (temp.GetPolicy()) { @@ -246,7 +248,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) { bool core_register = (instruction->GetType() != Primitive::kPrimDouble) && (instruction->GetType() != Primitive::kPrimFloat); - if (locations->CanCall()) { + if (locations->NeedsSafepoint()) { if (codegen_->IsLeafMethod()) { // TODO: We do this here because we do not want the suspend check to artificially // create live registers. We should find another place, but this is currently the @@ -507,6 +509,11 @@ bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& in } if (current->HasRegister()) { + if (kIsDebugBuild && log_fatal_on_failure && !current->IsFixed()) { + // Only check when an error is fatal. Only test code asks for non-fatal failures, + // and test code may not properly fill in the right information for the code generator. + CHECK(codegen.HasAllocatedRegister(processing_core_registers, current->GetRegister())); + } BitVector* liveness_of_register = liveness_of_values.Get(current->GetRegister()); for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) { if (liveness_of_register->IsBitSet(j)) { @@ -775,7 +782,10 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) { } else { DCHECK(!current->IsHighInterval()); int hint = current->FindFirstRegisterHint(free_until, liveness_); - if (hint != kNoRegister) { + if ((hint != kNoRegister) + // For simplicity, if the hint we are getting for a pair cannot be used, + // we are just going to allocate a new pair. + && !(current->IsLowInterval() && IsBlocked(GetHighForLowRegister(hint)))) { DCHECK(!IsBlocked(hint)); reg = hint; } else if (current->IsLowInterval()) { @@ -942,7 +952,16 @@ bool RegisterAllocator::PotentiallyRemoveOtherHalf(LiveInterval* interval, // we spill `current` instead. bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) { size_t first_register_use = current->FirstRegisterUse(); - if (first_register_use == kNoLifetime) { + if (current->HasRegister()) { + DCHECK(current->IsHighInterval()); + // The low interval has allocated the register for the high interval. In + // case the low interval had to split both intervals, we may end up in a + // situation where the high interval does not have a register use anymore. + // We must still proceed in order to split currently active and inactive + // uses of the high interval's register, and put the high interval in the + // active set. 
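// The pair-hint rule in TryAllocateFreeReg above, as a self-contained
// predicate: only take a hint for a low interval when its paired high
// register is also usable. The even/odd pairing convention below is an
// assumption for illustration, not a statement about any particular ABI.
#include <cstdint>

constexpr int kNoRegister = -1;
static int GetHighForLowRegister(int low_reg) { return low_reg + 1; }

static int ChoosePairHint(int hint, uint64_t blocked_mask) {
  auto blocked = [blocked_mask](int reg) {
    return ((blocked_mask >> reg) & 1u) != 0u;
  };
  if (hint != kNoRegister && !blocked(hint) && !blocked(GetHighForLowRegister(hint))) {
    return hint;  // The hint works as an aligned pair.
  }
  return kNoRegister;  // For simplicity, allocate a fresh pair instead.
}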
+ DCHECK(first_register_use != kNoLifetime || (current->GetNextSibling() != nullptr)); + } else if (first_register_use == kNoLifetime) { AllocateSpillSlotFor(current); return false; } @@ -1009,7 +1028,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) { // When allocating the low part, we made sure the high register was available. DCHECK_LT(first_use, next_use[reg]); } else if (current->IsLowInterval()) { - reg = FindAvailableRegisterPair(next_use, first_register_use); + reg = FindAvailableRegisterPair(next_use, first_use); // We should spill if both registers are not available. should_spill = (first_use >= next_use[reg]) || (first_use >= next_use[GetHighForLowRegister(reg)]); @@ -1023,16 +1042,28 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) { if (should_spill) { DCHECK(!current->IsHighInterval()); bool is_allocation_at_use_site = (current->GetStart() >= (first_register_use - 1)); - if (current->IsLowInterval() - && is_allocation_at_use_site - && TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(), - first_register_use, - next_use)) { + if (is_allocation_at_use_site) { + if (!current->IsLowInterval()) { + DumpInterval(std::cerr, current); + DumpAllIntervals(std::cerr); + // This situation has the potential to infinite loop, so we make it a non-debug CHECK. + HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2); + CHECK(false) << "There are not enough registers available for " + << current->GetParent()->GetDefinedBy()->DebugName() << " " + << current->GetParent()->GetDefinedBy()->GetId() + << " at " << first_register_use - 1 << " " + << (at == nullptr ? "" : at->DebugName()); + } + // If we're allocating a register for `current` because the instruction at // that position requires it, but we think we should spill, then there are // non-pair intervals or unaligned pair intervals blocking the allocation. // We split the first interval found, and put ourselves first in the // `unhandled_` list. + bool success = TrySplitNonPairOrUnalignedPairIntervalAt(current->GetStart(), + first_register_use, + next_use); + DCHECK(success); LiveInterval* existing = unhandled_->Peek(); DCHECK(existing->IsHighInterval()); DCHECK_EQ(existing->GetLowInterval(), current); @@ -1042,17 +1073,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) { // register, we split this interval just before its first register use. AllocateSpillSlotFor(current); LiveInterval* split = SplitBetween(current, current->GetStart(), first_register_use - 1); - if (current == split) { - DumpInterval(std::cerr, current); - DumpAllIntervals(std::cerr); - // This situation has the potential to infinite loop, so we make it a non-debug CHECK. - HInstruction* at = liveness_.GetInstructionFromPosition(first_register_use / 2); - CHECK(false) << "There is not enough registers available for " - << split->GetParent()->GetDefinedBy()->DebugName() << " " - << split->GetParent()->GetDefinedBy()->GetId() - << " at " << first_register_use - 1 << " " - << (at == nullptr ? "" : at->DebugName()); - } + DCHECK(current != split); AddSorted(unhandled_, split); } return false; @@ -1233,7 +1254,9 @@ LiveInterval* RegisterAllocator::Split(LiveInterval* interval, size_t position) void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) { if (interval->IsHighInterval()) { - // The low interval will contain the spill slot. + // The low interval already took care of allocating the spill slot. 
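// "Split just before the first register use" from the hunk above, on a toy
// linear-scan interval. The DCHECK(current != split) guarantee corresponds
// to the assert here: the split position must lie strictly inside the
// interval, so the allocator always makes progress. Illustrative only.
#include <cassert>
#include <cstddef>

struct ToyInterval {
  size_t start;
  size_t end;            // Half-open: [start, end).
  ToyInterval* sibling;  // Split-off remainder, if any.
};

static ToyInterval* SplitAt(ToyInterval* interval, size_t position) {
  assert(interval->start < position && position < interval->end);
  ToyInterval* rest = new ToyInterval{position, interval->end, interval->sibling};
  interval->end = position;  // The head keeps the spill slot / current location.
  interval->sibling = rest;  // The tail is re-queued for register allocation.
  return rest;
}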
+ DCHECK(!interval->GetLowInterval()->HasRegister()); + DCHECK(interval->GetLowInterval()->GetParent()->HasSpillSlot()); return; } diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc index ea1ca5a731..1c3e255339 100644 --- a/compiler/optimizing/side_effects_analysis.cc +++ b/compiler/optimizing/side_effects_analysis.cc @@ -24,14 +24,15 @@ void SideEffectsAnalysis::Run() { block_effects_.SetSize(graph_->GetBlocks().Size()); loop_effects_.SetSize(graph_->GetBlocks().Size()); + // In DEBUG mode, ensure side effects are properly initialized to empty. if (kIsDebugBuild) { for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); SideEffects effects = GetBlockEffects(block); - DCHECK(!effects.HasSideEffects() && !effects.HasDependencies()); + DCHECK(effects.DoesNothing()); if (block->IsLoopHeader()) { effects = GetLoopEffects(block); - DCHECK(!effects.HasSideEffects() && !effects.HasDependencies()); + DCHECK(effects.DoesNothing()); } } } @@ -46,7 +47,9 @@ void SideEffectsAnalysis::Run() { inst_it.Advance()) { HInstruction* instruction = inst_it.Current(); effects = effects.Union(instruction->GetSideEffects()); - if (effects.HasAllSideEffects()) { + // If all side effects are represented, scanning further will not add any + // more information to the side effects of this block. + if (effects.DoesAll()) { break; } } diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc new file mode 100644 index 0000000000..ec45d6b2ca --- /dev/null +++ b/compiler/optimizing/side_effects_test.cc @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "gtest/gtest.h" +#include "nodes.h" +#include "primitive.h" + +namespace art { + +/** + * Tests for the SideEffects class. + */ + +// +// Helper methods. +// + +void testWriteAndReadSanity(SideEffects write, SideEffects read) { + EXPECT_FALSE(write.DoesNothing()); + EXPECT_FALSE(read.DoesNothing()); + + EXPECT_TRUE(write.DoesAnyWrite()); + EXPECT_FALSE(write.DoesAnyRead()); + EXPECT_FALSE(read.DoesAnyWrite()); + EXPECT_TRUE(read.DoesAnyRead()); + + // All-dependences. + SideEffects all = SideEffects::All(); + EXPECT_TRUE(all.MayDependOn(write)); + EXPECT_FALSE(write.MayDependOn(all)); + EXPECT_FALSE(all.MayDependOn(read)); + EXPECT_TRUE(read.MayDependOn(all)); + + // None-dependences. + SideEffects none = SideEffects::None(); + EXPECT_FALSE(none.MayDependOn(write)); + EXPECT_FALSE(write.MayDependOn(none)); + EXPECT_FALSE(none.MayDependOn(read)); + EXPECT_FALSE(read.MayDependOn(none)); +} + +void testWriteAndReadDependence(SideEffects write, SideEffects read) { + testWriteAndReadSanity(write, read); + + // Dependence only in one direction. 
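// The dependence rule these helpers exercise, reduced to its core: with one
// bit per (access kind, type) pair, X.MayDependOn(Y) holds exactly when
// something X reads is something Y writes. A simplified sketch of the idea;
// ART's SideEffects has a richer bit layout (GC, volatility, aliasing).
#include <cstdint>

class ToyEffects {
 public:
  static ToyEffects Write(unsigned bit) { return ToyEffects(uint64_t{1} << bit, 0u); }
  static ToyEffects Read(unsigned bit) { return ToyEffects(0u, uint64_t{1} << bit); }
  ToyEffects Union(ToyEffects other) const {
    return ToyEffects(writes_ | other.writes_, reads_ | other.reads_);
  }
  bool MayDependOn(ToyEffects other) const {
    return (reads_ & other.writes_) != 0u;  // I read something the other writes.
  }
 private:
  ToyEffects(uint64_t writes, uint64_t reads) : writes_(writes), reads_(reads) {}
  uint64_t writes_;
  uint64_t reads_;
};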
+ EXPECT_FALSE(write.MayDependOn(read)); + EXPECT_TRUE(read.MayDependOn(write)); +} + +void testNoWriteAndReadDependence(SideEffects write, SideEffects read) { + testWriteAndReadSanity(write, read); + + // No dependence in any direction. + EXPECT_FALSE(write.MayDependOn(read)); + EXPECT_FALSE(read.MayDependOn(write)); +} + +// +// Actual tests. +// + +TEST(SideEffectsTest, All) { + SideEffects all = SideEffects::All(); + EXPECT_TRUE(all.DoesAnyWrite()); + EXPECT_TRUE(all.DoesAnyRead()); + EXPECT_FALSE(all.DoesNothing()); + EXPECT_TRUE(all.DoesAllReadWrite()); +} + +TEST(SideEffectsTest, None) { + SideEffects none = SideEffects::None(); + EXPECT_FALSE(none.DoesAnyWrite()); + EXPECT_FALSE(none.DoesAnyRead()); + EXPECT_TRUE(none.DoesNothing()); + EXPECT_FALSE(none.DoesAllReadWrite()); +} + +TEST(SideEffectsTest, DependencesAndNoDependences) { + // Apply test to each individual primitive type. + for (Primitive::Type type = Primitive::kPrimNot; + type < Primitive::kPrimVoid; + type = Primitive::Type(type + 1)) { + // Same primitive type and access type: proper write/read dep. + testWriteAndReadDependence( + SideEffects::FieldWriteOfType(type, false), + SideEffects::FieldReadOfType(type, false)); + testWriteAndReadDependence( + SideEffects::ArrayWriteOfType(type), + SideEffects::ArrayReadOfType(type)); + // Same primitive type but different access type: no write/read dep. + testNoWriteAndReadDependence( + SideEffects::FieldWriteOfType(type, false), + SideEffects::ArrayReadOfType(type)); + testNoWriteAndReadDependence( + SideEffects::ArrayWriteOfType(type), + SideEffects::FieldReadOfType(type, false)); + } +} + +TEST(SideEffectsTest, NoDependences) { + // Different primitive type, same access type: no write/read dep. + testNoWriteAndReadDependence( + SideEffects::FieldWriteOfType(Primitive::kPrimInt, false), + SideEffects::FieldReadOfType(Primitive::kPrimDouble, false)); + testNoWriteAndReadDependence( + SideEffects::ArrayWriteOfType(Primitive::kPrimInt), + SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + // Everything different: no write/read dep. + testNoWriteAndReadDependence( + SideEffects::FieldWriteOfType(Primitive::kPrimInt, false), + SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + testNoWriteAndReadDependence( + SideEffects::ArrayWriteOfType(Primitive::kPrimInt), + SideEffects::FieldReadOfType(Primitive::kPrimDouble, false)); +} + +TEST(SideEffectsTest, VolatileDependences) { + SideEffects volatile_write = + SideEffects::FieldWriteOfType(Primitive::kPrimInt, true); + SideEffects any_write = + SideEffects::FieldWriteOfType(Primitive::kPrimInt, false); + SideEffects volatile_read = + SideEffects::FieldReadOfType(Primitive::kPrimByte, true); + SideEffects any_read = + SideEffects::FieldReadOfType(Primitive::kPrimByte, false); + + EXPECT_FALSE(volatile_write.MayDependOn(any_read)); + EXPECT_TRUE(any_read.MayDependOn(volatile_write)); + EXPECT_TRUE(volatile_write.MayDependOn(any_write)); + EXPECT_FALSE(any_write.MayDependOn(volatile_write)); + + EXPECT_FALSE(volatile_read.MayDependOn(any_read)); + EXPECT_TRUE(any_read.MayDependOn(volatile_read)); + EXPECT_TRUE(volatile_read.MayDependOn(any_write)); + EXPECT_FALSE(any_write.MayDependOn(volatile_read)); +} + +TEST(SideEffectsTest, SameWidthTypes) { + // Type I/F. 
+ testWriteAndReadDependence( + SideEffects::FieldWriteOfType(Primitive::kPrimInt, false), + SideEffects::FieldReadOfType(Primitive::kPrimFloat, false)); + testWriteAndReadDependence( + SideEffects::ArrayWriteOfType(Primitive::kPrimInt), + SideEffects::ArrayReadOfType(Primitive::kPrimFloat)); + // Type L/D. + testWriteAndReadDependence( + SideEffects::FieldWriteOfType(Primitive::kPrimLong, false), + SideEffects::FieldReadOfType(Primitive::kPrimDouble, false)); + testWriteAndReadDependence( + SideEffects::ArrayWriteOfType(Primitive::kPrimLong), + SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); +} + +TEST(SideEffectsTest, AllWritesAndReads) { + SideEffects s = SideEffects::None(); + // Keep taking the union of different writes and reads. + for (Primitive::Type type = Primitive::kPrimNot; + type < Primitive::kPrimVoid; + type = Primitive::Type(type + 1)) { + s = s.Union(SideEffects::FieldWriteOfType(type, false)); + s = s.Union(SideEffects::ArrayWriteOfType(type)); + s = s.Union(SideEffects::FieldReadOfType(type, false)); + s = s.Union(SideEffects::ArrayReadOfType(type)); + } + EXPECT_TRUE(s.DoesAllReadWrite()); +} + +TEST(SideEffectsTest, GC) { + SideEffects can_trigger_gc = SideEffects::CanTriggerGC(); + SideEffects depends_on_gc = SideEffects::DependsOnGC(); + SideEffects all_changes = SideEffects::AllChanges(); + SideEffects all_dependencies = SideEffects::AllDependencies(); + + EXPECT_TRUE(depends_on_gc.MayDependOn(can_trigger_gc)); + EXPECT_TRUE(depends_on_gc.Union(can_trigger_gc).MayDependOn(can_trigger_gc)); + EXPECT_FALSE(can_trigger_gc.MayDependOn(depends_on_gc)); + + EXPECT_TRUE(depends_on_gc.MayDependOn(all_changes)); + EXPECT_TRUE(depends_on_gc.Union(can_trigger_gc).MayDependOn(all_changes)); + EXPECT_FALSE(can_trigger_gc.MayDependOn(all_changes)); + + EXPECT_TRUE(all_changes.Includes(can_trigger_gc)); + EXPECT_FALSE(all_changes.Includes(depends_on_gc)); + EXPECT_TRUE(all_dependencies.Includes(depends_on_gc)); + EXPECT_FALSE(all_dependencies.Includes(can_trigger_gc)); +} + +TEST(SideEffectsTest, BitStrings) { + EXPECT_STREQ( + "|||||||", + SideEffects::None().ToString().c_str()); + EXPECT_STREQ( + "|GC|DFJISCBZL|DFJISCBZL|GC|DFJISCBZL|DFJISCBZL|", + SideEffects::All().ToString().c_str()); + EXPECT_STREQ( + "|||||DFJISCBZL|DFJISCBZL|", + SideEffects::AllWrites().ToString().c_str()); + EXPECT_STREQ( + "||DFJISCBZL|DFJISCBZL||||", + SideEffects::AllReads().ToString().c_str()); + EXPECT_STREQ( + "||||||L|", + SideEffects::FieldWriteOfType(Primitive::kPrimNot, false).ToString().c_str()); + EXPECT_STREQ( + "|||||Z||", + SideEffects::ArrayWriteOfType(Primitive::kPrimBoolean).ToString().c_str()); + EXPECT_STREQ( + "|||B||||", + SideEffects::FieldReadOfType(Primitive::kPrimByte, false).ToString().c_str()); + EXPECT_STREQ( + "||DJ|||||", // note: DJ alias + SideEffects::ArrayReadOfType(Primitive::kPrimDouble).ToString().c_str()); + SideEffects s = SideEffects::None(); + s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, false)); + s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimLong, false)); + s = s.Union(SideEffects::ArrayWriteOfType(Primitive::kPrimShort)); + s = s.Union(SideEffects::FieldReadOfType(Primitive::kPrimInt, false)); + s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimFloat)); + s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimDouble)); + EXPECT_STREQ( + "||DFJI|FI||S|DJC|", // note: DJ/FI alias. 
+ s.ToString().c_str()); +} + +} // namespace art diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index c37b1995fa..ff2e6ad821 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -350,7 +350,9 @@ HInstruction* SsaBuilder::ValueOfLocal(HBasicBlock* block, size_t local) { void SsaBuilder::VisitBasicBlock(HBasicBlock* block) { current_locals_ = GetLocalsFor(block); - if (block->IsLoopHeader()) { + if (block->IsCatchBlock()) { + // Catch phis were already created and inputs collected from throwing sites. + } else if (block->IsLoopHeader()) { // If the block is a loop header, we know we only have visited the pre header // because we are visiting in reverse post order. We create phis for all initialized // locals from the pre header. Their inputs will be populated at the end of @@ -551,19 +553,32 @@ void SsaBuilder::VisitStoreLocal(HStoreLocal* store) { } void SsaBuilder::VisitInstruction(HInstruction* instruction) { - if (!instruction->NeedsEnvironment()) { - return; + if (instruction->NeedsEnvironment()) { + HEnvironment* environment = new (GetGraph()->GetArena()) HEnvironment( + GetGraph()->GetArena(), + current_locals_->Size(), + GetGraph()->GetDexFile(), + GetGraph()->GetMethodIdx(), + instruction->GetDexPc(), + GetGraph()->GetInvokeType(), + instruction); + environment->CopyFrom(*current_locals_); + instruction->SetRawEnvironment(environment); + } + + // If in a try block, propagate values of locals into catch blocks. + if (instruction->GetBlock()->IsInTry() && instruction->CanThrow()) { + HTryBoundary* try_block = instruction->GetBlock()->GetTryEntry(); + for (HExceptionHandlerIterator it(*try_block); !it.Done(); it.Advance()) { + GrowableArray<HInstruction*>* handler_locals = GetLocalsFor(it.Current()); + for (size_t i = 0, e = current_locals_->Size(); i < e; ++i) { + HInstruction* local_value = current_locals_->Get(i); + if (local_value != nullptr) { + handler_locals->Get(i)->AsPhi()->AddInput(local_value); + } + } + } } - HEnvironment* environment = new (GetGraph()->GetArena()) HEnvironment( - GetGraph()->GetArena(), - current_locals_->Size(), - GetGraph()->GetDexFile(), - GetGraph()->GetMethodIdx(), - instruction->GetDexPc(), - GetGraph()->GetInvokeType(), - instruction); - environment->CopyFrom(*current_locals_); - instruction->SetRawEnvironment(environment); } void SsaBuilder::VisitTemporary(HTemporary* temp) { diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index 1c83c4ba48..64600db648 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -61,9 +61,22 @@ class SsaBuilder : public HGraphVisitor { GrowableArray<HInstruction*>* GetLocalsFor(HBasicBlock* block) { GrowableArray<HInstruction*>* locals = locals_for_.Get(block->GetBlockId()); if (locals == nullptr) { - locals = new (GetGraph()->GetArena()) GrowableArray<HInstruction*>( - GetGraph()->GetArena(), GetGraph()->GetNumberOfVRegs()); - locals->SetSize(GetGraph()->GetNumberOfVRegs()); + const size_t vregs = GetGraph()->GetNumberOfVRegs(); + ArenaAllocator* arena = GetGraph()->GetArena(); + locals = new (arena) GrowableArray<HInstruction*>(arena, vregs); + locals->SetSize(vregs); + + if (block->IsCatchBlock()) { + // We record incoming inputs of catch phis at throwing instructions and + // must therefore eagerly create the phis. Unused phis will be removed + // in the dead phi analysis. 
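// The catch-phi bookkeeping in the SsaBuilder changes around this point, as
// a toy: every handler gets one phi per vreg up front, and every throwing
// instruction appends its current local values as inputs to those phis.
// The data structures below are illustrative stand-ins for HPhi and the
// locals arrays, not ART's types.
#include <cstddef>
#include <vector>

struct ToyPhi { std::vector<int> inputs; };       // int stands in for HInstruction*.
struct ToyHandler { std::vector<ToyPhi> phis; };  // One phi per dex register.

static void RecordThrowingSite(const std::vector<int>& current_locals,
                               std::vector<ToyHandler*>* handlers) {
  for (ToyHandler* handler : *handlers) {
    for (size_t vreg = 0; vreg < current_locals.size(); ++vreg) {
      if (current_locals[vreg] != 0) {  // 0 marks an undefined local, as nullptr does above.
        handler->phis[vreg].inputs.push_back(current_locals[vreg]);
      }
    }
  }
}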
+ for (size_t i = 0; i < vregs; ++i) { + HPhi* phi = new (arena) HPhi(arena, i, 0, Primitive::kPrimVoid); + block->AddPhi(phi); + locals->Put(i, phi); + } + } + locals_for_.Put(block->GetBlockId(), locals); } return locals; diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 701dbb019b..40502c173b 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -225,7 +225,7 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { // SsaLivenessAnalysis. for (size_t i = 0, e = environment->Size(); i < e; ++i) { HInstruction* instruction = environment->GetInstructionAt(i); - bool should_be_live = ShouldBeLiveForEnvironment(instruction); + bool should_be_live = ShouldBeLiveForEnvironment(current, instruction); if (should_be_live) { DCHECK(instruction->HasSsaIndex()); live_in->SetBit(instruction->GetSsaIndex()); diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 220ee6a8d0..a7044de850 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -1201,8 +1201,14 @@ class SsaLivenessAnalysis : public ValueObject { // Update the live_out set of the block and returns whether it has changed. bool UpdateLiveOut(const HBasicBlock& block); - static bool ShouldBeLiveForEnvironment(HInstruction* instruction) { + // Returns whether `instruction` in an HEnvironment held by `env_holder` + // should be kept live by the HEnvironment. + static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, + HInstruction* instruction) { if (instruction == nullptr) return false; + // A value that's not live in compiled code may still be needed in interpreter, + // due to code motion, etc. + if (env_holder->IsDeoptimize()) return true; if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true; return instruction->GetType() == Primitive::kPrimNot; } diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc index 2f2e2d1fab..917341a1e7 100644 --- a/compiler/optimizing/ssa_phi_elimination.cc +++ b/compiler/optimizing/ssa_phi_elimination.cc @@ -114,6 +114,12 @@ void SsaRedundantPhiElimination::Run() { continue; } + if (phi->InputCount() == 0) { + DCHECK(phi->IsCatchPhi()); + DCHECK(phi->IsDead()); + continue; + } + // Find if the inputs of the phi are the same instruction. HInstruction* candidate = phi->InputAt(0); // A loop phi cannot have itself as the first phi. Note that this @@ -137,6 +143,11 @@ void SsaRedundantPhiElimination::Run() { continue; } + // The candidate may not dominate a phi in a catch block. + if (phi->IsCatchPhi() && !candidate->StrictlyDominates(phi)) { + continue; + } + if (phi->IsInLoop()) { // Because we're updating the users of this phi, we may have new // phis candidate for elimination if this phi is in a loop. Add phis that diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index 65610d54a6..1f1530fa1e 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -248,7 +248,7 @@ void StackMapStream::FillIn(MemoryRegion region) { DCHECK_EQ(code_info.GetStackMapsSize(code_info.ExtractEncoding()), stack_maps_size_); // Set the Dex register location catalog. 
- code_info.SetNumberOfDexRegisterLocationCatalogEntries(location_catalog_entries_.Size()); + code_info.SetNumberOfLocationCatalogEntries(location_catalog_entries_.Size()); MemoryRegion dex_register_location_catalog_region = region.Subregion( dex_register_location_catalog_start_, dex_register_location_catalog_size_); DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region); diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index 550ed70e0f..703b6f7e13 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -19,6 +19,7 @@ #include "base/arena_containers.h" #include "base/bit_vector-inl.h" +#include "base/hash_map.h" #include "base/value_object.h" #include "memory_region.h" #include "nodes.h" diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index b4ac1b4d1a..33207d92d2 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -55,8 +55,7 @@ TEST(StackMapTest, Test1) { ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask()); ASSERT_EQ(1u, code_info.GetNumberOfStackMaps()); - uint32_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); + uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); ASSERT_EQ(2u, number_of_location_catalog_entries); DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding); // The Dex register location catalog contains: @@ -154,8 +153,7 @@ TEST(StackMapTest, Test2) { ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask()); ASSERT_EQ(2u, code_info.GetNumberOfStackMaps()); - uint32_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); + uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); ASSERT_EQ(4u, number_of_location_catalog_entries); DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding); // The Dex register location catalog contains: @@ -304,8 +302,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask()); ASSERT_EQ(1u, code_info.GetNumberOfStackMaps()); - uint32_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); + uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); ASSERT_EQ(1u, number_of_location_catalog_entries); DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding); // The Dex register location catalog contains: @@ -398,8 +395,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) { // The location catalog contains two entries (DexRegisterLocation(kConstant, 0) // and DexRegisterLocation(kConstant, 1)), therefore the location catalog index // has a size of 1 bit. 
- uint32_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); + uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); ASSERT_EQ(2u, number_of_location_catalog_entries); ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_location_catalog_entries)); @@ -501,8 +497,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) { ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask()); ASSERT_EQ(1u, code_info.GetNumberOfStackMaps()); - uint32_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); + uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); ASSERT_EQ(0u, number_of_location_catalog_entries); DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding); ASSERT_EQ(0u, location_catalog.Size()); diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc index facc6304e5..39e5259f04 100644 --- a/compiler/trampolines/trampoline_compiler.cc +++ b/compiler/trampolines/trampoline_compiler.cc @@ -17,17 +17,36 @@ #include "trampoline_compiler.h" #include "jni_env_ext.h" + +#ifdef ART_ENABLE_CODEGEN_arm #include "utils/arm/assembler_thumb2.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_arm64 #include "utils/arm64/assembler_arm64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips #include "utils/mips/assembler_mips.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_mips64 #include "utils/mips64/assembler_mips64.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86 #include "utils/x86/assembler_x86.h" +#endif + +#ifdef ART_ENABLE_CODEGEN_x86_64 #include "utils/x86_64/assembler_x86_64.h" +#endif #define __ assembler. namespace art { +#ifdef ART_ENABLE_CODEGEN_arm namespace arm { static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi, ThreadOffset<4> offset) { @@ -55,7 +74,9 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention return entry_stub.release(); } } // namespace arm +#endif // ART_ENABLE_CODEGEN_arm +#ifdef ART_ENABLE_CODEGEN_arm64 namespace arm64 { static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi, ThreadOffset<8> offset) { @@ -92,7 +113,9 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention return entry_stub.release(); } } // namespace arm64 +#endif // ART_ENABLE_CODEGEN_arm64 +#ifdef ART_ENABLE_CODEGEN_mips namespace mips { static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi, ThreadOffset<4> offset) { @@ -122,7 +145,9 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention return entry_stub.release(); } } // namespace mips +#endif // ART_ENABLE_CODEGEN_mips +#ifdef ART_ENABLE_CODEGEN_mips64 namespace mips64 { static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi, ThreadOffset<8> offset) { @@ -152,7 +177,9 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention return entry_stub.release(); } } // namespace mips64 +#endif // ART_ENABLE_CODEGEN_mips64 +#ifdef ART_ENABLE_CODEGEN_x86 namespace x86 { static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) { X86Assembler assembler; @@ -170,7 +197,9 @@ static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) { return entry_stub.release(); } } // namespace x86 +#endif // ART_ENABLE_CODEGEN_x86 +#ifdef ART_ENABLE_CODEGEN_x86_64 namespace x86_64 { static const 
std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) { x86_64::X86_64Assembler assembler; @@ -188,17 +217,26 @@ static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) { return entry_stub.release(); } } // namespace x86_64 +#endif // ART_ENABLE_CODEGEN_x86_64 const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi, ThreadOffset<8> offset) { switch (isa) { +#ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return arm64::CreateTrampoline(abi, offset); +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return mips64::CreateTrampoline(abi, offset); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return x86_64::CreateTrampoline(offset); +#endif default: + UNUSED(abi); + UNUSED(offset); LOG(FATAL) << "Unexpected InstructionSet: " << isa; UNREACHABLE(); } @@ -207,13 +245,20 @@ const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCal const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi, ThreadOffset<4> offset) { switch (isa) { +#ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: return arm::CreateTrampoline(abi, offset); +#endif +#ifdef ART_ENABLE_CODEGEN_mips case kMips: return mips::CreateTrampoline(abi, offset); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 case kX86: + UNUSED(abi); return x86::CreateTrampoline(offset); +#endif default: LOG(FATAL) << "Unexpected InstructionSet: " << isa; UNREACHABLE(); diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h index bdab2796d8..9fb22452ea 100644 --- a/compiler/trampolines/trampoline_compiler.h +++ b/compiler/trampolines/trampoline_compiler.h @@ -27,10 +27,10 @@ namespace art { // Create code that will invoke the function held in thread local storage. const std::vector<uint8_t>* CreateTrampoline32(InstructionSet isa, EntryPointCallingConvention abi, ThreadOffset<4> entry_point_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCallingConvention abi, ThreadOffset<8> entry_point_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace art diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 09d22703fe..0e3e08c2da 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -252,11 +252,11 @@ uint32_t Address::encodingThumbLdrdStrd() const { if (offset_ < 0) { int32_t off = -offset_; CHECK_LT(off, 1024); - CHECK_EQ((off & 3 /* 0b11 */), 0); // Must be multiple of 4. + CHECK_ALIGNED(off, 4); encoding = (am ^ (1 << kUShift)) | off >> 2; // Flip U to adjust sign. } else { CHECK_LT(offset_, 1024); - CHECK_EQ((offset_ & 3 /* 0b11 */), 0); // Must be multiple of 4. 
+ CHECK_ALIGNED(offset_, 4); encoding = am | offset_ >> 2; } encoding |= static_cast<uint32_t>(rn_) << 16; diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index 5d85d11054..ef60fefe4d 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -888,7 +888,7 @@ class ArmAssembler : public Assembler { // Slowpath entered when Thread::Current()->_exception is non-null class ArmExceptionSlowPath FINAL : public SlowPath { public: - explicit ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust) + ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust) : scratch_(scratch), stack_adjust_(stack_adjust) { } void Emit(Assembler *sp_asm) OVERRIDE; diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index 2dde0149a6..b499dddb0c 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -25,6 +25,58 @@ namespace art { namespace arm { +void Thumb2Assembler::Fixup::PrepareDependents(Thumb2Assembler* assembler) { + // For each Fixup, it's easy to find the Fixups that it depends on as they are either + // the following or the preceding Fixups until we find the target. However, for fixup + // adjustment we need the reverse lookup, i.e. what Fixups depend on a given Fixup. + // This function creates a compact representation of this relationship, where we have + // all the dependents in a single array and Fixups reference their ranges by start + // index and count. (Instead of having a per-fixup vector.) + + // Count the number of dependents of each Fixup. + const FixupId end_id = assembler->fixups_.size(); + Fixup* fixups = assembler->fixups_.data(); + for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) { + uint32_t target = fixups[fixup_id].target_; + if (target > fixups[fixup_id].location_) { + for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) { + fixups[id].dependents_count_ += 1u; + } + } else { + for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) { + fixups[id - 1u].dependents_count_ += 1u; + } + } + } + // Assign index ranges in fixup_dependents_ to individual fixups. Record the end of the + // range in dependents_start_, we shall later decrement it as we fill in fixup_dependents_. + uint32_t number_of_dependents = 0u; + for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) { + number_of_dependents += fixups[fixup_id].dependents_count_; + fixups[fixup_id].dependents_start_ = number_of_dependents; + } + if (number_of_dependents == 0u) { + return; + } + // Create and fill in the fixup_dependents_. 
+  assembler->fixup_dependents_.reset(new FixupId[number_of_dependents]);
+  FixupId* dependents = assembler->fixup_dependents_.get();
+  for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
+    uint32_t target = fixups[fixup_id].target_;
+    if (target > fixups[fixup_id].location_) {
+      for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
+        fixups[id].dependents_start_ -= 1u;
+        dependents[fixups[id].dependents_start_] = fixup_id;
+      }
+    } else {
+      for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) {
+        fixups[id - 1u].dependents_start_ -= 1u;
+        dependents[fixups[id - 1u].dependents_start_] = fixup_id;
+      }
+    }
+  }
+}
+
 void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) {
   CHECK(!label->IsBound());
@@ -32,10 +84,6 @@ void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) {
   FixupId fixup_id = label->Position();  // The id for linked Fixup.
   Fixup* fixup = GetFixup(fixup_id);  // Get the Fixup at this id.
   fixup->Resolve(bound_pc);  // Fixup can be resolved now.
-  // Add this fixup as a dependency of all later fixups.
-  for (FixupId id = fixup_id + 1u, end = fixups_.size(); id != end; ++id) {
-    GetFixup(id)->AddDependent(fixup_id);
-  }
   uint32_t fixup_location = fixup->GetLocation();
   uint16_t next = buffer_.Load<uint16_t>(fixup_location);  // Get next in chain.
   buffer_.Store<int16_t>(fixup_location, 0);
@@ -59,7 +107,7 @@ void Thumb2Assembler::AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_s
   uint32_t adjustment = fixup->AdjustSizeIfNeeded(*current_code_size);
   if (adjustment != 0u) {
     *current_code_size += adjustment;
-    for (FixupId dependent_id : fixup->Dependents()) {
+    for (FixupId dependent_id : fixup->Dependents(*this)) {
       Fixup* dependent = GetFixup(dependent_id);
       dependent->IncreaseAdjustment(adjustment);
       if (buffer_.Load<int16_t>(dependent->GetLocation()) == 0) {
@@ -71,6 +119,7 @@
 }

 uint32_t Thumb2Assembler::AdjustFixups() {
+  Fixup::PrepareDependents(this);
   uint32_t current_code_size = buffer_.Size();
   std::deque<FixupId> fixups_to_recalculate;
   if (kIsDebugBuild) {
@@ -84,14 +133,27 @@
     AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
   }
   while (!fixups_to_recalculate.empty()) {
-    // Pop the fixup.
-    FixupId fixup_id = fixups_to_recalculate.front();
-    fixups_to_recalculate.pop_front();
-    Fixup* fixup = GetFixup(fixup_id);
-    DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
-    buffer_.Store<int16_t>(fixup->GetLocation(), 0);
-    // See if it needs adjustment.
-    AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
+    do {
+      // Pop the fixup.
+      FixupId fixup_id = fixups_to_recalculate.front();
+      fixups_to_recalculate.pop_front();
+      Fixup* fixup = GetFixup(fixup_id);
+      DCHECK_NE(buffer_.Load<int16_t>(fixup->GetLocation()), 0);
+      buffer_.Store<int16_t>(fixup->GetLocation(), 0);
+      // See if it needs adjustment.
+      AdjustFixupIfNeeded(fixup, &current_code_size, &fixups_to_recalculate);
+    } while (!fixups_to_recalculate.empty());
+
+    if ((current_code_size & 2) != 0 && !literals_.empty()) {
+      // If we need to add padding before literals, this may just push some out of range,
+      // so recalculate all load literals. This makes up for the fact that we don't mark
+      // load literal as a dependency of all previous Fixups even though it actually is.
+      for (Fixup& fixup : fixups_) {
+        if (fixup.IsLoadLiteral()) {
+          AdjustFixupIfNeeded(&fixup, &current_code_size, &fixups_to_recalculate);
+        }
+      }
+    }
+  }
   if (kIsDebugBuild) {
     // Check that no fixup is marked as being in fixups_to_recalculate anymore.
@@ -101,7 +163,7 @@
   }

   // Adjust literal pool labels for padding.
-  DCHECK_EQ(current_code_size & 1u, 0u);
+  DCHECK_ALIGNED(current_code_size, 2);
   uint32_t literals_adjustment = current_code_size + (current_code_size & 2) - buffer_.Size();
   if (literals_adjustment != 0u) {
     for (Literal& literal : literals_) {
@@ -152,7 +214,7 @@ void Thumb2Assembler::EmitLiterals() {
   // Load literal instructions (LDR, LDRD, VLDR) require 4-byte alignment.
   // We don't support byte and half-word literals.
   uint32_t code_size = buffer_.Size();
-  DCHECK_EQ(code_size & 1u, 0u);
+  DCHECK_ALIGNED(code_size, 2);
   if ((code_size & 2u) != 0u) {
     Emit16(0);
   }
@@ -168,7 +230,7 @@
 }

 inline int16_t Thumb2Assembler::BEncoding16(int32_t offset, Condition cond) {
-  DCHECK_EQ(offset & 1, 0);
+  DCHECK_ALIGNED(offset, 2);
   int16_t encoding = B15 | B14;
   if (cond != AL) {
     DCHECK(IsInt<9>(offset));
@@ -181,7 +243,7 @@
 }

 inline int32_t Thumb2Assembler::BEncoding32(int32_t offset, Condition cond) {
-  DCHECK_EQ(offset & 1, 0);
+  DCHECK_ALIGNED(offset, 2);
   int32_t s = (offset >> 31) & 1;  // Sign bit.
   int32_t encoding = B31 | B30 | B29 | B28 | B15 |
       (s << 26) |  // Sign bit goes to bit 26.
@@ -205,7 +267,7 @@
 inline int16_t Thumb2Assembler::CbxzEncoding16(Register rn, int32_t offset, Condition cond) {
   DCHECK(!IsHighRegister(rn));
-  DCHECK_EQ(offset & 1, 0);
+  DCHECK_ALIGNED(offset, 2);
   DCHECK(IsUint<7>(offset));
   DCHECK(cond == EQ || cond == NE);
   return B15 | B13 | B12 | B8 | (cond == NE ?
B11 : 0) | static_cast<int32_t>(rn) | @@ -250,7 +312,7 @@ inline int32_t Thumb2Assembler::MovModImmEncoding32(Register rd, int32_t value) inline int16_t Thumb2Assembler::LdrLitEncoding16(Register rt, int32_t offset) { DCHECK(!IsHighRegister(rt)); - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); DCHECK(IsUint<10>(offset)); return B14 | B11 | (static_cast<int32_t>(rt) << 8) | (offset >> 2); } @@ -261,7 +323,7 @@ inline int32_t Thumb2Assembler::LdrLitEncoding32(Register rt, int32_t offset) { } inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset) { - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); CHECK(IsUint<10>(offset)); return B31 | B30 | B29 | B27 | B24 /* P = 1 */ | B23 /* U = 1 */ | B22 | 0 /* W = 0 */ | B20 | @@ -270,7 +332,7 @@ inline int32_t Thumb2Assembler::LdrdEncoding32(Register rt, Register rt2, Regist } inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32_t offset) { - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); CHECK(IsUint<10>(offset)); return B31 | B30 | B29 | B27 | B26 | B24 | B23 /* U = 1 */ | B20 | B11 | B9 | @@ -281,7 +343,7 @@ inline int32_t Thumb2Assembler::VldrsEncoding32(SRegister sd, Register rn, int32 } inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32_t offset) { - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); CHECK(IsUint<10>(offset)); return B31 | B30 | B29 | B27 | B26 | B24 | B23 /* U = 1 */ | B20 | B11 | B9 | B8 | @@ -294,7 +356,7 @@ inline int32_t Thumb2Assembler::VldrdEncoding32(DRegister dd, Register rn, int32 inline int16_t Thumb2Assembler::LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset) { DCHECK(!IsHighRegister(rt)); DCHECK(!IsHighRegister(rn)); - DCHECK_EQ(offset & 3, 0); + DCHECK_ALIGNED(offset, 4); DCHECK(IsUint<7>(offset)); return B14 | B13 | B11 | (static_cast<int32_t>(rn) << 3) | static_cast<int32_t>(rt) | @@ -975,6 +1037,7 @@ void Thumb2Assembler::vcmpdz(DRegister dd, Condition cond) { } void Thumb2Assembler::b(Label* label, Condition cond) { + DCHECK_EQ(next_condition_, AL); EmitBranch(cond, label, false, false); } @@ -1422,7 +1485,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED, thumb_opcode = 3U /* 0b11 */; opcode_shift = 12; CHECK_LT(immediate, (1u << 9)); - CHECK_EQ((immediate & 3u /* 0b11 */), 0u); + CHECK_ALIGNED(immediate, 4); // Remove rd and rn from instruction by orring it with immed and clearing bits. rn = R0; @@ -1436,7 +1499,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED, thumb_opcode = 5U /* 0b101 */; opcode_shift = 11; CHECK_LT(immediate, (1u << 10)); - CHECK_EQ((immediate & 3u /* 0b11 */), 0u); + CHECK_ALIGNED(immediate, 4); // Remove rn from instruction. rn = R0; @@ -1473,7 +1536,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED, thumb_opcode = 0x61 /* 0b1100001 */; opcode_shift = 7; CHECK_LT(immediate, (1u << 9)); - CHECK_EQ((immediate & 3u /* 0b11 */), 0u); + CHECK_ALIGNED(immediate, 4); // Remove rd and rn from instruction by orring it with immed and clearing bits. rn = R0; @@ -1651,7 +1714,7 @@ inline uint32_t Thumb2Assembler::Fixup::GetSizeInBytes() const { inline size_t Thumb2Assembler::Fixup::LiteralPoolPaddingSize(uint32_t current_code_size) { // The code size must be a multiple of 2. - DCHECK_EQ(current_code_size & 1u, 0u); + DCHECK_ALIGNED(current_code_size, 2); // If it isn't a multiple of 4, we need to add a 2-byte padding before the literal pool. 
return current_code_size & 2; } @@ -1696,7 +1759,7 @@ inline int32_t Thumb2Assembler::Fixup::GetOffset(uint32_t current_code_size) con // Load literal instructions round down the PC+4 to a multiple of 4, so if the PC // isn't a multiple of 2, we need to adjust. Since we already adjusted for the target // being aligned, current PC alignment can be inferred from diff. - DCHECK_EQ(diff & 1, 0); + DCHECK_ALIGNED(diff, 2); diff = diff + (diff & 2); DCHECK_GE(diff, 0); break; @@ -2044,7 +2107,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond, if (sp_relative) { // SP relative, 10 bit offset. CHECK_LT(offset, (1 << 10)); - CHECK_EQ((offset & 3 /* 0b11 */), 0); + CHECK_ALIGNED(offset, 4); encoding |= rd << 8 | offset >> 2; } else { // No SP relative. The offset is shifted right depending on @@ -2057,12 +2120,12 @@ void Thumb2Assembler::EmitLoadStore(Condition cond, } else if (half) { // 6 bit offset, shifted by 1. CHECK_LT(offset, (1 << 6)); - CHECK_EQ((offset & 1 /* 0b1 */), 0); + CHECK_ALIGNED(offset, 2); offset >>= 1; } else { // 7 bit offset, shifted by 2. CHECK_LT(offset, (1 << 7)); - CHECK_EQ((offset & 3 /* 0b11 */), 0); + CHECK_ALIGNED(offset, 4); offset >>= 2; } encoding |= rn << 3 | offset << 6; @@ -2219,17 +2282,7 @@ void Thumb2Assembler::EmitBranch(Condition cond, Label* label, bool link, bool x if (label->IsBound()) { // The branch is to a bound label which means that it's a backwards branch. - // Record this branch as a dependency of all Fixups between the label and the branch. GetFixup(branch_id)->Resolve(label->Position()); - for (FixupId fixup_id = branch_id; fixup_id != 0u; ) { - --fixup_id; - Fixup* fixup = GetFixup(fixup_id); - DCHECK_GE(label->Position(), 0); - if (fixup->GetLocation() < static_cast<uint32_t>(label->Position())) { - break; - } - fixup->AddDependent(branch_id); - } Emit16(0); } else { // Branch target is an unbound label. Add it to a singly-linked list maintained within diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h index 5e6969b4c2..41eb5d36f2 100644 --- a/compiler/utils/arm/assembler_thumb2.h +++ b/compiler/utils/arm/assembler_thumb2.h @@ -24,6 +24,7 @@ #include "constants_arm.h" #include "utils/arm/managed_register_arm.h" #include "utils/arm/assembler_arm.h" +#include "utils/array_ref.h" #include "offsets.h" namespace art { @@ -37,6 +38,7 @@ class Thumb2Assembler FINAL : public ArmAssembler { it_cond_index_(kNoItCondition), next_condition_(AL), fixups_(), + fixup_dependents_(), literals_(), last_position_adjustment_(0u), last_old_position_(0u), @@ -487,6 +489,10 @@ class Thumb2Assembler FINAL : public ArmAssembler { return type_; } + bool IsLoadLiteral() const { + return GetType() >= kLoadLiteralNarrow; + } + Size GetOriginalSize() const { return original_size_; } @@ -507,12 +513,12 @@ class Thumb2Assembler FINAL : public ArmAssembler { return adjustment_; } - const std::vector<FixupId>& Dependents() const { - return dependents_; - } + // Prepare the assembler->fixup_dependents_ and each Fixup's dependents_start_/count_. + static void PrepareDependents(Thumb2Assembler* assembler); - void AddDependent(FixupId dependent_id) { - dependents_.push_back(dependent_id); + ArrayRef<FixupId> Dependents(const Thumb2Assembler& assembler) const { + return ArrayRef<FixupId>(assembler.fixup_dependents_.get() + dependents_start_, + dependents_count_); } // Resolve a branch when the target is known. 
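
Aside on the Fixup dependency change above: the patch replaces a per-Fixup std::vector<FixupId> with one shared fixup_dependents_ array plus a (dependents_start_, dependents_count_) pair per Fixup, built counting-sort style in PrepareDependents(): count dependents, prefix-sum to range ends, then fill backwards while decrementing each start index. A minimal standalone sketch of that packing idiom, with hypothetical Node/PackDependents names and simplified types (not the ART code itself):

  #include <cstdint>
  #include <memory>
  #include <vector>

  struct Node {
    std::vector<uint32_t> deps;     // Nodes this one depends on (input).
    uint32_t dependents_count = 0;  // How many nodes depend on this one.
    uint32_t dependents_start = 0;  // Start index into the shared array.
  };

  // Pack every node's dependent list into a single shared array.
  std::unique_ptr<uint32_t[]> PackDependents(std::vector<Node>& nodes) {
    const uint32_t n = static_cast<uint32_t>(nodes.size());
    // Pass 1: count the dependents of each node.
    for (uint32_t id = 0u; id != n; ++id) {
      for (uint32_t dep : nodes[id].deps) {
        nodes[dep].dependents_count += 1u;
      }
    }
    // Pass 2: prefix-sum; record the *end* of each range, to be decremented.
    uint32_t total = 0u;
    for (Node& node : nodes) {
      total += node.dependents_count;
      node.dependents_start = total;
    }
    // Pass 3: fill backwards; afterwards dependents_start is the range begin.
    std::unique_ptr<uint32_t[]> packed(new uint32_t[total]);
    for (uint32_t id = 0u; id != n; ++id) {
      for (uint32_t dep : nodes[id].deps) {
        packed[--nodes[dep].dependents_start] = id;
      }
    }
    return packed;
  }

A single allocation plus (start, count) pairs avoids one vector per fixup, which matters for methods with thousands of branches.
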
@@ -557,7 +563,8 @@ class Thumb2Assembler FINAL : public ArmAssembler { location_(location), target_(kUnresolved), adjustment_(0u), - dependents_() { + dependents_count_(0u), + dependents_start_(0u) { } static size_t SizeInBytes(Size size); @@ -584,7 +591,10 @@ class Thumb2Assembler FINAL : public ArmAssembler { uint32_t location_; // Offset into assembler buffer in bytes. uint32_t target_; // Offset into assembler buffer in bytes. uint32_t adjustment_; // The number of extra bytes inserted between location_ and target_. - std::vector<FixupId> dependents_; // Fixups that require adjustment when current size changes. + // Fixups that require adjustment when current size changes are stored in a single + // array in the assembler and we store only the start index and count here. + uint32_t dependents_count_; + uint32_t dependents_start_; }; // Emit a single 32 or 16 bit data processing instruction. @@ -760,6 +770,7 @@ class Thumb2Assembler FINAL : public ArmAssembler { static int32_t LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset); std::vector<Fixup> fixups_; + std::unique_ptr<FixupId[]> fixup_dependents_; // Use std::deque<> for literal labels to allow insertions at the end // without invalidating pointers and references to existing elements. diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc index 68b7931a0c..84f5cb16fb 100644 --- a/compiler/utils/arm/assembler_thumb2_test.cc +++ b/compiler/utils/arm/assembler_thumb2_test.cc @@ -950,4 +950,73 @@ TEST_F(AssemblerThumb2Test, LoadLiteralDoubleFar) { __ GetAdjustedPosition(label.Position())); } +TEST_F(AssemblerThumb2Test, LoadLiteralBeyondMax1KiBDueToAlignmentOnSecondPass) { + // First part: as TwoCbzBeyondMaxOffset but add one 16-bit instruction to the end, + // so that the size is not Aligned<4>(.). On the first pass, the assembler resizes + // the second CBZ because it's out of range, then it will resize the first CBZ + // which has been pushed out of range. Thus, after the first pass, the code size + // will appear Aligned<4>(.) but the final size will not be. + Label label0, label1, label2; + __ cbz(arm::R0, &label1); + constexpr size_t kLdrR0R0Count1 = 63; + for (size_t i = 0; i != kLdrR0R0Count1; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ Bind(&label0); + __ cbz(arm::R0, &label2); + __ Bind(&label1); + constexpr size_t kLdrR0R0Count2 = 65; + for (size_t i = 0; i != kLdrR0R0Count2; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + __ Bind(&label2); + __ ldr(arm::R0, arm::Address(arm::R0)); + + std::string expected_part1 = + "cmp r0, #0\n" // cbz r0, label1 + "beq.n 1f\n" + + RepeatInsn(kLdrR0R0Count1, "ldr r0, [r0]\n") + + "0:\n" + "cmp r0, #0\n" // cbz r0, label2 + "beq.n 2f\n" + "1:\n" + + RepeatInsn(kLdrR0R0Count2, "ldr r0, [r0]\n") + + "2:\n" // Here the offset is Aligned<4>(.). + "ldr r0, [r0]\n"; // Make the first part + + // Second part: as LoadLiteralMax1KiB with the caveat that the offset of the load + // literal will not be Aligned<4>(.) but it will appear to be when we process the + // instruction during the first pass, so the literal will need a padding and it + // will push the literal out of range, so we shall end up with "ldr.w". 
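
Before the test body continues: the scenario above hinges on two small pieces of alignment arithmetic from this patch, LiteralPoolPaddingSize() and the PC rounding in Fixup::GetOffset(). A hedged sketch of both, with illustrative helper names and plain asserts standing in for DCHECKs:

  #include <cassert>
  #include <cstdint>

  // Thumb2 code size is always 2-byte aligned; the literal pool must start
  // 4-byte aligned, so at most one 2-byte padding is inserted before it.
  uint32_t LiteralPoolPadding(uint32_t code_size) {
    assert((code_size & 1u) == 0u);
    return code_size & 2u;  // 2 iff code_size % 4 == 2.
  }

  // A load literal reads from PC+4 rounded down to a multiple of 4, so a
  // distance that is 2 mod 4 must be bumped by 2, as in Fixup::GetOffset().
  int32_t AdjustLiteralDistance(int32_t diff) {
    assert((diff & 1) == 0);
    return diff + (diff & 2);
  }

An extra 2 bytes of pool padding is exactly what pushes a load sitting at the 1 KiB limit of the 16-bit encoding over the edge, forcing the 32-bit ldr.w the test expects.
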
+ arm::Literal* literal = __ NewLiteral<int32_t>(0x12345678); + __ LoadLiteral(arm::R0, literal); + Label label; + __ Bind(&label); + constexpr size_t kLdrR0R0Count = 511; + for (size_t i = 0; i != kLdrR0R0Count; ++i) { + __ ldr(arm::R0, arm::Address(arm::R0)); + } + + std::string expected = + expected_part1 + + "1:\n" + "ldr.w r0, [pc, #((2f - 1b - 2) & ~2)]\n" + + RepeatInsn(kLdrR0R0Count, "ldr r0, [r0]\n") + + ".align 2, 0\n" + "2:\n" + ".word 0x12345678\n"; + DriverStr(expected, "LoadLiteralMax1KiB"); + + EXPECT_EQ(static_cast<uint32_t>(label.Position()) + 6u, + __ GetAdjustedPosition(label.Position())); +} + +TEST_F(AssemblerThumb2Test, Clz) { + __ clz(arm::R0, arm::R1); + + const char* expected = "clz r0, r1\n"; + + DriverStr(expected, "clz"); +} + } // namespace art diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h index 1513296c2d..6b4daed909 100644 --- a/compiler/utils/arm/constants_arm.h +++ b/compiler/utils/arm/constants_arm.h @@ -32,8 +32,9 @@ namespace arm { // Defines constants and accessor classes to assemble, disassemble and // simulate ARM instructions. // -// Section references in the code refer to the "ARM Architecture Reference -// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf) +// Section references in the code refer to the "ARM Architecture +// Reference Manual ARMv7-A and ARMv7-R edition", issue C.b (24 July +// 2012). // // Constants for specific fields are defined in their respective named enums. // General constants are in an anonymous enum in class Instr. @@ -97,26 +98,32 @@ enum DRegister { // private marker to avoid generate-operator-out.py from proce std::ostream& operator<<(std::ostream& os, const DRegister& rhs); -// Values for the condition field as defined in section A3.2. +// Values for the condition field as defined in Table A8-1 "Condition +// codes" (refer to Section A8.3 "Conditional execution"). enum Condition { // private marker to avoid generate-operator-out.py from processing. kNoCondition = -1, - EQ = 0, // equal - NE = 1, // not equal - CS = 2, // carry set/unsigned higher or same - CC = 3, // carry clear/unsigned lower - MI = 4, // minus/negative - PL = 5, // plus/positive or zero - VS = 6, // overflow - VC = 7, // no overflow - HI = 8, // unsigned higher - LS = 9, // unsigned lower or same - GE = 10, // signed greater than or equal - LT = 11, // signed less than - GT = 12, // signed greater than - LE = 13, // signed less than or equal - AL = 14, // always (unconditional) - kSpecialCondition = 15, // special condition (refer to section A3.2.1) + // Meaning (integer) | Meaning (floating-point) + // ---------------------------------------+----------------------------------------- + EQ = 0, // Equal | Equal + NE = 1, // Not equal | Not equal, or unordered + CS = 2, // Carry set | Greater than, equal, or unordered + CC = 3, // Carry clear | Less than + MI = 4, // Minus, negative | Less than + PL = 5, // Plus, positive or zero | Greater than, equal, or unordered + VS = 6, // Overflow | Unordered (i.e. 
at least one NaN operand) + VC = 7, // No overflow | Not unordered + HI = 8, // Unsigned higher | Greater than, or unordered + LS = 9, // Unsigned lower or same | Less than or equal + GE = 10, // Signed greater than or equal | Greater than or equal + LT = 11, // Signed less than | Less than, or unordered + GT = 12, // Signed greater than | Greater than + LE = 13, // Signed less than or equal | Less than, equal, or unordered + AL = 14, // Always (unconditional) | Always (unconditional) + kSpecialCondition = 15, // Special condition (refer to Section A8.3 "Conditional execution"). kMaxCondition = 16, + + HS = CS, // HS (unsigned higher or same) is a synonym for CS. + LO = CC // LO (unsigned lower) is a synonym for CC. }; std::ostream& operator<<(std::ostream& os, const Condition& rhs); diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index 05882a30b0..8e85fe96ab 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -254,7 +254,7 @@ class Arm64Assembler FINAL : public Assembler { class Arm64Exception { private: - explicit Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust) + Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust) : scratch_(scratch), stack_adjust_(stack_adjust) { } diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h index ff5a77c97a..303e0d5ad4 100644 --- a/compiler/utils/array_ref.h +++ b/compiler/utils/array_ref.h @@ -62,14 +62,14 @@ class ArrayRef { } template <size_t size> - constexpr ArrayRef(T (&array)[size]) + explicit constexpr ArrayRef(T (&array)[size]) : array_(array), size_(size) { } template <typename U, size_t size> - constexpr ArrayRef(U (&array)[size], - typename std::enable_if<std::is_same<T, const U>::value, tag>::type - t ATTRIBUTE_UNUSED = tag()) + explicit constexpr ArrayRef(U (&array)[size], + typename std::enable_if<std::is_same<T, const U>::value, tag>::type + t ATTRIBUTE_UNUSED = tag()) : array_(array), size_(size) { } @@ -83,9 +83,9 @@ class ArrayRef { } template <typename U, typename Alloc> - ArrayRef(const std::vector<U, Alloc>& v, - typename std::enable_if<std::is_same<T, const U>::value, tag>::type - t ATTRIBUTE_UNUSED = tag()) + explicit ArrayRef(const std::vector<U, Alloc>& v, + typename std::enable_if<std::is_same<T, const U>::value, tag>::type + t ATTRIBUTE_UNUSED = tag()) : array_(v.data()), size_(v.size()) { } diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc index 6d8a98931f..496ca95ff9 100644 --- a/compiler/utils/assembler.cc +++ b/compiler/utils/assembler.cc @@ -19,13 +19,25 @@ #include <algorithm> #include <vector> +#ifdef ART_ENABLE_CODEGEN_arm #include "arm/assembler_arm32.h" #include "arm/assembler_thumb2.h" +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 #include "arm64/assembler_arm64.h" +#endif +#ifdef ART_ENABLE_CODEGEN_mips #include "mips/assembler_mips.h" +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 #include "mips64/assembler_mips64.h" +#endif +#ifdef ART_ENABLE_CODEGEN_x86 #include "x86/assembler_x86.h" +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 #include "x86_64/assembler_x86_64.h" +#endif #include "globals.h" #include "memory_region.h" @@ -112,20 +124,32 @@ void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() { Assembler* Assembler::Create(InstructionSet instruction_set) { switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm case kArm: return new arm::Arm32Assembler(); case kThumb2: return new arm::Thumb2Assembler(); +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: 
return new arm64::Arm64Assembler(); +#endif +#ifdef ART_ENABLE_CODEGEN_mips case kMips: return new mips::MipsAssembler(); +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return new mips64::Mips64Assembler(); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 case kX86: return new x86::X86Assembler(); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return new x86_64::X86_64Assembler(); +#endif default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; return nullptr; diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc index 20f61f942b..cb01cea8ef 100644 --- a/compiler/utils/assembler_thumb_test.cc +++ b/compiler/utils/assembler_thumb_test.cc @@ -32,7 +32,7 @@ namespace arm { // Include results file (generated manually) #include "assembler_thumb_test_expected.cc.inc" -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ // This controls whether the results are printed to the // screen or compared against the expected output. // To generate new expected output, set this to true and @@ -72,7 +72,7 @@ void InitResults() { } std::string GetToolsDir() { -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ // This will only work on the host. There is no as, objcopy or objdump on the device. static std::string toolsdir; @@ -89,7 +89,7 @@ std::string GetToolsDir() { } void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) { -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ static std::string toolsdir = GetToolsDir(); ScratchFile file; diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h index 8cdb180740..2c4a689096 100644 --- a/compiler/utils/dedupe_set.h +++ b/compiler/utils/dedupe_set.h @@ -99,7 +99,7 @@ class DedupeSet { return hashed_key.store_ptr; } - explicit DedupeSet(const char* set_name, SwapAllocator<void>& alloc) + DedupeSet(const char* set_name, SwapAllocator<void>& alloc) : allocator_(alloc), hash_time_(0) { for (HashType i = 0; i < kShard; ++i) { std::ostringstream oss; diff --git a/compiler/utils/dex_cache_arrays_layout.h b/compiler/utils/dex_cache_arrays_layout.h index 8f98ea11ba..2a109bd11e 100644 --- a/compiler/utils/dex_cache_arrays_layout.h +++ b/compiler/utils/dex_cache_arrays_layout.h @@ -37,7 +37,7 @@ class DexCacheArraysLayout { } // Construct a layout for a particular dex file. 
- explicit DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file); + DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file); bool Valid() const { return Size() != 0u; diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h index bb62bca3b9..893daff719 100644 --- a/compiler/utils/managed_register.h +++ b/compiler/utils/managed_register.h @@ -95,7 +95,7 @@ class ManagedRegisterSpill : public ManagedRegister { explicit ManagedRegisterSpill(const ManagedRegister& other) : ManagedRegister(other), size_(-1), spill_offset_(-1) { } - explicit ManagedRegisterSpill(const ManagedRegister& other, int32_t size) + ManagedRegisterSpill(const ManagedRegister& other, int32_t size) : ManagedRegister(other), size_(size), spill_offset_(-1) { } int32_t getSpillOffset() { diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index 0d1b82ce7b..df95daddc1 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -283,7 +283,7 @@ class MipsAssembler FINAL : public Assembler { // Slowpath entered when Thread::Current()->_exception is non-null class MipsExceptionSlowPath FINAL : public SlowPath { public: - explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust) + MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust) : scratch_(scratch), stack_adjust_(stack_adjust) {} virtual void Emit(Assembler *sp_asm) OVERRIDE; private: diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index 47b146a28c..31130ea43d 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -354,7 +354,7 @@ class Mips64Assembler FINAL : public Assembler { // Slowpath entered when Thread::Current()->_exception is non-null class Mips64ExceptionSlowPath FINAL : public SlowPath { public: - explicit Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust) + Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust) : scratch_(scratch), stack_adjust_(stack_adjust) {} virtual void Emit(Assembler *sp_asm) OVERRIDE; private: diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc index 325ee4fa01..42ed8810f8 100644 --- a/compiler/utils/swap_space.cc +++ b/compiler/utils/swap_space.cc @@ -143,7 +143,6 @@ SpaceChunk SwapSpace::NewFileChunk(size_t min_size) { LOG(ERROR) << "Unable to mmap new swap file chunk."; LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size; LOG(ERROR) << "Free list:"; - MutexLock lock(Thread::Current(), lock_); DumpFreeMap(free_by_size_); LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_); LOG(FATAL) << "Aborting..."; diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h index 691df4a945..f7c772d673 100644 --- a/compiler/utils/swap_space.h +++ b/compiler/utils/swap_space.h @@ -60,15 +60,15 @@ class SwapSpace { public: SwapSpace(int fd, size_t initial_size); ~SwapSpace(); - void* Alloc(size_t size) LOCKS_EXCLUDED(lock_); - void Free(void* ptr, size_t size) LOCKS_EXCLUDED(lock_); + void* Alloc(size_t size) REQUIRES(!lock_); + void Free(void* ptr, size_t size) REQUIRES(!lock_); size_t GetSize() { return size_; } private: - SpaceChunk NewFileChunk(size_t min_size); + SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_); int fd_; size_t size_; diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 
fa85ada864..9b3d792903 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -145,12 +145,33 @@ void X86Assembler::movl(const Address& dst, Label* lbl) { EmitLabel(lbl, dst.length_ + 5); } +void X86Assembler::movntl(const Address& dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xC3); + EmitOperand(src, dst); +} + void X86Assembler::bswapl(Register dst) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0x0F); EmitUint8(0xC8 + dst); } +void X86Assembler::bsrl(Register dst, Register src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitRegisterOperand(dst, src); +} + +void X86Assembler::bsrl(Register dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitOperand(dst, src); +} + void X86Assembler::movzxb(Register dst, ByteRegister src) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0x0F); @@ -1194,11 +1215,26 @@ void X86Assembler::imull(Register dst, Register src) { } -void X86Assembler::imull(Register reg, const Immediate& imm) { +void X86Assembler::imull(Register dst, Register src, const Immediate& imm) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); - EmitUint8(0x69); - EmitOperand(reg, Operand(reg)); - EmitImmediate(imm); + // See whether imm can be represented as a sign-extended 8bit value. + int32_t v32 = static_cast<int32_t>(imm.value()); + if (IsInt<8>(v32)) { + // Sign-extension works. + EmitUint8(0x6B); + EmitOperand(dst, Operand(src)); + EmitUint8(static_cast<uint8_t>(v32 & 0xFF)); + } else { + // Not representable, use full immediate. + EmitUint8(0x69); + EmitOperand(dst, Operand(src)); + EmitImmediate(imm); + } +} + + +void X86Assembler::imull(Register reg, const Immediate& imm) { + imull(reg, reg, imm); } @@ -1515,6 +1551,29 @@ void X86Assembler::repne_scasw() { } +void X86Assembler::repe_cmpsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86Assembler::repe_cmpsl() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86Assembler::rep_movsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA5); +} + + X86Assembler* X86Assembler::lock() { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0xF0); diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index d1b4e1dc5f..a9227f38b0 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -205,7 +205,7 @@ class Address : public Operand { class X86Assembler FINAL : public Assembler { public: - explicit X86Assembler() {} + X86Assembler() {} virtual ~X86Assembler() {} /* @@ -231,7 +231,11 @@ class X86Assembler FINAL : public Assembler { void movl(const Address& dst, const Immediate& imm); void movl(const Address& dst, Label* lbl); + void movntl(const Address& dst, Register src); + void bswapl(Register dst); + void bsrl(Register dst, Register src); + void bsrl(Register dst, const Address& src); void movzxb(Register dst, ByteRegister src); void movzxb(Register dst, const Address& src); @@ -409,6 +413,7 @@ class X86Assembler FINAL : public Assembler { void imull(Register dst, Register src); void imull(Register reg, const Immediate& imm); + void imull(Register dst, Register src, const Immediate& imm); void imull(Register reg, const 
Address& address); void imull(Register reg); @@ -465,6 +470,9 @@ class X86Assembler FINAL : public Assembler { void jmp(Label* label); void repne_scasw(); + void repe_cmpsw(); + void repe_cmpsl(); + void rep_movsw(); X86Assembler* lock(); void cmpxchgl(const Address& address, Register reg); @@ -644,7 +652,6 @@ class X86Assembler FINAL : public Assembler { void EmitComplex(int rm, const Operand& operand, const Immediate& immediate); void EmitLabel(Label* label, int instruction_size); void EmitLabelLink(Label* label); - void EmitNearLabelLink(Label* label); void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm); void EmitGenericShift(int rm, const Operand& operand, Register shifter); diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index aacc57bb0c..731b5f4ac5 100644 --- a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -105,6 +105,16 @@ TEST_F(AssemblerX86Test, Movl) { DriverStr(expected, "movl"); } +TEST_F(AssemblerX86Test, Movntl) { + GetAssembler()->movntl(x86::Address(x86::EDI, x86::EBX, x86::TIMES_4, 12), x86::EAX); + GetAssembler()->movntl(x86::Address(x86::EDI, 0), x86::EAX); + const char* expected = + "movntil %EAX, 0xc(%EDI,%EBX,4)\n" + "movntil %EAX, (%EDI)\n"; + + DriverStr(expected, "movntl"); +} + TEST_F(AssemblerX86Test, psrlq) { GetAssembler()->psrlq(x86::XMM0, CreateImmediate(32)); const char* expected = "psrlq $0x20, %xmm0\n"; @@ -196,4 +206,41 @@ TEST_F(AssemblerX86Test, Repnescasw) { DriverStr(expected, "Repnescasw"); } +TEST_F(AssemblerX86Test, Repecmpsw) { + GetAssembler()->repe_cmpsw(); + const char* expected = "repe cmpsw\n"; + DriverStr(expected, "Repecmpsw"); +} + +TEST_F(AssemblerX86Test, Repecmpsl) { + GetAssembler()->repe_cmpsl(); + const char* expected = "repe cmpsl\n"; + DriverStr(expected, "Repecmpsl"); +} + +TEST_F(AssemblerX86Test, RepneScasw) { + GetAssembler()->repne_scasw(); + const char* expected = "repne scasw\n"; + DriverStr(expected, "repne_scasw"); +} + +TEST_F(AssemblerX86Test, RepMovsw) { + GetAssembler()->rep_movsw(); + const char* expected = "rep movsw\n"; + DriverStr(expected, "rep_movsw"); +} + +TEST_F(AssemblerX86Test, Bsrl) { + DriverStr(RepeatRR(&x86::X86Assembler::bsrl, "bsrl %{reg2}, %{reg1}"), "bsrl"); +} + +TEST_F(AssemblerX86Test, BsrlAddress) { + GetAssembler()->bsrl(x86::Register(x86::EDI), x86::Address( + x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12)); + const char* expected = + "bsrl 0xc(%EDI,%EBX,4), %EDI\n"; + + DriverStr(expected, "bsrl_address"); +} + } // namespace art diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index f35f51c494..dc61c992e0 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -194,6 +194,21 @@ void X86_64Assembler::movl(const Address& dst, const Immediate& imm) { EmitImmediate(imm); } +void X86_64Assembler::movntl(const Address& dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(src, dst); + EmitUint8(0x0F); + EmitUint8(0xC3); + EmitOperand(src.LowBits(), dst); +} + +void X86_64Assembler::movntq(const Address& dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(src, dst); + EmitUint8(0x0F); + EmitUint8(0xC3); + EmitOperand(src.LowBits(), dst); +} void X86_64Assembler::cmov(Condition c, CpuRegister dst, CpuRegister src) { cmov(c, dst, src, true); @@ -1672,28 +1687,33 @@ void 
X86_64Assembler::imull(CpuRegister dst, CpuRegister src) { EmitOperand(dst.LowBits(), Operand(src)); } -void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) { +void X86_64Assembler::imull(CpuRegister dst, CpuRegister src, const Immediate& imm) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); CHECK(imm.is_int32()); // imull only supports 32b immediate. - EmitOptionalRex32(reg, reg); + EmitOptionalRex32(dst, src); // See whether imm can be represented as a sign-extended 8bit value. int32_t v32 = static_cast<int32_t>(imm.value()); if (IsInt<8>(v32)) { // Sign-extension works. EmitUint8(0x6B); - EmitOperand(reg.LowBits(), Operand(reg)); + EmitOperand(dst.LowBits(), Operand(src)); EmitUint8(static_cast<uint8_t>(v32 & 0xFF)); } else { // Not representable, use full immediate. EmitUint8(0x69); - EmitOperand(reg.LowBits(), Operand(reg)); + EmitOperand(dst.LowBits(), Operand(src)); EmitImmediate(imm); } } +void X86_64Assembler::imull(CpuRegister reg, const Immediate& imm) { + imull(reg, reg, imm); +} + + void X86_64Assembler::imull(CpuRegister reg, const Address& address) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitOptionalRex32(reg, address); @@ -1986,6 +2006,14 @@ void X86_64Assembler::jmp(Label* label) { } +void X86_64Assembler::rep_movsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA5); +} + + X86_64Assembler* X86_64Assembler::lock() { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0xF0); @@ -2064,6 +2092,37 @@ void X86_64Assembler::bswapq(CpuRegister dst) { EmitUint8(0xC8 + dst.LowBits()); } +void X86_64Assembler::bsrl(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::bsrl(CpuRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitOperand(dst.LowBits(), src); +} + +void X86_64Assembler::bsrq(CpuRegister dst, CpuRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitRegisterOperand(dst.LowBits(), src.LowBits()); +} + +void X86_64Assembler::bsrq(CpuRegister dst, const Address& src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitRex64(dst, src); + EmitUint8(0x0F); + EmitUint8(0xBD); + EmitOperand(dst.LowBits(), src); +} void X86_64Assembler::repne_scasw() { AssemblerBuffer::EnsureCapacity ensured(&buffer_); @@ -2073,6 +2132,29 @@ void X86_64Assembler::repne_scasw() { } +void X86_64Assembler::repe_cmpsw() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86_64Assembler::repe_cmpsl() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitUint8(0xA7); +} + + +void X86_64Assembler::repe_cmpsq() { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0xF3); + EmitRex64(); + EmitUint8(0xA7); +} + + void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) { // TODO: Need to have a code constants table. 
int64_t constant = bit_cast<int64_t, double>(value); diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 61ffeab1e8..da42213048 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -326,10 +326,13 @@ class X86_64Assembler FINAL : public Assembler { void movq(CpuRegister dst, CpuRegister src); void movl(CpuRegister dst, CpuRegister src); + void movntl(const Address& dst, CpuRegister src); + void movntq(const Address& dst, CpuRegister src); + void movq(CpuRegister dst, const Address& src); void movl(CpuRegister dst, const Address& src); void movq(const Address& dst, CpuRegister src); - void movq(const Address& dst, const Immediate& src); + void movq(const Address& dst, const Immediate& imm); void movl(const Address& dst, CpuRegister src); void movl(const Address& dst, const Immediate& imm); @@ -539,6 +542,7 @@ class X86_64Assembler FINAL : public Assembler { void imull(CpuRegister dst, CpuRegister src); void imull(CpuRegister reg, const Immediate& imm); + void imull(CpuRegister dst, CpuRegister src, const Immediate& imm); void imull(CpuRegister reg, const Address& address); void imulq(CpuRegister src); @@ -602,7 +606,16 @@ class X86_64Assembler FINAL : public Assembler { void bswapl(CpuRegister dst); void bswapq(CpuRegister dst); + void bsrl(CpuRegister dst, CpuRegister src); + void bsrl(CpuRegister dst, const Address& src); + void bsrq(CpuRegister dst, CpuRegister src); + void bsrq(CpuRegister dst, const Address& src); + void repne_scasw(); + void repe_cmpsw(); + void repe_cmpsl(); + void repe_cmpsq(); + void rep_movsw(); // // Macros for High-level operations. @@ -796,7 +809,6 @@ class X86_64Assembler FINAL : public Assembler { void EmitComplex(uint8_t rm, const Operand& operand, const Immediate& immediate); void EmitLabel(Label* label, int instruction_size); void EmitLabelLink(Label* label); - void EmitNearLabelLink(Label* label); void EmitGenericShift(bool wide, int rm, CpuRegister reg, const Immediate& imm); void EmitGenericShift(bool wide, int rm, CpuRegister operand, CpuRegister shifter); diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index b86bc85489..8673f039ed 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -35,7 +35,7 @@ TEST(AssemblerX86_64, CreateBuffer) { ASSERT_EQ(static_cast<size_t>(5), buffer.Size()); } -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ static constexpr size_t kRandomIterations = 1000; // Devices might be puny, don't stress them... #else static constexpr size_t kRandomIterations = 100000; // Hosts are pretty powerful. 
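
The Movntl/Movntq tests that follow exercise the newly added non-temporal stores (opcode 0F C3, printed as movntil/movntiq by the disassembler used in these tests). Outside the assembler, the same instruction is reachable from ordinary C++ through the SSE2 intrinsic _mm_stream_si32; a small usage sketch (StreamFill is an illustrative name, not part of this patch):

  #include <cstddef>
  #include <emmintrin.h>  // SSE2: _mm_stream_si32 lowers to movnti.

  // Store directly to memory, bypassing the caches; useful for filling large
  // buffers that will not be read back soon.
  void StreamFill(int* dst, int value, std::size_t n) {
    for (std::size_t i = 0; i != n; ++i) {
      _mm_stream_si32(dst + i, value);
    }
    _mm_sfence();  // Order the weakly-ordered stores before later writes.
  }

Non-temporal stores avoid evicting useful cache lines during bulk writes; the trailing sfence keeps them from being reordered past subsequent ordinary stores.
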
@@ -674,6 +674,46 @@ TEST_F(AssemblerX86_64Test, MovqAddrImm) { DriverStr(expected, "movq"); } +TEST_F(AssemblerX86_64Test, Movntl) { + GetAssembler()->movntl(x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntl(x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntl(x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntl(x86_64::Address(x86_64::CpuRegister(x86_64::R13), 0), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntl(x86_64::Address( + x86_64::CpuRegister(x86_64::R13), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_1, 0), x86_64::CpuRegister(x86_64::R9)); + const char* expected = + "movntil %EAX, 0xc(%RDI,%RBX,4)\n" + "movntil %EAX, 0xc(%RDI,%R9,4)\n" + "movntil %EAX, 0xc(%RDI,%R9,4)\n" + "movntil %EAX, (%R13)\n" + "movntil %R9d, (%R13,%R9,1)\n"; + + DriverStr(expected, "movntl"); +} + +TEST_F(AssemblerX86_64Test, Movntq) { + GetAssembler()->movntq(x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntq(x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntq(x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntq(x86_64::Address(x86_64::CpuRegister(x86_64::R13), 0), x86_64::CpuRegister(x86_64::RAX)); + GetAssembler()->movntq(x86_64::Address( + x86_64::CpuRegister(x86_64::R13), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_1, 0), x86_64::CpuRegister(x86_64::R9)); + const char* expected = + "movntiq %RAX, 0xc(%RDI,%RBX,4)\n" + "movntiq %RAX, 0xc(%RDI,%R9,4)\n" + "movntiq %RAX, 0xc(%RDI,%R9,4)\n" + "movntiq %RAX, (%R13)\n" + "movntiq %R9, (%R13,%R9,1)\n"; + + DriverStr(expected, "movntq"); +} + TEST_F(AssemblerX86_64Test, Cvtsi2ssAddr) { GetAssembler()->cvtsi2ss(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address(x86_64::CpuRegister(x86_64::RAX), 0), @@ -796,6 +836,18 @@ TEST_F(AssemblerX86_64Test, Xorq) { DriverStr(expected, "xorq"); } +TEST_F(AssemblerX86_64Test, RepneScasw) { + GetAssembler()->repne_scasw(); + const char* expected = "repne scasw\n"; + DriverStr(expected, "repne_scasw"); +} + +TEST_F(AssemblerX86_64Test, RepMovsw) { + GetAssembler()->rep_movsw(); + const char* expected = "rep movsw\n"; + DriverStr(expected, "rep_movsw"); +} + TEST_F(AssemblerX86_64Test, Movsxd) { DriverStr(RepeatRr(&x86_64::X86_64Assembler::movsxd, "movsxd %{reg2}, %{reg1}"), "movsxd"); } @@ -953,6 +1005,48 @@ TEST_F(AssemblerX86_64Test, Orpd) { DriverStr(RepeatFF(&x86_64::X86_64Assembler::orpd, "orpd %{reg2}, %{reg1}"), "orpd"); } +TEST_F(AssemblerX86_64Test, UcomissAddress) { + GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); + GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM1), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12)); + GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM2), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), 
x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12)); + GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM3), x86_64::Address( + x86_64::CpuRegister(x86_64::R13), 0)); + GetAssembler()->ucomiss(x86_64::XmmRegister(x86_64::XMM4), x86_64::Address( + x86_64::CpuRegister(x86_64::R13), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_1, 0)); + const char* expected = + "ucomiss 0xc(%RDI,%RBX,4), %xmm0\n" + "ucomiss 0xc(%RDI,%R9,4), %xmm1\n" + "ucomiss 0xc(%RDI,%R9,4), %xmm2\n" + "ucomiss (%R13), %xmm3\n" + "ucomiss (%R13,%R9,1), %xmm4\n"; + + DriverStr(expected, "ucomiss_address"); +} + +TEST_F(AssemblerX86_64Test, UcomisdAddress) { + GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM0), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); + GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM1), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12)); + GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM2), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12)); + GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM3), x86_64::Address( + x86_64::CpuRegister(x86_64::R13), 0)); + GetAssembler()->ucomisd(x86_64::XmmRegister(x86_64::XMM4), x86_64::Address( + x86_64::CpuRegister(x86_64::R13), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_1, 0)); + const char* expected = + "ucomisd 0xc(%RDI,%RBX,4), %xmm0\n" + "ucomisd 0xc(%RDI,%R9,4), %xmm1\n" + "ucomisd 0xc(%RDI,%R9,4), %xmm2\n" + "ucomisd (%R13), %xmm3\n" + "ucomisd (%R13,%R9,1), %xmm4\n"; + + DriverStr(expected, "ucomisd_address"); +} + // X87 std::string x87_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED, @@ -1047,6 +1141,44 @@ TEST_F(AssemblerX86_64Test, Bswapq) { DriverStr(RepeatR(&x86_64::X86_64Assembler::bswapq, "bswap %{reg}"), "bswapq"); } +TEST_F(AssemblerX86_64Test, Bsrl) { + DriverStr(Repeatrr(&x86_64::X86_64Assembler::bsrl, "bsrl %{reg2}, %{reg1}"), "bsrl"); +} + +TEST_F(AssemblerX86_64Test, BsrlAddress) { + GetAssembler()->bsrl(x86_64::CpuRegister(x86_64::R10), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); + GetAssembler()->bsrl(x86_64::CpuRegister(x86_64::RDI), x86_64::Address( + x86_64::CpuRegister(x86_64::R10), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); + GetAssembler()->bsrl(x86_64::CpuRegister(x86_64::RDI), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12)); + const char* expected = + "bsrl 0xc(%RDI,%RBX,4), %R10d\n" + "bsrl 0xc(%R10,%RBX,4), %edi\n" + "bsrl 0xc(%RDI,%R9,4), %edi\n"; + + DriverStr(expected, "bsrl_address"); +} + +TEST_F(AssemblerX86_64Test, Bsrq) { + DriverStr(RepeatRR(&x86_64::X86_64Assembler::bsrq, "bsrq %{reg2}, %{reg1}"), "bsrq"); +} + +TEST_F(AssemblerX86_64Test, BsrqAddress) { + GetAssembler()->bsrq(x86_64::CpuRegister(x86_64::R10), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); + GetAssembler()->bsrq(x86_64::CpuRegister(x86_64::RDI), x86_64::Address( + x86_64::CpuRegister(x86_64::R10), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12)); + GetAssembler()->bsrq(x86_64::CpuRegister(x86_64::RDI), x86_64::Address( + x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12)); + const char* expected = + "bsrq 0xc(%RDI,%RBX,4), %R10\n" + "bsrq 0xc(%R10,%RBX,4), %RDI\n" + "bsrq 
0xc(%RDI,%R9,4), %RDI\n"; + + DriverStr(expected, "bsrq_address"); +} + std::string setcc_test_fn(AssemblerX86_64Test::Base* assembler_test, x86_64::X86_64Assembler* assembler) { // From Condition @@ -1221,4 +1353,22 @@ TEST_F(AssemblerX86_64Test, Repnescasw) { DriverStr(expected, "Repnescasw"); } +TEST_F(AssemblerX86_64Test, Repecmpsw) { + GetAssembler()->repe_cmpsw(); + const char* expected = "repe cmpsw\n"; + DriverStr(expected, "Repecmpsw"); +} + +TEST_F(AssemblerX86_64Test, Repecmpsl) { + GetAssembler()->repe_cmpsl(); + const char* expected = "repe cmpsl\n"; + DriverStr(expected, "Repecmpsl"); +} + +TEST_F(AssemblerX86_64Test, Repecmpsq) { + GetAssembler()->repe_cmpsq(); + const char* expected = "repe cmpsq\n"; + DriverStr(expected, "Repecmpsq"); +} + } // namespace art diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk index d127d352d8..71e9a283d6 100644 --- a/dalvikvm/Android.mk +++ b/dalvikvm/Android.mk @@ -66,16 +66,22 @@ LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk LOCAL_IS_HOST_MODULE := true LOCAL_MULTILIB := both +ifdef ART_MULTILIB_OVERRIDE_host + LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host) +endif +ifeq ($(LOCAL_MULTILIB),both) LOCAL_MODULE_STEM_32 := dalvikvm32 LOCAL_MODULE_STEM_64 := dalvikvm64 +endif LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE) include $(BUILD_HOST_EXECUTABLE) - # Create symlink for the primary version target. +ifeq ($(LOCAL_MULTILIB),both) include $(BUILD_SYSTEM)/executable_prefer_symlink.mk -ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE) ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)$(ART_PHONY_TEST_HOST_SUFFIX) ifdef 2ND_ART_PHONY_TEST_HOST_SUFFIX ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)$(2ND_ART_PHONY_TEST_HOST_SUFFIX) endif +endif +ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE) diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk index d27ee3efa6..2c906234ab 100644 --- a/dex2oat/Android.mk +++ b/dex2oat/Android.mk @@ -14,6 +14,15 @@ # limitations under the License. # +# ASan slows down dex2oat by ~3.5x, which translates into extremely slow first +# boot. Disabled to help speed up SANITIZE_TARGET mode. +# The supported way of using SANITIZE_TARGET is by first running a normal build, +# followed by a SANITIZE_TARGET=address build on top of it (in the same build +# tree). By disabling this module in SANITIZE_TARGET build, we keep the regular, +# uninstrumented version of it. +# Bug: 22233158 +ifneq (address,$(strip $(SANITIZE_TARGET))) + LOCAL_PATH := $(call my-dir) include art/build/Android.executable.mk @@ -59,3 +68,5 @@ ifeq ($(ART_BUILD_HOST_DEBUG),true) $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd libartd-compiler libartd libziparchive-host libnativehelper libnativebridge libsigchain_dummy libvixld liblog libz libbacktrace libcutils libunwindbacktrace libutils libbase,art/compiler,host,debug,$(dex2oat_host_arch),static)) endif endif + +endif diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index b4520e92f3..56536129f2 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -18,7 +18,7 @@ #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> -#include <valgrind.h> +#include "base/memory_tool.h" #include <fstream> #include <iostream> @@ -280,6 +280,21 @@ NO_RETURN static void Usage(const char* fmt, ...) 
{ UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold); UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold); UsageError(""); + UsageError(" --inline-depth-limit=<depth-limit>: the depth limit of inlining for fine tuning"); + UsageError(" the compiler. A zero value will disable inlining. Honored only by Optimizing."); + UsageError(" Has priority over the --compiler-filter option. Intended for "); + UsageError(" development/experimental use."); + UsageError(" Example: --inline-depth-limit=%d", CompilerOptions::kDefaultInlineDepthLimit); + UsageError(" Default: %d", CompilerOptions::kDefaultInlineDepthLimit); + UsageError(""); + UsageError(" --inline-max-code-units=<code-units-count>: the maximum code units that a method"); + UsageError(" can have to be considered for inlining. A zero value will disable inlining."); + UsageError(" Honored only by Optimizing. Has priority over the --compiler-filter option."); + UsageError(" Intended for development/experimental use."); + UsageError(" Example: --inline-max-code-units=%d", + CompilerOptions::kDefaultInlineMaxCodeUnits); + UsageError(" Default: %d", CompilerOptions::kDefaultInlineMaxCodeUnits); + UsageError(""); UsageError(" --dump-timing: display a breakdown of where time was spent"); UsageError(""); UsageError(" --include-patch-information: Include patching information so the generated code"); @@ -519,37 +534,26 @@ class Dex2Oat FINAL { // the runtime. LogCompletionTime(); - if (kIsDebugBuild || (RUNNING_ON_VALGRIND != 0)) { + if (kIsDebugBuild || (RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) { delete runtime_; // See field declaration for why this is manual. } } - // Parse the arguments from the command line. In case of an unrecognized option or impossible - // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method - // returns, arguments have been successfully parsed. - void ParseArgs(int argc, char** argv) { - original_argc = argc; - original_argv = argv; - - InitLogging(argv); - - // Skip over argv[0]. 
- argv++; - argc--; - - if (argc == 0) { - Usage("No arguments specified"); - } - + struct ParserOptions { std::string oat_symbols; std::string boot_image_filename; const char* compiler_filter_string = nullptr; + CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter; bool compile_pic = false; int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold; int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold; int small_method_threshold = CompilerOptions::kDefaultSmallMethodThreshold; int tiny_method_threshold = CompilerOptions::kDefaultTinyMethodThreshold; int num_dex_methods_threshold = CompilerOptions::kDefaultNumDexMethodsThreshold; + static constexpr int kUnsetInlineDepthLimit = -1; + int inline_depth_limit = kUnsetInlineDepthLimit; + static constexpr int kUnsetInlineMaxCodeUnits = -1; + int inline_max_code_units = kUnsetInlineMaxCodeUnits; // Profile file to use double top_k_profile_threshold = CompilerOptions::kDefaultTopKProfileThreshold; @@ -561,261 +565,192 @@ class Dex2Oat FINAL { bool abort_on_hard_verifier_error = false; bool requested_specific_compiler = false; + bool implicit_null_checks = false; + bool implicit_so_checks = false; + bool implicit_suspend_checks = false; + PassManagerOptions pass_manager_options; std::string error_msg; + }; + + template <typename T> + static void ParseUintOption(const StringPiece& option, + const std::string& option_name, + T* out, + bool is_long_option = true) { + std::string option_prefix = option_name + (is_long_option ? "=" : ""); + DCHECK(option.starts_with(option_prefix)); + const char* value_string = option.substr(option_prefix.size()).data(); + int64_t parsed_integer_value; + if (!ParseInt(value_string, &parsed_integer_value)) { + Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string); + } + if (parsed_integer_value < 0) { + Usage("%s passed a negative value %d", option_name.c_str(), parsed_integer_value); + } + *out = dchecked_integral_cast<T>(parsed_integer_value); + } - for (int i = 0; i < argc; i++) { - const StringPiece option(argv[i]); - const bool log_options = false; - if (log_options) { - LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i]; - } - if (option.starts_with("--dex-file=")) { - dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data()); - } else if (option.starts_with("--dex-location=")) { - dex_locations_.push_back(option.substr(strlen("--dex-location=")).data()); - } else if (option.starts_with("--zip-fd=")) { - const char* zip_fd_str = option.substr(strlen("--zip-fd=")).data(); - if (!ParseInt(zip_fd_str, &zip_fd_)) { - Usage("Failed to parse --zip-fd argument '%s' as an integer", zip_fd_str); - } - if (zip_fd_ < 0) { - Usage("--zip-fd passed a negative value %d", zip_fd_); - } - } else if (option.starts_with("--zip-location=")) { - zip_location_ = option.substr(strlen("--zip-location=")).data(); - } else if (option.starts_with("--oat-file=")) { - oat_filename_ = option.substr(strlen("--oat-file=")).data(); - } else if (option.starts_with("--oat-symbols=")) { - oat_symbols = option.substr(strlen("--oat-symbols=")).data(); - } else if (option.starts_with("--oat-fd=")) { - const char* oat_fd_str = option.substr(strlen("--oat-fd=")).data(); - if (!ParseInt(oat_fd_str, &oat_fd_)) { - Usage("Failed to parse --oat-fd argument '%s' as an integer", oat_fd_str); - } - if (oat_fd_ < 0) { - Usage("--oat-fd passed a negative value %d", oat_fd_); - } - } else if (option == "--watch-dog") { - watch_dog_enabled = true; - 
} else if (option == "--no-watch-dog") { - watch_dog_enabled = false; - } else if (option.starts_with("-j")) { - const char* thread_count_str = option.substr(strlen("-j")).data(); - if (!ParseUint(thread_count_str, &thread_count_)) { - Usage("Failed to parse -j argument '%s' as an integer", thread_count_str); - } - } else if (option.starts_with("--oat-location=")) { - oat_location_ = option.substr(strlen("--oat-location=")).data(); - } else if (option.starts_with("--image=")) { - image_filename_ = option.substr(strlen("--image=")).data(); - } else if (option.starts_with("--image-classes=")) { - image_classes_filename_ = option.substr(strlen("--image-classes=")).data(); - } else if (option.starts_with("--image-classes-zip=")) { - image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data(); - } else if (option.starts_with("--compiled-classes=")) { - compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data(); - } else if (option.starts_with("--compiled-classes-zip=")) { - compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data(); - } else if (option.starts_with("--compiled-methods=")) { - compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data(); - } else if (option.starts_with("--compiled-methods-zip=")) { - compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data(); - } else if (option.starts_with("--base=")) { - const char* image_base_str = option.substr(strlen("--base=")).data(); - char* end; - image_base_ = strtoul(image_base_str, &end, 16); - if (end == image_base_str || *end != '\0') { - Usage("Failed to parse hexadecimal value for option %s", option.data()); - } - } else if (option.starts_with("--boot-image=")) { - boot_image_filename = option.substr(strlen("--boot-image=")).data(); - } else if (option.starts_with("--android-root=")) { - android_root_ = option.substr(strlen("--android-root=")).data(); - } else if (option.starts_with("--instruction-set=")) { - StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data(); - // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it. - std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]); - strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length()); - buf.get()[instruction_set_str.length()] = 0; - instruction_set_ = GetInstructionSetFromString(buf.get()); - // arm actually means thumb2. 
- if (instruction_set_ == InstructionSet::kArm) { - instruction_set_ = InstructionSet::kThumb2; - } - } else if (option.starts_with("--instruction-set-variant=")) { - StringPiece str = option.substr(strlen("--instruction-set-variant=")).data(); - instruction_set_features_.reset( - InstructionSetFeatures::FromVariant(instruction_set_, str.as_string(), &error_msg)); - if (instruction_set_features_.get() == nullptr) { - Usage("%s", error_msg.c_str()); - } - } else if (option.starts_with("--instruction-set-features=")) { - StringPiece str = option.substr(strlen("--instruction-set-features=")).data(); - if (instruction_set_features_.get() == nullptr) { - instruction_set_features_.reset( - InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg)); - if (instruction_set_features_.get() == nullptr) { - Usage("Problem initializing default instruction set features variant: %s", - error_msg.c_str()); - } - } - instruction_set_features_.reset( - instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg)); - if (instruction_set_features_.get() == nullptr) { - Usage("Error parsing '%s': %s", option.data(), error_msg.c_str()); - } - } else if (option.starts_with("--compiler-backend=")) { - requested_specific_compiler = true; - StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data(); - if (backend_str == "Quick") { - compiler_kind_ = Compiler::kQuick; - } else if (backend_str == "Optimizing") { - compiler_kind_ = Compiler::kOptimizing; - } else { - Usage("Unknown compiler backend: %s", backend_str.data()); - } - } else if (option.starts_with("--compiler-filter=")) { - compiler_filter_string = option.substr(strlen("--compiler-filter=")).data(); - } else if (option == "--compile-pic") { - compile_pic = true; - } else if (option.starts_with("--huge-method-max=")) { - const char* threshold = option.substr(strlen("--huge-method-max=")).data(); - if (!ParseInt(threshold, &huge_method_threshold)) { - Usage("Failed to parse --huge-method-max '%s' as an integer", threshold); - } - if (huge_method_threshold < 0) { - Usage("--huge-method-max passed a negative value %s", huge_method_threshold); - } - } else if (option.starts_with("--large-method-max=")) { - const char* threshold = option.substr(strlen("--large-method-max=")).data(); - if (!ParseInt(threshold, &large_method_threshold)) { - Usage("Failed to parse --large-method-max '%s' as an integer", threshold); - } - if (large_method_threshold < 0) { - Usage("--large-method-max passed a negative value %s", large_method_threshold); - } - } else if (option.starts_with("--small-method-max=")) { - const char* threshold = option.substr(strlen("--small-method-max=")).data(); - if (!ParseInt(threshold, &small_method_threshold)) { - Usage("Failed to parse --small-method-max '%s' as an integer", threshold); - } - if (small_method_threshold < 0) { - Usage("--small-method-max passed a negative value %s", small_method_threshold); - } - } else if (option.starts_with("--tiny-method-max=")) { - const char* threshold = option.substr(strlen("--tiny-method-max=")).data(); - if (!ParseInt(threshold, &tiny_method_threshold)) { - Usage("Failed to parse --tiny-method-max '%s' as an integer", threshold); - } - if (tiny_method_threshold < 0) { - Usage("--tiny-method-max passed a negative value %s", tiny_method_threshold); - } - } else if (option.starts_with("--num-dex-methods=")) { - const char* threshold = option.substr(strlen("--num-dex-methods=")).data(); - if (!ParseInt(threshold, &num_dex_methods_threshold)) { - 
Usage("Failed to parse --num-dex-methods '%s' as an integer", threshold); - } - if (num_dex_methods_threshold < 0) { - Usage("--num-dex-methods passed a negative value %s", num_dex_methods_threshold); - } - } else if (option == "--host") { - is_host_ = true; - } else if (option == "--runtime-arg") { - if (++i >= argc) { - Usage("Missing required argument for --runtime-arg"); - } - if (log_options) { - LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i]; - } - runtime_args_.push_back(argv[i]); - } else if (option == "--dump-timing") { - dump_timing_ = true; - } else if (option == "--dump-passes") { - dump_passes_ = true; - } else if (option.starts_with("--dump-cfg=")) { - dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data(); - } else if (option == "--dump-stats") { - dump_stats_ = true; - } else if (option == "--generate-debug-info" || option == "-g") { - generate_debug_info = true; - } else if (option == "--no-generate-debug-info") { - generate_debug_info = false; - } else if (option == "--debuggable") { - debuggable = true; - generate_debug_info = true; - } else if (option.starts_with("--profile-file=")) { - profile_file_ = option.substr(strlen("--profile-file=")).data(); - VLOG(compiler) << "dex2oat: profile file is " << profile_file_; - } else if (option == "--no-profile-file") { - // No profile - } else if (option.starts_with("--top-k-profile-threshold=")) { - ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold); - } else if (option == "--print-pass-names") { - pass_manager_options.SetPrintPassNames(true); - } else if (option.starts_with("--disable-passes=")) { - const std::string disable_passes = option.substr(strlen("--disable-passes=")).data(); - pass_manager_options.SetDisablePassList(disable_passes); - } else if (option.starts_with("--print-passes=")) { - const std::string print_passes = option.substr(strlen("--print-passes=")).data(); - pass_manager_options.SetPrintPassList(print_passes); - } else if (option == "--print-all-passes") { - pass_manager_options.SetPrintAllPasses(); - } else if (option.starts_with("--dump-cfg-passes=")) { - const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data(); - pass_manager_options.SetDumpPassList(dump_passes_string); - } else if (option == "--print-pass-options") { - pass_manager_options.SetPrintPassOptions(true); - } else if (option.starts_with("--pass-options=")) { - const std::string options = option.substr(strlen("--pass-options=")).data(); - pass_manager_options.SetOverriddenPassOptions(options); - } else if (option == "--include-patch-information") { - include_patch_information = true; - } else if (option == "--no-include-patch-information") { - include_patch_information = false; - } else if (option.starts_with("--verbose-methods=")) { - // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages - // conditional on having verbost methods. 
- gLogVerbosity.compiler = false; - Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_); - } else if (option.starts_with("--dump-init-failures=")) { - std::string file_name = option.substr(strlen("--dump-init-failures=")).data(); - init_failure_output_.reset(new std::ofstream(file_name)); - if (init_failure_output_.get() == nullptr) { - LOG(ERROR) << "Failed to allocate ofstream"; - } else if (init_failure_output_->fail()) { - LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization " - << "failures."; - init_failure_output_.reset(); - } - } else if (option.starts_with("--swap-file=")) { - swap_file_name_ = option.substr(strlen("--swap-file=")).data(); - } else if (option.starts_with("--swap-fd=")) { - const char* swap_fd_str = option.substr(strlen("--swap-fd=")).data(); - if (!ParseInt(swap_fd_str, &swap_fd_)) { - Usage("Failed to parse --swap-fd argument '%s' as an integer", swap_fd_str); - } - if (swap_fd_ < 0) { - Usage("--swap-fd passed a negative value %d", swap_fd_); - } - } else if (option == "--abort-on-hard-verifier-error") { - abort_on_hard_verifier_error = true; - } else { - Usage("Unknown argument %s", option.data()); + void ParseZipFd(const StringPiece& option) { + ParseUintOption(option, "--zip-fd", &zip_fd_); + } + + void ParseOatFd(const StringPiece& option) { + ParseUintOption(option, "--oat-fd", &oat_fd_); + } + + void ParseJ(const StringPiece& option) { + ParseUintOption(option, "-j", &thread_count_, /* is_long_option */ false); + } + + void ParseBase(const StringPiece& option) { + DCHECK(option.starts_with("--base=")); + const char* image_base_str = option.substr(strlen("--base=")).data(); + char* end; + image_base_ = strtoul(image_base_str, &end, 16); + if (end == image_base_str || *end != '\0') { + Usage("Failed to parse hexadecimal value for option %s", option.data()); + } + } + + void ParseInstructionSet(const StringPiece& option) { + DCHECK(option.starts_with("--instruction-set=")); + StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data(); + // StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it. + std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]); + strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length()); + buf.get()[instruction_set_str.length()] = 0; + instruction_set_ = GetInstructionSetFromString(buf.get()); + // arm actually means thumb2. 
+ if (instruction_set_ == InstructionSet::kArm) {
+ instruction_set_ = InstructionSet::kThumb2;
+ }
+ }
+
+ void ParseInstructionSetVariant(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--instruction-set-variant="));
+ StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(
+ instruction_set_, str.as_string(), &parser_options->error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("%s", parser_options->error_msg.c_str());
+ }
+ }
+
+ void ParseInstructionSetFeatures(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--instruction-set-features="));
+ StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(
+ InstructionSetFeatures::FromVariant(
+ instruction_set_, "default", &parser_options->error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Problem initializing default instruction set features variant: %s",
+ parser_options->error_msg.c_str());
}
}
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(),
+ &parser_options->error_msg));
+ if (instruction_set_features_.get() == nullptr) {
+ Usage("Error parsing '%s': %s", option.data(), parser_options->error_msg.c_str());
+ }
+ }
+
+ void ParseCompilerBackend(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--compiler-backend="));
+ parser_options->requested_specific_compiler = true;
+ StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
+ if (backend_str == "Quick") {
+ compiler_kind_ = Compiler::kQuick;
+ } else if (backend_str == "Optimizing") {
+ compiler_kind_ = Compiler::kOptimizing;
+ } else {
+ Usage("Unknown compiler backend: %s", backend_str.data());
+ }
+ }
+
+ void ParseHugeMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--huge-method-max", &parser_options->huge_method_threshold);
+ }
+
+ void ParseLargeMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--large-method-max", &parser_options->large_method_threshold);
+ }
+
+ void ParseSmallMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--small-method-max", &parser_options->small_method_threshold);
+ }
+
+ void ParseTinyMethodMax(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--tiny-method-max", &parser_options->tiny_method_threshold);
+ }
+
+ void ParseNumDexMethods(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--num-dex-methods", &parser_options->num_dex_methods_threshold);
+ }
+
+ void ParseInlineDepthLimit(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--inline-depth-limit", &parser_options->inline_depth_limit);
+ }
+
+ void ParseInlineMaxCodeUnits(const StringPiece& option, ParserOptions* parser_options) {
+ ParseUintOption(option, "--inline-max-code-units", &parser_options->inline_max_code_units);
+ }
+
+ void ParseDisablePasses(const StringPiece& option, ParserOptions* parser_options) {
+ DCHECK(option.starts_with("--disable-passes="));
+ const std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
+
parser_options->pass_manager_options.SetDisablePassList(disable_passes); + } + + void ParsePrintPasses(const StringPiece& option, ParserOptions* parser_options) { + DCHECK(option.starts_with("--print-passes=")); + const std::string print_passes = option.substr(strlen("--print-passes=")).data(); + parser_options->pass_manager_options.SetPrintPassList(print_passes); + } + + void ParseDumpCfgPasses(const StringPiece& option, ParserOptions* parser_options) { + DCHECK(option.starts_with("--dump-cfg-passes=")); + const std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data(); + parser_options->pass_manager_options.SetDumpPassList(dump_passes_string); + } + + void ParsePassOptions(const StringPiece& option, ParserOptions* parser_options) { + DCHECK(option.starts_with("--pass-options=")); + const std::string pass_options = option.substr(strlen("--pass-options=")).data(); + parser_options->pass_manager_options.SetOverriddenPassOptions(pass_options); + } + + void ParseDumpInitFailures(const StringPiece& option) { + DCHECK(option.starts_with("--dump-init-failures=")); + std::string file_name = option.substr(strlen("--dump-init-failures=")).data(); + init_failure_output_.reset(new std::ofstream(file_name)); + if (init_failure_output_.get() == nullptr) { + LOG(ERROR) << "Failed to allocate ofstream"; + } else if (init_failure_output_->fail()) { + LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization " + << "failures."; + init_failure_output_.reset(); + } + } + + void ParseSwapFd(const StringPiece& option) { + ParseUintOption(option, "--swap-fd", &swap_fd_); + } + + void ProcessOptions(ParserOptions* parser_options) { image_ = (!image_filename_.empty()); - if (!requested_specific_compiler && !kUseOptimizingCompiler) { + if (!parser_options->requested_specific_compiler && !kUseOptimizingCompiler) { // If no specific compiler is requested, the current behavior is // to compile the boot image with Quick, and the rest with Optimizing. compiler_kind_ = image_ ? Compiler::kQuick : Compiler::kOptimizing; } - if (compiler_kind_ == Compiler::kOptimizing) { // Optimizing only supports PIC mode. 
- compile_pic = true; + parser_options->compile_pic = true; } if (oat_filename_.empty() && oat_fd_ == -1) { @@ -826,11 +761,11 @@ class Dex2Oat FINAL { Usage("--oat-file should not be used with --oat-fd"); } - if (!oat_symbols.empty() && oat_fd_ != -1) { + if (!parser_options->oat_symbols.empty() && oat_fd_ != -1) { Usage("--oat-symbols should not be used with --oat-fd"); } - if (!oat_symbols.empty() && is_host_) { + if (!parser_options->oat_symbols.empty() && is_host_) { Usage("--oat-symbols should not be used with --host"); } @@ -846,13 +781,13 @@ class Dex2Oat FINAL { android_root_ += android_root_env_var; } - if (!image_ && boot_image_filename.empty()) { - boot_image_filename += android_root_; - boot_image_filename += "/framework/boot.art"; + if (!image_ && parser_options->boot_image_filename.empty()) { + parser_options->boot_image_filename += android_root_; + parser_options->boot_image_filename += "/framework/boot.art"; } - if (!boot_image_filename.empty()) { + if (!parser_options->boot_image_filename.empty()) { boot_image_option_ += "-Ximage:"; - boot_image_option_ += boot_image_filename; + boot_image_option_ += parser_options->boot_image_filename; } if (image_classes_filename_ != nullptr && !image_) { @@ -910,8 +845,8 @@ class Dex2Oat FINAL { } oat_stripped_ = oat_filename_; - if (!oat_symbols.empty()) { - oat_unstripped_ = oat_symbols; + if (!parser_options->oat_symbols.empty()) { + oat_unstripped_ = parser_options->oat_symbols; } else { oat_unstripped_ = oat_filename_; } @@ -920,10 +855,11 @@ class Dex2Oat FINAL { // instruction set. if (instruction_set_features_.get() == nullptr) { instruction_set_features_.reset( - InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg)); + InstructionSetFeatures::FromVariant( + instruction_set_, "default", &parser_options->error_msg)); if (instruction_set_features_.get() == nullptr) { Usage("Problem initializing default instruction set features variant: %s", - error_msg.c_str()); + parser_options->error_msg.c_str()); } } @@ -938,36 +874,50 @@ class Dex2Oat FINAL { } } - if (compiler_filter_string == nullptr) { - compiler_filter_string = "speed"; + if (parser_options->compiler_filter_string == nullptr) { + parser_options->compiler_filter_string = "speed"; + } + + CHECK(parser_options->compiler_filter_string != nullptr); + if (strcmp(parser_options->compiler_filter_string, "verify-none") == 0) { + parser_options->compiler_filter = CompilerOptions::kVerifyNone; + } else if (strcmp(parser_options->compiler_filter_string, "interpret-only") == 0) { + parser_options->compiler_filter = CompilerOptions::kInterpretOnly; + } else if (strcmp(parser_options->compiler_filter_string, "verify-at-runtime") == 0) { + parser_options->compiler_filter = CompilerOptions::kVerifyAtRuntime; + } else if (strcmp(parser_options->compiler_filter_string, "space") == 0) { + parser_options->compiler_filter = CompilerOptions::kSpace; + } else if (strcmp(parser_options->compiler_filter_string, "balanced") == 0) { + parser_options->compiler_filter = CompilerOptions::kBalanced; + } else if (strcmp(parser_options->compiler_filter_string, "speed") == 0) { + parser_options->compiler_filter = CompilerOptions::kSpeed; + } else if (strcmp(parser_options->compiler_filter_string, "everything") == 0) { + parser_options->compiler_filter = CompilerOptions::kEverything; + } else if (strcmp(parser_options->compiler_filter_string, "time") == 0) { + parser_options->compiler_filter = CompilerOptions::kTime; + } else { + Usage("Unknown --compiler-filter value %s", 
parser_options->compiler_filter_string);
}
- CHECK(compiler_filter_string != nullptr);
- CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
- if (strcmp(compiler_filter_string, "verify-none") == 0) {
- compiler_filter = CompilerOptions::kVerifyNone;
- } else if (strcmp(compiler_filter_string, "interpret-only") == 0) {
- compiler_filter = CompilerOptions::kInterpretOnly;
- } else if (strcmp(compiler_filter_string, "verify-at-runtime") == 0) {
- compiler_filter = CompilerOptions::kVerifyAtRuntime;
- } else if (strcmp(compiler_filter_string, "space") == 0) {
- compiler_filter = CompilerOptions::kSpace;
- } else if (strcmp(compiler_filter_string, "balanced") == 0) {
- compiler_filter = CompilerOptions::kBalanced;
- } else if (strcmp(compiler_filter_string, "speed") == 0) {
- compiler_filter = CompilerOptions::kSpeed;
- } else if (strcmp(compiler_filter_string, "everything") == 0) {
- compiler_filter = CompilerOptions::kEverything;
- } else if (strcmp(compiler_filter_string, "time") == 0) {
- compiler_filter = CompilerOptions::kTime;
- } else {
- Usage("Unknown --compiler-filter value %s", compiler_filter_string);
+ // If they are not set, use default values for inlining settings.
+ // TODO: We should rethink the compiler filter. We mostly save
+ // time here, which is orthogonal to space.
+ if (parser_options->inline_depth_limit == ParserOptions::kUnsetInlineDepthLimit) {
+ parser_options->inline_depth_limit =
+ (parser_options->compiler_filter == CompilerOptions::kSpace)
+ // Implementation of the space filter: limit inlining depth.
+ ? CompilerOptions::kSpaceFilterInlineDepthLimit
+ : CompilerOptions::kDefaultInlineDepthLimit;
+ }
+ if (parser_options->inline_max_code_units == ParserOptions::kUnsetInlineMaxCodeUnits) {
+ parser_options->inline_max_code_units =
+ (parser_options->compiler_filter == CompilerOptions::kSpace)
+ // Implementation of the space filter: limit inlining max code units.
+ ? CompilerOptions::kSpaceFilterInlineMaxCodeUnits
+ : CompilerOptions::kDefaultInlineMaxCodeUnits;
}
// Checks are all explicit until we know the architecture.
- bool implicit_null_checks = false;
- bool implicit_so_checks = false;
- bool implicit_suspend_checks = false;
// Set the compilation target's implicit checks options.
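  // [Editor's note: conceptual sketch, not part of this patch.] An "implicit"
  // check drops the explicit compare-and-branch and lets the fault handler do
  // the work, roughly:
  //
  //   // explicit null check:              // implicit null check:
  //   if (obj == nullptr) {                //   (no test emitted; loading
  //     ThrowNullPointerException();       //   through a near-null pointer
  //   }                                    //   faults, and the SIGSEGV
  //   x = obj->field;                      //   handler raises the NPE)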
switch (instruction_set_) { case kArm: @@ -975,8 +925,10 @@ class Dex2Oat FINAL { case kArm64: case kX86: case kX86_64: - implicit_null_checks = true; - implicit_so_checks = true; + case kMips: + case kMips64: + parser_options->implicit_null_checks = true; + parser_options->implicit_so_checks = true; break; default: @@ -984,53 +936,224 @@ class Dex2Oat FINAL { break; } - compiler_options_.reset(new CompilerOptions(compiler_filter, - huge_method_threshold, - large_method_threshold, - small_method_threshold, - tiny_method_threshold, - num_dex_methods_threshold, - include_patch_information, - top_k_profile_threshold, - debuggable, - generate_debug_info, - implicit_null_checks, - implicit_so_checks, - implicit_suspend_checks, - compile_pic, + compiler_options_.reset(new CompilerOptions(parser_options->compiler_filter, + parser_options->huge_method_threshold, + parser_options->large_method_threshold, + parser_options->small_method_threshold, + parser_options->tiny_method_threshold, + parser_options->num_dex_methods_threshold, + parser_options->inline_depth_limit, + parser_options->inline_max_code_units, + parser_options->include_patch_information, + parser_options->top_k_profile_threshold, + parser_options->debuggable, + parser_options->generate_debug_info, + parser_options->implicit_null_checks, + parser_options->implicit_so_checks, + parser_options->implicit_suspend_checks, + parser_options->compile_pic, verbose_methods_.empty() ? nullptr : &verbose_methods_, - new PassManagerOptions(pass_manager_options), + new PassManagerOptions( + parser_options->pass_manager_options), init_failure_output_.get(), - abort_on_hard_verifier_error)); + parser_options->abort_on_hard_verifier_error)); // Done with usage checks, enable watchdog if requested - if (watch_dog_enabled) { + if (parser_options->watch_dog_enabled) { watchdog_.reset(new WatchDog(true)); } // Fill some values into the key-value store for the oat header. key_value_store_.reset(new SafeMap<std::string, std::string>()); + } - // Insert some compiler things. - { - std::ostringstream oss; - for (int i = 0; i < argc; ++i) { - if (i > 0) { - oss << ' '; + void InsertCompileOptions(int argc, char** argv, ParserOptions* parser_options) { + std::ostringstream oss; + for (int i = 0; i < argc; ++i) { + if (i > 0) { + oss << ' '; + } + oss << argv[i]; + } + key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str()); + oss.str(""); // Reset. + oss << kRuntimeISA; + key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str()); + key_value_store_->Put( + OatHeader::kPicKey, + parser_options->compile_pic ? OatHeader::kTrueValue : OatHeader::kFalseValue); + key_value_store_->Put( + OatHeader::kDebuggableKey, + parser_options->debuggable ? OatHeader::kTrueValue : OatHeader::kFalseValue); + } + + // Parse the arguments from the command line. In case of an unrecognized option or impossible + // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method + // returns, arguments have been successfully parsed. + void ParseArgs(int argc, char** argv) { + original_argc = argc; + original_argv = argv; + + InitLogging(argv); + + // Skip over argv[0]. 
+ argv++; + argc--; + + if (argc == 0) { + Usage("No arguments specified"); + } + + std::unique_ptr<ParserOptions> parser_options(new ParserOptions()); + + for (int i = 0; i < argc; i++) { + const StringPiece option(argv[i]); + const bool log_options = false; + if (log_options) { + LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i]; + } + if (option.starts_with("--dex-file=")) { + dex_filenames_.push_back(option.substr(strlen("--dex-file=")).data()); + } else if (option.starts_with("--dex-location=")) { + dex_locations_.push_back(option.substr(strlen("--dex-location=")).data()); + } else if (option.starts_with("--zip-fd=")) { + ParseZipFd(option); + } else if (option.starts_with("--zip-location=")) { + zip_location_ = option.substr(strlen("--zip-location=")).data(); + } else if (option.starts_with("--oat-file=")) { + oat_filename_ = option.substr(strlen("--oat-file=")).data(); + } else if (option.starts_with("--oat-symbols=")) { + parser_options->oat_symbols = option.substr(strlen("--oat-symbols=")).data(); + } else if (option.starts_with("--oat-fd=")) { + ParseOatFd(option); + } else if (option == "--watch-dog") { + parser_options->watch_dog_enabled = true; + } else if (option == "--no-watch-dog") { + parser_options->watch_dog_enabled = false; + } else if (option.starts_with("-j")) { + ParseJ(option); + } else if (option.starts_with("--oat-location=")) { + oat_location_ = option.substr(strlen("--oat-location=")).data(); + } else if (option.starts_with("--image=")) { + image_filename_ = option.substr(strlen("--image=")).data(); + } else if (option.starts_with("--image-classes=")) { + image_classes_filename_ = option.substr(strlen("--image-classes=")).data(); + } else if (option.starts_with("--image-classes-zip=")) { + image_classes_zip_filename_ = option.substr(strlen("--image-classes-zip=")).data(); + } else if (option.starts_with("--compiled-classes=")) { + compiled_classes_filename_ = option.substr(strlen("--compiled-classes=")).data(); + } else if (option.starts_with("--compiled-classes-zip=")) { + compiled_classes_zip_filename_ = option.substr(strlen("--compiled-classes-zip=")).data(); + } else if (option.starts_with("--compiled-methods=")) { + compiled_methods_filename_ = option.substr(strlen("--compiled-methods=")).data(); + } else if (option.starts_with("--compiled-methods-zip=")) { + compiled_methods_zip_filename_ = option.substr(strlen("--compiled-methods-zip=")).data(); + } else if (option.starts_with("--base=")) { + ParseBase(option); + } else if (option.starts_with("--boot-image=")) { + parser_options->boot_image_filename = option.substr(strlen("--boot-image=")).data(); + } else if (option.starts_with("--android-root=")) { + android_root_ = option.substr(strlen("--android-root=")).data(); + } else if (option.starts_with("--instruction-set=")) { + ParseInstructionSet(option); + } else if (option.starts_with("--instruction-set-variant=")) { + ParseInstructionSetVariant(option, parser_options.get()); + } else if (option.starts_with("--instruction-set-features=")) { + ParseInstructionSetFeatures(option, parser_options.get()); + } else if (option.starts_with("--compiler-backend=")) { + ParseCompilerBackend(option, parser_options.get()); + } else if (option.starts_with("--compiler-filter=")) { + parser_options->compiler_filter_string = option.substr(strlen("--compiler-filter=")).data(); + } else if (option == "--compile-pic") { + parser_options->compile_pic = true; + } else if (option.starts_with("--huge-method-max=")) { + ParseHugeMethodMax(option, parser_options.get()); + } 
else if (option.starts_with("--large-method-max=")) {
+ ParseLargeMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--small-method-max=")) {
+ ParseSmallMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--tiny-method-max=")) {
+ ParseTinyMethodMax(option, parser_options.get());
+ } else if (option.starts_with("--num-dex-methods=")) {
+ ParseNumDexMethods(option, parser_options.get());
+ } else if (option.starts_with("--inline-depth-limit=")) {
+ ParseInlineDepthLimit(option, parser_options.get());
+ } else if (option.starts_with("--inline-max-code-units=")) {
+ ParseInlineMaxCodeUnits(option, parser_options.get());
+ } else if (option == "--host") {
+ is_host_ = true;
+ } else if (option == "--runtime-arg") {
+ if (++i >= argc) {
+ Usage("Missing required argument for --runtime-arg");
}
- oss << argv[i];
+ if (log_options) {
+ LOG(INFO) << "dex2oat: option[" << i << "]=" << argv[i];
+ }
+ runtime_args_.push_back(argv[i]);
+ } else if (option == "--dump-timing") {
+ dump_timing_ = true;
+ } else if (option == "--dump-passes") {
+ dump_passes_ = true;
+ } else if (option.starts_with("--dump-cfg=")) {
+ dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
+ } else if (option == "--dump-stats") {
+ dump_stats_ = true;
+ } else if (option == "--generate-debug-info" || option == "-g") {
+ parser_options->generate_debug_info = true;
+ } else if (option == "--no-generate-debug-info") {
+ parser_options->generate_debug_info = false;
+ } else if (option == "--debuggable") {
+ parser_options->debuggable = true;
+ parser_options->generate_debug_info = true;
+ } else if (option.starts_with("--profile-file=")) {
+ profile_file_ = option.substr(strlen("--profile-file=")).data();
+ VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
+ } else if (option == "--no-profile-file") {
+ // No profile
+ } else if (option.starts_with("--top-k-profile-threshold=")) {
+ ParseDouble(option.data(), '=', 0.0, 100.0, &parser_options->top_k_profile_threshold);
+ } else if (option == "--print-pass-names") {
+ parser_options->pass_manager_options.SetPrintPassNames(true);
+ } else if (option.starts_with("--disable-passes=")) {
+ ParseDisablePasses(option, parser_options.get());
+ } else if (option.starts_with("--print-passes=")) {
+ ParsePrintPasses(option, parser_options.get());
+ } else if (option == "--print-all-passes") {
+ parser_options->pass_manager_options.SetPrintAllPasses();
+ } else if (option.starts_with("--dump-cfg-passes=")) {
+ ParseDumpCfgPasses(option, parser_options.get());
+ } else if (option == "--print-pass-options") {
+ parser_options->pass_manager_options.SetPrintPassOptions(true);
+ } else if (option.starts_with("--pass-options=")) {
+ ParsePassOptions(option, parser_options.get());
+ } else if (option == "--include-patch-information") {
+ parser_options->include_patch_information = true;
+ } else if (option == "--no-include-patch-information") {
+ parser_options->include_patch_information = false;
+ } else if (option.starts_with("--verbose-methods=")) {
+ // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages
+ // conditional on having verbose methods.
+ gLogVerbosity.compiler = false; + Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_); + } else if (option.starts_with("--dump-init-failures=")) { + ParseDumpInitFailures(option); + } else if (option.starts_with("--swap-file=")) { + swap_file_name_ = option.substr(strlen("--swap-file=")).data(); + } else if (option.starts_with("--swap-fd=")) { + ParseSwapFd(option); + } else if (option == "--abort-on-hard-verifier-error") { + parser_options->abort_on_hard_verifier_error = true; + } else { + Usage("Unknown argument %s", option.data()); } - key_value_store_->Put(OatHeader::kDex2OatCmdLineKey, oss.str()); - oss.str(""); // Reset. - oss << kRuntimeISA; - key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str()); - key_value_store_->Put(OatHeader::kPicKey, - compile_pic ? OatHeader::kTrueValue : OatHeader::kFalseValue); - key_value_store_->Put(OatHeader::kDebuggableKey, - debuggable ? OatHeader::kTrueValue : OatHeader::kFalseValue); } + + ProcessOptions(parser_options.get()); + + // Insert some compiler things. + InsertCompileOptions(argc, argv, parser_options.get()); } // Check whether the oat output file is writable, and open it for later. Also open a swap file, @@ -1038,10 +1161,6 @@ class Dex2Oat FINAL { bool OpenFile() { bool create_file = !oat_unstripped_.empty(); // as opposed to using open file descriptor if (create_file) { - // We're supposed to create this file. If the file already exists, it may be in use currently. - // We must not change the content of that file, then. So unlink it first. - unlink(oat_unstripped_.c_str()); - oat_file_.reset(OS::CreateEmptyFile(oat_unstripped_.c_str())); if (oat_location_.empty()) { oat_location_ = oat_filename_; @@ -1662,7 +1781,7 @@ class Dex2Oat FINAL { // Let the ImageWriter write the image file. If we do not compile PIC, also fix up the oat file. bool CreateImageFile() - LOCKS_EXCLUDED(Locks::mutator_lock_) { + REQUIRES(!Locks::mutator_lock_) { CHECK(image_writer_ != nullptr); if (!image_writer_->Write(image_filename_, oat_unstripped_, oat_location_)) { LOG(ERROR) << "Failed to create image file " << image_filename_; @@ -2005,7 +2124,7 @@ int main(int argc, char** argv) { // Everything was done, do an explicit exit here to avoid running Runtime destructors that take // time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class // should not destruct the runtime in this case. - if (!art::kIsDebugBuild && (RUNNING_ON_VALGRIND == 0)) { + if (!art::kIsDebugBuild && (RUNNING_ON_MEMORY_TOOL == 0)) { exit(result); } return result; diff --git a/dexdump/Android.mk b/dexdump/Android.mk new file mode 100755 index 0000000000..a208ccf89b --- /dev/null +++ b/dexdump/Android.mk @@ -0,0 +1,54 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# TODO(ajcbik): Art-i-fy this makefile + +# TODO(ajcbik): rename dexdump2 into dexdump when Dalvik version is removed + +LOCAL_PATH:= $(call my-dir) + +dexdump_src_files := dexdump_main.cc dexdump.cc +dexdump_c_includes := art/runtime +dexdump_libraries := libart + +## +## Build the device command line tool dexdump. +## + +ifneq ($(SDK_ONLY),true) # SDK_only doesn't need device version +include $(CLEAR_VARS) +LOCAL_CPP_EXTENSION := cc +LOCAL_SRC_FILES := $(dexdump_src_files) +LOCAL_C_INCLUDES := $(dexdump_c_includes) +LOCAL_CFLAGS += -Wall +LOCAL_SHARED_LIBRARIES += $(dexdump_libraries) +LOCAL_MODULE := dexdump2 +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES) +include $(BUILD_EXECUTABLE) +endif # !SDK_ONLY + +## +## Build the host command line tool dexdump. +## + +include $(CLEAR_VARS) +LOCAL_CPP_EXTENSION := cc +LOCAL_SRC_FILES := $(dexdump_src_files) +LOCAL_C_INCLUDES := $(dexdump_c_includes) +LOCAL_CFLAGS += -Wall +LOCAL_SHARED_LIBRARIES += $(dexdump_libraries) +LOCAL_MODULE := dexdump2 +LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host) +include $(BUILD_HOST_EXECUTABLE) diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc new file mode 100644 index 0000000000..282db5de83 --- /dev/null +++ b/dexdump/dexdump.cc @@ -0,0 +1,1338 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Implementation file of the dexdump utility. + * + * This is a re-implementation of the original dexdump utility that was + * based on Dalvik functions in libdex into a new dexdump that is now + * based on Art functions in libart instead. The output is identical to + * the original for correct DEX files. Error messages may differ, however. + * Also, ODEX files are no longer supported. + * + * The dexdump tool is intended to mimic objdump. When possible, use + * similar command-line arguments. + * + * Differences between XML output and the "current.xml" file: + * - classes in same package are not all grouped together; nothing is sorted + * - no "deprecated" on fields and methods + * - no "value" on fields + * - no parameter names + * - no generic signatures on parameters, e.g. type="java.lang.Class<?>" + * - class shows declared fields and methods; does not show inherited fields + */ + +#include "dexdump.h" + +#include <inttypes.h> +#include <stdio.h> + +#include <iostream> +#include <memory> +#include <sstream> +#include <vector> + +#include "dex_file-inl.h" +#include "dex_instruction-inl.h" +#include "utils.h" + +namespace art { + +/* + * Options parsed in main driver. + */ +struct Options gOptions; + +/* + * Output file. Defaults to stdout. + */ +FILE* gOutFile = stdout; + +/* + * Data types that match the definitions in the VM specification. + */ +typedef uint8_t u1; +typedef uint16_t u2; +typedef uint32_t u4; +typedef uint64_t u8; +typedef int32_t s4; +typedef int64_t s8; + +/* + * Basic information about a field or a method. 
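 * [Editor's note: illustrative values, not from the original source.] For
 * String.charAt this would hold roughly
 * { "Ljava/lang/String;", "charAt", "(I)C" }.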
+ */ +struct FieldMethodInfo { + const char* classDescriptor; + const char* name; + const char* signature; +}; + +/* + * Flags for use with createAccessFlagStr(). + */ +enum AccessFor { + kAccessForClass = 0, kAccessForMethod = 1, kAccessForField = 2, kAccessForMAX +}; +const int kNumFlags = 18; + +/* + * Gets 2 little-endian bytes. + */ +static inline u2 get2LE(unsigned char const* pSrc) { + return pSrc[0] | (pSrc[1] << 8); +} + +/* + * Converts a single-character primitive type into human-readable form. + */ +static const char* primitiveTypeLabel(char typeChar) { + switch (typeChar) { + case 'B': return "byte"; + case 'C': return "char"; + case 'D': return "double"; + case 'F': return "float"; + case 'I': return "int"; + case 'J': return "long"; + case 'S': return "short"; + case 'V': return "void"; + case 'Z': return "boolean"; + default: return "UNKNOWN"; + } // switch +} + +/* + * Converts a type descriptor to human-readable "dotted" form. For + * example, "Ljava/lang/String;" becomes "java.lang.String", and + * "[I" becomes "int[]". Also converts '$' to '.', which means this + * form can't be converted back to a descriptor. + */ +static char* descriptorToDot(const char* str) { + int targetLen = strlen(str); + int offset = 0; + + // Strip leading [s; will be added to end. + while (targetLen > 1 && str[offset] == '[') { + offset++; + targetLen--; + } // while + + const int arrayDepth = offset; + + if (targetLen == 1) { + // Primitive type. + str = primitiveTypeLabel(str[offset]); + offset = 0; + targetLen = strlen(str); + } else { + // Account for leading 'L' and trailing ';'. + if (targetLen >= 2 && str[offset] == 'L' && + str[offset + targetLen - 1] == ';') { + targetLen -= 2; + offset++; + } + } + + // Copy class name over. + char* newStr = reinterpret_cast<char*>( + malloc(targetLen + arrayDepth * 2 + 1)); + int i = 0; + for (; i < targetLen; i++) { + const char ch = str[offset + i]; + newStr[i] = (ch == '/' || ch == '$') ? '.' : ch; + } // for + + // Add the appropriate number of brackets for arrays. + for (int j = 0; j < arrayDepth; j++) { + newStr[i++] = '['; + newStr[i++] = ']'; + } // for + + newStr[i] = '\0'; + return newStr; +} + +/* + * Converts the class name portion of a type descriptor to human-readable + * "dotted" form. + * + * Returns a newly-allocated string. + */ +static char* descriptorClassToDot(const char* str) { + // Reduce to just the class name, trimming trailing ';'. + const char* lastSlash = strrchr(str, '/'); + if (lastSlash == nullptr) { + lastSlash = str + 1; // start past 'L' + } else { + lastSlash++; // start past '/' + } + + char* newStr = strdup(lastSlash); + newStr[strlen(lastSlash) - 1] = '\0'; + for (char* cp = newStr; *cp != '\0'; cp++) { + if (*cp == '$') { + *cp = '.'; + } + } // for + return newStr; +} + +/* + * Returns a quoted string representing the boolean value. + */ +static const char* quotedBool(bool val) { + return val ? "\"true\"" : "\"false\""; +} + +/* + * Returns a quoted string representing the access flags. + */ +static const char* quotedVisibility(u4 accessFlags) { + if (accessFlags & kAccPublic) { + return "\"public\""; + } else if (accessFlags & kAccProtected) { + return "\"protected\""; + } else if (accessFlags & kAccPrivate) { + return "\"private\""; + } else { + return "\"package\""; + } +} + +/* + * Counts the number of '1' bits in a word. 
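 * [Editor's note.] This is the classic parallel (SWAR) population count:
 * sum bit pairs, then nibbles, then multiply to gather the byte sums into
 * the top byte. For example, countOnes(0x7) == 3 and
 * countOnes(0xffffffff) == 32.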
+ */ +static int countOnes(u4 val) { + val = val - ((val >> 1) & 0x55555555); + val = (val & 0x33333333) + ((val >> 2) & 0x33333333); + return (((val + (val >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; +} + +/* + * Creates a new string with human-readable access flags. + * + * In the base language the access_flags fields are type u2; in Dalvik + * they're u4. + */ +static char* createAccessFlagStr(u4 flags, AccessFor forWhat) { + static const char* kAccessStrings[kAccessForMAX][kNumFlags] = { + { + "PUBLIC", /* 0x00001 */ + "PRIVATE", /* 0x00002 */ + "PROTECTED", /* 0x00004 */ + "STATIC", /* 0x00008 */ + "FINAL", /* 0x00010 */ + "?", /* 0x00020 */ + "?", /* 0x00040 */ + "?", /* 0x00080 */ + "?", /* 0x00100 */ + "INTERFACE", /* 0x00200 */ + "ABSTRACT", /* 0x00400 */ + "?", /* 0x00800 */ + "SYNTHETIC", /* 0x01000 */ + "ANNOTATION", /* 0x02000 */ + "ENUM", /* 0x04000 */ + "?", /* 0x08000 */ + "VERIFIED", /* 0x10000 */ + "OPTIMIZED", /* 0x20000 */ + }, { + "PUBLIC", /* 0x00001 */ + "PRIVATE", /* 0x00002 */ + "PROTECTED", /* 0x00004 */ + "STATIC", /* 0x00008 */ + "FINAL", /* 0x00010 */ + "SYNCHRONIZED", /* 0x00020 */ + "BRIDGE", /* 0x00040 */ + "VARARGS", /* 0x00080 */ + "NATIVE", /* 0x00100 */ + "?", /* 0x00200 */ + "ABSTRACT", /* 0x00400 */ + "STRICT", /* 0x00800 */ + "SYNTHETIC", /* 0x01000 */ + "?", /* 0x02000 */ + "?", /* 0x04000 */ + "MIRANDA", /* 0x08000 */ + "CONSTRUCTOR", /* 0x10000 */ + "DECLARED_SYNCHRONIZED", /* 0x20000 */ + }, { + "PUBLIC", /* 0x00001 */ + "PRIVATE", /* 0x00002 */ + "PROTECTED", /* 0x00004 */ + "STATIC", /* 0x00008 */ + "FINAL", /* 0x00010 */ + "?", /* 0x00020 */ + "VOLATILE", /* 0x00040 */ + "TRANSIENT", /* 0x00080 */ + "?", /* 0x00100 */ + "?", /* 0x00200 */ + "?", /* 0x00400 */ + "?", /* 0x00800 */ + "SYNTHETIC", /* 0x01000 */ + "?", /* 0x02000 */ + "ENUM", /* 0x04000 */ + "?", /* 0x08000 */ + "?", /* 0x10000 */ + "?", /* 0x20000 */ + }, + }; + + // Allocate enough storage to hold the expected number of strings, + // plus a space between each. We over-allocate, using the longest + // string above as the base metric. + const int kLongest = 21; // The strlen of longest string above. + const int count = countOnes(flags); + char* str; + char* cp; + cp = str = reinterpret_cast<char*>(malloc(count * (kLongest + 1) + 1)); + + for (int i = 0; i < kNumFlags; i++) { + if (flags & 0x01) { + const char* accessStr = kAccessStrings[forWhat][i]; + const int len = strlen(accessStr); + if (cp != str) { + *cp++ = ' '; + } + memcpy(cp, accessStr, len); + cp += len; + } + flags >>= 1; + } // for + + *cp = '\0'; + return str; +} + +/* + * Copies character data from "data" to "out", converting non-ASCII values + * to fprintf format chars or an ASCII filler ('.' or '?'). + * + * The output buffer must be able to hold (2*len)+1 bytes. The result is + * NULL-terminated. + */ +static void asciify(char* out, const unsigned char* data, size_t len) { + while (len--) { + if (*data < 0x20) { + // Could do more here, but we don't need them yet. + switch (*data) { + case '\0': + *out++ = '\\'; + *out++ = '0'; + break; + case '\n': + *out++ = '\\'; + *out++ = 'n'; + break; + default: + *out++ = '.'; + break; + } // switch + } else if (*data >= 0x80) { + *out++ = '?'; + } else { + *out++ = *data; + } + data++; + } // while + *out = '\0'; +} + +/* + * Dumps the file header. + * + * Note that some of the : are misaligned on purpose to preserve + * the exact output of the original Dalvik dexdump. 
+ */ +static void dumpFileHeader(const DexFile* pDexFile) { + const DexFile::Header& pHeader = pDexFile->GetHeader(); + char sanitized[sizeof(pHeader.magic_) * 2 + 1]; + fprintf(gOutFile, "DEX file header:\n"); + asciify(sanitized, pHeader.magic_, sizeof(pHeader.magic_)); + fprintf(gOutFile, "magic : '%s'\n", sanitized); + fprintf(gOutFile, "checksum : %08x\n", pHeader.checksum_); + fprintf(gOutFile, "signature : %02x%02x...%02x%02x\n", + pHeader.signature_[0], pHeader.signature_[1], + pHeader.signature_[DexFile::kSha1DigestSize - 2], + pHeader.signature_[DexFile::kSha1DigestSize - 1]); + fprintf(gOutFile, "file_size : %d\n", pHeader.file_size_); + fprintf(gOutFile, "header_size : %d\n", pHeader.header_size_); + fprintf(gOutFile, "link_size : %d\n", pHeader.link_size_); + fprintf(gOutFile, "link_off : %d (0x%06x)\n", + pHeader.link_off_, pHeader.link_off_); + fprintf(gOutFile, "string_ids_size : %d\n", pHeader.string_ids_size_); + fprintf(gOutFile, "string_ids_off : %d (0x%06x)\n", + pHeader.string_ids_off_, pHeader.string_ids_off_); + fprintf(gOutFile, "type_ids_size : %d\n", pHeader.type_ids_size_); + fprintf(gOutFile, "type_ids_off : %d (0x%06x)\n", + pHeader.type_ids_off_, pHeader.type_ids_off_); + fprintf(gOutFile, "proto_ids_size : %d\n", pHeader.proto_ids_size_); + fprintf(gOutFile, "proto_ids_off : %d (0x%06x)\n", + pHeader.proto_ids_off_, pHeader.proto_ids_off_); + fprintf(gOutFile, "field_ids_size : %d\n", pHeader.field_ids_size_); + fprintf(gOutFile, "field_ids_off : %d (0x%06x)\n", + pHeader.field_ids_off_, pHeader.field_ids_off_); + fprintf(gOutFile, "method_ids_size : %d\n", pHeader.method_ids_size_); + fprintf(gOutFile, "method_ids_off : %d (0x%06x)\n", + pHeader.method_ids_off_, pHeader.method_ids_off_); + fprintf(gOutFile, "class_defs_size : %d\n", pHeader.class_defs_size_); + fprintf(gOutFile, "class_defs_off : %d (0x%06x)\n", + pHeader.class_defs_off_, pHeader.class_defs_off_); + fprintf(gOutFile, "data_size : %d\n", pHeader.data_size_); + fprintf(gOutFile, "data_off : %d (0x%06x)\n\n", + pHeader.data_off_, pHeader.data_off_); +} + +/* + * Dumps a class_def_item. + */ +static void dumpClassDef(const DexFile* pDexFile, int idx) { + // General class information. + const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx); + fprintf(gOutFile, "Class #%d header:\n", idx); + fprintf(gOutFile, "class_idx : %d\n", pClassDef.class_idx_); + fprintf(gOutFile, "access_flags : %d (0x%04x)\n", + pClassDef.access_flags_, pClassDef.access_flags_); + fprintf(gOutFile, "superclass_idx : %d\n", pClassDef.superclass_idx_); + fprintf(gOutFile, "interfaces_off : %d (0x%06x)\n", + pClassDef.interfaces_off_, pClassDef.interfaces_off_); + fprintf(gOutFile, "source_file_idx : %d\n", pClassDef.source_file_idx_); + fprintf(gOutFile, "annotations_off : %d (0x%06x)\n", + pClassDef.annotations_off_, pClassDef.annotations_off_); + fprintf(gOutFile, "class_data_off : %d (0x%06x)\n", + pClassDef.class_data_off_, pClassDef.class_data_off_); + + // Fields and methods. 
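  // [Editor's note.] ClassDataItemIterator walks the class_data_item in its
  // fixed dex order: static fields, instance fields, direct methods, then
  // virtual methods; only the four group counts are printed here.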
+ const u1* pEncodedData = pDexFile->GetClassData(pClassDef); + if (pEncodedData != nullptr) { + ClassDataItemIterator pClassData(*pDexFile, pEncodedData); + fprintf(gOutFile, "static_fields_size : %d\n", pClassData.NumStaticFields()); + fprintf(gOutFile, "instance_fields_size: %d\n", pClassData.NumInstanceFields()); + fprintf(gOutFile, "direct_methods_size : %d\n", pClassData.NumDirectMethods()); + fprintf(gOutFile, "virtual_methods_size: %d\n", pClassData.NumVirtualMethods()); + } else { + fprintf(gOutFile, "static_fields_size : 0\n"); + fprintf(gOutFile, "instance_fields_size: 0\n"); + fprintf(gOutFile, "direct_methods_size : 0\n"); + fprintf(gOutFile, "virtual_methods_size: 0\n"); + } + fprintf(gOutFile, "\n"); +} + +/* + * Dumps an interface that a class declares to implement. + */ +static void dumpInterface(const DexFile* pDexFile, const DexFile::TypeItem& pTypeItem, int i) { + const char* interfaceName = pDexFile->StringByTypeIdx(pTypeItem.type_idx_); + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " #%d : '%s'\n", i, interfaceName); + } else { + char* dotted = descriptorToDot(interfaceName); + fprintf(gOutFile, "<implements name=\"%s\">\n</implements>\n", dotted); + free(dotted); + } +} + +/* + * Dumps the catches table associated with the code. + */ +static void dumpCatches(const DexFile* pDexFile, const DexFile::CodeItem* pCode) { + const u4 triesSize = pCode->tries_size_; + + // No catch table. + if (triesSize == 0) { + fprintf(gOutFile, " catches : (none)\n"); + return; + } + + // Dump all table entries. + fprintf(gOutFile, " catches : %d\n", triesSize); + for (u4 i = 0; i < triesSize; i++) { + const DexFile::TryItem* pTry = pDexFile->GetTryItems(*pCode, i); + const u4 start = pTry->start_addr_; + const u4 end = start + pTry->insn_count_; + fprintf(gOutFile, " 0x%04x - 0x%04x\n", start, end); + for (CatchHandlerIterator it(*pCode, *pTry); it.HasNext(); it.Next()) { + const u2 tidx = it.GetHandlerTypeIndex(); + const char* descriptor = + (tidx == DexFile::kDexNoIndex16) ? "<any>" : pDexFile->StringByTypeIdx(tidx); + fprintf(gOutFile, " %s -> 0x%04x\n", descriptor, it.GetHandlerAddress()); + } // for + } // for +} + +/* + * Callback for dumping each positions table entry. + */ +static bool dumpPositionsCb(void* /*context*/, u4 address, u4 lineNum) { + fprintf(gOutFile, " 0x%04x line=%d\n", address, lineNum); + return false; +} + +/* + * Callback for dumping locals table entry. + */ +static void dumpLocalsCb(void* /*context*/, u2 slot, u4 startAddress, u4 endAddress, + const char* name, const char* descriptor, const char* signature) { + fprintf(gOutFile, " 0x%04x - 0x%04x reg=%d %s %s %s\n", + startAddress, endAddress, slot, name, descriptor, signature); +} + +/* + * Helper for dumpInstruction(), which builds the string + * representation for the index in the given instruction. This will + * first try to use the given buffer, but if the result won't fit, + * then this will allocate a new buffer to hold the result. A pointer + * to the buffer which holds the full result is always returned, and + * this can be compared with the one passed in, to see if the result + * needs to be free()d. + */ +static char* indexString(const DexFile* pDexFile, + const Instruction* pDecInsn, char* buf, size_t bufSize) { + // Determine index and width of the string. 
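  // [Editor's note: illustrative output, operands hypothetical.] For a
  // const-string instruction the buffer ends up looking like
  //   "Hello World" // string@0003
  // and for an invoke-virtual like
  //   Ljava/lang/String;.length:()I // method@0012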
+ u4 index = 0; + u4 width = 4; + switch (Instruction::FormatOf(pDecInsn->Opcode())) { + // SOME NOT SUPPORTED: + // case Instruction::k20bc: + case Instruction::k21c: + case Instruction::k35c: + // case Instruction::k35ms: + case Instruction::k3rc: + // case Instruction::k3rms: + // case Instruction::k35mi: + // case Instruction::k3rmi: + index = pDecInsn->VRegB(); + width = 4; + break; + case Instruction::k31c: + index = pDecInsn->VRegB(); + width = 8; + break; + case Instruction::k22c: + // case Instruction::k22cs: + index = pDecInsn->VRegC(); + width = 4; + break; + default: + break; + } // switch + + // Determine index type. + size_t outSize = 0; + switch (Instruction::IndexTypeOf(pDecInsn->Opcode())) { + case Instruction::kIndexUnknown: + // This function should never get called for this type, but do + // something sensible here, just to help with debugging. + outSize = snprintf(buf, bufSize, "<unknown-index>"); + break; + case Instruction::kIndexNone: + // This function should never get called for this type, but do + // something sensible here, just to help with debugging. + outSize = snprintf(buf, bufSize, "<no-index>"); + break; + case Instruction::kIndexTypeRef: + if (index < pDexFile->GetHeader().type_ids_size_) { + const char* tp = pDexFile->StringByTypeIdx(index); + outSize = snprintf(buf, bufSize, "%s // type@%0*x", tp, width, index); + } else { + outSize = snprintf(buf, bufSize, "<type?> // type@%0*x", width, index); + } + break; + case Instruction::kIndexStringRef: + if (index < pDexFile->GetHeader().string_ids_size_) { + const char* st = pDexFile->StringDataByIdx(index); + outSize = snprintf(buf, bufSize, "\"%s\" // string@%0*x", st, width, index); + } else { + outSize = snprintf(buf, bufSize, "<string?> // string@%0*x", width, index); + } + break; + case Instruction::kIndexMethodRef: + if (index < pDexFile->GetHeader().method_ids_size_) { + const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(index); + const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_); + const Signature signature = pDexFile->GetMethodSignature(pMethodId); + const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_); + outSize = snprintf(buf, bufSize, "%s.%s:%s // method@%0*x", + backDescriptor, name, signature.ToString().c_str(), width, index); + } else { + outSize = snprintf(buf, bufSize, "<method?> // method@%0*x", width, index); + } + break; + case Instruction::kIndexFieldRef: + if (index < pDexFile->GetHeader().field_ids_size_) { + const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(index); + const char* name = pDexFile->StringDataByIdx(pFieldId.name_idx_); + const char* typeDescriptor = pDexFile->StringByTypeIdx(pFieldId.type_idx_); + const char* backDescriptor = pDexFile->StringByTypeIdx(pFieldId.class_idx_); + outSize = snprintf(buf, bufSize, "%s.%s:%s // field@%0*x", + backDescriptor, name, typeDescriptor, width, index); + } else { + outSize = snprintf(buf, bufSize, "<field?> // field@%0*x", width, index); + } + break; + case Instruction::kIndexVtableOffset: + outSize = snprintf(buf, bufSize, "[%0*x] // vtable #%0*x", + width, index, width, index); + break; + case Instruction::kIndexFieldOffset: + outSize = snprintf(buf, bufSize, "[obj+%0*x]", width, index); + break; + // SOME NOT SUPPORTED: + // case Instruction::kIndexVaries: + // case Instruction::kIndexInlineMethod: + default: + outSize = snprintf(buf, bufSize, "<?>"); + break; + } // switch + + // Determine success of string construction. 
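  // [Editor's note.] snprintf() returns the length the fully formatted string
  // would need, excluding the terminating NUL, so outSize >= bufSize means the
  // output was truncated and must be rebuilt in a larger heap buffer below.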
+ if (outSize >= bufSize) { + // The buffer wasn't big enough; allocate and retry. Note: + // snprintf() doesn't count the '\0' as part of its returned + // size, so we add explicit space for it here. + outSize++; + buf = reinterpret_cast<char*>(malloc(outSize)); + if (buf == nullptr) { + return nullptr; + } + return indexString(pDexFile, pDecInsn, buf, outSize); + } + return buf; +} + +/* + * Dumps a single instruction. + */ +static void dumpInstruction(const DexFile* pDexFile, + const DexFile::CodeItem* pCode, + u4 codeOffset, u4 insnIdx, u4 insnWidth, + const Instruction* pDecInsn) { + // Address of instruction (expressed as byte offset). + fprintf(gOutFile, "%06x:", codeOffset + 0x10 + insnIdx * 2); + + // Dump (part of) raw bytes. + const u2* insns = pCode->insns_; + for (u4 i = 0; i < 8; i++) { + if (i < insnWidth) { + if (i == 7) { + fprintf(gOutFile, " ... "); + } else { + // Print 16-bit value in little-endian order. + const u1* bytePtr = (const u1*) &insns[insnIdx + i]; + fprintf(gOutFile, " %02x%02x", bytePtr[0], bytePtr[1]); + } + } else { + fputs(" ", gOutFile); + } + } // for + + // Dump pseudo-instruction or opcode. + if (pDecInsn->Opcode() == Instruction::NOP) { + const u2 instr = get2LE((const u1*) &insns[insnIdx]); + if (instr == Instruction::kPackedSwitchSignature) { + fprintf(gOutFile, "|%04x: packed-switch-data (%d units)", insnIdx, insnWidth); + } else if (instr == Instruction::kSparseSwitchSignature) { + fprintf(gOutFile, "|%04x: sparse-switch-data (%d units)", insnIdx, insnWidth); + } else if (instr == Instruction::kArrayDataSignature) { + fprintf(gOutFile, "|%04x: array-data (%d units)", insnIdx, insnWidth); + } else { + fprintf(gOutFile, "|%04x: nop // spacer", insnIdx); + } + } else { + fprintf(gOutFile, "|%04x: %s", insnIdx, pDecInsn->Name()); + } + + // Set up additional argument. + char indexBufChars[200]; + char *indexBuf = indexBufChars; + if (Instruction::IndexTypeOf(pDecInsn->Opcode()) != Instruction::kIndexNone) { + indexBuf = indexString(pDexFile, pDecInsn, + indexBufChars, sizeof(indexBufChars)); + } + + // Dump the instruction. + // + // NOTE: pDecInsn->DumpString(pDexFile) differs too much from original. + // + switch (Instruction::FormatOf(pDecInsn->Opcode())) { + case Instruction::k10x: // op + break; + case Instruction::k12x: // op vA, vB + fprintf(gOutFile, " v%d, v%d", pDecInsn->VRegA(), pDecInsn->VRegB()); + break; + case Instruction::k11n: // op vA, #+B + fprintf(gOutFile, " v%d, #int %d // #%x", + pDecInsn->VRegA(), (s4) pDecInsn->VRegB(), (u1)pDecInsn->VRegB()); + break; + case Instruction::k11x: // op vAA + fprintf(gOutFile, " v%d", pDecInsn->VRegA()); + break; + case Instruction::k10t: // op +AA + case Instruction::k20t: // op +AAAA + { + const s4 targ = (s4) pDecInsn->VRegA(); + fprintf(gOutFile, " %04x // %c%04x", + insnIdx + targ, + (targ < 0) ? '-' : '+', + (targ < 0) ? -targ : targ); + } + break; + case Instruction::k22x: // op vAA, vBBBB + fprintf(gOutFile, " v%d, v%d", pDecInsn->VRegA(), pDecInsn->VRegB()); + break; + case Instruction::k21t: // op vAA, +BBBB + { + const s4 targ = (s4) pDecInsn->VRegB(); + fprintf(gOutFile, " v%d, %04x // %c%04x", pDecInsn->VRegA(), + insnIdx + targ, + (targ < 0) ? '-' : '+', + (targ < 0) ? -targ : targ); + } + break; + case Instruction::k21s: // op vAA, #+BBBB + fprintf(gOutFile, " v%d, #int %d // #%x", + pDecInsn->VRegA(), (s4) pDecInsn->VRegB(), (u2)pDecInsn->VRegB()); + break; + case Instruction::k21h: // op vAA, #+BBBB0000[00000000] + // The printed format varies a bit based on the actual opcode. 
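  // [Editor's note: hypothetical operand.] const/high16 encodes only the top
  // 16 bits, so VRegB() == 0x41 denotes the int 0x410000; const-wide/high16
  // shifts the payload into the top 16 bits of a 64-bit value instead.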
+ if (pDecInsn->Opcode() == Instruction::CONST_HIGH16) { + const s4 value = pDecInsn->VRegB() << 16; + fprintf(gOutFile, " v%d, #int %d // #%x", + pDecInsn->VRegA(), value, (u2) pDecInsn->VRegB()); + } else { + const s8 value = ((s8) pDecInsn->VRegB()) << 48; + fprintf(gOutFile, " v%d, #long %" PRId64 " // #%x", + pDecInsn->VRegA(), value, (u2) pDecInsn->VRegB()); + } + break; + case Instruction::k21c: // op vAA, thing@BBBB + case Instruction::k31c: // op vAA, thing@BBBBBBBB + fprintf(gOutFile, " v%d, %s", pDecInsn->VRegA(), indexBuf); + break; + case Instruction::k23x: // op vAA, vBB, vCC + fprintf(gOutFile, " v%d, v%d, v%d", + pDecInsn->VRegA(), pDecInsn->VRegB(), pDecInsn->VRegC()); + break; + case Instruction::k22b: // op vAA, vBB, #+CC + fprintf(gOutFile, " v%d, v%d, #int %d // #%02x", + pDecInsn->VRegA(), pDecInsn->VRegB(), + (s4) pDecInsn->VRegC(), (u1) pDecInsn->VRegC()); + break; + case Instruction::k22t: // op vA, vB, +CCCC + { + const s4 targ = (s4) pDecInsn->VRegC(); + fprintf(gOutFile, " v%d, v%d, %04x // %c%04x", + pDecInsn->VRegA(), pDecInsn->VRegB(), + insnIdx + targ, + (targ < 0) ? '-' : '+', + (targ < 0) ? -targ : targ); + } + break; + case Instruction::k22s: // op vA, vB, #+CCCC + fprintf(gOutFile, " v%d, v%d, #int %d // #%04x", + pDecInsn->VRegA(), pDecInsn->VRegB(), + (s4) pDecInsn->VRegC(), (u2) pDecInsn->VRegC()); + break; + case Instruction::k22c: // op vA, vB, thing@CCCC + // NOT SUPPORTED: + // case Instruction::k22cs: // [opt] op vA, vB, field offset CCCC + fprintf(gOutFile, " v%d, v%d, %s", + pDecInsn->VRegA(), pDecInsn->VRegB(), indexBuf); + break; + case Instruction::k30t: + fprintf(gOutFile, " #%08x", pDecInsn->VRegA()); + break; + case Instruction::k31i: // op vAA, #+BBBBBBBB + { + // This is often, but not always, a float. + union { + float f; + u4 i; + } conv; + conv.i = pDecInsn->VRegB(); + fprintf(gOutFile, " v%d, #float %f // #%08x", + pDecInsn->VRegA(), conv.f, pDecInsn->VRegB()); + } + break; + case Instruction::k31t: // op vAA, offset +BBBBBBBB + fprintf(gOutFile, " v%d, %08x // +%08x", + pDecInsn->VRegA(), insnIdx + pDecInsn->VRegB(), pDecInsn->VRegB()); + break; + case Instruction::k32x: // op vAAAA, vBBBB + fprintf(gOutFile, " v%d, v%d", pDecInsn->VRegA(), pDecInsn->VRegB()); + break; + case Instruction::k35c: // op {vC, vD, vE, vF, vG}, thing@BBBB + // NOT SUPPORTED: + // case Instruction::k35ms: // [opt] invoke-virtual+super + // case Instruction::k35mi: // [opt] inline invoke + { + u4 arg[5]; + pDecInsn->GetVarArgs(arg); + fputs(" {", gOutFile); + for (int i = 0, n = pDecInsn->VRegA(); i < n; i++) { + if (i == 0) { + fprintf(gOutFile, "v%d", arg[i]); + } else { + fprintf(gOutFile, ", v%d", arg[i]); + } + } // for + fprintf(gOutFile, "}, %s", indexBuf); + } + break; + case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB + // NOT SUPPORTED: + // case Instruction::k3rms: // [opt] invoke-virtual+super/range + // case Instruction::k3rmi: // [opt] execute-inline/range + { + // This doesn't match the "dx" output when some of the args are + // 64-bit values -- dx only shows the first register. + fputs(" {", gOutFile); + for (int i = 0, n = pDecInsn->VRegA(); i < n; i++) { + if (i == 0) { + fprintf(gOutFile, "v%d", pDecInsn->VRegC() + i); + } else { + fprintf(gOutFile, ", v%d", pDecInsn->VRegC() + i); + } + } // for + fprintf(gOutFile, "}, %s", indexBuf); + } + break; + case Instruction::k51l: // op vAA, #+BBBBBBBBBBBBBBBB + { + // This is often, but not always, a double. 
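+        // The union reinterprets the raw 64 bits as a double; the exact
+        // bit pattern is printed alongside in hex.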
+ union { + double d; + u8 j; + } conv; + conv.j = pDecInsn->WideVRegB(); + fprintf(gOutFile, " v%d, #double %f // #%016" PRIx64, + pDecInsn->VRegA(), conv.d, pDecInsn->WideVRegB()); + } + break; + // NOT SUPPORTED: + // case Instruction::k00x: // unknown op or breakpoint + // break; + default: + fprintf(gOutFile, " ???"); + break; + } // switch + + fputc('\n', gOutFile); + + if (indexBuf != indexBufChars) { + free(indexBuf); + } +} + +/* + * Dumps a bytecode disassembly. + */ +static void dumpBytecodes(const DexFile* pDexFile, u4 idx, + const DexFile::CodeItem* pCode, u4 codeOffset) { + const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx); + const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_); + const Signature signature = pDexFile->GetMethodSignature(pMethodId); + const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_); + + // Generate header. + char* tmp = descriptorToDot(backDescriptor); + fprintf(gOutFile, "%06x: " + "|[%06x] %s.%s:%s\n", + codeOffset, codeOffset, tmp, name, signature.ToString().c_str()); + free(tmp); + + // Iterate over all instructions. + const u2* insns = pCode->insns_; + for (u4 insnIdx = 0; insnIdx < pCode->insns_size_in_code_units_;) { + const Instruction* instruction = Instruction::At(&insns[insnIdx]); + const u4 insnWidth = instruction->SizeInCodeUnits(); + if (insnWidth == 0) { + fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", insnIdx); + break; + } + dumpInstruction(pDexFile, pCode, codeOffset, insnIdx, insnWidth, instruction); + insnIdx += insnWidth; + } // for +} + +/* + * Dumps code of a method. + */ +static void dumpCode(const DexFile* pDexFile, u4 idx, u4 flags, + const DexFile::CodeItem* pCode, u4 codeOffset) { + fprintf(gOutFile, " registers : %d\n", pCode->registers_size_); + fprintf(gOutFile, " ins : %d\n", pCode->ins_size_); + fprintf(gOutFile, " outs : %d\n", pCode->outs_size_); + fprintf(gOutFile, " insns size : %d 16-bit code units\n", + pCode->insns_size_in_code_units_); + + // Bytecode disassembly, if requested. + if (gOptions.disassemble) { + dumpBytecodes(pDexFile, idx, pCode, codeOffset); + } + + // Try-catch blocks. + dumpCatches(pDexFile, pCode); + + // Positions and locals table in the debug info. + bool is_static = (flags & kAccStatic) != 0; + fprintf(gOutFile, " positions : \n"); + pDexFile->DecodeDebugInfo( + pCode, is_static, idx, dumpPositionsCb, nullptr, nullptr); + fprintf(gOutFile, " locals : \n"); + pDexFile->DecodeDebugInfo( + pCode, is_static, idx, nullptr, dumpLocalsCb, nullptr); +} + +/* + * Dumps a method. + */ +static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags, + const DexFile::CodeItem* pCode, u4 codeOffset, int i) { + // Bail for anything private if export only requested. 
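+  // Only public and protected members count as exported.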
+  if (gOptions.exportsOnly && (flags & (kAccPublic | kAccProtected)) == 0) {
+    return;
+  }
+
+  const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
+  const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
+  const Signature signature = pDexFile->GetMethodSignature(pMethodId);
+  char* typeDescriptor = strdup(signature.ToString().c_str());
+  const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
+  char* accessStr = createAccessFlagStr(flags, kAccessForMethod);
+
+  if (gOptions.outputFormat == OUTPUT_PLAIN) {
+    fprintf(gOutFile, " #%d : (in %s)\n", i, backDescriptor);
+    fprintf(gOutFile, " name : '%s'\n", name);
+    fprintf(gOutFile, " type : '%s'\n", typeDescriptor);
+    fprintf(gOutFile, " access : 0x%04x (%s)\n", flags, accessStr);
+    if (pCode == nullptr) {
+      fprintf(gOutFile, " code : (none)\n");
+    } else {
+      fprintf(gOutFile, " code -\n");
+      dumpCode(pDexFile, idx, flags, pCode, codeOffset);
+    }
+    if (gOptions.disassemble) {
+      fputc('\n', gOutFile);
+    }
+  } else if (gOptions.outputFormat == OUTPUT_XML) {
+    const bool constructor = (name[0] == '<');
+
+    // Method name and prototype.
+    if (constructor) {
+      char* tmp = descriptorClassToDot(backDescriptor);
+      fprintf(gOutFile, "<constructor name=\"%s\"\n", tmp);
+      free(tmp);
+      tmp = descriptorToDot(backDescriptor);
+      fprintf(gOutFile, " type=\"%s\"\n", tmp);
+      free(tmp);
+    } else {
+      fprintf(gOutFile, "<method name=\"%s\"\n", name);
+      const char* returnType = strrchr(typeDescriptor, ')');
+      if (returnType == nullptr) {
+        fprintf(stderr, "bad method type descriptor '%s'\n", typeDescriptor);
+        goto bail;
+      }
+      char* tmp = descriptorToDot(returnType+1);
+      fprintf(gOutFile, " return=\"%s\"\n", tmp);
+      free(tmp);
+      fprintf(gOutFile, " abstract=%s\n", quotedBool((flags & kAccAbstract) != 0));
+      fprintf(gOutFile, " native=%s\n", quotedBool((flags & kAccNative) != 0));
+      fprintf(gOutFile, " synchronized=%s\n", quotedBool(
+          (flags & (kAccSynchronized | kAccDeclaredSynchronized)) != 0));
+    }
+
+    // Additional method flags.
+    fprintf(gOutFile, " static=%s\n", quotedBool((flags & kAccStatic) != 0));
+    fprintf(gOutFile, " final=%s\n", quotedBool((flags & kAccFinal) != 0));
+    // The "deprecated=" is not knowable w/o parsing annotations.
+    fprintf(gOutFile, " visibility=%s\n>\n", quotedVisibility(flags));
+
+    // Parameters.
+    if (typeDescriptor[0] != '(') {
+      fprintf(stderr, "ERROR: bad descriptor '%s'\n", typeDescriptor);
+      goto bail;
+    }
+    char* tmpBuf = reinterpret_cast<char*>(malloc(strlen(typeDescriptor) + 1));
+    const char* base = typeDescriptor + 1;
+    int argNum = 0;
+    while (*base != ')') {
+      char* cp = tmpBuf;
+      while (*base == '[') {
+        *cp++ = *base++;
+      }
+      if (*base == 'L') {
+        // Copy through ';'.
+        do {
+          *cp = *base++;
+        } while (*cp++ != ';');
+      } else {
+        // Primitive char, copy it.
+        if (strchr("ZBCSIFJD", *base) == nullptr) {
+          fprintf(stderr, "ERROR: bad method signature '%s'\n", base);
+          goto bail;
+        }
+        *cp++ = *base++;
+      }
+      // Null terminate and display.
+      *cp++ = '\0';
+      char* tmp = descriptorToDot(tmpBuf);
+      fprintf(gOutFile, "<parameter name=\"arg%d\" type=\"%s\">\n"
+                        "</parameter>\n", argNum++, tmp);
+      free(tmp);
+    }  // while
+    free(tmpBuf);
+    if (constructor) {
+      fprintf(gOutFile, "</constructor>\n");
+    } else {
+      fprintf(gOutFile, "</method>\n");
+    }
+  }
+
+ bail:
+  free(typeDescriptor);
+  free(accessStr);
+}
+
+/*
+ * Dumps a static (class) field.
+ */
+static void dumpSField(const DexFile* pDexFile, u4 idx, u4 flags, int i) {
+  // Bail for anything private if export only requested.
+ if (gOptions.exportsOnly && (flags & (kAccPublic | kAccProtected)) == 0) { + return; + } + + const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(idx); + const char* name = pDexFile->StringDataByIdx(pFieldId.name_idx_); + const char* typeDescriptor = pDexFile->StringByTypeIdx(pFieldId.type_idx_); + const char* backDescriptor = pDexFile->StringByTypeIdx(pFieldId.class_idx_); + char* accessStr = createAccessFlagStr(flags, kAccessForField); + + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " #%d : (in %s)\n", i, backDescriptor); + fprintf(gOutFile, " name : '%s'\n", name); + fprintf(gOutFile, " type : '%s'\n", typeDescriptor); + fprintf(gOutFile, " access : 0x%04x (%s)\n", flags, accessStr); + } else if (gOptions.outputFormat == OUTPUT_XML) { + fprintf(gOutFile, "<field name=\"%s\"\n", name); + char *tmp = descriptorToDot(typeDescriptor); + fprintf(gOutFile, " type=\"%s\"\n", tmp); + free(tmp); + fprintf(gOutFile, " transient=%s\n", quotedBool((flags & kAccTransient) != 0)); + fprintf(gOutFile, " volatile=%s\n", quotedBool((flags & kAccVolatile) != 0)); + // The "value=" is not knowable w/o parsing annotations. + fprintf(gOutFile, " static=%s\n", quotedBool((flags & kAccStatic) != 0)); + fprintf(gOutFile, " final=%s\n", quotedBool((flags & kAccFinal) != 0)); + // The "deprecated=" is not knowable w/o parsing annotations. + fprintf(gOutFile, " visibility=%s\n", quotedVisibility(flags)); + fprintf(gOutFile, ">\n</field>\n"); + } + + free(accessStr); +} + +/* + * Dumps an instance field. + */ +static void dumpIField(const DexFile* pDexFile, u4 idx, u4 flags, int i) { + dumpSField(pDexFile, idx, flags, i); +} + +/* + * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item + * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a + * tool, so this is not performance-critical. + */ + +static void dumpCfg(const DexFile* dex_file, + uint32_t dex_method_idx, + const DexFile::CodeItem* code_item) { + if (code_item != nullptr) { + std::ostringstream oss; + DumpMethodCFG(dex_file, dex_method_idx, oss); + fprintf(gOutFile, "%s", oss.str().c_str()); + } +} + +static void dumpCfg(const DexFile* dex_file, int idx) { + const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx); + const uint8_t* class_data = dex_file->GetClassData(class_def); + if (class_data == nullptr) { // empty class such as a marker interface? + return; + } + ClassDataItemIterator it(*dex_file, class_data); + while (it.HasNextStaticField()) { + it.Next(); + } + while (it.HasNextInstanceField()) { + it.Next(); + } + while (it.HasNextDirectMethod()) { + dumpCfg(dex_file, + it.GetMemberIndex(), + it.GetMethodCodeItem()); + it.Next(); + } + while (it.HasNextVirtualMethod()) { + dumpCfg(dex_file, + it.GetMemberIndex(), + it.GetMethodCodeItem()); + it.Next(); + } +} + +/* + * Dumps the class. + * + * Note "idx" is a DexClassDef index, not a DexTypeId index. + * + * If "*pLastPackage" is nullptr or does not match the current class' package, + * the value will be replaced with a newly-allocated string. + */ +static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) { + const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx); + + // Omitting non-public class. + if (gOptions.exportsOnly && (pClassDef.access_flags_ & kAccPublic) == 0) { + return; + } + + if (gOptions.cfg) { + dumpCfg(pDexFile, idx); + return; + } + + // For the XML output, show the package name. 
Ideally we'd gather + // up the classes, sort them, and dump them alphabetically so the + // package name wouldn't jump around, but that's not a great plan + // for something that needs to run on the device. + const char* classDescriptor = pDexFile->StringByTypeIdx(pClassDef.class_idx_); + if (!(classDescriptor[0] == 'L' && + classDescriptor[strlen(classDescriptor)-1] == ';')) { + // Arrays and primitives should not be defined explicitly. Keep going? + fprintf(stderr, "Malformed class name '%s'\n", classDescriptor); + } else if (gOptions.outputFormat == OUTPUT_XML) { + char* mangle = strdup(classDescriptor + 1); + mangle[strlen(mangle)-1] = '\0'; + + // Reduce to just the package name. + char* lastSlash = strrchr(mangle, '/'); + if (lastSlash != nullptr) { + *lastSlash = '\0'; + } else { + *mangle = '\0'; + } + + for (char* cp = mangle; *cp != '\0'; cp++) { + if (*cp == '/') { + *cp = '.'; + } + } // for + + if (*pLastPackage == nullptr || strcmp(mangle, *pLastPackage) != 0) { + // Start of a new package. + if (*pLastPackage != nullptr) { + fprintf(gOutFile, "</package>\n"); + } + fprintf(gOutFile, "<package name=\"%s\"\n>\n", mangle); + free(*pLastPackage); + *pLastPackage = mangle; + } else { + free(mangle); + } + } + + // General class information. + char* accessStr = createAccessFlagStr(pClassDef.access_flags_, kAccessForClass); + const char* superclassDescriptor; + if (pClassDef.superclass_idx_ == DexFile::kDexNoIndex16) { + superclassDescriptor = nullptr; + } else { + superclassDescriptor = pDexFile->StringByTypeIdx(pClassDef.superclass_idx_); + } + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, "Class #%d -\n", idx); + fprintf(gOutFile, " Class descriptor : '%s'\n", classDescriptor); + fprintf(gOutFile, " Access flags : 0x%04x (%s)\n", pClassDef.access_flags_, accessStr); + if (superclassDescriptor != nullptr) { + fprintf(gOutFile, " Superclass : '%s'\n", superclassDescriptor); + } + fprintf(gOutFile, " Interfaces -\n"); + } else { + char* tmp = descriptorClassToDot(classDescriptor); + fprintf(gOutFile, "<class name=\"%s\"\n", tmp); + free(tmp); + if (superclassDescriptor != nullptr) { + tmp = descriptorToDot(superclassDescriptor); + fprintf(gOutFile, " extends=\"%s\"\n", tmp); + free(tmp); + } + fprintf(gOutFile, " abstract=%s\n", quotedBool((pClassDef.access_flags_ & kAccAbstract) != 0)); + fprintf(gOutFile, " static=%s\n", quotedBool((pClassDef.access_flags_ & kAccStatic) != 0)); + fprintf(gOutFile, " final=%s\n", quotedBool((pClassDef.access_flags_ & kAccFinal) != 0)); + // The "deprecated=" not knowable w/o parsing annotations. + fprintf(gOutFile, " visibility=%s\n", quotedVisibility(pClassDef.access_flags_)); + fprintf(gOutFile, ">\n"); + } + + // Interfaces. + const DexFile::TypeList* pInterfaces = pDexFile->GetInterfacesList(pClassDef); + if (pInterfaces != nullptr) { + for (u4 i = 0; i < pInterfaces->Size(); i++) { + dumpInterface(pDexFile, pInterfaces->GetTypeItem(i), i); + } // for + } + + // Fields and methods. 
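+  // A class_data_item encodes its members in a fixed order: static fields,
+  // instance fields, direct methods, then virtual methods.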
+ const u1* pEncodedData = pDexFile->GetClassData(pClassDef); + if (pEncodedData == nullptr) { + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " Static fields -\n"); + fprintf(gOutFile, " Instance fields -\n"); + fprintf(gOutFile, " Direct methods -\n"); + fprintf(gOutFile, " Virtual methods -\n"); + } + } else { + ClassDataItemIterator pClassData(*pDexFile, pEncodedData); + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " Static fields -\n"); + } + for (int i = 0; pClassData.HasNextStaticField(); i++, pClassData.Next()) { + dumpSField(pDexFile, pClassData.GetMemberIndex(), + pClassData.GetRawMemberAccessFlags(), i); + } // for + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " Instance fields -\n"); + } + for (int i = 0; pClassData.HasNextInstanceField(); i++, pClassData.Next()) { + dumpIField(pDexFile, pClassData.GetMemberIndex(), + pClassData.GetRawMemberAccessFlags(), i); + } // for + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " Direct methods -\n"); + } + for (int i = 0; pClassData.HasNextDirectMethod(); i++, pClassData.Next()) { + dumpMethod(pDexFile, pClassData.GetMemberIndex(), + pClassData.GetRawMemberAccessFlags(), + pClassData.GetMethodCodeItem(), + pClassData.GetMethodCodeItemOffset(), i); + } // for + if (gOptions.outputFormat == OUTPUT_PLAIN) { + fprintf(gOutFile, " Virtual methods -\n"); + } + for (int i = 0; pClassData.HasNextVirtualMethod(); i++, pClassData.Next()) { + dumpMethod(pDexFile, pClassData.GetMemberIndex(), + pClassData.GetRawMemberAccessFlags(), + pClassData.GetMethodCodeItem(), + pClassData.GetMethodCodeItemOffset(), i); + } // for + } + + // End of class. + if (gOptions.outputFormat == OUTPUT_PLAIN) { + const char* fileName; + if (pClassDef.source_file_idx_ != DexFile::kDexNoIndex) { + fileName = pDexFile->StringDataByIdx(pClassDef.source_file_idx_); + } else { + fileName = "unknown"; + } + fprintf(gOutFile, " source_file_idx : %d (%s)\n\n", + pClassDef.source_file_idx_, fileName); + } else if (gOptions.outputFormat == OUTPUT_XML) { + fprintf(gOutFile, "</class>\n"); + } + + free(accessStr); +} + +/* + * Dumps the requested sections of the file. + */ +static void processDexFile(const char* fileName, const DexFile* pDexFile) { + if (gOptions.verbose) { + fprintf(gOutFile, "Opened '%s', DEX version '%.3s'\n", + fileName, pDexFile->GetHeader().magic_ + 4); + } + + // Headers. + if (gOptions.showFileHeaders) { + dumpFileHeader(pDexFile); + } + + // Open XML context. + if (gOptions.outputFormat == OUTPUT_XML) { + fprintf(gOutFile, "<api>\n"); + } + + // Iterate over all classes. + char* package = nullptr; + const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_; + for (u4 i = 0; i < classDefsSize; i++) { + if (gOptions.showSectionHeaders) { + dumpClassDef(pDexFile, i); + } + dumpClass(pDexFile, i, &package); + } // for + + // Free the last package allocated. + if (package != nullptr) { + fprintf(gOutFile, "</package>\n"); + free(package); + } + + // Close XML context. + if (gOptions.outputFormat == OUTPUT_XML) { + fprintf(gOutFile, "</api>\n"); + } +} + +/* + * Processes a single file (either direct .dex or indirect .zip/.jar/.apk). + */ +int processFile(const char* fileName) { + if (gOptions.verbose) { + fprintf(gOutFile, "Processing '%s'...\n", fileName); + } + + // If the file is not a .dex file, the function tries .zip/.jar/.apk files, + // all of which are Zip archives with "classes.dex" inside. 
The compressed + // data needs to be extracted to a temp file, the location of which varies. + // + // TODO(ajcbik): fix following issues + // + // (1) gOptions.tempFileName is not accounted for + // (2) gOptions.ignoreBadChecksum is not accounted for + // + std::string error_msg; + std::vector<std::unique_ptr<const DexFile>> dex_files; + if (!DexFile::Open(fileName, fileName, &error_msg, &dex_files)) { + // Display returned error message to user. Note that this error behavior + // differs from the error messages shown by the original Dalvik dexdump. + fputs(error_msg.c_str(), stderr); + fputc('\n', stderr); + return -1; + } + + // Success. Either report checksum verification or process + // all dex files found in given file. + if (gOptions.checksumOnly) { + fprintf(gOutFile, "Checksum verified\n"); + } else { + for (size_t i = 0; i < dex_files.size(); i++) { + processDexFile(fileName, dex_files[i].get()); + } + } + return 0; +} + +} // namespace art diff --git a/dexdump/dexdump.h b/dexdump/dexdump.h new file mode 100644 index 0000000000..50280a9f28 --- /dev/null +++ b/dexdump/dexdump.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Header file of the dexdump utility. + * + * This is a re-implementation of the original dexdump utility that was + * based on Dalvik functions in libdex into a new dexdump that is now + * based on Art functions in libart instead. The output is identical to + * the original for correct DEX files. Error messages may differ, however. + * Also, ODEX files are no longer supported. + */ + +#ifndef ART_DEXDUMP_DEXDUMP_H_ +#define ART_DEXDUMP_DEXDUMP_H_ + +#include <stdint.h> +#include <stdio.h> + +namespace art { + +/* Supported output formats. */ +enum OutputFormat { + OUTPUT_PLAIN = 0, // default + OUTPUT_XML, // XML-style +}; + +/* Command-line options. */ +struct Options { + bool checksumOnly; + bool disassemble; + bool exportsOnly; + bool ignoreBadChecksum; + bool showFileHeaders; + bool showSectionHeaders; + bool verbose; + bool cfg; + OutputFormat outputFormat; + const char* outputFileName; + const char* tempFileName; +}; + +/* Prototypes. */ +extern struct Options gOptions; +extern FILE* gOutFile; +int processFile(const char* fileName); + +} // namespace art + +#endif // ART_DEXDUMP_DEXDUMP_H_ diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc new file mode 100644 index 0000000000..2466f33d1e --- /dev/null +++ b/dexdump/dexdump_main.cc @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Main driver of the dexdump utility.
+ *
+ * This is a re-implementation of the original dexdump utility that was
+ * based on Dalvik functions in libdex into a new dexdump that is now
+ * based on Art functions in libart instead. The output is identical to
+ * the original for correct DEX files. Error messages may differ, however.
+ * Also, ODEX files are no longer supported.
+ */
+
+#include "dexdump.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "mem_map.h"
+#include "runtime.h"
+
+namespace art {
+
+static const char* gProgName = "dexdump";
+
+/*
+ * Shows usage.
+ */
+static void usage(void) {
+  fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
+  fprintf(stderr, "%s: [-c] [-d] [-f] [-g] [-h] [-i] [-l layout] [-o outfile]"
+                  " [-t tempfile] dexfile...\n", gProgName);
+  fprintf(stderr, "\n");
+  fprintf(stderr, " -c : verify checksum and exit\n");
+  fprintf(stderr, " -d : disassemble code sections\n");
+  fprintf(stderr, " -f : display summary information from file header\n");
+  fprintf(stderr, " -g : dump CFG for dex\n");
+  fprintf(stderr, " -h : display file header details\n");
+  fprintf(stderr, " -i : ignore checksum failures\n");
+  fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
+  fprintf(stderr, " -o : output file name (defaults to stdout)\n");
+  fprintf(stderr, " -t : temp file name (defaults to /sdcard/dex-temp-*)\n");
+}
+
+/*
+ * Main driver of the dexdump utility.
+ */
+int dexdumpDriver(int argc, char** argv) {
+  // Art specific set up.
+  InitLogging(argv);
+  MemMap::Init();
+
+  // Reset options.
+  bool wantUsage = false;
+  memset(&gOptions, 0, sizeof(gOptions));
+  gOptions.verbose = true;
+
+  // Parse all arguments.
+  while (1) {
+    const int ic = getopt(argc, argv, "cdfghil:t:o:");
+    if (ic < 0) {
+      break;  // done
+    }
+    switch (ic) {
+      case 'c':  // verify the checksum then exit
+        gOptions.checksumOnly = true;
+        break;
+      case 'd':  // disassemble Dalvik instructions
+        gOptions.disassemble = true;
+        break;
+      case 'f':  // dump outer file header
+        gOptions.showFileHeaders = true;
+        break;
+      case 'g':  // dump cfg
+        gOptions.cfg = true;
+        break;
+      case 'h':  // dump section headers, i.e. all meta-data
+        gOptions.showSectionHeaders = true;
+        break;
+      case 'i':  // continue even if checksum is bad
+        gOptions.ignoreBadChecksum = true;
+        break;
+      case 'l':  // layout
+        if (strcmp(optarg, "plain") == 0) {
+          gOptions.outputFormat = OUTPUT_PLAIN;
+        } else if (strcmp(optarg, "xml") == 0) {
+          gOptions.outputFormat = OUTPUT_XML;
+          gOptions.verbose = false;
+          gOptions.exportsOnly = true;
+        } else {
+          wantUsage = true;
+        }
+        break;
+      case 't':  // temp file, used when opening compressed Jar
+        gOptions.tempFileName = optarg;
+        break;
+      case 'o':  // output file
+        gOptions.outputFileName = optarg;
+        break;
+      default:
+        wantUsage = true;
+        break;
+    }  // switch
+  }  // while
+
+  // Detect early problems.
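+  // An input file is required, and -c (verify) contradicts -i (ignore bad checksums).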
+ if (optind == argc) { + fprintf(stderr, "%s: no file specified\n", gProgName); + wantUsage = true; + } + if (gOptions.checksumOnly && gOptions.ignoreBadChecksum) { + fprintf(stderr, "Can't specify both -c and -i\n"); + wantUsage = true; + } + if (wantUsage) { + usage(); + return 2; + } + + // Open alternative output file. + if (gOptions.outputFileName) { + gOutFile = fopen(gOptions.outputFileName, "w"); + if (!gOutFile) { + fprintf(stderr, "Can't open %s\n", gOptions.outputFileName); + return 1; + } + } + + // Process all files supplied on command line. + int result = 0; + while (optind < argc) { + result |= processFile(argv[optind++]); + } // while + return result != 0; +} + +} // namespace art + +int main(int argc, char** argv) { + return art::dexdumpDriver(argc, argv); +} diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc new file mode 100644 index 0000000000..d9b210d767 --- /dev/null +++ b/dexdump/dexdump_test.cc @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <string> +#include <vector> +#include <sstream> + +#include <sys/types.h> +#include <unistd.h> + +#include "base/stringprintf.h" +#include "common_runtime_test.h" +#include "runtime/arch/instruction_set.h" +#include "runtime/gc/heap.h" +#include "runtime/gc/space/image_space.h" +#include "runtime/os.h" +#include "runtime/utils.h" +#include "utils.h" + +namespace art { + +class DexDumpTest : public CommonRuntimeTest { + protected: + virtual void SetUp() { + CommonRuntimeTest::SetUp(); + // Dogfood our own lib core dex file. + dex_file_ = GetLibCoreDexFileName(); + } + + // Runs test with given arguments. 
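+  // Locates the dexdump2 binary under the test Android root and invokes it
+  // through art::Exec; returns true only if the child exits successfully.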
+ bool Exec(const std::vector<std::string>& args, std::string* error_msg) { + // TODO(ajcbik): dexdump2 -> dexdump + std::string file_path = GetTestAndroidRoot(); + if (IsHost()) { + file_path += "/bin/dexdump2"; + } else { + file_path += "/xbin/dexdump2"; + } + EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path"; + std::vector<std::string> exec_argv = { file_path }; + exec_argv.insert(exec_argv.end(), args.begin(), args.end()); + return ::art::Exec(exec_argv, error_msg); + } + + std::string dex_file_; +}; + + +TEST_F(DexDumpTest, NoInputFileGiven) { + std::string error_msg; + ASSERT_FALSE(Exec({}, &error_msg)) << error_msg; +} + +TEST_F(DexDumpTest, CantOpenOutput) { + std::string error_msg; + ASSERT_FALSE(Exec({"-o", "/joho", dex_file_}, &error_msg)) << error_msg; +} + +TEST_F(DexDumpTest, BadFlagCombination) { + std::string error_msg; + ASSERT_FALSE(Exec({"-c", "-i", dex_file_}, &error_msg)) << error_msg; +} + +TEST_F(DexDumpTest, FullPlainOutput) { + std::string error_msg; + ASSERT_TRUE(Exec({"-d", "-f", "-h", "-l", "plain", "-o", "/dev/null", + dex_file_}, &error_msg)) << error_msg; +} + +TEST_F(DexDumpTest, XMLOutput) { + std::string error_msg; + ASSERT_TRUE(Exec({"-l", "xml", "-o", "/dev/null", + dex_file_}, &error_msg)) << error_msg; +} + +} // namespace art diff --git a/dexlist/Android.mk b/dexlist/Android.mk new file mode 100755 index 0000000000..9fbd8470b8 --- /dev/null +++ b/dexlist/Android.mk @@ -0,0 +1,54 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO(ajcbik): Art-i-fy this makefile + +# TODO(ajcbik): rename dexlist2 into dexlist when Dalvik version is removed + +LOCAL_PATH:= $(call my-dir) + +dexlist_src_files := dexlist.cc +dexlist_c_includes := art/runtime +dexlist_libraries := libart + +## +## Build the device command line tool dexlist. +## + +ifneq ($(SDK_ONLY),true) # SDK_only doesn't need device version +include $(CLEAR_VARS) +LOCAL_CPP_EXTENSION := cc +LOCAL_SRC_FILES := $(dexlist_src_files) +LOCAL_C_INCLUDES := $(dexlist_c_includes) +LOCAL_CFLAGS += -Wall +LOCAL_SHARED_LIBRARIES += $(dexlist_libraries) +LOCAL_MODULE := dexlist2 +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES) +include $(BUILD_EXECUTABLE) +endif # !SDK_ONLY + +## +## Build the host command line tool dexlist. 
+##
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := cc
+LOCAL_SRC_FILES := $(dexlist_src_files)
+LOCAL_C_INCLUDES := $(dexlist_c_includes)
+LOCAL_CFLAGS += -Wall
+LOCAL_SHARED_LIBRARIES += $(dexlist_libraries)
+LOCAL_MODULE := dexlist2
+LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host)
+include $(BUILD_HOST_EXECUTABLE)
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
new file mode 100644
index 0000000000..d8fd242024
--- /dev/null
+++ b/dexlist/dexlist.cc
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of the dexlist utility.
+ *
+ * This is a re-implementation of the original dexlist utility that was
+ * based on Dalvik functions in libdex into a new dexlist that is now
+ * based on Art functions in libart instead. The output is identical to
+ * the original for correct DEX files. Error messages may differ, however.
+ *
+ * List all methods in all concrete classes in one or more DEX files.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "dex_file-inl.h"
+#include "mem_map.h"
+#include "runtime.h"
+
+namespace art {
+
+static const char* gProgName = "dexlist";
+
+/* Command-line options. */
+static struct {
+  char* argCopy;
+  const char* classToFind;
+  const char* methodToFind;
+  const char* outputFileName;
+} gOptions;
+
+/*
+ * Output file. Defaults to stdout.
+ */
+static FILE* gOutFile = stdout;
+
+/*
+ * Data types that match the definitions in the VM specification.
+ */
+typedef uint8_t u1;
+typedef uint32_t u4;
+typedef uint64_t u8;
+
+/*
+ * Returns a newly-allocated string for the "dot version" of the class
+ * name for the given type descriptor. That is, the initial "L" and
+ * final ";" (if any) have been removed and all occurrences of '/'
+ * have been changed to '.'.
+ */
+static char* descriptorToDot(const char* str) {
+  size_t at = strlen(str);
+  if (str[0] == 'L') {
+    at -= 2;  // Two fewer chars to copy.
+    str++;
+  }
+  char* newStr = reinterpret_cast<char*>(malloc(at + 1));
+  newStr[at] = '\0';
+  while (at > 0) {
+    at--;
+    newStr[at] = (str[at] == '/') ? '.' : str[at];
+  }
+  return newStr;
+}
+
+/*
+ * Positions table callback; we just want to catch the number of the
+ * first line in the method, which *should* correspond to the first
+ * entry from the table. (Could also use "min" here.)
+ */
+static bool positionsCb(void* context, u4 /*address*/, u4 lineNum) {
+  int* pFirstLine = reinterpret_cast<int*>(context);
+  if (*pFirstLine == -1) {
+    *pFirstLine = lineNum;
+  }
+  return false;
+}
+
+/*
+ * Dumps a method.
+ */
+static void dumpMethod(const DexFile* pDexFile,
+                       const char* fileName, u4 idx, u4 flags,
+                       const DexFile::CodeItem* pCode, u4 codeOffset) {
+  // Abstract and native methods don't get listed.
+  if (pCode == nullptr || codeOffset == 0) {
+    return;
+  }
+
+  // Method information.
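+  // The method_id_item ties together the declaring class, the method name,
+  // and the method prototype.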
+  const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
+  const char* methodName = pDexFile->StringDataByIdx(pMethodId.name_idx_);
+  const char* classDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
+  char* className = descriptorToDot(classDescriptor);
+  const u4 insnsOff = codeOffset + 0x10;
+
+  // Don't list methods that do not match a particular query.
+  if (gOptions.methodToFind != nullptr &&
+      (strcmp(gOptions.classToFind, className) != 0 ||
+       strcmp(gOptions.methodToFind, methodName) != 0)) {
+    free(className);
+    return;
+  }
+
+  // If the filename is empty, then set it to something printable.
+  if (fileName == nullptr || fileName[0] == 0) {
+    fileName = "(none)";
+  }
+
+  // Find the first line.
+  int firstLine = -1;
+  bool is_static = (flags & kAccStatic) != 0;
+  pDexFile->DecodeDebugInfo(
+      pCode, is_static, idx, positionsCb, nullptr, &firstLine);
+
+  // Method signature.
+  const Signature signature = pDexFile->GetMethodSignature(pMethodId);
+  char* typeDesc = strdup(signature.ToString().c_str());
+
+  // Dump actual method information.
+  fprintf(gOutFile, "0x%08x %d %s %s %s %s %d\n",
+          insnsOff, pCode->insns_size_in_code_units_ * 2,
+          className, methodName, typeDesc, fileName, firstLine);
+
+  free(typeDesc);
+  free(className);
+}
+
+/*
+ * Runs through all direct and virtual methods in the class.
+ */
+static void dumpClass(const DexFile* pDexFile, u4 idx) {
+  const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+
+  const char* fileName;
+  if (pClassDef.source_file_idx_ == DexFile::kDexNoIndex) {
+    fileName = nullptr;
+  } else {
+    fileName = pDexFile->StringDataByIdx(pClassDef.source_file_idx_);
+  }
+
+  const u1* pEncodedData = pDexFile->GetClassData(pClassDef);
+  if (pEncodedData != nullptr) {
+    ClassDataItemIterator pClassData(*pDexFile, pEncodedData);
+    // Skip the fields.
+    for (; pClassData.HasNextStaticField(); pClassData.Next()) {}
+    for (; pClassData.HasNextInstanceField(); pClassData.Next()) {}
+    // Direct methods.
+    for (; pClassData.HasNextDirectMethod(); pClassData.Next()) {
+      dumpMethod(pDexFile, fileName,
+                 pClassData.GetMemberIndex(),
+                 pClassData.GetRawMemberAccessFlags(),
+                 pClassData.GetMethodCodeItem(),
+                 pClassData.GetMethodCodeItemOffset());
+    }
+    // Virtual methods.
+    for (; pClassData.HasNextVirtualMethod(); pClassData.Next()) {
+      dumpMethod(pDexFile, fileName,
+                 pClassData.GetMemberIndex(),
+                 pClassData.GetRawMemberAccessFlags(),
+                 pClassData.GetMethodCodeItem(),
+                 pClassData.GetMethodCodeItemOffset());
+    }
+  }
+}
+
+/*
+ * Processes a single file (either direct .dex or indirect .zip/.jar/.apk).
+ */
+static int processFile(const char* fileName) {
+  // If the file is not a .dex file, the function tries .zip/.jar/.apk files,
+  // all of which are Zip archives with "classes.dex" inside.
+  std::string error_msg;
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  if (!DexFile::Open(fileName, fileName, &error_msg, &dex_files)) {
+    fputs(error_msg.c_str(), stderr);
+    fputc('\n', stderr);
+    return -1;
+  }
+
+  // Success. Iterate over all dex files found in given file.
+  fprintf(gOutFile, "#%s\n", fileName);
+  for (size_t i = 0; i < dex_files.size(); i++) {
+    // Iterate over all classes in one dex file.
+    const DexFile* pDexFile = dex_files[i].get();
+    const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
+    for (u4 idx = 0; idx < classDefsSize; idx++) {
+      dumpClass(pDexFile, idx);
+    }
+  }
+  return 0;
+}
+
+/*
+ * Shows usage.
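+ * Note that dexlist accepts far fewer options than dexdump: just an
+ * output file and an optional fully-qualified method filter.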
+ */ +static void usage(void) { + fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n"); + fprintf(stderr, "%s: [-m p.c.m] [-o outfile] dexfile...\n", gProgName); + fprintf(stderr, "\n"); +} + +/* + * Main driver of the dexlist utility. + */ +int dexlistDriver(int argc, char** argv) { + // Art specific set up. + InitLogging(argv); + MemMap::Init(); + + // Reset options. + bool wantUsage = false; + memset(&gOptions, 0, sizeof(gOptions)); + + // Parse all arguments. + while (1) { + const int ic = getopt(argc, argv, "o:m:"); + if (ic < 0) { + break; // done + } + switch (ic) { + case 'o': // output file + gOptions.outputFileName = optarg; + break; + case 'm': + // If -m x.y.z is given, then find all instances of the + // fully-qualified method name. This isn't really what + // dexlist is for, but it's easy to do it here. + { + gOptions.argCopy = strdup(optarg); + char* meth = strrchr(gOptions.argCopy, '.'); + if (meth == nullptr) { + fprintf(stderr, "Expected: package.Class.method\n"); + wantUsage = true; + } else { + *meth = '\0'; + gOptions.classToFind = gOptions.argCopy; + gOptions.methodToFind = meth + 1; + } + } + break; + default: + wantUsage = true; + break; + } // switch + } // while + + // Detect early problems. + if (optind == argc) { + fprintf(stderr, "%s: no file specified\n", gProgName); + wantUsage = true; + } + if (wantUsage) { + usage(); + free(gOptions.argCopy); + return 2; + } + + // Open alternative output file. + if (gOptions.outputFileName) { + gOutFile = fopen(gOptions.outputFileName, "w"); + if (!gOutFile) { + fprintf(stderr, "Can't open %s\n", gOptions.outputFileName); + free(gOptions.argCopy); + return 1; + } + } + + // Process all files supplied on command line. If one of them fails we + // continue on, only returning a failure at the end. + int result = 0; + while (optind < argc) { + result |= processFile(argv[optind++]); + } // while + + free(gOptions.argCopy); + return result != 0; +} + +} // namespace art + +int main(int argc, char** argv) { + return art::dexlistDriver(argc, argv); +} + diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc new file mode 100644 index 0000000000..7b1b63dba7 --- /dev/null +++ b/dexlist/dexlist_test.cc @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <string> +#include <vector> +#include <sstream> + +#include <sys/types.h> +#include <unistd.h> + +#include "base/stringprintf.h" +#include "common_runtime_test.h" +#include "runtime/arch/instruction_set.h" +#include "runtime/gc/heap.h" +#include "runtime/gc/space/image_space.h" +#include "runtime/os.h" +#include "runtime/utils.h" +#include "utils.h" + +namespace art { + +class DexListTest : public CommonRuntimeTest { + protected: + virtual void SetUp() { + CommonRuntimeTest::SetUp(); + // Dogfood our own lib core dex file. + dex_file_ = GetLibCoreDexFileName(); + } + + // Runs test with given arguments. 
+ bool Exec(const std::vector<std::string>& args, std::string* error_msg) { + // TODO(ajcbik): dexlist2 -> dexlist + std::string file_path = GetTestAndroidRoot(); + if (IsHost()) { + file_path += "/bin/dexlist2"; + } else { + file_path += "/xbin/dexlist2"; + } + EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path"; + std::vector<std::string> exec_argv = { file_path }; + exec_argv.insert(exec_argv.end(), args.begin(), args.end()); + return ::art::Exec(exec_argv, error_msg); + } + + std::string dex_file_; +}; + + +TEST_F(DexListTest, NoInputFileGiven) { + std::string error_msg; + ASSERT_FALSE(Exec({}, &error_msg)) << error_msg; +} + +TEST_F(DexListTest, CantOpenOutput) { + std::string error_msg; + ASSERT_FALSE(Exec({"-o", "/joho", dex_file_}, &error_msg)) << error_msg; +} + +TEST_F(DexListTest, IllFormedMethod) { + std::string error_msg; + ASSERT_FALSE(Exec({"-m", "joho", dex_file_}, &error_msg)) << error_msg; +} + +TEST_F(DexListTest, FullOutput) { + std::string error_msg; + ASSERT_TRUE(Exec({"-o", "/dev/null", dex_file_}, &error_msg)) << error_msg; +} + +TEST_F(DexListTest, MethodOutput) { + std::string error_msg; + ASSERT_TRUE(Exec({"-o", "/dev/null", "-m", "java.lang.Object.toString", + dex_file_}, &error_msg)) << error_msg; +} + +} // namespace art diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index 31e653bf92..5e2cf6b81d 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -22,6 +22,7 @@ #include <sstream> #include "arch/arm/registers_arm.h" +#include "base/bit_utils.h" #include "base/logging.h" #include "base/stringprintf.h" #include "thread.h" @@ -201,14 +202,13 @@ std::ostream& operator<<(std::ostream& os, const RegisterList& rhs) { } struct FpRegister { - explicit FpRegister(uint32_t instr, uint16_t at_bit, uint16_t extra_at_bit) { + FpRegister(uint32_t instr, uint16_t at_bit, uint16_t extra_at_bit) { size = (instr >> 8) & 1; uint32_t Vn = (instr >> at_bit) & 0xF; uint32_t N = (instr >> extra_at_bit) & 1; r = (size != 0 ? 
((N << 4) | Vn) : ((Vn << 1) | N)); } - explicit FpRegister(uint32_t instr, uint16_t at_bit, uint16_t extra_at_bit, - uint32_t forced_size) { + FpRegister(uint32_t instr, uint16_t at_bit, uint16_t extra_at_bit, uint32_t forced_size) { size = forced_size; uint32_t Vn = (instr >> at_bit) & 0xF; uint32_t N = (instr >> extra_at_bit) & 1; @@ -1455,6 +1455,20 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) } // else unknown instruction break; } + case 0x2B: { // 0101011 + // CLZ - 111 11 0101011 mmmm 1111 dddd 1000 mmmm + if ((instr & 0xf0f0) == 0xf080) { + opcode << "clz"; + ArmRegister Rm(instr, 0); + ArmRegister Rd(instr, 8); + args << Rd << ", " << Rm; + ArmRegister Rm2(instr, 16); + if (Rm.r != Rm2.r || Rm.r == 13 || Rm.r == 15 || Rd.r == 13 || Rd.r == 15) { + args << " (UNPREDICTABLE)"; + } + } + break; + } default: // more formats if ((op2 >> 4) == 2) { // 010xxxx // data processing (register) diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc index 348b2a5d00..5f8871470d 100644 --- a/disassembler/disassembler_arm64.cc +++ b/disassembler/disassembler_arm64.cc @@ -94,7 +94,7 @@ void CustomDisassembler::VisitLoadStoreUnsignedOffset(const vixl::Instruction* i int64_t offset = instr->ImmLSUnsigned() << instr->SizeLS(); std::ostringstream tmp_stream; Thread::DumpThreadOffset<8>(tmp_stream, static_cast<uint32_t>(offset)); - AppendToOutput(" (%s)", tmp_stream.str().c_str()); + AppendToOutput(" ; %s", tmp_stream.str().c_str()); } } diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h index 4f70a9b523..b0e49b3978 100644 --- a/disassembler/disassembler_mips.h +++ b/disassembler/disassembler_mips.h @@ -26,10 +26,11 @@ namespace mips { class DisassemblerMips FINAL : public Disassembler { public: - explicit DisassemblerMips(DisassemblerOptions* options, bool is64bit) : Disassembler(options), - is64bit_(is64bit), - last_ptr_(nullptr), - last_instr_(0) {} + DisassemblerMips(DisassemblerOptions* options, bool is64bit) + : Disassembler(options), + is64bit_(is64bit), + last_ptr_(nullptr), + last_instr_(0) {} size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE; void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE; diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc index 2ead4a2af5..d4574f4f0a 100644 --- a/disassembler/disassembler_x86.cc +++ b/disassembler/disassembler_x86.cc @@ -928,6 +928,11 @@ DISASSEMBLER_ENTRY(cmp, has_modrm = true; load = true; break; + case 0xBD: + opcode1 = "bsr"; + has_modrm = true; + load = true; + break; case 0xBE: opcode1 = "movsxb"; has_modrm = true; @@ -1117,6 +1122,12 @@ DISASSEMBLER_ENTRY(cmp, opcode1 = opcode_tmp.c_str(); } break; + case 0xA5: + opcode1 = (prefix[2] == 0x66 ? "movsw" : "movsl"); + break; + case 0xA7: + opcode1 = (prefix[2] == 0x66 ? "cmpsw" : "cmpsl"); + break; case 0xAF: opcode1 = (prefix[2] == 0x66 ? 
"scasw" : "scasl"); break; diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc index f32488133c..304d4e5860 100644 --- a/imgdiag/imgdiag.cc +++ b/imgdiag/imgdiag.cc @@ -56,7 +56,7 @@ class ImgDiagDumper { image_location_(image_location), image_diff_pid_(image_diff_pid) {} - bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; os << "MAGIC: " << image_header_.GetMagic() << "\n\n"; @@ -92,7 +92,7 @@ class ImgDiagDumper { return str.substr(idx + 1); } - bool DumpImageDiff(pid_t image_diff_pid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool DumpImageDiff(pid_t image_diff_pid) SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; { @@ -140,7 +140,7 @@ class ImgDiagDumper { // Look at /proc/$pid/mem and only diff the things from there bool DumpImageDiffMap(pid_t image_diff_pid, const backtrace_map_t& boot_map) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; const size_t pointer_size = InstructionSetPointerSize( Runtime::Current()->GetInstructionSet()); @@ -548,10 +548,6 @@ class ImgDiagDumper { os << " entryPointFromJni: " << reinterpret_cast<const void*>( art_method->GetEntryPointFromJniPtrSize(pointer_size)) << ", "; - os << " entryPointFromInterpreter: " - << reinterpret_cast<const void*>( - art_method->GetEntryPointFromInterpreterPtrSize(pointer_size)) - << ", "; os << " entryPointFromQuickCompiledCode: " << reinterpret_cast<const void*>( art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)) @@ -631,10 +627,6 @@ class ImgDiagDumper { os << " entryPointFromJni: " << reinterpret_cast<const void*>( art_method->GetEntryPointFromJniPtrSize(pointer_size)) << ", "; - os << " entryPointFromInterpreter: " - << reinterpret_cast<const void*>( - art_method->GetEntryPointFromInterpreterPtrSize(pointer_size)) - << ", "; os << " entryPointFromQuickCompiledCode: " << reinterpret_cast<const void*>( art_method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)) @@ -691,7 +683,7 @@ class ImgDiagDumper { } static std::string GetClassDescriptor(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(klass != nullptr); std::string descriptor; diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index b3801b351f..44b78ff0a3 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -107,7 +107,7 @@ class OatSymbolizer FINAL { const OatFile* oat_file_; }; - explicit OatSymbolizer(const OatFile* oat_file, const std::string& output_name) : + OatSymbolizer(const OatFile* oat_file, const std::string& output_name) : oat_file_(oat_file), builder_(nullptr), output_name_(output_name.empty() ? 
"symbolized.oat" : output_name) { } @@ -346,7 +346,7 @@ class OatDumperOptions { class OatDumper { public: - explicit OatDumper(const OatFile& oat_file, const OatDumperOptions& options) + OatDumper(const OatFile& oat_file, const OatDumperOptions& options) : oat_file_(oat_file), oat_dex_files_(oat_file.GetOatDexFiles()), options_(options), @@ -499,7 +499,7 @@ class OatDumper { return oat_file_.GetOatHeader().GetInstructionSet(); } - const void* GetQuickOatCode(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const void* GetQuickOatCode(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < oat_dex_files_.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; CHECK(oat_dex_file != nullptr); @@ -599,6 +599,9 @@ class OatDumper { os << std::flush; return false; } + + VariableIndentationOutputStream vios(&os); + ScopedIndentation indent1(&vios); for (size_t class_def_index = 0; class_def_index < dex_file->NumClassDefs(); class_def_index++) { @@ -617,10 +620,8 @@ class OatDumper { << " (" << oat_class.GetStatus() << ")" << " (" << oat_class.GetType() << ")\n"; // TODO: include bitmap here if type is kOatClassSomeCompiled? - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); if (options_.list_classes_) continue; - if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def, &stop_analysis)) { + if (!DumpOatClass(&vios, oat_class, *(dex_file.get()), class_def, &stop_analysis)) { success = false; } if (stop_analysis) { @@ -720,20 +721,21 @@ class OatDumper { } } - bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file, + bool DumpOatClass(VariableIndentationOutputStream* vios, + const OatFile::OatClass& oat_class, const DexFile& dex_file, const DexFile::ClassDef& class_def, bool* stop_analysis) { bool success = true; bool addr_found = false; const uint8_t* class_data = dex_file.GetClassData(class_def); if (class_data == nullptr) { // empty class such as a marker interface? - os << std::flush; + vios->Stream() << std::flush; return success; } ClassDataItemIterator it(dex_file, class_data); SkipAllFields(it); uint32_t class_method_index = 0; while (it.HasNextDirectMethod()) { - if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file, + if (!DumpOatMethod(vios, class_def, class_method_index, oat_class, dex_file, it.GetMemberIndex(), it.GetMethodCodeItem(), it.GetRawMemberAccessFlags(), &addr_found)) { success = false; @@ -746,7 +748,7 @@ class OatDumper { it.Next(); } while (it.HasNextVirtualMethod()) { - if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file, + if (!DumpOatMethod(vios, class_def, class_method_index, oat_class, dex_file, it.GetMemberIndex(), it.GetMethodCodeItem(), it.GetRawMemberAccessFlags(), &addr_found)) { success = false; @@ -759,7 +761,7 @@ class OatDumper { it.Next(); } DCHECK(!it.HasNext()); - os << std::flush; + vios->Stream() << std::flush; return success; } @@ -768,7 +770,8 @@ class OatDumper { // When this was picked, the largest arm method was 55,256 bytes and arm64 was 50,412 bytes. 
static constexpr uint32_t kMaxCodeSize = 100 * 1000; - bool DumpOatMethod(std::ostream& os, const DexFile::ClassDef& class_def, + bool DumpOatMethod(VariableIndentationOutputStream* vios, + const DexFile::ClassDef& class_def, uint32_t class_method_index, const OatFile::OatClass& oat_class, const DexFile& dex_file, uint32_t dex_method_idx, const DexFile::CodeItem* code_item, @@ -782,16 +785,11 @@ class OatDumper { } std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true); - os << StringPrintf("%d: %s (dex_method_idx=%d)\n", - class_method_index, pretty_method.c_str(), - dex_method_idx); + vios->Stream() << StringPrintf("%d: %s (dex_method_idx=%d)\n", + class_method_index, pretty_method.c_str(), + dex_method_idx); if (options_.list_methods_) return success; - Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter)); - Indenter indent2_filter(indent1_os->rdbuf(), kIndentChar, kIndentBy1Count); - std::unique_ptr<std::ostream> indent2_os(new std::ostream(&indent2_filter)); - uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index); const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index); const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index); @@ -805,137 +803,147 @@ class OatDumper { } } + // Everything below is indented at least once. + ScopedIndentation indent1(vios); + { - *indent1_os << "DEX CODE:\n"; - DumpDexCode(*indent2_os, dex_file, code_item); + vios->Stream() << "DEX CODE:\n"; + ScopedIndentation indent2(vios); + DumpDexCode(vios->Stream(), dex_file, code_item); } std::unique_ptr<verifier::MethodVerifier> verifier; if (Runtime::Current() != nullptr) { - *indent1_os << "VERIFIER TYPE ANALYSIS:\n"; - verifier.reset(DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item, + vios->Stream() << "VERIFIER TYPE ANALYSIS:\n"; + ScopedIndentation indent2(vios); + verifier.reset(DumpVerifier(vios, + dex_method_idx, &dex_file, class_def, code_item, method_access_flags)); } { - *indent1_os << "OatMethodOffsets "; + vios->Stream() << "OatMethodOffsets "; if (options_.absolute_addresses_) { - *indent1_os << StringPrintf("%p ", oat_method_offsets); + vios->Stream() << StringPrintf("%p ", oat_method_offsets); } - *indent1_os << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset); + vios->Stream() << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset); if (oat_method_offsets_offset > oat_file_.Size()) { - *indent1_os << StringPrintf( + vios->Stream() << StringPrintf( "WARNING: oat method offsets offset 0x%08x is past end of file 0x%08zx.\n", oat_method_offsets_offset, oat_file_.Size()); // If we can't read OatMethodOffsets, the rest of the data is dangerous to read. 
- os << std::flush; + vios->Stream() << std::flush; return false; } - *indent2_os << StringPrintf("code_offset: 0x%08x ", code_offset); + ScopedIndentation indent2(vios); + vios->Stream() << StringPrintf("code_offset: 0x%08x ", code_offset); uint32_t aligned_code_begin = AlignCodeOffset(oat_method.GetCodeOffset()); if (aligned_code_begin > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "code offset 0x%08x is past end of file 0x%08zx.\n", - aligned_code_begin, oat_file_.Size()); + vios->Stream() << StringPrintf("WARNING: " + "code offset 0x%08x is past end of file 0x%08zx.\n", + aligned_code_begin, oat_file_.Size()); success = false; } - *indent2_os << "\n"; + vios->Stream() << "\n"; - *indent2_os << "gc_map: "; + vios->Stream() << "gc_map: "; if (options_.absolute_addresses_) { - *indent2_os << StringPrintf("%p ", oat_method.GetGcMap()); + vios->Stream() << StringPrintf("%p ", oat_method.GetGcMap()); } uint32_t gc_map_offset = oat_method.GetGcMapOffset(); - *indent2_os << StringPrintf("(offset=0x%08x)\n", gc_map_offset); + vios->Stream() << StringPrintf("(offset=0x%08x)\n", gc_map_offset); if (gc_map_offset > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "gc map table offset 0x%08x is past end of file 0x%08zx.\n", - gc_map_offset, oat_file_.Size()); + vios->Stream() << StringPrintf("WARNING: " + "gc map table offset 0x%08x is past end of file 0x%08zx.\n", + gc_map_offset, oat_file_.Size()); success = false; } else if (options_.dump_raw_gc_map_) { - Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent3_os(&indent3_filter); - DumpGcMap(indent3_os, oat_method, code_item); + ScopedIndentation indent3(vios); + DumpGcMap(vios->Stream(), oat_method, code_item); } } { - *indent1_os << "OatQuickMethodHeader "; + vios->Stream() << "OatQuickMethodHeader "; uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset(); const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader(); if (options_.absolute_addresses_) { - *indent1_os << StringPrintf("%p ", method_header); + vios->Stream() << StringPrintf("%p ", method_header); } - *indent1_os << StringPrintf("(offset=0x%08x)\n", method_header_offset); + vios->Stream() << StringPrintf("(offset=0x%08x)\n", method_header_offset); if (method_header_offset > oat_file_.Size()) { - *indent1_os << StringPrintf( + vios->Stream() << StringPrintf( "WARNING: oat quick method header offset 0x%08x is past end of file 0x%08zx.\n", method_header_offset, oat_file_.Size()); // If we can't read the OatQuickMethodHeader, the rest of the data is dangerous to read. - os << std::flush; + vios->Stream() << std::flush; return false; } - *indent2_os << "mapping_table: "; + ScopedIndentation indent2(vios); + vios->Stream() << "mapping_table: "; if (options_.absolute_addresses_) { - *indent2_os << StringPrintf("%p ", oat_method.GetMappingTable()); + vios->Stream() << StringPrintf("%p ", oat_method.GetMappingTable()); } uint32_t mapping_table_offset = oat_method.GetMappingTableOffset(); - *indent2_os << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset()); + vios->Stream() << StringPrintf("(offset=0x%08x)\n", oat_method.GetMappingTableOffset()); if (mapping_table_offset > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "mapping table offset 0x%08x is past end of file 0x%08zx. 
" - "mapping table offset was loaded from offset 0x%08x.\n", - mapping_table_offset, oat_file_.Size(), - oat_method.GetMappingTableOffsetOffset()); + vios->Stream() << StringPrintf("WARNING: " + "mapping table offset 0x%08x is past end of file 0x%08zx. " + "mapping table offset was loaded from offset 0x%08x.\n", + mapping_table_offset, oat_file_.Size(), + oat_method.GetMappingTableOffsetOffset()); success = false; } else if (options_.dump_raw_mapping_table_) { - Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent3_os(&indent3_filter); - DumpMappingTable(indent3_os, oat_method); + ScopedIndentation indent3(vios); + DumpMappingTable(vios, oat_method); } - *indent2_os << "vmap_table: "; + vios->Stream() << "vmap_table: "; if (options_.absolute_addresses_) { - *indent2_os << StringPrintf("%p ", oat_method.GetVmapTable()); + vios->Stream() << StringPrintf("%p ", oat_method.GetVmapTable()); } uint32_t vmap_table_offset = oat_method.GetVmapTableOffset(); - *indent2_os << StringPrintf("(offset=0x%08x)\n", vmap_table_offset); + vios->Stream() << StringPrintf("(offset=0x%08x)\n", vmap_table_offset); if (vmap_table_offset > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "vmap table offset 0x%08x is past end of file 0x%08zx. " - "vmap table offset was loaded from offset 0x%08x.\n", - vmap_table_offset, oat_file_.Size(), - oat_method.GetVmapTableOffsetOffset()); + vios->Stream() << StringPrintf("WARNING: " + "vmap table offset 0x%08x is past end of file 0x%08zx. " + "vmap table offset was loaded from offset 0x%08x.\n", + vmap_table_offset, oat_file_.Size(), + oat_method.GetVmapTableOffsetOffset()); success = false; } else if (options_.dump_vmap_) { - DumpVmapData(*indent2_os, oat_method, code_item); + DumpVmapData(vios, oat_method, code_item); } } { - *indent1_os << "QuickMethodFrameInfo\n"; - - *indent2_os << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes()); - *indent2_os << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask()); - DumpSpillMask(*indent2_os, oat_method.GetCoreSpillMask(), false); - *indent2_os << "\n"; - *indent2_os << StringPrintf("fp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask()); - DumpSpillMask(*indent2_os, oat_method.GetFpSpillMask(), true); - *indent2_os << "\n"; + vios->Stream() << "QuickMethodFrameInfo\n"; + + ScopedIndentation indent2(vios); + vios->Stream() + << StringPrintf("frame_size_in_bytes: %zd\n", oat_method.GetFrameSizeInBytes()); + vios->Stream() << StringPrintf("core_spill_mask: 0x%08x ", oat_method.GetCoreSpillMask()); + DumpSpillMask(vios->Stream(), oat_method.GetCoreSpillMask(), false); + vios->Stream() << "\n"; + vios->Stream() << StringPrintf("fp_spill_mask: 0x%08x ", oat_method.GetFpSpillMask()); + DumpSpillMask(vios->Stream(), oat_method.GetFpSpillMask(), true); + vios->Stream() << "\n"; } { - // Based on spill masks from QuickMethodFrameInfo so placed - // after it is dumped, but useful for understanding quick - // code, so dumped here. - DumpVregLocations(*indent2_os, oat_method, code_item); + // Based on spill masks from QuickMethodFrameInfo so placed + // after it is dumped, but useful for understanding quick + // code, so dumped here. 
+ ScopedIndentation indent2(vios); + DumpVregLocations(vios->Stream(), oat_method, code_item); } { - *indent1_os << "CODE: "; + vios->Stream() << "CODE: "; uint32_t code_size_offset = oat_method.GetQuickCodeSizeOffset(); if (code_size_offset > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "code size offset 0x%08x is past end of file 0x%08zx.", - code_size_offset, oat_file_.Size()); + ScopedIndentation indent2(vios); + vios->Stream() << StringPrintf("WARNING: " + "code size offset 0x%08x is past end of file 0x%08zx.", + code_size_offset, oat_file_.Size()); success = false; } else { const void* code = oat_method.GetQuickCode(); @@ -943,49 +951,52 @@ class OatDumper { uint64_t aligned_code_end = aligned_code_begin + code_size; if (options_.absolute_addresses_) { - *indent1_os << StringPrintf("%p ", code); + vios->Stream() << StringPrintf("%p ", code); } - *indent1_os << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n", - code_offset, - code_size_offset, - code_size, - code != nullptr ? "..." : ""); + vios->Stream() << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n", + code_offset, + code_size_offset, + code_size, + code != nullptr ? "..." : ""); + ScopedIndentation indent2(vios); if (aligned_code_begin > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "start of code at 0x%08x is past end of file 0x%08zx.", - aligned_code_begin, oat_file_.Size()); + vios->Stream() << StringPrintf("WARNING: " + "start of code at 0x%08x is past end of file 0x%08zx.", + aligned_code_begin, oat_file_.Size()); success = false; } else if (aligned_code_end > oat_file_.Size()) { - *indent2_os << StringPrintf("WARNING: " - "end of code at 0x%08" PRIx64 " is past end of file 0x%08zx. " - "code size is 0x%08x loaded from offset 0x%08x.\n", - aligned_code_end, oat_file_.Size(), - code_size, code_size_offset); + vios->Stream() << StringPrintf( + "WARNING: " + "end of code at 0x%08" PRIx64 " is past end of file 0x%08zx. " + "code size is 0x%08x loaded from offset 0x%08x.\n", + aligned_code_end, oat_file_.Size(), + code_size, code_size_offset); success = false; if (options_.disassemble_code_) { if (code_size_offset + kPrologueBytes <= oat_file_.Size()) { - DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes); + DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes); } } } else if (code_size > kMaxCodeSize) { - *indent2_os << StringPrintf("WARNING: " - "code size %d is bigger than max expected threshold of %d. " - "code size is 0x%08x loaded from offset 0x%08x.\n", - code_size, kMaxCodeSize, - code_size, code_size_offset); + vios->Stream() << StringPrintf( + "WARNING: " + "code size %d is bigger than max expected threshold of %d. " + "code size is 0x%08x loaded from offset 0x%08x.\n", + code_size, kMaxCodeSize, + code_size, code_size_offset); success = false; if (options_.disassemble_code_) { if (code_size_offset + kPrologueBytes <= oat_file_.Size()) { - DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes); + DumpCode(vios, verifier.get(), oat_method, code_item, true, kPrologueBytes); } } } else if (options_.disassemble_code_) { - DumpCode(*indent2_os, verifier.get(), oat_method, code_item, !success, 0); + DumpCode(vios, verifier.get(), oat_method, code_item, !success, 0); } } } - os << std::flush; + vios->Stream() << std::flush; return success; } @@ -1013,7 +1024,7 @@ class OatDumper { } // Display data stored at the vmap offset of an oat method.
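The hunks above replace oatdump's old indentation plumbing -- chained Indenter stream buffers (indent1_os, indent2_os) threaded by hand through every helper -- with a single stream wrapper plus RAII scopes, so the indentation depth follows C++ block structure. A minimal sketch of the pattern, using simplified hypothetical classes (not ART's actual VariableIndentationOutputStream, which indents through a streambuf filter):

    #include <iostream>
    #include <string>

    // Simplified stand-in: tracks a depth and prefixes each requested line.
    class IndentingStream {
     public:
      explicit IndentingStream(std::ostream* os) : os_(os) {}
      std::ostream& Stream() { *os_ << std::string(level_ * 2, ' '); return *os_; }
      void Push() { ++level_; }
      void Pop() { --level_; }
     private:
      std::ostream* const os_;
      int level_ = 0;
    };

    // RAII scope: one extra indentation level for the lifetime of the object.
    class ScopedIndent {
     public:
      explicit ScopedIndent(IndentingStream* s) : s_(s) { s_->Push(); }
      ~ScopedIndent() { s_->Pop(); }
     private:
      IndentingStream* const s_;
    };

    int main() {
      IndentingStream vios(&std::cout);
      vios.Stream() << "CODE:\n";
      {
        ScopedIndent indent1(&vios);
        vios.Stream() << "code_offset: 0x00001000\n";  // one level deep
        ScopedIndent indent2(&vios);
        vios.Stream() << "nested detail\n";            // two levels deep
      }
      vios.Stream() << "back at top level\n";          // scopes unwound
    }

Compared with the removed Indenter chains, the scope objects cannot leak an indentation level on an early return, which is why the warning paths above can simply return.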
- void DumpVmapData(std::ostream& os, + void DumpVmapData(VariableIndentationOutputStream* vios, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item) { if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) { @@ -1022,24 +1033,30 @@ class OatDumper { if (raw_code_info != nullptr) { CodeInfo code_info(raw_code_info); DCHECK(code_item != nullptr); - DumpCodeInfo(os, code_info, oat_method, *code_item); - } + ScopedIndentation indent1(vios); + DumpCodeInfo(vios, code_info, oat_method, *code_item); + } + } else if (IsMethodGeneratedByDexToDexCompiler(oat_method, code_item)) { + // We don't encode the size in the table, so just emit that we have quickened + // information. + ScopedIndentation indent(vios); + vios->Stream() << "quickened data\n"; } else { // Otherwise, display the vmap table. const uint8_t* raw_table = oat_method.GetVmapTable(); if (raw_table != nullptr) { VmapTable vmap_table(raw_table); - DumpVmapTable(os, oat_method, vmap_table); + DumpVmapTable(vios->Stream(), oat_method, vmap_table); } } } // Display a CodeInfo object emitted by the optimizing compiler. - void DumpCodeInfo(std::ostream& os, + void DumpCodeInfo(VariableIndentationOutputStream* vios, const CodeInfo& code_info, const OatFile::OatMethod& oat_method, const DexFile::CodeItem& code_item) { - code_info.Dump(os, + code_info.Dump(vios, oat_method.GetCodeOffset(), code_item.registers_size_, options_.dump_code_info_stack_maps_); @@ -1177,48 +1194,50 @@ class OatDumper { } } - void DumpMappingTable(std::ostream& os, const OatFile::OatMethod& oat_method) { + void DumpMappingTable(VariableIndentationOutputStream* vios, + const OatFile::OatMethod& oat_method) { const void* quick_code = oat_method.GetQuickCode(); if (quick_code == nullptr) { return; } MappingTable table(oat_method.GetMappingTable()); if (table.TotalSize() != 0) { - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent_os(&indent_filter); if (table.PcToDexSize() != 0) { typedef MappingTable::PcToDexIterator It; - os << "suspend point mappings {\n"; + vios->Stream() << "suspend point mappings {\n"; for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) { - indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc()); + ScopedIndentation indent1(vios); + vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc()); } - os << "}\n"; + vios->Stream() << "}\n"; } if (table.DexToPcSize() != 0) { typedef MappingTable::DexToPcIterator It; - os << "catch entry mappings {\n"; + vios->Stream() << "catch entry mappings {\n"; for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) { - indent_os << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc()); + ScopedIndentation indent1(vios); + vios->Stream() << StringPrintf("0x%04x -> 0x%04x\n", cur.NativePcOffset(), cur.DexPc()); } - os << "}\n"; + vios->Stream() << "}\n"; } } } - uint32_t DumpInformationAtOffset(std::ostream& os, + uint32_t DumpInformationAtOffset(VariableIndentationOutputStream* vios, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item, size_t offset, bool suspend_point_mapping) { if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) { if (suspend_point_mapping) { - DumpDexRegisterMapAtOffset(os, oat_method, code_item, offset); + ScopedIndentation indent1(vios); + DumpDexRegisterMapAtOffset(vios, oat_method, code_item, offset); } // The return value is not used in the case of a method compiled // 
with the optimizing compiler. return DexFile::kDexNoIndex; } else { return DumpMappingAtOffset(vios->Stream(), oat_method, offset, suspend_point_mapping); } } @@ -1331,10 +1350,24 @@ class OatDumper { // If the native GC map is null and the Dex `code_item` is not // null, then this method has been compiled with the optimizing // compiler. - return oat_method.GetGcMap() == nullptr && code_item != nullptr; + return oat_method.GetQuickCode() != nullptr && + oat_method.GetGcMap() == nullptr && + code_item != nullptr; } - void DumpDexRegisterMapAtOffset(std::ostream& os, + // Has `oat_method` -- corresponding to the Dex `code_item` -- been compiled by + // the dex-to-dex compiler? + static bool IsMethodGeneratedByDexToDexCompiler(const OatFile::OatMethod& oat_method, + const DexFile::CodeItem* code_item) { + // If the quick code is null, the Dex `code_item` is not + // null, and the vmap table is not null, then this method has been compiled + // with the dex-to-dex compiler. + return oat_method.GetQuickCode() == nullptr && + oat_method.GetVmapTable() != nullptr && + code_item != nullptr; + } + + void DumpDexRegisterMapAtOffset(VariableIndentationOutputStream* vios, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item, size_t offset) { @@ -1349,13 +1382,14 @@ class OatDumper { StackMapEncoding encoding = code_info.ExtractEncoding(); StackMap stack_map = code_info.GetStackMapForNativePcOffset(offset, encoding); if (stack_map.IsValid()) { - stack_map.Dump( - os, code_info, encoding, oat_method.GetCodeOffset(), code_item->registers_size_); + stack_map.Dump(vios, code_info, encoding, oat_method.GetCodeOffset(), + code_item->registers_size_); } } } - verifier::MethodVerifier* DumpVerifier(std::ostream& os, uint32_t dex_method_idx, + verifier::MethodVerifier* DumpVerifier(VariableIndentationOutputStream* vios, + uint32_t dex_method_idx, const DexFile* dex_file, const DexFile::ClassDef& class_def, const DexFile::CodeItem* code_item, @@ -1367,14 +1401,15 @@ class OatDumper { hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file))); DCHECK(options_.class_loader_ != nullptr); return verifier::MethodVerifier::VerifyMethodAndDump( - soa.Self(), os, dex_method_idx, dex_file, dex_cache, *options_.class_loader_, &class_def, - code_item, nullptr, method_access_flags); + soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_, + &class_def, code_item, nullptr, method_access_flags); } return nullptr; } - void DumpCode(std::ostream& os, verifier::MethodVerifier* verifier, + void DumpCode(VariableIndentationOutputStream* vios, + verifier::MethodVerifier* verifier, const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item, bool bad_input, size_t code_size) { const void* quick_code = oat_method.GetQuickCode(); @@ -1383,22 +1418,23 @@ class OatDumper { code_size = oat_method.GetQuickCodeSize(); } if (code_size == 0 || quick_code == nullptr) { - os << "NO CODE!\n"; + vios->Stream() << "NO CODE!\n"; return; } else { const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code); size_t offset = 0; while (offset < code_size) { if (!bad_input) { - DumpInformationAtOffset(os, oat_method, code_item, offset, false); + DumpInformationAtOffset(vios, oat_method, code_item, offset, false); } - offset += disassembler_->Dump(os, quick_native_pc + offset); + offset += disassembler_->Dump(vios->Stream(), quick_native_pc + offset); if (!bad_input) { - uint32_t dex_pc =
DumpInformationAtOffset(os, oat_method, code_item, offset, true); + uint32_t dex_pc = + DumpInformationAtOffset(vios, oat_method, code_item, offset, true); if (dex_pc != DexFile::kDexNoIndex) { - DumpGcMapAtNativePcOffset(os, oat_method, code_item, offset); + DumpGcMapAtNativePcOffset(vios->Stream(), oat_method, code_item, offset); if (verifier != nullptr) { - DumpVRegsAtDexPc(os, verifier, oat_method, code_item, dex_pc); + DumpVRegsAtDexPc(vios->Stream(), verifier, oat_method, code_item, dex_pc); } } } @@ -1417,15 +1453,19 @@ class OatDumper { class ImageDumper { public: - explicit ImageDumper(std::ostream* os, gc::space::ImageSpace& image_space, - const ImageHeader& image_header, OatDumperOptions* oat_dumper_options) + ImageDumper(std::ostream* os, gc::space::ImageSpace& image_space, + const ImageHeader& image_header, OatDumperOptions* oat_dumper_options) : os_(os), + vios_(os), + indent1_(&vios_), image_space_(image_space), image_header_(image_header), oat_dumper_options_(oat_dumper_options) {} - bool Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *os_; + std::ostream& indent_os = vios_.Stream(); + os << "MAGIC: " << image_header_.GetMagic() << "\n\n"; os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n"; @@ -1453,20 +1493,17 @@ class ImageDumper { { os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots()) << "\n"; - Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent1_os(&indent1_filter); static_assert(arraysize(image_roots_descriptions_) == static_cast<size_t>(ImageHeader::kImageRootsMax), "sizes must match"); for (int i = 0; i < ImageHeader::kImageRootsMax; i++) { ImageHeader::ImageRoot image_root = static_cast<ImageHeader::ImageRoot>(i); const char* image_root_description = image_roots_descriptions_[i]; mirror::Object* image_root_object = image_header_.GetImageRoot(image_root); - indent1_os << StringPrintf("%s: %p\n", image_root_description, image_root_object); + indent_os << StringPrintf("%s: %p\n", image_root_description, image_root_object); if (image_root_object->IsObjectArray()) { - Indenter indent2_filter(indent1_os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent2_os(&indent2_filter); mirror::ObjectArray<mirror::Object>* image_root_object_array = image_root_object->AsObjectArray<mirror::Object>(); + ScopedIndentation indent2(&vios_); for (int j = 0; j < image_root_object_array->GetLength(); j++) { mirror::Object* value = image_root_object_array->Get(j); size_t run = 0; @@ -1478,20 +1515,22 @@ class ImageDumper { } } if (run == 0) { - indent2_os << StringPrintf("%d: ", j); + indent_os << StringPrintf("%d: ", j); } else { - indent2_os << StringPrintf("%d to %zd: ", j, j + run); + indent_os << StringPrintf("%d to %zd: ", j, j + run); j = j + run; } if (value != nullptr) { - PrettyObjectValue(indent2_os, value->GetClass(), value); + PrettyObjectValue(indent_os, value->GetClass(), value); } else { - indent2_os << j << ": null\n"; + indent_os << j << ": null\n"; } } } } + } + { os << "METHOD ROOTS\n"; static_assert(arraysize(image_methods_descriptions_) == static_cast<size_t>(ImageHeader::kImageMethodsCount), "sizes must match"); @@ -1499,7 +1538,7 @@ class ImageDumper { auto image_root = static_cast<ImageHeader::ImageMethod>(i); const char* description = image_methods_descriptions_[i]; auto* image_method = image_header_.GetImageMethod(image_root); - indent1_os << StringPrintf("%s: %p\n", description, 
image_method); + indent_os << StringPrintf("%s: %p\n", description, image_method); } } os << "\n"; @@ -1556,11 +1595,6 @@ class ImageDumper { } } { - std::ostream* saved_os = os_; - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent_os(&indent_filter); - os_ = &indent_os; - // Mark dex caches. dex_cache_arrays_.clear(); { @@ -1581,22 +1615,15 @@ class ImageDumper { // TODO: Dump fields. // Dump methods after. const auto& methods_section = image_header_.GetMethodsSection(); - const auto pointer_size = + const size_t pointer_size = InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()); - const auto method_size = ArtMethod::ObjectSize(pointer_size); - for (size_t pos = 0; pos < methods_section.Size(); pos += method_size) { - auto* method = reinterpret_cast<ArtMethod*>( - image_space->Begin() + pos + methods_section.Offset()); - indent_os << method << " " << " ArtMethod: " << PrettyMethod(method) << "\n"; - DumpMethod(method, this, indent_os); - indent_os << "\n"; - } + DumpArtMethodVisitor visitor(this); + methods_section.VisitPackedArtMethods(&visitor, image_space->Begin(), pointer_size); } } // Dump the large objects separately. heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this); indent_os << "\n"; - os_ = saved_os; } os << "STATS:\n" << std::flush; std::unique_ptr<File> file(OS::OpenFileForReading(image_filename.c_str())); @@ -1613,14 +1640,21 @@ class ImageDumper { const auto& intern_section = image_header_.GetImageSection( ImageHeader::kSectionInternedStrings); stats_.header_bytes = header_bytes; - size_t alignment_bytes = RoundUp(header_bytes, kObjectAlignment) - header_bytes; - stats_.alignment_bytes += alignment_bytes; + stats_.alignment_bytes += RoundUp(header_bytes, kObjectAlignment) - header_bytes; + // Add padding between the field and method section. + // (Field section is 4-byte aligned, method section is 8-byte aligned on 64-bit targets.) + stats_.alignment_bytes += + method_section.Offset() - (field_section.Offset() + field_section.Size()); + // Add padding between the method section and the intern table. + // (Method section is 4-byte aligned on 32-bit targets, intern table is 8-byte aligned.) 
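The two padding comments above, together with the statement that follows, account for the gaps the image leaves between consecutively aligned sections. A worked sketch of that arithmetic with made-up offsets (all values here are illustrative, not a real image layout):

    #include <cstddef>
    #include <cstdio>

    // Next multiple of `alignment` at or above `x`, as the stats code uses it.
    static size_t RoundUp(size_t x, size_t alignment) {
      return (x + alignment - 1) / alignment * alignment;
    }

    int main() {
      const size_t kObjectAlignment = 8;          // assumed for the sketch
      size_t header_bytes = 172;                  // hypothetical header size
      size_t field_off = 5000, field_size = 996;  // field section (4-byte aligned)
      size_t method_off = 6000;                   // method section start
      size_t alignment_bytes = 0;
      // Padding from the end of the header to the first aligned object.
      alignment_bytes += RoundUp(header_bytes, kObjectAlignment) - header_bytes;  // 4
      // Gap between the end of the field section and the method section.
      alignment_bytes += method_off - (field_off + field_size);                   // 4
      std::printf("alignment_bytes = %zu\n", alignment_bytes);                    // prints 8
    }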
+ stats_.alignment_bytes += + intern_section.Offset() - (method_section.Offset() + method_section.Size()); stats_.alignment_bytes += bitmap_section.Offset() - image_header_.GetImageSize(); stats_.bitmap_bytes += bitmap_section.Size(); stats_.art_field_bytes += field_section.Size(); stats_.art_method_bytes += method_section.Size(); stats_.interned_strings_bytes += intern_section.Size(); - stats_.Dump(os); + stats_.Dump(os, indent_os); os << "\n"; os << std::flush; @@ -1629,8 +1663,23 @@ class ImageDumper { } private: + class DumpArtMethodVisitor : public ArtMethodVisitor { + public: + explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {} + + virtual void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::ostream& indent_os = image_dumper_->vios_.Stream(); + indent_os << method << " " << " ArtMethod: " << PrettyMethod(method) << "\n"; + image_dumper_->DumpMethod(method, image_dumper_, indent_os); + indent_os << "\n"; + } + + private: + ImageDumper* const image_dumper_; + }; + static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(type != nullptr); if (value == nullptr) { os << StringPrintf("null %s\n", PrettyDescriptor(type).c_str()); @@ -1647,7 +1696,7 @@ class ImageDumper { } static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { os << StringPrintf("%s: ", field->GetName()); switch (field->GetTypeAsPrimitiveType()) { case Primitive::kPrimLong: @@ -1700,14 +1749,13 @@ class ImageDumper { } static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* super = klass->GetSuperClass(); if (super != nullptr) { DumpFields(os, obj, super); } - ArtField* fields = klass->GetIFields(); - for (size_t i = 0, count = klass->NumInstanceFields(); i < count; i++) { - PrintField(os, &fields[i], obj); + for (ArtField& field : klass->GetIFields()) { + PrintField(os, &field, obj); } } @@ -1716,7 +1764,7 @@ class ImageDumper { } const void* GetQuickOatCodeBegin(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize( InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet())); if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) { @@ -1729,7 +1777,7 @@ class ImageDumper { } uint32_t GetQuickOatCodeSize(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m)); if (oat_code_begin == nullptr) { return 0; @@ -1738,7 +1786,7 @@ class ImageDumper { } const void* GetQuickOatCodeEnd(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m)); if (oat_code_begin == nullptr) { return nullptr; @@ -1746,7 +1794,7 @@ class ImageDumper { return oat_code_begin + GetQuickOatCodeSize(m); } - static void Callback(mirror::Object* obj, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) { 
DCHECK(obj != nullptr); DCHECK(arg != nullptr); ImageDumper* state = reinterpret_cast<ImageDumper*>(arg); @@ -1759,7 +1807,8 @@ class ImageDumper { state->stats_.object_bytes += object_bytes; state->stats_.alignment_bytes += alignment_bytes; - std::ostream& os = *state->os_; + std::ostream& os = state->vios_.Stream(); + mirror::Class* obj_class = obj->GetClass(); if (obj_class->IsArrayClass()) { os << StringPrintf("%p: %s length:%d\n", obj, PrettyDescriptor(obj_class).c_str(), @@ -1774,9 +1823,8 @@ class ImageDumper { } else { os << StringPrintf("%p: %s\n", obj, PrettyDescriptor(obj_class).c_str()); } - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent_os(&indent_filter); - DumpFields(indent_os, obj, obj_class); + ScopedIndentation indent1(&state->vios_); + DumpFields(os, obj, obj_class); const auto image_pointer_size = InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet()); if (obj->IsObjectArray()) { @@ -1792,25 +1840,22 @@ class ImageDumper { } } if (run == 0) { - indent_os << StringPrintf("%d: ", i); + os << StringPrintf("%d: ", i); } else { - indent_os << StringPrintf("%d to %zd: ", i, i + run); + os << StringPrintf("%d to %zd: ", i, i + run); i = i + run; } mirror::Class* value_class = (value == nullptr) ? obj_class->GetComponentType() : value->GetClass(); - PrettyObjectValue(indent_os, value_class, value); + PrettyObjectValue(os, value_class, value); } } else if (obj->IsClass()) { mirror::Class* klass = obj->AsClass(); - ArtField* sfields = klass->GetSFields(); - const size_t num_fields = klass->NumStaticFields(); - if (num_fields != 0) { - indent_os << "STATICS:\n"; - Indenter indent2_filter(indent_os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent2_os(&indent2_filter); - for (size_t i = 0; i < num_fields; i++) { - PrintField(indent2_os, &sfields[i], sfields[i].GetDeclaringClass()); + if (klass->NumStaticFields() != 0) { + os << "STATICS:\n"; + ScopedIndentation indent2(&state->vios_); + for (ArtField& field : klass->GetSFields()) { + PrintField(os, &field, field.GetDeclaringClass()); } } } else { @@ -1826,9 +1871,9 @@ class ImageDumper { for (int32_t j = i + 1; j < length && elem == arr->GetElementPtrSize<void*>(j, image_pointer_size); j++, run++) { } if (run == 0) { - indent_os << StringPrintf("%d: ", i); + os << StringPrintf("%d: ", i); } else { - indent_os << StringPrintf("%d to %zd: ", i, i + run); + os << StringPrintf("%d to %zd: ", i, i + run); i = i + run; } auto offset = reinterpret_cast<uint8_t*>(elem) - state->image_space_.Begin(); @@ -1840,7 +1885,7 @@ class ImageDumper { } else { msg = "Unknown type"; } - indent_os << StringPrintf("%p %s\n", elem, msg.c_str()); + os << StringPrintf("%p %s\n", elem, msg.c_str()); } } } @@ -1849,7 +1894,7 @@ class ImageDumper { } void DumpMethod(ArtMethod* method, ImageDumper* state, std::ostream& indent_os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(method != nullptr); const auto image_pointer_size = InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet()); @@ -1889,10 +1934,14 @@ class ImageDumper { state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes; } - size_t vmap_table_bytes = state->ComputeOatSize( - method->GetVmapTable(image_pointer_size), &first_occurrence); - if (first_occurrence) { - state->stats_.vmap_table_bytes += vmap_table_bytes; + size_t vmap_table_bytes = 0u; + if (!method->IsOptimized(image_pointer_size)) { + // Methods compiled with the optimizing compiler have no vmap table.
+ vmap_table_bytes = state->ComputeOatSize( + method->GetVmapTable(image_pointer_size), &first_occurrence); + if (first_occurrence) { + state->stats_.vmap_table_bytes += vmap_table_bytes; + } } const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method); @@ -1913,12 +1962,15 @@ class ImageDumper { } state->stats_.managed_code_bytes_ignoring_deduplication += quick_oat_code_size; + uint32_t method_access_flags = method->GetAccessFlags(); + indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end); - indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd\n", - dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes); + indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd AccessFlags=0x%x\n", + dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes, + method_access_flags); size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes + - vmap_table_bytes + quick_oat_code_size + ArtMethod::ObjectSize(image_pointer_size); + vmap_table_bytes + quick_oat_code_size + ArtMethod::Size(image_pointer_size); double expansion = static_cast<double>(quick_oat_code_size) / static_cast<double>(dex_instruction_bytes); @@ -1971,7 +2023,7 @@ class ImageDumper { std::vector<double> method_outlier_expansion; std::vector<std::pair<std::string, size_t>> oat_dex_file_sizes; - explicit Stats() + Stats() : oat_file_bytes(0), file_bytes(0), header_bytes(0), @@ -2030,7 +2082,7 @@ class ImageDumper { } void DumpOutliers(std::ostream& os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t sum_of_sizes = 0; size_t sum_of_sizes_squared = 0; size_t sum_of_expansion = 0; @@ -2130,12 +2182,11 @@ class ImageDumper { os << "\n" << std::flush; } - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Dump(std::ostream& os, std::ostream& indent_os) + SHARED_REQUIRES(Locks::mutator_lock_) { { os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n" << "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n"; - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent_os(&indent_filter); indent_os << StringPrintf("header_bytes = %8zd (%2.0f%% of art file bytes)\n" "object_bytes = %8zd (%2.0f%% of art file bytes)\n" "art_field_bytes = %8zd (%2.0f%% of art file bytes)\n" @@ -2228,7 +2279,13 @@ class ImageDumper { // threshold, we assume 2 bytes per instruction and 2 instructions per block. kLargeMethodDexBytes = 16000 }; + + // For performance, use the *os_ directly for anything that doesn't need indentation + // and prepare an indentation stream with default indentation 1. 
std::ostream* os_; + VariableIndentationOutputStream vios_; + ScopedIndentation indent1_; + gc::space::ImageSpace& image_space_; const ImageHeader& image_header_; std::unique_ptr<OatDumper> oat_dumper_; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 3a155be807..a71197a6ce 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -92,6 +92,32 @@ static bool LocationToFilename(const std::string& location, InstructionSet isa, } } +static const OatHeader* GetOatHeader(const ElfFile* elf_file) { + uint64_t off = 0; + if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) { + return nullptr; + } + + OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + off); + return oat_header; +} + +// This function takes an elf file and reads the current patch delta value +// encoded in its oat header value +static bool ReadOatPatchDelta(const ElfFile* elf_file, off_t* delta, std::string* error_msg) { + const OatHeader* oat_header = GetOatHeader(elf_file); + if (oat_header == nullptr) { + *error_msg = "Unable to get oat header from elf file."; + return false; + } + if (!oat_header->IsValid()) { + *error_msg = "Elf file has an invalid oat header"; + return false; + } + *delta = oat_header->GetImagePatchDelta(); + return true; +} + bool PatchOat::Patch(const std::string& image_location, off_t delta, File* output_image, InstructionSet isa, TimingLogger* timings) { @@ -419,24 +445,43 @@ bool PatchOat::ReplaceOatFileWithSymlink(const std::string& input_oat_filename, return true; } +class PatchOatArtFieldVisitor : public ArtFieldVisitor { + public: + explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {} + + void Visit(ArtField* field) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ArtField* const dest = patch_oat_->RelocatedCopyOf(field); + dest->SetDeclaringClass(patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass())); + } + + private: + PatchOat* const patch_oat_; +}; + void PatchOat::PatchArtFields(const ImageHeader* image_header) { + PatchOatArtFieldVisitor visitor(this); const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields); - for (size_t pos = 0; pos < section.Size(); pos += sizeof(ArtField)) { - auto* src = reinterpret_cast<ArtField*>(heap_->Begin() + section.Offset() + pos); - auto* dest = RelocatedCopyOf(src); - dest->SetDeclaringClass(RelocatedAddressOfPointer(src->GetDeclaringClass())); - } + section.VisitPackedArtFields(&visitor, heap_->Begin()); } +class PatchOatArtMethodVisitor : public ArtMethodVisitor { + public: + explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {} + + void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method); + patch_oat_->FixupMethod(method, dest); + } + + private: + PatchOat* const patch_oat_; +}; + void PatchOat::PatchArtMethods(const ImageHeader* image_header) { const auto& section = image_header->GetMethodsSection(); const size_t pointer_size = InstructionSetPointerSize(isa_); - size_t method_size = ArtMethod::ObjectSize(pointer_size); - for (size_t pos = 0; pos < section.Size(); pos += method_size) { - auto* src = reinterpret_cast<ArtMethod*>(heap_->Begin() + section.Offset() + pos); - auto* dest = RelocatedCopyOf(src); - FixupMethod(src, dest); - } + PatchOatArtMethodVisitor visitor(this); + section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size); } class FixupRootVisitor : public RootVisitor { @@ -445,7 +490,7 @@ class 
FixupRootVisitor : public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]); } @@ -453,7 +498,7 @@ class FixupRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr())); } @@ -565,25 +610,6 @@ void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED, copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object); } -const OatHeader* PatchOat::GetOatHeader(const ElfFile* elf_file) { - if (elf_file->Is64Bit()) { - return GetOatHeader<ElfFileImpl64>(elf_file->GetImpl64()); - } else { - return GetOatHeader<ElfFileImpl32>(elf_file->GetImpl32()); - } -} - -template <typename ElfFileImpl> -const OatHeader* PatchOat::GetOatHeader(const ElfFileImpl* elf_file) { - auto rodata_sec = elf_file->FindSectionByName(".rodata"); - if (rodata_sec == nullptr) { - return nullptr; - } - - OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + rodata_sec->sh_offset); - return oat_header; -} - // Called by BitmapCallback void PatchOat::VisitObject(mirror::Object* object) { mirror::Object* copy = RelocatedCopyOf(object); @@ -597,12 +623,12 @@ void PatchOat::VisitObject(mirror::Object* object) { } } PatchOat::PatchVisitor visitor(this, copy); - object->VisitReferences<true, kVerifyNone>(visitor, visitor); + object->VisitReferences<kVerifyNone>(visitor, visitor); if (object->IsClass<kVerifyNone>()) { auto* klass = object->AsClass(); auto* copy_klass = down_cast<mirror::Class*>(copy); - copy_klass->SetSFieldsUnchecked(RelocatedAddressOfPointer(klass->GetSFields())); - copy_klass->SetIFieldsUnchecked(RelocatedAddressOfPointer(klass->GetIFields())); + copy_klass->SetSFieldsPtrUnchecked(RelocatedAddressOfPointer(klass->GetSFieldsPtr())); + copy_klass->SetIFieldsPtrUnchecked(RelocatedAddressOfPointer(klass->GetIFieldsPtr())); copy_klass->SetDirectMethodsPtrUnchecked( RelocatedAddressOfPointer(klass->GetDirectMethodsPtr())); copy_klass->SetVirtualMethodsPtr(RelocatedAddressOfPointer(klass->GetVirtualMethodsPtr())); @@ -651,8 +677,6 @@ void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) { copy->SetDexCacheResolvedTypes(RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes())); copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer( object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size); - copy->SetEntryPointFromInterpreterPtrSize(RelocatedAddressOfPointer( - object->GetEntryPointFromInterpreterPtrSize(pointer_size)), pointer_size); copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer( object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size); } @@ -853,11 +877,11 @@ NO_RETURN static void Usage(const char *fmt, ...) 
{ UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by."); UsageError(" This value may be negative."); UsageError(""); - UsageError(" --patched-image-file=<file.art>: Use the same patch delta as was used to patch"); - UsageError(" the given image file."); + UsageError(" --patched-image-file=<file.art>: Relocate the oat file to be the same as the"); + UsageError(" given image file."); UsageError(""); - UsageError(" --patched-image-location=<file.art>: Use the same patch delta as was used to"); - UsageError(" patch the given image location. If used one must also specify the"); + UsageError(" --patched-image-location=<file.art>: Relocate the oat file to be the same as the"); + UsageError(" image at the given location. If used one must also specify the"); UsageError(" --instruction-set flag. It will search for this image in the same way that"); UsageError(" is done when loading one."); UsageError(""); @@ -973,6 +997,7 @@ static int patchoat(int argc, char **argv) { bool orig_base_offset_set = false; off_t base_delta = 0; bool base_delta_set = false; + bool match_delta = false; std::string patched_image_filename; std::string patched_image_location; bool dump_timings = kIsDebugBuild; @@ -1171,7 +1196,11 @@ static int patchoat(int argc, char **argv) { base_delta_set = true; base_delta = base_offset - orig_base_offset; } else if (!patched_image_filename.empty()) { + if (have_image_files) { + Usage("--patched-image-location should not be used when patching other images"); + } base_delta_set = true; + match_delta = true; std::string error_msg; if (!ReadBaseDelta(patched_image_filename.c_str(), &base_delta, &error_msg)) { Usage(error_msg.c_str(), patched_image_filename.c_str()); @@ -1289,6 +1318,32 @@ static int patchoat(int argc, char **argv) { return EXIT_FAILURE; } + if (match_delta) { + CHECK(!have_image_files); // We will not do this with images. + std::string error_msg; + // Figure out what the current delta is so we can match it to the desired delta. + std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat.get(), PROT_READ, MAP_PRIVATE, + &error_msg)); + off_t current_delta = 0; + if (elf.get() == nullptr) { + LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg; + cleanup(false); + return EXIT_FAILURE; + } else if (!ReadOatPatchDelta(elf.get(), &current_delta, &error_msg)) { + LOG(ERROR) << "Unable to get current delta: " << error_msg; + cleanup(false); + return EXIT_FAILURE; + } + // Up to this point base_delta has been the desired final delta. Convert it into the actual + // amount to shift by, by subtracting the delta that was already applied to the input. + base_delta -= current_delta; + if (!IsAligned<kPageSize>(base_delta)) { + LOG(ERROR) << "Given image file was relocated by an illegal delta"; + cleanup(false); + return EXIT_FAILURE; + } + } + if (debug) { LOG(INFO) << "moving offset by " << base_delta << " (0x" << std::hex << base_delta << ") bytes or " @@ -1315,18 +1370,18 @@ static int patchoat(int argc, char **argv) { new_oat_out); // The order here doesn't matter. If the first one is successfully saved and the second one // erased, ImageSpace will still detect a problem and not use the files.
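In the match_delta branch above, the delta read from the reference image is absolute, while patching always applies a relative shift, so the shift still needed is the difference of the two. A small sketch of that arithmetic and the page-alignment check (kPageSize and all values are assumptions for illustration):

    #include <cstdio>
    #include <sys/types.h>  // off_t

    int main() {
      const off_t kPageSize = 4096;
      off_t desired_delta = 0x5000;  // absolute delta read from the reference image
      off_t current_delta = 0x2000;  // delta already applied to this oat file
      // Remaining relative shift needed to reach the target relocation.
      off_t base_delta = desired_delta - current_delta;  // 0x3000
      if (base_delta % kPageSize != 0) {
        std::fprintf(stderr, "illegal delta: not page aligned\n");
        return 1;
      }
      std::printf("apply delta 0x%lx\n", static_cast<unsigned long>(base_delta));
    }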
- ret = ret && FinishFile(output_image.get(), ret); - ret = ret && FinishFile(output_oat.get(), ret); + ret = FinishFile(output_image.get(), ret); + ret = FinishFile(output_oat.get(), ret); } else if (have_oat_files) { TimingLogger::ScopedTiming pt("patch oat", &timings); ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings, output_oat_fd >= 0, // was it opened from FD? new_oat_out); - ret = ret && FinishFile(output_oat.get(), ret); + ret = FinishFile(output_oat.get(), ret); } else if (have_image_files) { TimingLogger::ScopedTiming pt("patch image", &timings); ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings); - ret = ret && FinishFile(output_image.get(), ret); + ret = FinishFile(output_image.get(), ret); } else { CHECK(false); ret = true; diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index 23abca8c7e..87ecc618eb 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -94,16 +94,16 @@ class PatchOat { bool new_oat_out); // Output oat was newly created? static void BitmapCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { reinterpret_cast<PatchOat*>(arg)->VisitObject(obj); } void VisitObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupMethod(ArtMethod* object, ArtMethod* copy) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FixupNativePointerArray(mirror::PointerArray* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool InHeap(mirror::Object*); // Patches oat in place, modifying the oat_file given to the constructor. @@ -113,13 +113,13 @@ class PatchOat { template <typename ElfFileImpl> bool PatchOatHeader(ElfFileImpl* oat_file); - bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PatchArtFields(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PatchArtMethods(const ImageHeader* image_header) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool PatchImage() SHARED_REQUIRES(Locks::mutator_lock_); + void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); + void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); void PatchInternedStrings(const ImageHeader* image_header) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool WriteElf(File* out); bool WriteImage(File* out); @@ -163,13 +163,6 @@ class PatchOat { return ret; } - // Look up the oat header from any elf file. - static const OatHeader* GetOatHeader(const ElfFile* elf_file); - - // Templatized version to actually look up the oat header - template <typename ElfFileImpl> - static const OatHeader* GetOatHeader(const ElfFileImpl* elf_file); - // Walks through the old image and patches the mmap'd copy of it to the new offset. It does not // change the heap. 
class PatchVisitor { @@ -177,10 +170,15 @@ class PatchOat { PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {} ~PatchVisitor() {} void operator() (mirror::Object* obj, MemberOffset off, bool b) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // For reference classes. void operator() (mirror::Class* cls, mirror::Reference* ref) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + // TODO: Consider using these for updating native class roots? + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + private: PatchOat* const patcher_; mirror::Object* const copy_; @@ -202,6 +200,8 @@ class PatchOat { TimingLogger* timings_; friend class FixupRootVisitor; + friend class PatchOatArtFieldVisitor; + friend class PatchOatArtMethodVisitor; DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat); }; diff --git a/runtime/Android.mk b/runtime/Android.mk index 7f103a4a7d..8f70d30894 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -39,6 +39,7 @@ LIBART_COMMON_SRC_FILES := \ base/unix_file/random_access_file_utils.cc \ check_jni.cc \ class_linker.cc \ + class_table.cc \ common_throws.cc \ debugger.cc \ dex_file.cc \ @@ -98,6 +99,7 @@ LIBART_COMMON_SRC_FILES := \ jit/jit.cc \ jit/jit_code_cache.cc \ jit/jit_instrumentation.cc \ + lambda/box_table.cc \ jni_internal.cc \ jobject_comparator.cc \ linear_alloc.cc \ @@ -311,13 +313,14 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ dex_instruction.h \ dex_instruction_utils.h \ gc_root.h \ - gc/allocator/rosalloc.h \ - gc/collector/gc_type.h \ gc/allocator_type.h \ + gc/allocator/rosalloc.h \ gc/collector_type.h \ + gc/collector/gc_type.h \ + gc/heap.h \ gc/space/region_space.h \ gc/space/space.h \ - gc/heap.h \ + gc/weak_root_state.h \ image.h \ instrumentation.h \ indirect_reference_table.h \ @@ -338,10 +341,13 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \ LIBART_CFLAGS := -DBUILDING_LIBART=1 +LIBART_TARGET_CFLAGS := +LIBART_HOST_CFLAGS := + ifeq ($(MALLOC_IMPL),dlmalloc) - LIBART_CFLAGS += -DUSE_DLMALLOC + LIBART_TARGET_CFLAGS += -DUSE_DLMALLOC else - LIBART_CFLAGS += -DUSE_JEMALLOC + LIBART_TARGET_CFLAGS += -DUSE_JEMALLOC endif # Default dex2oat instruction set features. @@ -387,13 +393,6 @@ define build-libart art_static_or_shared := $(3) include $$(CLEAR_VARS) - # Clang assembler has problem with macros in asm_support_x86.S, http://b/17443165, - # on linux. Yet sdk on mac needs integrated assembler. 
- ifeq ($$(HOST_OS),darwin) - LOCAL_CLANG_ASFLAGS += -integrated-as - else - LOCAL_CLANG_ASFLAGS += -no-integrated-as - endif LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION) ifeq ($$(art_ndebug_or_debug),ndebug) LOCAL_MODULE := libart @@ -437,8 +436,10 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT LOCAL_CFLAGS := $$(LIBART_CFLAGS) LOCAL_LDFLAGS := $$(LIBART_LDFLAGS) ifeq ($$(art_target_or_host),target) + LOCAL_CFLAGS += $$(LIBART_TARGET_CFLAGS) LOCAL_LDFLAGS += $$(LIBART_TARGET_LDFLAGS) else #host + LOCAL_CFLAGS += $$(LIBART_HOST_CFLAGS) LOCAL_LDFLAGS += $$(LIBART_HOST_LDFLAGS) ifeq ($$(art_static_or_shared),static) LOCAL_LDFLAGS += -static @@ -578,4 +579,6 @@ LIBART_HOST_SRC_FILES_32 := LIBART_HOST_SRC_FILES_64 := LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := LIBART_CFLAGS := +LIBART_TARGET_CFLAGS := +LIBART_HOST_CFLAGS := build-libart := diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S index 665d2a3306..44c7649dea 100644 --- a/runtime/arch/arm/asm_support_arm.S +++ b/runtime/arch/arm/asm_support_arm.S @@ -50,6 +50,11 @@ // generated at END. .macro DEF_ENTRY thumb_or_arm, name \thumb_or_arm +// Clang ignores .thumb_func and requires an explicit .thumb. Investigate whether we should still +// carry around the .thumb_func. + .ifc \thumb_or_arm, .thumb_func + .thumb + .endif .type \name, #function .hidden \name // Hide this as a global symbol, so we do not incur plt calls. .global \name diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h index a58aecbc6b..77bb5c8399 100644 --- a/runtime/arch/arm/context_arm.h +++ b/runtime/arch/arm/context_arm.h @@ -35,7 +35,7 @@ class ArmContext : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index 2f2654d4f6..be9af9871d 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -171,6 +171,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = artReadBarrierSlow; } } // namespace art diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc index f8590d3bd2..28d1942f0a 100644 --- a/runtime/arch/arm/instruction_set_features_arm.cc +++ b/runtime/arch/arm/instruction_set_features_arm.cc @@ -16,7 +16,7 @@ #include "instruction_set_features_arm.h" -#if defined(HAVE_ANDROID_OS) && defined(__arm__) +#if defined(__ANDROID__) && defined(__arm__) #include <sys/auxv.h> #include <asm/hwcap.h> #endif @@ -166,7 +166,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() { bool has_div = false; bool has_lpae = false; -#if defined(HAVE_ANDROID_OS) && defined(__arm__) +#if defined(__ANDROID__) && defined(__arm__) uint64_t hwcaps = getauxval(AT_HWCAP); LOG(INFO) << "hwcaps=" << hwcaps; if ((hwcaps & HWCAP_IDIVT) != 0) { diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index ca3ca1d240..f6d954f4f1 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -24,10 +24,9 @@ .extern artDeliverPendingException /* - * Macro 
that sets up the callee save frame to conform with - * Runtime::CreateCalleeSaveMethod(kSaveAll) + * Macro to spill the GPRs. */ -.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2 +.macro SPILL_ALL_CALLEE_SAVE_GPRS push {r4-r11, lr} @ 9 words (36 bytes) of callee saves. .cfi_adjust_cfa_offset 36 .cfi_rel_offset r4, 0 @@ -39,12 +38,19 @@ .cfi_rel_offset r10, 24 .cfi_rel_offset r11, 28 .cfi_rel_offset lr, 32 +.endm + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveAll) + */ +.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2 + SPILL_ALL_CALLEE_SAVE_GPRS @ 9 words (36 bytes) of callee saves. vpush {s16-s31} @ 16 words (64 bytes) of floats. .cfi_adjust_cfa_offset 64 sub sp, #12 @ 3 words of space, bottom word will hold Method* .cfi_adjust_cfa_offset 12 RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1. - THIS_LOAD_REQUIRES_READ_BARRIER ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*. str \rTemp1, [sp, #0] @ Place Method* at bottom of stack. str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame. @@ -72,7 +78,6 @@ sub sp, #4 @ bottom word will hold Method* .cfi_adjust_cfa_offset 4 RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1. - THIS_LOAD_REQUIRES_READ_BARRIER ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*. str \rTemp1, [sp, #0] @ Place Method* at bottom of stack. str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame. @@ -132,7 +137,6 @@ .macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2 SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1. - THIS_LOAD_REQUIRES_READ_BARRIER @ rTemp1 is kRefsAndArgs Method*. ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET] str \rTemp1, [sp, #0] @ Place Method* at bottom of stack. @@ -164,7 +168,6 @@ .cfi_adjust_cfa_offset -40 .endm - .macro RETURN_IF_RESULT_IS_ZERO cbnz r0, 1f @ result non-zero branch over bx lr @ return @@ -325,25 +328,25 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr * On success this wrapper will restore arguments and *jump* to the target, leaving the lr * pointing back to the original caller. */ -.macro INVOKE_TRAMPOLINE c_name, cxx_name +.macro INVOKE_TRAMPOLINE_BODY cxx_name .extern \cxx_name -ENTRY \c_name SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC mov r2, r9 @ pass Thread::Current mov r3, sp - .cfi_adjust_cfa_offset 16 - bl \cxx_name @ (method_idx, this, caller, Thread*, SP) - .cfi_adjust_cfa_offset -16 + bl \cxx_name @ (method_idx, this, Thread*, SP) mov r12, r1 @ save Method*->code_ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME cbz r0, 1f @ did we find the target? 
if not go to exception delivery bx r12 @ tail call to target 1: DELIVER_PENDING_EXCEPTION +.endm +.macro INVOKE_TRAMPOLINE c_name, cxx_name +ENTRY \c_name + INVOKE_TRAMPOLINE_BODY \cxx_name END \c_name .endm -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck @@ -380,17 +383,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo * +-------------------------+ */ ENTRY art_quick_invoke_stub_internal - push {r4, r5, r6, r7, r8, r9, r10, r11, lr} @ spill regs - .cfi_adjust_cfa_offset 16 - .cfi_rel_offset r4, 0 - .cfi_rel_offset r5, 4 - .cfi_rel_offset r6, 8 - .cfi_rel_offset r7, 12 - .cfi_rel_offset r8, 16 - .cfi_rel_offset r9, 20 - .cfi_rel_offset r10, 24 - .cfi_rel_offset r11, 28 - .cfi_rel_offset lr, 32 + SPILL_ALL_CALLEE_SAVE_GPRS @ spill regs (9) mov r11, sp @ save the stack pointer .cfi_def_cfa_register r11 @@ -591,6 +584,59 @@ ENTRY art_quick_check_cast bkpt END art_quick_check_cast +// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude. +.macro POP_REG_NE rReg, offset, rExclude + .ifnc \rReg, \rExclude + ldr \rReg, [sp, #\offset] @ restore rReg + .cfi_restore \rReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER rDest, rObj, offset +#ifdef USE_READ_BARRIER + push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj) + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + .cfi_rel_offset r2, 8 + .cfi_rel_offset r3, 12 + .cfi_rel_offset ip, 16 + .cfi_rel_offset lr, 20 + sub sp, #8 @ push padding + .cfi_adjust_cfa_offset 8 + @ mov r0, r0 @ pass ref in r0 (no-op for now since parameter ref is unused) + .ifnc \rObj, r1 + mov r1, \rObj @ pass rObj + .endif + mov r2, #\offset @ pass offset + bl artReadBarrierSlow @ artReadBarrierSlow(ref, rObj, offset) + @ No need to unpoison return value in r0, artReadBarrierSlow() would do the unpoisoning. + .ifnc \rDest, r0 + mov \rDest, r0 @ save return value in rDest + .endif + add sp, #8 @ pop padding + .cfi_adjust_cfa_offset -8 + POP_REG_NE r0, 0, \rDest @ conditionally restore saved registers + POP_REG_NE r1, 4, \rDest + POP_REG_NE r2, 8, \rDest + POP_REG_NE r3, 12, \rDest + POP_REG_NE ip, 16, \rDest + add sp, #20 + .cfi_adjust_cfa_offset -20 + pop {lr} @ restore lr + .cfi_adjust_cfa_offset -4 + .cfi_restore lr +#else + ldr \rDest, [\rObj, #\offset] + UNPOISON_HEAP_REF \rDest +#endif // USE_READ_BARRIER +.endm + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. @@ -612,15 +658,21 @@ ENTRY art_quick_aput_obj_with_bound_check b art_quick_throw_array_bounds END art_quick_aput_obj_with_bound_check +#ifdef USE_READ_BARRIER + .extern artReadBarrierSlow +#endif .hidden art_quick_aput_obj ENTRY art_quick_aput_obj +#ifdef USE_READ_BARRIER + @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from READ_BARRIER macro. 
+ tst r2, r2 + beq .Ldo_aput_null +#else cbz r2, .Ldo_aput_null - ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET] - UNPOISON_HEAP_REF r3 - ldr ip, [r2, #MIRROR_OBJECT_CLASS_OFFSET] - UNPOISON_HEAP_REF ip - ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] - UNPOISON_HEAP_REF r3 +#endif // USE_READ_BARRIER + READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET cmp r3, ip @ value's type == array's component type - trivial assignability bne .Lcheck_assignability .Ldo_aput: @@ -896,7 +948,7 @@ END art_quick_proxy_invoke_handler */ ENTRY art_quick_imt_conflict_trampoline mov r0, r12 - b art_quick_invoke_interface_trampoline + INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline END art_quick_imt_conflict_trampoline .extern artQuickResolutionTrampoline diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h index 0383ad628a..1c99f3c42d 100644 --- a/runtime/arch/arm64/context_arm64.h +++ b/runtime/arch/arm64/context_arm64.h @@ -35,7 +35,7 @@ class Arm64Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index 2ce2a29bbf..0f06727d0d 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -155,6 +155,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = artReadBarrierSlow; }; } // namespace art diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h index e59ff58954..805131f1fd 100644 --- a/runtime/arch/arm64/instruction_set_features_arm64.h +++ b/runtime/arch/arm64/instruction_set_features_arm64.h @@ -83,9 +83,7 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures { std::string* error_msg) const OVERRIDE; private: - explicit Arm64InstructionSetFeatures(bool smp, - bool needs_a53_835769_fix, - bool needs_a53_843419_fix) + Arm64InstructionSetFeatures(bool smp, bool needs_a53_835769_fix, bool needs_a53_843419_fix) : InstructionSetFeatures(smp), fix_cortex_a53_835769_(needs_a53_835769_fix), fix_cortex_a53_843419_(needs_a53_843419_fix) { diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 614936b93f..8ba3d4392d 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -31,8 +31,6 @@ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) . // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . - THIS_LOAD_REQUIRES_READ_BARRIER - // Loads appropriate callee-save-method. ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ] @@ -95,8 +93,6 @@ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) . // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefOnly] . - THIS_LOAD_REQUIRES_READ_BARRIER - // Loads appropriate callee-save-method. ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ] @@ -251,7 +247,6 @@ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) . 
// xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] . - THIS_LOAD_REQUIRES_READ_BARRIER ldr xIP0, [xIP0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ] SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL @@ -450,9 +445,8 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr * * Clobbers xIP0. */ -.macro INVOKE_TRAMPOLINE c_name, cxx_name +.macro INVOKE_TRAMPOLINE_BODY cxx_name .extern \cxx_name -ENTRY \c_name SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC // Helper signature is always // (method_idx, *this_object, *caller_method, *self, sp) @@ -466,10 +460,13 @@ ENTRY \c_name br xIP0 // tail call to target 1: DELIVER_PENDING_EXCEPTION +.endm +.macro INVOKE_TRAMPOLINE c_name, cxx_name +ENTRY \c_name + INVOKE_TRAMPOLINE_BODY \cxx_name END \c_name .endm -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck @@ -540,18 +537,18 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8 // W10 - temporary add x9, sp, #8 // Destination address is bottom of stack + null. - // Use \@ to differentiate between macro invocations. -.LcopyParams\@: + // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler + // does not have unique-id variables. +1: cmp w2, #0 - beq .LendCopyParams\@ + beq 2f sub w2, w2, #4 // Need 65536 bytes of range. ldr w10, [x1, x2] str w10, [x9, x2] - b .LcopyParams\@ - -.LendCopyParams\@: + b 1b +2: // Store null into ArtMethod* at bottom of frame. str xzr, [sp] .endm @@ -590,26 +587,29 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+8 // Store result (w0/x0/s0/d0) appropriately, depending on resultType. ldrb w10, [x5] + // Check the return type and store the correct register into the jvalue in memory. + // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables. + // Don't set anything for a void type. cmp w10, #'V' - beq .Lexit_art_quick_invoke_stub\@ + beq 3f + // Is it a double? cmp w10, #'D' - bne .Lreturn_is_float\@ + bne 1f str d0, [x4] - b .Lexit_art_quick_invoke_stub\@ + b 3f -.Lreturn_is_float\@: +1: // Is it a float? cmp w10, #'F' - bne .Lreturn_is_int\@ + bne 2f str s0, [x4] - b .Lexit_art_quick_invoke_stub\@ + b 3f - // Just store x0. Doesn't matter if it is 64 or 32 bits. -.Lreturn_is_int\@: +2: // Just store x0. Doesn't matter if it is 64 or 32 bits. str x0, [x4] -.Lexit_art_quick_invoke_stub\@: +3: // Finish up. ldp x2, x19, [xFP, #32] // Restore stack pointer and x19. .cfi_restore x19 mov sp, x2 @@ -1117,6 +1117,62 @@ ENTRY art_quick_check_cast brk 0 // We should not return here... END art_quick_check_cast +// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude. +.macro POP_REG_NE xReg, offset, xExclude + .ifnc \xReg, \xExclude + ldr \xReg, [sp, #\offset] // restore xReg + .cfi_restore \xReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * xDest, wDest and xObj are registers, offset is a defined literal such as + * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle + * name mismatch between instructions. This macro uses the lower 32b of register when possible. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. 
+ */ +.macro READ_BARRIER xDest, wDest, xObj, offset +#ifdef USE_READ_BARRIER + // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned. + stp x0, x1, [sp, #-48]! + .cfi_adjust_cfa_offset 48 + .cfi_rel_offset x0, 0 + .cfi_rel_offset x1, 8 + stp x2, x3, [sp, #16] + .cfi_rel_offset x2, 16 + .cfi_rel_offset x3, 24 + stp x4, xLR, [sp, #32] + .cfi_rel_offset x4, 32 + .cfi_rel_offset x30, 40 + + // mov x0, x0 // pass ref in x0 (no-op for now since parameter ref is unused) + .ifnc \xObj, x1 + mov x1, \xObj // pass xObj + .endif + mov w2, #\offset // pass offset + bl artReadBarrierSlow // artReadBarrierSlow(ref, xObj, offset) + // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning. + .ifnc \wDest, w0 + mov \wDest, w0 // save return value in wDest + .endif + + // Conditionally restore saved registers + POP_REG_NE x0, 0, \xDest + POP_REG_NE x1, 8, \xDest + POP_REG_NE x2, 16, \xDest + POP_REG_NE x3, 24, \xDest + POP_REG_NE x4, 32, \xDest + ldr xLR, [sp, #40] + .cfi_restore x30 + add sp, sp, #48 + .cfi_adjust_cfa_offset -48 +#else + ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest. + UNPOISON_HEAP_REF \wDest +#endif // USE_READ_BARRIER +.endm + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. @@ -1144,17 +1200,17 @@ ENTRY art_quick_aput_obj_with_bound_check b art_quick_throw_array_bounds END art_quick_aput_obj_with_bound_check +#ifdef USE_READ_BARRIER + .extern artReadBarrierSlow +#endif ENTRY art_quick_aput_obj cbz x2, .Ldo_aput_null - ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b + READ_BARRIER x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b // This also zero-extends to x3 - UNPOISON_HEAP_REF w3 - ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b + READ_BARRIER x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET // Heap reference = 32b // This also zero-extends to x4 - UNPOISON_HEAP_REF w4 - ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b + READ_BARRIER x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET // Heap reference = 32b // This also zero-extends to x3 - UNPOISON_HEAP_REF w3 cmp w3, w4 // value's type == array's component type - trivial assignability bne .Lcheck_assignability .Ldo_aput: @@ -1429,9 +1485,10 @@ END art_quick_proxy_invoke_handler * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target method's * dex method index. */ + .extern artInvokeInterfaceTrampoline ENTRY art_quick_imt_conflict_trampoline mov x0, xIP1 - b art_quick_invoke_interface_trampoline + INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline END art_quick_imt_conflict_trampoline ENTRY art_quick_resolution_trampoline diff --git a/runtime/arch/context.h b/runtime/arch/context.h index f86f9ae117..9ef761e981 100644 --- a/runtime/arch/context.h +++ b/runtime/arch/context.h @@ -42,7 +42,7 @@ class Context { // Reads values from callee saves in the given frame. The frame also holds // the method that holds the layout. virtual void FillCalleeSaves(const StackVisitor& fr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Sets the stack pointer value. 
virtual void SetSP(uintptr_t new_sp) = 0; diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc index e6f4e7ab01..99c2d4dc74 100644 --- a/runtime/arch/instruction_set_features_test.cc +++ b/runtime/arch/instruction_set_features_test.cc @@ -18,7 +18,7 @@ #include <gtest/gtest.h> -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/properties.h" #endif @@ -26,7 +26,7 @@ namespace art { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #if defined(__aarch64__) TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromSystemPropertyVariant) { LOG(WARNING) << "Test disabled due to no CPP define for A53 erratum 835769"; @@ -111,7 +111,7 @@ TEST(InstructionSetFeaturesTest, FeaturesFromCpuInfo) { } #endif -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ TEST(InstructionSetFeaturesTest, HostFeaturesFromCppDefines) { std::string error_msg; std::unique_ptr<const InstructionSetFeatures> default_features( diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h index d01b95e5f6..38cf29a6aa 100644 --- a/runtime/arch/mips/context_mips.h +++ b/runtime/arch/mips/context_mips.h @@ -34,7 +34,7 @@ class MipsContext : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h index b1aa3ee63f..f9c53152f6 100644 --- a/runtime/arch/mips/entrypoints_direct_mips.h +++ b/runtime/arch/mips/entrypoints_direct_mips.h @@ -44,7 +44,8 @@ static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) { entrypoint == kQuickCmpgDouble || entrypoint == kQuickCmpgFloat || entrypoint == kQuickCmplDouble || - entrypoint == kQuickCmplFloat; + entrypoint == kQuickCmplFloat || + entrypoint == kQuickReadBarrierSlow; } } // namespace art diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index ff04106f15..4e4b91fdcd 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -267,6 +267,10 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct."); + // Deoptimize + qpoints->pDeoptimize = art_quick_deoptimize; + static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct."); + // Atomic 64-bit load/store qpoints->pA64Load = QuasiAtomic::Read64; static_assert(IsDirectEntrypoint(kQuickA64Load), "Non-direct C stub marked direct."); @@ -275,6 +279,8 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pReadBarrierJni = ReadBarrierJni; static_assert(!IsDirectEntrypoint(kQuickReadBarrierJni), "Non-direct C stub marked direct."); + qpoints->pReadBarrierSlow = artReadBarrierSlow; + static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct."); }; } // namespace art diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc index abe495b46d..8ea78eb900 100644 --- a/runtime/arch/mips/fault_handler_mips.cc +++ b/runtime/arch/mips/fault_handler_mips.cc @@ -17,11 +17,17 @@ #include "fault_handler.h" #include <sys/ucontext.h> 
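A note on the entrypoint wiring above: the MIPS port distinguishes "direct" entrypoints, whose quick-entrypoint slot points straight at a C function, from stub-backed ones, and pins every registration with a static_assert against IsDirectEntrypoint. A minimal sketch of that pattern, using hypothetical simplified types in place of ART's real declarations:

#include <cstdint>

// Hypothetical, simplified mirror of the registration pattern; not ART's
// actual types. kQuickReadBarrierSlow is direct on MIPS because its slot
// holds the C function artReadBarrierSlow with no assembly wrapper.
enum QuickEntrypointEnum { kQuickDeoptimize, kQuickReadBarrierSlow };

static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
  return entrypoint == kQuickReadBarrierSlow;
}

extern "C" void art_quick_deoptimize();                       // assembly stub
extern "C" void* artReadBarrierSlow(void*, void*, uint32_t);  // plain C function

struct QuickEntryPoints {
  void (*pDeoptimize)();
  void* (*pReadBarrierSlow)(void*, void*, uint32_t);
};

void InitEntryPoints(QuickEntryPoints* qpoints) {
  qpoints->pDeoptimize = art_quick_deoptimize;
  static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct.");
  qpoints->pReadBarrierSlow = artReadBarrierSlow;
  static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
}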
+#include "art_method-inl.h" #include "base/macros.h" #include "globals.h" #include "base/logging.h" #include "base/hex_dump.h" +#include "registers_mips.h" +#include "thread.h" +#include "thread-inl.h" +extern "C" void art_quick_throw_stack_overflow(); +extern "C" void art_quick_throw_null_pointer_exception(); // // Mips specific fault handler functions. @@ -33,16 +39,52 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info void* context ATTRIBUTE_UNUSED) { } -void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, - void* context ATTRIBUTE_UNUSED, - ArtMethod** out_method ATTRIBUTE_UNUSED, - uintptr_t* out_return_pc ATTRIBUTE_UNUSED, - uintptr_t* out_sp ATTRIBUTE_UNUSED) { +void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, + ArtMethod** out_method, + uintptr_t* out_return_pc, uintptr_t* out_sp) { + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + *out_sp = static_cast<uintptr_t>(sc->sc_regs[29]); // SP register + VLOG(signals) << "sp: " << *out_sp; + if (*out_sp == 0) { + return; + } + + // In the case of a stack overflow, the stack is not valid and we can't + // get the method from the top of the stack. However it's in a0. + uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr + uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>( + reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips)); + if (overflow_addr == fault_addr) { + *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[4]); // A0 register + } else { + // The method is at the top of the stack. + *out_method = *reinterpret_cast<ArtMethod**>(*out_sp); + } + + // Work out the return PC. This will be the address of the instruction + // following the faulting load/store instruction. + + VLOG(signals) << "pc: " << std::hex + << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc)); + + *out_return_pc = sc->sc_pc + 4; } bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, - void* context ATTRIBUTE_UNUSED) { - return false; + void* context) { + // The code that looks for the catch location needs to know the value of the + // PC at the point of call. For null checks we insert a GC map that is immediately after + // the load/store instruction that might cause the fault. + + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + + sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location + sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception); + sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function + VLOG(signals) << "Generating null pointer exception"; + return true; } bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, @@ -50,8 +92,51 @@ bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBU return false; } -bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, - void* context ATTRIBUTE_UNUSED) { - return false; +// Stack overflow fault handler. +// +// This checks that the fault address is equal to the current stack pointer +// minus the overflow region size (16K typically).
The instruction that // generates this signal is: // // lw zero, -16384(sp) // // It will fault if sp is inside the protected region on the stack. // // If we determine this is a stack overflow we need to move the stack pointer // to the overflow region below the protected region. + +bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) { + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc; + VLOG(signals) << "sigcontext: " << std::hex << sc; + + uintptr_t sp = sc->sc_regs[29]; // SP register + VLOG(signals) << "sp: " << std::hex << sp; + + uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr + VLOG(signals) << "fault_addr: " << std::hex << fault_addr; + VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp << + ", fault_addr: " << fault_addr; + + uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips); + + // Check that the fault address is the value expected for a stack overflow. + if (fault_addr != overflow_addr) { + VLOG(signals) << "Not a stack overflow"; + return false; + } + + VLOG(signals) << "Stack overflow found"; + + // Now arrange for the signal handler to return to art_quick_throw_stack_overflow. + // The value of RA must be the same as it was when we entered the code that + // caused this fault. This will be inserted into a callee save frame by + // the function to which this handler returns (art_quick_throw_stack_overflow). + sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow); + sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function + + // The kernel will now return to the address in sc->sc_pc. + return true; } } // namespace art diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index cc1de43723..4d5004f444 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -79,7 +79,6 @@ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp) lw $t0, 0($t0) - THIS_LOAD_REQUIRES_READ_BARRIER lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t0) sw $t0, 0($sp) # Place Method* at bottom of stack. sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -127,7 +126,6 @@ lw $t0, %got(_ZN3art7Runtime9instance_E)($gp) lw $t0, 0($t0) - THIS_LOAD_REQUIRES_READ_BARRIER lw $t0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t0) sw $t0, 0($sp) # Place Method* at bottom of stack. sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -219,7 +217,6 @@ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY lw $t0, %got(_ZN3art7Runtime9instance_E)($gp) lw $t0, 0($t0) - THIS_LOAD_REQUIRES_READ_BARRIER lw $t0, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t0) sw $t0, 0($sp) # Place Method* at bottom of stack. sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -459,9 +456,8 @@ END art_quick_throw_no_such_method * On success this wrapper will restore arguments and *jump* to the target, leaving the ra * pointing back to the original caller.
*/ -.macro INVOKE_TRAMPOLINE c_name, cxx_name +.macro INVOKE_TRAMPOLINE_BODY cxx_name .extern \cxx_name -ENTRY \c_name SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC move $a2, rSELF # pass Thread::Current jal \cxx_name # (method_idx, this, Thread*, $sp) @@ -474,10 +470,13 @@ ENTRY \c_name nop 1: DELIVER_PENDING_EXCEPTION +.endm +.macro INVOKE_TRAMPOLINE c_name, cxx_name +ENTRY \c_name + INVOKE_TRAMPOLINE_BODY \cxx_name END \c_name .endm -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck @@ -625,6 +624,76 @@ ENTRY art_quick_check_cast END art_quick_check_cast /* + * Restore rReg's value from offset($sp) if rReg is not the same as rExclude. + * nReg is the register number for rReg. + */ +.macro POP_REG_NE rReg, nReg, offset, rExclude + .ifnc \rReg, \rExclude + lw \rReg, \offset($sp) # restore rReg + .cfi_restore \nReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER rDest, rObj, offset +#ifdef USE_READ_BARRIER + # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment. + addiu $sp, $sp, -32 + .cfi_adjust_cfa_offset 32 + sw $ra, 28($sp) + .cfi_rel_offset 31, 28 + sw $t9, 24($sp) + .cfi_rel_offset 25, 24 + sw $t1, 20($sp) + .cfi_rel_offset 9, 20 + sw $t0, 16($sp) + .cfi_rel_offset 8, 16 + sw $a2, 8($sp) # padding slot at offset 12 (padding can be any slot in the 32B) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + sw $a0, 0($sp) + .cfi_rel_offset 4, 0 + + # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused) + .ifnc \rObj, $a1 + move $a1, \rObj # pass rObj + .endif + addiu $a2, $zero, \offset # pass offset + jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset) + addiu $sp, $sp, -16 # Use branch delay slot to reserve argument slots on the stack + # before the call to artReadBarrierSlow. + addiu $sp, $sp, 16 # restore stack after call to artReadBarrierSlow + # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning. + move \rDest, $v0 # save return value in rDest + # (rDest cannot be v0 in art_quick_aput_obj) + + lw $a0, 0($sp) # restore registers except rDest + # (rDest can only be t0 or t1 in art_quick_aput_obj) + .cfi_restore 4 + lw $a1, 4($sp) + .cfi_restore 5 + lw $a2, 8($sp) + .cfi_restore 6 + POP_REG_NE $t0, 8, 16, \rDest + POP_REG_NE $t1, 9, 20, \rDest + lw $t9, 24($sp) + .cfi_restore 25 + lw $ra, 28($sp) # restore $ra + .cfi_restore 31 + addiu $sp, $sp, 32 + .cfi_adjust_cfa_offset -32 +#else + lw \rDest, \offset(\rObj) + UNPOISON_HEAP_REF \rDest +#endif // USE_READ_BARRIER +.endm + + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. 
* a0 = array, a1 = index, a2 = value @@ -646,15 +715,15 @@ ENTRY art_quick_aput_obj_with_bound_check move $a1, $t0 END art_quick_aput_obj_with_bound_check +#ifdef USE_READ_BARRIER + .extern artReadBarrierSlow +#endif ENTRY art_quick_aput_obj beqz $a2, .Ldo_aput_null nop - lw $t0, MIRROR_OBJECT_CLASS_OFFSET($a0) - UNPOISON_HEAP_REF $t0 - lw $t1, MIRROR_OBJECT_CLASS_OFFSET($a2) - UNPOISON_HEAP_REF $t1 - lw $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0) - UNPOISON_HEAP_REF $t0 + READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability nop .Ldo_aput: @@ -1103,9 +1172,8 @@ END art_quick_proxy_invoke_handler * dex method index. */ ENTRY art_quick_imt_conflict_trampoline - la $t9, art_quick_invoke_interface_trampoline - jalr $zero, $t9 move $a0, $t0 + INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline END art_quick_imt_conflict_trampoline .extern artQuickResolutionTrampoline diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h index ebc036cf98..e4a144f420 100644 --- a/runtime/arch/mips64/context_mips64.h +++ b/runtime/arch/mips64/context_mips64.h @@ -34,7 +34,7 @@ class Mips64Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(SP, new_sp); diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc index 321c27bf50..ec02d5ab69 100644 --- a/runtime/arch/mips64/entrypoints_init_mips64.cc +++ b/runtime/arch/mips64/entrypoints_init_mips64.cc @@ -176,6 +176,9 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; + // Deoptimize + qpoints->pDeoptimize = art_quick_deoptimize; + // TODO - use lld/scd instructions for Mips64 // Atomic 64-bit load/store qpoints->pA64Load = QuasiAtomic::Read64; @@ -183,6 +186,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = artReadBarrierSlow; }; } // namespace art diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc index 277c2b2394..4abfcf12ff 100644 --- a/runtime/arch/mips64/fault_handler_mips64.cc +++ b/runtime/arch/mips64/fault_handler_mips64.cc @@ -17,11 +17,17 @@ #include "fault_handler.h" #include <sys/ucontext.h> +#include "art_method-inl.h" #include "base/macros.h" #include "globals.h" #include "base/logging.h" #include "base/hex_dump.h" +#include "registers_mips64.h" +#include "thread.h" +#include "thread-inl.h" +extern "C" void art_quick_throw_stack_overflow(); +extern "C" void art_quick_throw_null_pointer_exception(); // // Mips64 specific fault handler functions. 
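Before the mips64 fault-handler hunks that follow, it helps to restate the aput-obj fast path that every one of these READ_BARRIER conversions feeds. A rough C++ sketch of the control flow the assembly implements, using hypothetical simplified types (the real stub also dispatches to the runtime to throw and performs a card-table mark on the store):

#include <cstddef>

struct Class {
  Class* super_class_ = nullptr;
  Class* component_type_ = nullptr;
  bool IsAssignableFrom(const Class* src) const {  // simplified slow path
    for (const Class* k = src; k != nullptr; k = k->super_class_) {
      if (k == this) return true;
    }
    return false;
  }
};

struct Object { Class* klass_ = nullptr; };

struct ObjectArray {
  Class* klass_ = nullptr;      // an array class whose component_type_ is set
  Object* elements_[16] = {};
};

// Returns false where the real stub raises ArrayStoreException.
bool AputObj(ObjectArray* array, size_t index, Object* value) {
  if (value == nullptr) {                            // .Ldo_aput_null
    array->elements_[index] = nullptr;
    return true;
  }
  Class* array_klass = array->klass_;                // (*) READ_BARRIER
  Class* value_klass = value->klass_;                // (*) READ_BARRIER
  Class* component = array_klass->component_type_;   // (*) READ_BARRIER
  if (component != value_klass &&                    // trivial check failed:
      !component->IsAssignableFrom(value_klass)) {   // .Lcheck_assignability
    return false;
  }
  array->elements_[index] = value;                   // .Ldo_aput (+ card mark)
  return true;
}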
@@ -33,16 +39,52 @@ void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info void* context ATTRIBUTE_UNUSED) { } -void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, - void* context ATTRIBUTE_UNUSED, - ArtMethod** out_method ATTRIBUTE_UNUSED, - uintptr_t* out_return_pc ATTRIBUTE_UNUSED, - uintptr_t* out_sp ATTRIBUTE_UNUSED) { +void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, + ArtMethod** out_method, + uintptr_t* out_return_pc, uintptr_t* out_sp) { + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + *out_sp = static_cast<uintptr_t>(sc->sc_regs[29]); // SP register + VLOG(signals) << "sp: " << *out_sp; + if (*out_sp == 0) { + return; + } + + // In the case of a stack overflow, the stack is not valid and we can't + // get the method from the top of the stack. However it's in a0. + uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr); // BVA addr + uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>( + reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kMips64)); + if (overflow_addr == fault_addr) { + *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[4]); // A0 register + } else { + // The method is at the top of the stack. + *out_method = *reinterpret_cast<ArtMethod**>(*out_sp); + } + + // Work out the return PC. This will be the address of the instruction + // following the faulting load/store instruction. + + VLOG(signals) << "pc: " << std::hex + << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc)); + + *out_return_pc = sc->sc_pc + 4; } bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, - void* context ATTRIBUTE_UNUSED) { - return false; + void* context) { + // The code that looks for the catch location needs to know the value of the + // PC at the point of call. For null checks we insert a GC map that is immediately after + // the load/store instruction that might cause the fault. + + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + + sc->sc_regs[31] = sc->sc_pc + 4; // RA needs to point to gc map location + sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception); + sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function + VLOG(signals) << "Generating null pointer exception"; + return true; } bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, @@ -50,8 +92,51 @@ bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBU return false; } -bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED, - void* context ATTRIBUTE_UNUSED) { - return false; +// Stack overflow fault handler. +// +// This checks that the fault address is equal to the current stack pointer +// minus the overflow region size (16K typically). The instruction that +// generates this signal is: +// +// lw zero, -16384(sp) +// +// It will fault if sp is inside the protected region on the stack. +// +// If we determine this is a stack overflow we need to move the stack pointer +// to the overflow region below the protected region.
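The protected-region scheme this comment describes can be demonstrated outside the runtime. A standalone POSIX sketch, not ART code, that guards the low end of a simulated stack the same way and would deliver SIGSEGV with si_addr inside the guard page:

#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t kPageSize = 4096;
  const size_t kStackSize = 16 * kPageSize;
  // Simulate a thread stack; real stacks grow down toward low addresses.
  void* mem = mmap(nullptr, kStackSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);
  // Protect the lowest page. A probe such as "lw zero, -16384(sp)" issued
  // near the stack limit lands here, and the handler can then compare
  // si_addr against sp - reserved_bytes exactly as Action() does below.
  int rc = mprotect(mem, kPageSize, PROT_NONE);
  assert(rc == 0);
  // *static_cast<volatile uint8_t*>(mem) = 0;  // uncomment to see the fault
  munmap(mem, kStackSize);
  return 0;
}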
+ +bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) { + struct ucontext* uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc; + VLOG(signals) << "sigcontext: " << std::hex << sc; + + uintptr_t sp = sc->sc_regs[29]; // SP register + VLOG(signals) << "sp: " << std::hex << sp; + + uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr); // BVA addr + VLOG(signals) << "fault_addr: " << std::hex << fault_addr; + VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp << + ", fault_addr: " << fault_addr; + + uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kMips64); + + // Check that the fault address is the value expected for a stack overflow. + if (fault_addr != overflow_addr) { + VLOG(signals) << "Not a stack overflow"; + return false; + } + + VLOG(signals) << "Stack overflow found"; + + // Now arrange for the signal handler to return to art_quick_throw_stack_overflow. + // The value of RA must be the same as it was when we entered the code that + // caused this fault. This will be inserted into a callee save frame by + // the function to which this handler returns (art_quick_throw_stack_overflow). + sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow); + sc->sc_regs[25] = sc->sc_pc; // make sure T9 points to the function + + // The kernel will now return to the address in sc->sc_pc. + return true; } } // namespace art diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index 37c6c5b3f9..c30e6ca93f 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -89,7 +89,6 @@ # load appropriate callee-save-method ld $t1, %got(_ZN3art7Runtime9instance_E)($gp) ld $t1, 0($t1) - THIS_LOAD_REQUIRES_READ_BARRIER ld $t1, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET($t1) sd $t1, 0($sp) # Place ArtMethod* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -132,7 +131,6 @@ # load appropriate callee-save-method ld $t1, %got(_ZN3art7Runtime9instance_E)($gp) ld $t1, 0($t1) - THIS_LOAD_REQUIRES_READ_BARRIER ld $t1, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET($t1) sd $t1, 0($sp) # Place Method* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -255,7 +253,6 @@ # load appropriate callee-save-method ld $t1, %got(_ZN3art7Runtime9instance_E)($gp) ld $t1, 0($t1) - THIS_LOAD_REQUIRES_READ_BARRIER ld $t1, RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET($t1) sd $t1, 0($sp) # Place Method* at bottom of stack. sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) # Place sp in Thread::Current()->top_quick_frame. @@ -525,9 +522,8 @@ END art_quick_throw_no_such_method * On success this wrapper will restore arguments and *jump* to the target, leaving the ra * pointing back to the original caller.
*/ -.macro INVOKE_TRAMPOLINE c_name, cxx_name +.macro INVOKE_TRAMPOLINE_BODY cxx_name .extern \cxx_name -ENTRY \c_name SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC move $a2, rSELF # pass Thread::Current jal \cxx_name # (method_idx, this, Thread*, $sp) @@ -541,10 +537,13 @@ ENTRY \c_name nop 1: DELIVER_PENDING_EXCEPTION +.endm +.macro INVOKE_TRAMPOLINE c_name, cxx_name +ENTRY \c_name + INVOKE_TRAMPOLINE_BODY \cxx_name END \c_name .endm -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck @@ -886,6 +885,77 @@ ENTRY art_quick_check_cast move $a2, rSELF # pass Thread::Current END art_quick_check_cast + + /* + * Restore rReg's value from offset($sp) if rReg is not the same as rExclude. + * nReg is the register number for rReg. + */ +.macro POP_REG_NE rReg, nReg, offset, rExclude + .ifnc \rReg, \rExclude + ld \rReg, \offset($sp) # restore rReg + .cfi_restore \nReg + .endif +.endm + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +.macro READ_BARRIER rDest, rObj, offset +#ifdef USE_READ_BARRIER + # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned. + daddiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sd $ra, 56($sp) + .cfi_rel_offset 31, 56 + sd $t9, 48($sp) + .cfi_rel_offset 25, 48 + sd $t1, 40($sp) + .cfi_rel_offset 13, 40 + sd $t0, 32($sp) + .cfi_rel_offset 12, 32 + sd $a2, 16($sp) # padding slot at offset 24 (padding can be any slot in the 64B) + .cfi_rel_offset 6, 16 + sd $a1, 8($sp) + .cfi_rel_offset 5, 8 + sd $a0, 0($sp) + .cfi_rel_offset 4, 0 + + # move $a0, $a0 # pass ref in a0 (no-op for now since parameter ref is unused) + .ifnc \rObj, $a1 + move $a1, \rObj # pass rObj + .endif + daddiu $a2, $zero, \offset # pass offset + jal artReadBarrierSlow # artReadBarrierSlow(ref, rObj, offset) + .cpreturn # Restore gp from t8 in branch delay slot. + # t8 may be clobbered in artReadBarrierSlow. + # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning. + move \rDest, $v0 # save return value in rDest + # (rDest cannot be v0 in art_quick_aput_obj) + + ld $a0, 0($sp) # restore registers except rDest + # (rDest can only be t0 or t1 in art_quick_aput_obj) + .cfi_restore 4 + ld $a1, 8($sp) + .cfi_restore 5 + ld $a2, 16($sp) + .cfi_restore 6 + POP_REG_NE $t0, 12, 32, \rDest + POP_REG_NE $t1, 13, 40, \rDest + ld $t9, 48($sp) + .cfi_restore 25 + ld $ra, 56($sp) # restore $ra + .cfi_restore 31 + daddiu $sp, $sp, 64 + .cfi_adjust_cfa_offset -64 + SETUP_GP # set up gp because we are not returning +#else + lwu \rDest, \offset(\rObj) + UNPOISON_HEAP_REF \rDest +#endif // USE_READ_BARRIER +.endm + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. 
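All three ports now funnel slow-path reference loads through one C entrypoint. Its signature comes from the declarations in this change; the body below is only an illustrative stand-in, since the real implementation depends on the collector:

#include <cstdint>

namespace mirror { struct Object {}; }

// Stand-in only: fetch the 32-bit compressed reference at obj + offset.
// The real slow path may mark or forward the reference, and it unpoisons
// the result, which is why no UNPOISON_HEAP_REF follows these calls.
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* /* ref */,
                                              mirror::Object* obj,
                                              uint32_t offset) {
  uint8_t* raw = reinterpret_cast<uint8_t*>(obj) + offset;
  uint32_t compressed = *reinterpret_cast<uint32_t*>(raw);  // heap refs are 32b
  return reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(compressed));
}

The spill/restore choreography around the call is then uniform across ports: save the caller-save registers, pass (ref, obj, offset), take the result from the return register, and let POP_REG_NE restore everything except the destination register.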
@@ -911,12 +981,9 @@ END art_quick_aput_obj_with_bound_check ENTRY art_quick_aput_obj beq $a2, $zero, .Ldo_aput_null nop - lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0) - UNPOISON_HEAP_REF $t0 - lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2) - UNPOISON_HEAP_REF $t1 - lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0) - UNPOISON_HEAP_REF $t0 + READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET + READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability nop .Ldo_aput: @@ -1369,10 +1436,8 @@ END art_quick_proxy_invoke_handler * dex method index. */ ENTRY art_quick_imt_conflict_trampoline - dla $t9, art_quick_invoke_interface_trampoline - .cpreturn - jalr $zero, $t9 move $a0, $t0 + INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline END art_quick_imt_conflict_trampoline .extern artQuickResolutionTrampoline diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 05b42f5604..e6710ed780 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -126,7 +126,7 @@ class StubTest : public CommonRuntimeTest { // Use the result from r0 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self), [referrer] "r"(referrer) - : "memory"); // clobber. + : "r0", "memory"); // clobber. #elif defined(__aarch64__) __asm__ __volatile__( // Spill x0-x7 which we say we don't clobber. May contain args. @@ -479,7 +479,7 @@ class StubTest : public CommonRuntimeTest { // Use the result from r0 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self), [referrer] "r"(referrer), [hidden] "r"(hidden) - : "memory"); // clobber. + : "r0", "memory"); // clobber. #elif defined(__aarch64__) __asm__ __volatile__( // Spill x0-x7 which we say we don't clobber. May contain args. @@ -1124,8 +1124,6 @@ TEST_F(StubTest, CheckCast) { TEST_F(StubTest, APutObj) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) Thread* self = Thread::Current(); @@ -1258,8 +1256,6 @@ TEST_F(StubTest, APutObj) { } TEST_F(StubTest, AllocObject) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) // This will lead to OOM error messages in the log. 
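The stub_test.cc change adding "r0" to the clobber lists above fixes a classic extended-asm pitfall: any register the statement modifies without declaring it as an output must be named as clobbered, or the compiler may keep a live value there across the asm. A generic ARM illustration of the same pattern (a sketch, not ART's Invoke3 helper):

#include <cstddef>

// ARM-only sketch: blx writes r0 (the return register) and lr, so both are
// clobbered even though the result is explicitly copied out of r0.
size_t CallThrough(void* code, size_t arg0) {
  size_t result;
  __asm__ __volatile__(
      "mov r0, %[arg0]\n\t"      // place the argument
      "blx %[code]\n\t"          // call: writes r0 and lr
      "mov %[result], r0\n\t"    // copy the result out of r0
      : [result] "=r"(result)
      : [code] "r"(code), [arg0] "r"(arg0)
      : "r0", "lr", "memory");   // declare everything the asm changes
  return result;
}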
@@ -1385,8 +1381,6 @@ TEST_F(StubTest, AllocObject) { } TEST_F(StubTest, AllocObjectArray) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) // TODO: Check the "Unresolved" allocation stubs @@ -1474,8 +1468,6 @@ TEST_F(StubTest, AllocObjectArray) { TEST_F(StubTest, StringCompareTo) { - TEST_DISABLED_FOR_READ_BARRIER(); - #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)) // TODO: Check the "Unresolved" allocation stubs @@ -1557,7 +1549,7 @@ TEST_F(StubTest, StringCompareTo) { static void GetSetBooleanStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) constexpr size_t num_values = 5; @@ -1588,7 +1580,7 @@ static void GetSetBooleanStatic(ArtField* f, Thread* self, } static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int8_t values[] = { -128, -64, 0, 64, 127 }; @@ -1619,7 +1611,7 @@ static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer, static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint8_t values[] = { 0, true, 2, 128, 0xFF }; @@ -1654,7 +1646,7 @@ static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thre } static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int8_t values[] = { -128, -64, 0, 64, 127 }; @@ -1689,7 +1681,7 @@ static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f, static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF }; @@ -1719,7 +1711,7 @@ static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer, } static void GetSetShortStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE }; @@ -1750,7 +1742,7 @@ static void GetSetShortStatic(ArtField* f, Thread* self, static void 
GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF }; @@ -1784,7 +1776,7 @@ static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f, } static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE }; @@ -1819,7 +1811,7 @@ static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f, static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF }; @@ -1855,7 +1847,7 @@ static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer, static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF }; @@ -1896,7 +1888,7 @@ static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f, static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { test->Invoke3WithReferrer(static_cast<size_t>(f_idx), reinterpret_cast<size_t>(val), 0U, @@ -1916,7 +1908,7 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test); @@ -1940,7 +1932,7 @@ static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer, static void set_and_check_instance(ArtField* f, mirror::Object* trg, mirror::Object* val, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()), reinterpret_cast<size_t>(trg), reinterpret_cast<size_t>(val), @@ -1963,7 +1955,7 @@ static void set_and_check_instance(ArtField* f, mirror::Object* trg, static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { 
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test); @@ -1986,7 +1978,7 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f, static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ defined(__aarch64__) uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF }; @@ -2017,7 +2009,7 @@ static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer, static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f, Thread* self, ArtMethod* referrer, StubTest* test) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \ defined(__aarch64__) uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF }; @@ -2071,37 +2063,34 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) // Play with it... // Static fields. - ArtField* fields = c->GetSFields(); - size_t num_fields = c->NumStaticFields(); - for (size_t i = 0; i < num_fields; ++i) { - ArtField* f = &fields[i]; - Primitive::Type type = f->GetTypeAsPrimitiveType(); + for (ArtField& f : c->GetSFields()) { + Primitive::Type type = f.GetTypeAsPrimitiveType(); if (test_type != type) { continue; } switch (type) { case Primitive::Type::kPrimBoolean: - GetSetBooleanStatic(f, self, m, test); + GetSetBooleanStatic(&f, self, m, test); break; case Primitive::Type::kPrimByte: - GetSetByteStatic(f, self, m, test); + GetSetByteStatic(&f, self, m, test); break; case Primitive::Type::kPrimChar: - GetSetCharStatic(f, self, m, test); + GetSetCharStatic(&f, self, m, test); break; case Primitive::Type::kPrimShort: - GetSetShortStatic(f, self, m, test); + GetSetShortStatic(&f, self, m, test); break; case Primitive::Type::kPrimInt: - GetSet32Static(f, self, m, test); + GetSet32Static(&f, self, m, test); break; case Primitive::Type::kPrimLong: - GetSet64Static(f, self, m, test); + GetSet64Static(&f, self, m, test); break; case Primitive::Type::kPrimNot: // Don't try array. - if (f->GetTypeDescriptor()[0] != '[') { - GetSetObjStatic(f, self, m, test); + if (f.GetTypeDescriptor()[0] != '[') { + GetSetObjStatic(&f, self, m, test); } break; default: @@ -2110,37 +2099,34 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) } // Instance fields. 
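The SHARED_LOCKS_REQUIRED to SHARED_REQUIRES renames running through this file track Clang's newer thread-safety-analysis vocabulary. A minimal sketch of how such a macro is commonly defined and used, assuming Clang's documented attributes rather than ART's actual base/macros.h definitions:

#if defined(__clang__)
#define CAPABILITY(x)        __attribute__((capability(x)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define SHARED_REQUIRES(...)
#endif

class CAPABILITY("mutex") MutatorLock {};
extern MutatorLock* mutator_lock_;

// With -Wthread-safety, Clang warns at any call site that cannot prove the
// calling thread holds mutator_lock_ at least for reading.
void FillCalleeSaves() SHARED_REQUIRES(mutator_lock_);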
- fields = c->GetIFields(); - num_fields = c->NumInstanceFields(); - for (size_t i = 0; i < num_fields; ++i) { - ArtField* f = &fields[i]; - Primitive::Type type = f->GetTypeAsPrimitiveType(); + for (ArtField& f : c->GetIFields()) { + Primitive::Type type = f.GetTypeAsPrimitiveType(); if (test_type != type) { continue; } switch (type) { case Primitive::Type::kPrimBoolean: - GetSetBooleanInstance(&obj, f, self, m, test); + GetSetBooleanInstance(&obj, &f, self, m, test); break; case Primitive::Type::kPrimByte: - GetSetByteInstance(&obj, f, self, m, test); + GetSetByteInstance(&obj, &f, self, m, test); break; case Primitive::Type::kPrimChar: - GetSetCharInstance(&obj, f, self, m, test); + GetSetCharInstance(&obj, &f, self, m, test); break; case Primitive::Type::kPrimShort: - GetSetShortInstance(&obj, f, self, m, test); + GetSetShortInstance(&obj, &f, self, m, test); break; case Primitive::Type::kPrimInt: - GetSet32Instance(&obj, f, self, m, test); + GetSet32Instance(&obj, &f, self, m, test); break; case Primitive::Type::kPrimLong: - GetSet64Instance(&obj, f, self, m, test); + GetSet64Instance(&obj, &f, self, m, test); break; case Primitive::Type::kPrimNot: // Don't try array. - if (f->GetTypeDescriptor()[0] != '[') { - GetSetObjInstance(&obj, f, self, m, test); + if (f.GetTypeDescriptor()[0] != '[') { + GetSetObjInstance(&obj, &f, self, m, test); } break; default: @@ -2152,8 +2138,6 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) } TEST_F(StubTest, Fields8) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2166,8 +2150,6 @@ TEST_F(StubTest, Fields8) { } TEST_F(StubTest, Fields16) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2180,8 +2162,6 @@ TEST_F(StubTest, Fields16) { } TEST_F(StubTest, Fields32) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2193,8 +2173,6 @@ TEST_F(StubTest, Fields32) { } TEST_F(StubTest, FieldsObj) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2206,8 +2184,6 @@ TEST_F(StubTest, FieldsObj) { } TEST_F(StubTest, Fields64) { - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); self->TransitionFromSuspendedToRunnable(); @@ -2221,8 +2197,6 @@ TEST_F(StubTest, Fields64) { TEST_F(StubTest, IMT) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); ScopedObjectAccess soa(self); @@ -2342,8 +2316,6 @@ TEST_F(StubTest, IMT) { TEST_F(StubTest, StringIndexOf) { #if defined(__arm__) || defined(__aarch64__) - TEST_DISABLED_FOR_READ_BARRIER(); - Thread* self = Thread::Current(); ScopedObjectAccess soa(self); // garbage is created during ClassLinker::Init @@ -2416,4 +2388,40 @@ TEST_F(StubTest, StringIndexOf) { #endif } +TEST_F(StubTest, ReadBarrier) { +#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \ + defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__))) + Thread* self = Thread::Current(); + + const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow); + + // Create an object + ScopedObjectAccess soa(self); + // garbage is created during ClassLinker::Init + + StackHandleScope<2> hs(soa.Self()); 
+ Handle<mirror::Class> c( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"))); + + // Build an object instance + Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self()))); + + EXPECT_FALSE(self->IsExceptionPending()); + + size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()), + mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self); + + EXPECT_FALSE(self->IsExceptionPending()); + EXPECT_NE(reinterpret_cast<size_t>(nullptr), result); + mirror::Class* klass = reinterpret_cast<mirror::Class*>(result); + EXPECT_EQ(klass, obj->GetClass()); + + // Tests done. +#else + LOG(INFO) << "Skipping read_barrier_slow"; + // Force-print to std::cout so it's also outside the logcat. + std::cout << "Skipping read_barrier_slow" << std::endl; +#endif +} + } // namespace art diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S index 2159f0e717..77b8e87c99 100644 --- a/runtime/arch/x86/asm_support_x86.S +++ b/runtime/arch/x86/asm_support_x86.S @@ -19,61 +19,53 @@ #include "asm_support_x86.h" -#if defined(__APPLE__) || (defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)) - // Clang's as(1) doesn't let you name macro parameters prior to 3.5. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name - #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name - #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name - #define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name - #define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name - #define END_MACRO .endmacro - - // Clang's as(1) uses $0, $1, and so on for macro arguments. - #define RAW_VAR(name,index) $index - #define VAR(name,index) SYMBOL($index) - #define PLT_VAR(name, index) SYMBOL($index) - #define REG_VAR(name,index) %$index - #define CALL_MACRO(name,index) $index - - // The use of $x for arguments mean that literals need to be represented with $$x in macros. - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $$value +// Regular gas(1) & current clang/llvm assembler support named macro parameters. +#define MACRO0(macro_name) .macro macro_name +#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 +#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 +#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 +#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4 +#define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5 +#define END_MACRO .endm + +#if defined(__clang__) + // Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't + // separate the backslash and parameter by a space. Everything just works. + #define RAW_VAR(name) \name + #define VAR(name) \name + #define CALLVAR(name) SYMBOL(\name) + #define PLT_VAR(name) \name@PLT + #define REG_VAR(name) %\name + #define CALL_MACRO(name) \name #else - // Regular gas(1) lets you name macro parameters. 
- #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 - #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 - #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 - #define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4 - #define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5 - #define END_MACRO .endm - // Regular gas(1) uses \argument_name for macro arguments. // We need to turn on alternate macro syntax so we can use & instead or the preprocessor // will screw us by inserting a space between the \ and the name. Even in this mode there's // no special meaning to $, so literals are still just $x. The use of altmacro means % is a - // special character meaning care needs to be taken when passing registers as macro arguments. + // special character meaning care needs to be taken when passing registers as macro + // arguments. .altmacro - #define RAW_VAR(name,index) name& - #define VAR(name,index) name& - #define PLT_VAR(name, index) name&@PLT - #define REG_VAR(name,index) %name - #define CALL_MACRO(name,index) name& + #define RAW_VAR(name) name& + #define VAR(name) name& + #define CALLVAR(name) SYMBOL(name&) + #define PLT_VAR(name) name&@PLT + #define REG_VAR(name) %name + #define CALL_MACRO(name) name& +#endif - #define LITERAL(value) $value +#define LITERAL(value) $value +#if defined(__APPLE__) + #define MACRO_LITERAL(value) $(value) +#else #define MACRO_LITERAL(value) $value #endif #if defined(__APPLE__) - #define FUNCTION_TYPE(name,index) - #define SIZE(name,index) -#elif defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5) - #define FUNCTION_TYPE(name,index) .type $index, @function - #define SIZE(name,index) .size $index, .-$index + #define FUNCTION_TYPE(name) + #define SIZE(name) #else - #define FUNCTION_TYPE(name,index) .type name&, @function - #define SIZE(name,index) .size name, .-name + #define FUNCTION_TYPE(name) .type name, @function + #define SIZE(name) .size name, .-name #endif // CFI support. @@ -100,16 +92,10 @@ #define CFI_REMEMBER_STATE #endif - // Symbols. + // Symbols. On a Mac, we need a leading underscore. #if !defined(__APPLE__) #define SYMBOL(name) name - #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5) - // TODO: Disabled for old clang 3.3, this leads to text relocations and there should be a - // better fix. - #define PLT_SYMBOL(name) name // ## @PLT - #else - #define PLT_SYMBOL(name) name ## @PLT - #endif + #define PLT_SYMBOL(name) name ## @PLT #else // Mac OS' symbols have an _ prefix. #define SYMBOL(name) _ ## name @@ -129,11 +115,11 @@ MACRO0(ALIGN_FUNCTION_ENTRY) END_MACRO MACRO1(DEFINE_FUNCTION, c_name) - FUNCTION_TYPE(\c_name, 0) - ASM_HIDDEN VAR(c_name, 0) - .globl VAR(c_name, 0) + FUNCTION_TYPE(SYMBOL(\c_name)) + ASM_HIDDEN CALLVAR(c_name) + .globl CALLVAR(c_name) ALIGN_FUNCTION_ENTRY -VAR(c_name, 0): +CALLVAR(c_name): CFI_STARTPROC // Ensure we get a sane starting CFA. 
CFI_DEF_CFA(esp, 4) @@ -141,36 +127,38 @@ END_MACRO MACRO1(END_FUNCTION, c_name) CFI_ENDPROC - SIZE(\c_name, 0) + SIZE(SYMBOL(\c_name)) END_MACRO MACRO1(PUSH, reg) - pushl REG_VAR(reg, 0) + pushl REG_VAR(reg) CFI_ADJUST_CFA_OFFSET(4) - CFI_REL_OFFSET(REG_VAR(reg, 0), 0) + CFI_REL_OFFSET(REG_VAR(reg), 0) END_MACRO MACRO1(POP, reg) - popl REG_VAR(reg,0) + popl REG_VAR(reg) CFI_ADJUST_CFA_OFFSET(-4) - CFI_RESTORE(REG_VAR(reg,0)) + CFI_RESTORE(REG_VAR(reg)) END_MACRO +#define UNREACHABLE int3 + MACRO1(UNIMPLEMENTED,name) - FUNCTION_TYPE(\name, 0) - .globl VAR(name, 0) + FUNCTION_TYPE(\name) + .globl VAR(name) ALIGN_FUNCTION_ENTRY -VAR(name, 0): +VAR(name): CFI_STARTPROC - int3 - int3 + UNREACHABLE + UNREACHABLE CFI_ENDPROC - SIZE(\name, 0) + SIZE(\name) END_MACRO MACRO1(SETUP_GOT_NOSAVE, got_reg) #ifndef __APPLE__ - .ifc RAW_VAR(got_reg, 0), ebx + .ifc VAR(got_reg), ebx call __x86.get_pc_thunk.bx addl $_GLOBAL_OFFSET_TABLE_, %ebx .else @@ -182,15 +170,16 @@ END_MACRO // Macros to poison (negate) the reference for heap poisoning. MACRO1(POISON_HEAP_REF, rRef) #ifdef USE_HEAP_POISONING - neg REG_VAR(rRef, 0) + neg REG_VAR(rRef) #endif // USE_HEAP_POISONING END_MACRO // Macros to unpoison (negate) the reference for heap poisoning. MACRO1(UNPOISON_HEAP_REF, rRef) #ifdef USE_HEAP_POISONING - neg REG_VAR(rRef, 0) + neg REG_VAR(rRef) #endif // USE_HEAP_POISONING END_MACRO + #endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h index a783d48ed2..c4a11d8a88 100644 --- a/runtime/arch/x86/context_x86.h +++ b/runtime/arch/x86/context_x86.h @@ -34,7 +34,7 @@ class X86Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(ESP, new_sp); diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index 737f4d1c5b..e2632c103b 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -28,6 +28,9 @@ namespace art { extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass, const mirror::Class* ref_class); +// Read barrier entrypoints. 
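The heap-poisoning macros carried through this cleanup are a single negation in each direction; because negation is its own inverse, POISON_HEAP_REF and UNPOISON_HEAP_REF can share the one neg instruction seen above. A C++ rendering of that reading (an interpretation of the assembly, not ART's helpers):

#include <cstdint>

// Stored references are kept negated so a load that skips unpoisoning
// yields an implausible pointer and fails fast.
constexpr uint32_t PoisonHeapRef(uint32_t ref)   { return 0u - ref; }
constexpr uint32_t UnpoisonHeapRef(uint32_t ref) { return 0u - ref; }

static_assert(UnpoisonHeapRef(PoisonHeapRef(0x12345678u)) == 0x12345678u,
              "poisoning must round-trip");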
+extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t); + void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Interpreter @@ -141,6 +144,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = art_quick_read_barrier_slow; }; } // namespace art diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S index 5d27e474f2..aca5a37e75 100644 --- a/runtime/arch/x86/jni_entrypoints_x86.S +++ b/runtime/arch/x86/jni_entrypoints_x86.S @@ -23,6 +23,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub subl LITERAL(8), %esp // align stack CFI_ADJUST_CFA_OFFSET(8) pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) call SYMBOL(artFindNativeMethod) // (Thread*) addl LITERAL(12), %esp // remove argument & padding CFI_ADJUST_CFA_OFFSET(-12) diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index c9bc9779dc..1da5a2ff17 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -26,15 +26,14 @@ MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg) PUSH edi // Save callee saves (ebx is saved/restored by the upcall) PUSH esi PUSH ebp - subl MACRO_LITERAL(12), %esp // Grow stack by 3 words. + subl MACRO_LITERAL(12), %esp // Grow stack by 3 words. CFI_ADJUST_CFA_OFFSET(12) - SETUP_GOT_NOSAVE RAW_VAR(got_reg, 0) + SETUP_GOT_NOSAVE RAW_VAR(got_reg) // Load Runtime::instance_ from GOT. - movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1) - movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1) + movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg) + movl (REG_VAR(temp_reg)), REG_VAR(temp_reg) // Push save all callee-save method. - THIS_LOAD_REQUIRES_READ_BARRIER - pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1)) + pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg)) CFI_ADJUST_CFA_OFFSET(4) // Store esp as the top quick frame. movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET @@ -53,15 +52,14 @@ MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg) PUSH edi // Save callee saves (ebx is saved/restored by the upcall) PUSH esi PUSH ebp - subl MACRO_LITERAL(12), %esp // Grow stack by 3 words. + subl MACRO_LITERAL(12), %esp // Grow stack by 3 words. CFI_ADJUST_CFA_OFFSET(12) - SETUP_GOT_NOSAVE VAR(got_reg, 0) + SETUP_GOT_NOSAVE RAW_VAR(got_reg) // Load Runtime::instance_ from GOT. - movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1) - movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1) + movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg) + movl (REG_VAR(temp_reg)), REG_VAR(temp_reg) // Push save all callee-save method. - THIS_LOAD_REQUIRES_READ_BARRIER - pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1)) + pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg)) CFI_ADJUST_CFA_OFFSET(4) // Store esp as the top quick frame. movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET @@ -101,13 +99,12 @@ MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg) movsd %xmm2, 16(%esp) movsd %xmm3, 24(%esp) - SETUP_GOT_NOSAVE VAR(got_reg, 0) + SETUP_GOT_NOSAVE RAW_VAR(got_reg) // Load Runtime::instance_ from GOT. 
- movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1) - movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1) + movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg) + movl (REG_VAR(temp_reg)), REG_VAR(temp_reg) // Push refs and args callee-save method. - THIS_LOAD_REQUIRES_READ_BARRIER - pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1)) + pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg)) CFI_ADJUST_CFA_OFFSET(4) // Store esp as the top quick frame. movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET @@ -157,10 +154,10 @@ MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME) addl MACRO_LITERAL(36), %esp // Remove FPRs and EAX. CFI_ADJUST_CFA_OFFSET(-36) - POP ecx // Restore args except eax + POP ecx // Restore args except eax POP edx POP ebx - POP ebp // Restore callee saves + POP ebp // Restore callee saves POP esi POP edi END_MACRO @@ -196,54 +193,54 @@ END_MACRO MACRO0(DELIVER_PENDING_EXCEPTION) SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save callee saves for throw // Outgoing argument set up - subl MACRO_LITERAL(12), %esp // Alignment padding + subl MACRO_LITERAL(12), %esp // Alignment padding CFI_ADJUST_CFA_OFFSET(12) pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*) - int3 // unreached + UNREACHABLE END_MACRO MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context // Outgoing argument set up - subl MACRO_LITERAL(12), %esp // alignment padding + subl MACRO_LITERAL(12), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(12) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - call VAR(cxx_name, 1) // cxx_name(Thread*) - int3 // unreached - END_FUNCTION RAW_VAR(c_name, 0) + call CALLVAR(cxx_name) // cxx_name(Thread*) + UNREACHABLE + END_FUNCTION VAR(c_name) END_MACRO MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context mov %esp, %ecx // Outgoing argument set up - subl MACRO_LITERAL(8), %esp // alignment padding + subl MACRO_LITERAL(8), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(8) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, Thread*) - int3 // unreached - END_FUNCTION RAW_VAR(c_name, 0) + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, Thread*) + UNREACHABLE + END_FUNCTION VAR(c_name) END_MACRO MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context // Outgoing argument set up - PUSH eax // alignment padding - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + PUSH eax // alignment padding + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*) - int3 // unreached - END_FUNCTION RAW_VAR(c_name, 0) + PUSH ecx //
pass arg2 + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*) + UNREACHABLE + END_FUNCTION VAR(c_name) END_MACRO /* @@ -293,8 +290,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC * On success this wrapper will restore arguments and *jump* to the target, leaving the lr * pointing back to the original caller. */ -MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) - DEFINE_FUNCTION RAW_VAR(c_name, 0) +MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name) SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME ebx, ebx movl %esp, %edx // remember SP @@ -304,7 +300,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) CFI_ADJUST_CFA_OFFSET(4) PUSH ecx // pass arg2 PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP) + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP) movl %edx, %edi // save code pointer in EDI addl MACRO_LITERAL(20), %esp // Pop arguments skip eax CFI_ADJUST_CFA_OFFSET(-20) @@ -334,10 +330,13 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) addl MACRO_LITERAL(4), %esp // Pop code pointer off stack CFI_ADJUST_CFA_OFFSET(-4) DELIVER_PENDING_EXCEPTION - END_FUNCTION RAW_VAR(c_name, 0) +END_MACRO +MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name) + INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name) + END_FUNCTION VAR(c_name) END_MACRO -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck @@ -352,27 +351,27 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo */ MACRO5(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, shorty, arg_array, temp_char, finished) 1: // LOOP - movb (REG_VAR(shorty, 1)), REG_VAR(temp_char, 3) // temp_char := *shorty - addl MACRO_LITERAL(1), REG_VAR(shorty, 1) // shorty++ - cmpb MACRO_LITERAL(0), REG_VAR(temp_char, 3) // if (temp_char == '\0') - je RAW_VAR(finished, 4) // goto finished - cmpb MACRO_LITERAL(68), REG_VAR(temp_char, 3) // if (temp_char == 'D') - je 2f // goto FOUND_DOUBLE - cmpb MACRO_LITERAL(70), REG_VAR(temp_char, 3) // if (temp_char == 'F') - je 3f // goto FOUND_FLOAT - addl MACRO_LITERAL(4), REG_VAR(arg_array, 2) // arg_array++ + movb (REG_VAR(shorty)), REG_VAR(temp_char) // temp_char := *shorty + addl MACRO_LITERAL(1), REG_VAR(shorty) // shorty++ + cmpb MACRO_LITERAL(0), REG_VAR(temp_char) // if (temp_char == '\0') + je VAR(finished) // goto finished + cmpb MACRO_LITERAL(68), REG_VAR(temp_char) // if (temp_char == 'D') + je 2f // goto FOUND_DOUBLE + cmpb MACRO_LITERAL(70), REG_VAR(temp_char) // if (temp_char == 'F') + je 3f // goto FOUND_FLOAT + addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++ // Handle extra space in arg array taken by a long. 
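Stepping back to the INVOKE_TRAMPOLINE_BODY refactor a few lines above: the stub calls a C resolver that returns the code pointer to jump to, and a null result means an exception is pending. A C sketch of that control flow, with every name (resolve_invoke and friends) a stand-in of ours:

#include <stddef.h>

typedef void (*EntryCode)(void);

/* Stand-ins for the runtime pieces; illustrative only. */
static void deliver_pending_exception(void) {}
static void resolved_method_code(void) {}

/* Models the artInvoke*Trampoline helpers: resolve the call and return the
 * code to jump to, or NULL if an exception is now pending. */
static EntryCode resolve_invoke(unsigned method_idx) {
  return method_idx != 0 ? resolved_method_code : NULL;
}

/* Shape of INVOKE_TRAMPOLINE_BODY: call the resolver, then either tail-call
 * the resolved code with the original arguments or deliver the exception. */
static void invoke_trampoline(unsigned method_idx) {
  EntryCode code = resolve_invoke(method_idx);
  if (code == NULL) {
    deliver_pending_exception();  /* DELIVER_PENDING_EXCEPTION */
    return;
  }
  code();                         /* jmp *%edi after restoring the frame */
}

int main(void) { invoke_trampoline(1); return 0; }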
- cmpb MACRO_LITERAL(74), REG_VAR(temp_char, 3) // if (temp_char != 'J') - jne 1b // goto LOOP - addl MACRO_LITERAL(4), REG_VAR(arg_array, 2) // arg_array++ - jmp 1b // goto LOOP + cmpb MACRO_LITERAL(74), REG_VAR(temp_char) // if (temp_char != 'J') + jne 1b // goto LOOP + addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++ + jmp 1b // goto LOOP 2: // FOUND_DOUBLE - movsd (REG_VAR(arg_array, 2)), REG_VAR(xmm_reg, 0) - addl MACRO_LITERAL(8), REG_VAR(arg_array, 2) // arg_array+=2 + movsd (REG_VAR(arg_array)), REG_VAR(xmm_reg) + addl MACRO_LITERAL(8), REG_VAR(arg_array) // arg_array+=2 jmp 4f 3: // FOUND_FLOAT - movss (REG_VAR(arg_array, 2)), REG_VAR(xmm_reg, 0) - addl MACRO_LITERAL(4), REG_VAR(arg_array, 2) // arg_array++ + movss (REG_VAR(arg_array)), REG_VAR(xmm_reg) + addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++ 4: END_MACRO @@ -383,21 +382,21 @@ END_MACRO */ MACRO4(SKIP_OVER_FLOATS, shorty, arg_array, temp_char, finished) 1: // LOOP: - movb (REG_VAR(shorty, 0)), REG_VAR(temp_char, 2) // temp_char := *shorty - addl MACRO_LITERAL(1), REG_VAR(shorty, 0) // shorty++ - cmpb MACRO_LITERAL(0), REG_VAR(temp_char, 2) // if (temp_char == '\0') - je RAW_VAR(finished, 3) // goto finished - cmpb MACRO_LITERAL(70), REG_VAR(temp_char, 2) // if (temp_char == 'F') - je 3f // goto SKIP_FLOAT - cmpb MACRO_LITERAL(68), REG_VAR(temp_char, 2) // if (temp_char == 'D') - je 4f // goto SKIP_DOUBLE - jmp 5f // goto end + movb (REG_VAR(shorty)), REG_VAR(temp_char) // temp_char := *shorty + addl MACRO_LITERAL(1), REG_VAR(shorty) // shorty++ + cmpb MACRO_LITERAL(0), REG_VAR(temp_char) // if (temp_char == '\0') + je VAR(finished) // goto finished + cmpb MACRO_LITERAL(70), REG_VAR(temp_char) // if (temp_char == 'F') + je 3f // goto SKIP_FLOAT + cmpb MACRO_LITERAL(68), REG_VAR(temp_char) // if (temp_char == 'D') + je 4f // goto SKIP_DOUBLE + jmp 5f // goto end 3: // SKIP_FLOAT - addl MACRO_LITERAL(4), REG_VAR(arg_array, 1) // arg_array++ - jmp 1b // goto LOOP + addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++ + jmp 1b // goto LOOP 4: // SKIP_DOUBLE - addl MACRO_LITERAL(8), REG_VAR(arg_array, 1) // arg_array+=2 - jmp 1b // goto LOOP + addl MACRO_LITERAL(8), REG_VAR(arg_array) // arg_array+=2 + jmp 1b // goto LOOP 5: END_MACRO @@ -617,147 +616,148 @@ DEFINE_FUNCTION art_quick_invoke_static_stub END_FUNCTION art_quick_invoke_static_stub MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - subl MACRO_LITERAL(12), %esp // push padding + subl MACRO_LITERAL(12), %esp // push padding CFI_ADJUST_CFA_OFFSET(12) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - call VAR(cxx_name, 1) // cxx_name(Thread*) - addl MACRO_LITERAL(16), %esp // pop arguments + call CALLVAR(cxx_name) // cxx_name(Thread*) + addl MACRO_LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) 
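The two shorty-walking macros above are a byte-at-a-time scan of the method's shorty string: 'D' (68) is a double taking two 32-bit slots, 'F' (70) a float taking one, 'J' (74) a long taking two, and everything else one slot. The same control flow restated as C for readability (a sketch; the real stubs keep everything in registers):

#include <stdint.h>

/* Number of 32-bit arg_array slots one shorty character consumes
 * (the asm advances arg_array by 4 or 8 bytes accordingly). */
static int slots_for_shorty_char(char c) {
  switch (c) {
    case 'D': return 2;  /* FOUND_DOUBLE: movsd, arg_array += 2 */
    case 'J': return 2;  /* long: skip the extra slot */
    case 'F': return 1;  /* FOUND_FLOAT: movss, arg_array += 1 */
    default:  return 1;  /* everything else is one slot */
  }
}

static const uint32_t* walk_args(const char* shorty, const uint32_t* arg_array) {
  for (; *shorty != '\0'; ++shorty)      /* je finished on '\0' */
    arg_array += slots_for_shorty_char(*shorty);
  return arg_array;
}

int main(void) {
  uint32_t args[5] = {0};
  /* 'F' (1 slot) + 'D' (2 slots) + 'I' (1 slot) = 4 slots consumed. */
  return walk_args("FDI", args) == args + 4 ? 0 : 1;
}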
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - subl MACRO_LITERAL(8), %esp // push padding + subl MACRO_LITERAL(8), %esp // push padding CFI_ADJUST_CFA_OFFSET(8) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, Thread*) - addl MACRO_LITERAL(16), %esp // pop arguments + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, Thread*) + addl MACRO_LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - PUSH eax // push padding - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + PUSH eax // push padding + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*) - addl MACRO_LITERAL(16), %esp // pop arguments + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*) + addl MACRO_LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH edx // pass arg3 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*) - addl MACRO_LITERAL(16), %esp // pop arguments + PUSH edx // pass arg3 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, Thread*) + addl MACRO_LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - subl MACRO_LITERAL(12), %esp // alignment padding + subl MACRO_LITERAL(12), %esp // alignment padding 
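All the "alignment padding" adjustments in these x86 downcall macros follow one rule: the outgoing words (arguments plus Thread*) are padded so the total pushed is a multiple of 16 bytes, keeping esp 16-byte aligned at the call. A little arithmetic check of that rule, assuming the 4-byte stack slots of the 32-bit ABI:

#include <stdio.h>

/* Bytes of padding a downcall macro inserts before pushing its outgoing
 * words (args + Thread*), keeping the total a multiple of 16. */
static unsigned padding_bytes(unsigned outgoing_words) {
  unsigned bytes = 4u * outgoing_words;
  return (16u - bytes % 16u) % 16u;
}

int main(void) {
  /* Matches the macros: 1 word -> subl $12, 2 -> subl $8, 3 -> one PUSH,
   * 4 -> none, 5 (FOUR_ARG_DOWNCALL) -> subl $12 again. */
  for (unsigned words = 1; words <= 5; ++words)
    printf("%u words -> %u bytes of padding\n", words, padding_bytes(words));
  return 0;
}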
CFI_ADJUST_CFA_OFFSET(12) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ebx // pass arg4 - PUSH edx // pass arg3 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, arg4, Thread*) - addl MACRO_LITERAL(32), %esp // pop arguments + PUSH ebx // pass arg4 + PUSH edx // pass arg3 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*) + addl MACRO_LITERAL(32), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-32) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) - SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + DEFINE_FUNCTION VAR(c_name) + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer - PUSH eax // push padding - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + PUSH eax // push padding + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass referrer - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, referrer, Thread*) - addl MACRO_LITERAL(16), %esp // pop arguments + PUSH ecx // pass referrer + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, referrer, Thread*) + addl MACRO_LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) - SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + DEFINE_FUNCTION VAR(c_name) + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %edx // get referrer - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH edx // pass referrer - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, referrer, Thread*) - addl MACRO_LITERAL(16), %esp // pop arguments + PUSH edx // pass referrer + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, referrer, Thread*) + addl MACRO_LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(THREE_ARG_REF_DOWNCALL, 
c_name, cxx_name, return_macro) - DEFINE_FUNCTION RAW_VAR(c_name, 0) - SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + DEFINE_FUNCTION VAR(c_name) + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer - subl MACRO_LITERAL(12), %esp // alignment padding + subl MACRO_LITERAL(12), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(12) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ebx // pass referrer - PUSH edx // pass arg3 - PUSH ecx // pass arg2 - PUSH eax // pass arg1 - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, referrer, Thread*) - addl LITERAL(32), %esp // pop arguments + PUSH ebx // pass referrer + PUSH edx // pass arg3 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 + call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, referrer, + // Thread*) + addl LITERAL(32), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-32) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION RAW_VAR(c_name, 0) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO0(RETURN_IF_RESULT_IS_NON_ZERO) @@ -778,9 +778,9 @@ END_MACRO MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION) cmpl MACRO_LITERAL(0),%fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ? - jne 1f // if exception field != 0 goto 1 - ret // return -1: // deliver exception on current thread + jne 1f // if exception field != 0 goto 1 + ret // return +1: // deliver exception on current thread DELIVER_PENDING_EXCEPTION END_MACRO @@ -1018,15 +1018,15 @@ DEFINE_FUNCTION art_quick_lock_object .Lslow_lock: SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - subl LITERAL(8), %esp // alignment padding + subl LITERAL(8), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(8) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH eax // pass object - call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*) - addl LITERAL(16), %esp // pop arguments + PUSH eax // pass object + call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*) + addl LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO END_FUNCTION art_quick_lock_object @@ -1073,56 +1073,103 @@ DEFINE_FUNCTION art_quick_unlock_object .Lslow_unlock: SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC // Outgoing argument set up - subl LITERAL(8), %esp // alignment padding + subl LITERAL(8), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(8) - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH eax // pass object + PUSH eax // pass object call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*) - addl LITERAL(16), %esp // pop arguments + addl LITERAL(16), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-16) - 
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO END_FUNCTION art_quick_unlock_object DEFINE_FUNCTION art_quick_is_assignable - PUSH eax // alignment padding - PUSH ecx // pass arg2 - obj->klass - PUSH eax // pass arg1 - checked class + PUSH eax // alignment padding + PUSH ecx // pass arg2 - obj->klass + PUSH eax // pass arg1 - checked class call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass) - addl LITERAL(12), %esp // pop arguments + addl LITERAL(12), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-12) ret END_FUNCTION art_quick_is_assignable DEFINE_FUNCTION art_quick_check_cast - PUSH eax // alignment padding - PUSH ecx // pass arg2 - obj->klass - PUSH eax // pass arg1 - checked class + PUSH eax // alignment padding + PUSH ecx // pass arg2 - obj->klass + PUSH eax // pass arg1 - checked class call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass) testl %eax, %eax - jz 1f // jump forward if not assignable - addl LITERAL(12), %esp // pop arguments + jz 1f // jump forward if not assignable + addl LITERAL(12), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-12) ret - CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds. + CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds. 1: - POP eax // pop arguments + POP eax // pop arguments POP ecx addl LITERAL(4), %esp - CFI_ADJUST_CFA_OFFSET(-12) + CFI_ADJUST_CFA_OFFSET(-4) SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context // Outgoing argument set up - PUSH eax // alignment padding - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + PUSH eax // alignment padding + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() CFI_ADJUST_CFA_OFFSET(4) - PUSH ecx // pass arg2 - PUSH eax // pass arg1 + PUSH ecx // pass arg2 + PUSH eax // pass arg1 call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*) - int3 // unreached + UNREACHABLE END_FUNCTION art_quick_check_cast +// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack. +MACRO2(POP_REG_NE, reg, exclude_reg) + .ifc RAW_VAR(reg), RAW_VAR(exclude_reg) + addl MACRO_LITERAL(4), %esp + CFI_ADJUST_CFA_OFFSET(-4) + .else + POP RAW_VAR(reg) + .endif +END_MACRO + + /* + * Macro to insert read barrier, only used in art_quick_aput_obj. + * obj_reg and dest_reg are registers, offset is a defined literal such as + * MIRROR_OBJECT_CLASS_OFFSET. + * pop_eax is a boolean flag, indicating if eax is popped after the call. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. + */ +MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax) +#ifdef USE_READ_BARRIER + PUSH eax // save registers used in art_quick_aput_obj + PUSH ebx + PUSH edx + PUSH ecx + // Outgoing argument set up + pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary + CFI_ADJUST_CFA_OFFSET(4) + PUSH RAW_VAR(obj_reg) // pass obj_reg + PUSH eax // pass ref, just pass eax for now since parameter ref is unused + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset) + // No need to unpoison return value in eax, artReadBarrierSlow() would do the unpoisoning. 
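POP_REG_NE, defined above and used just below, always consumes one stack slot but only restores the register when it is not the one now holding the freshly loaded reference. A C restatement of that semantics, with all names ours:

#include <stdint.h>

enum { EAX, ECX, EDX, EBX, NREGS };

/* Models POP_REG_NE: esp moves by one word in both branches of the .ifc,
 * but the saved value only lands in the register when reg != exclude_reg. */
static void pop_reg_ne(int reg, int exclude_reg, const uint32_t** sp,
                       uint32_t regs[NREGS]) {
  uint32_t saved = **sp;
  (*sp)++;                 /* addl $4, %esp either way */
  if (reg != exclude_reg)
    regs[reg] = saved;     /* the POP branch */
}

int main(void) {
  uint32_t stack[2] = { 111, 222 };   /* values saved by earlier PUSHes */
  const uint32_t* sp = stack;
  uint32_t regs[NREGS] = { 9, 9, 9, 9 };
  pop_reg_ne(ECX, EDX, &sp, regs);    /* restores ecx */
  pop_reg_ne(EDX, EDX, &sp, regs);    /* edx holds the result: slot dropped */
  return (regs[ECX] == 111 && regs[EDX] == 9 && sp == stack + 2) ? 0 : 1;
}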
+ .ifnc RAW_VAR(dest_reg), eax + movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg + .endif + addl MACRO_LITERAL(12), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-12) + POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg + POP_REG_NE edx, RAW_VAR(dest_reg) + POP_REG_NE ebx, RAW_VAR(dest_reg) + .ifc RAW_VAR(pop_eax), true + POP_REG_NE eax, RAW_VAR(dest_reg) + .endif +#else + movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg) + UNPOISON_HEAP_REF RAW_VAR(dest_reg) +#endif // USE_READ_BARRIER +END_MACRO + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. @@ -1146,17 +1193,20 @@ END_FUNCTION art_quick_aput_obj_with_bound_check DEFINE_FUNCTION art_quick_aput_obj test %edx, %edx // store of null jz .Ldo_aput_null - movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx - UNPOISON_HEAP_REF ebx - movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx - UNPOISON_HEAP_REF ebx + READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true + READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true // value's type == array's component type - trivial assignability -#ifdef USE_HEAP_POISONING - PUSH eax // save eax +#if defined(USE_READ_BARRIER) + READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false + cmpl %eax, %ebx + POP eax // restore eax from the push in the beginning of READ_BARRIER macro +#elif defined(USE_HEAP_POISONING) + PUSH eax // save eax + // Cannot call READ_BARRIER macro here, because the above push messes up stack alignment. movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax UNPOISON_HEAP_REF eax cmpl %eax, %ebx - POP eax // restore eax + POP eax // restore eax #else cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx #endif @@ -1172,19 +1222,21 @@ DEFINE_FUNCTION art_quick_aput_obj movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) ret .Lcheck_assignability: - PUSH eax // save arguments + PUSH eax // save arguments PUSH ecx PUSH edx - subl LITERAL(8), %esp // alignment padding + subl LITERAL(8), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(8) #ifdef USE_HEAP_POISONING + // This load does not need read barrier, since edx is unchanged and there's no GC safe point + // from last read of MIRROR_OBJECT_CLASS_OFFSET(%edx). 
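The READ_BARRIER macro above has exactly two shapes: with USE_READ_BARRIER it spills the caller's registers and calls the slow path, which performs the load itself; without it, the load is a plain movl (plus UNPOISON_HEAP_REF when heap poisoning is on). A C sketch of that branch structure, using a stand-in for artReadBarrierSlow so it compiles on its own:

#include <stddef.h>
#include <stdint.h>

typedef struct Object Object;
struct Object { Object* klass; };

/* Stand-in for artReadBarrierSlow: given (ref, obj, offset) it performs the
 * field load itself and returns the reference the mutator should use. */
static Object* artReadBarrierSlow_stub(Object* ref, Object* obj, uint32_t offset) {
  (void)ref;
  return *(Object**)((char*)obj + offset);
}

/* C restatement of the READ_BARRIER macro's two configurations. */
static Object* read_barrier(Object* obj, uint32_t offset, int use_read_barrier) {
  if (use_read_barrier) {
    /* USE_READ_BARRIER: save registers, call the slow path, restore all but
     * the register that now holds the result (POP_REG_NE). */
    return artReadBarrierSlow_stub(NULL, obj, offset);
  }
  /* Fallback: a plain field load (plus UNPOISON_HEAP_REF when heap
   * poisoning is compiled in, omitted here). */
  return *(Object**)((char*)obj + offset);
}

int main(void) {
  Object a = { NULL }, b = { &a };
  uint32_t off = (uint32_t)offsetof(Object, klass);
  return (read_barrier(&b, off, 1) == &a && read_barrier(&b, off, 0) == &a) ? 0 : 1;
}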
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored UNPOISON_HEAP_REF eax PUSH eax #else pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored -#endif CFI_ADJUST_CFA_OFFSET(4) +#endif PUSH ebx // pass arg1 - component type of the array call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b) addl LITERAL(16), %esp // pop arguments @@ -1213,7 +1265,7 @@ DEFINE_FUNCTION art_quick_aput_obj PUSH edx // pass arg2 - value PUSH eax // pass arg1 - array call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*) - int3 // unreached + UNREACHABLE END_FUNCTION art_quick_aput_obj DEFINE_FUNCTION art_quick_memcpy @@ -1250,37 +1302,37 @@ DEFINE_FUNCTION art_quick_f2l END_FUNCTION art_quick_f2l DEFINE_FUNCTION art_quick_ldiv - subl LITERAL(12), %esp // alignment padding + subl LITERAL(12), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(12) - PUSH ebx // pass arg4 b.hi - PUSH edx // pass arg3 b.lo - PUSH ecx // pass arg2 a.hi - PUSH eax // pass arg1 a.lo - call SYMBOL(artLdiv) // (jlong a, jlong b) - addl LITERAL(28), %esp // pop arguments + PUSH ebx // pass arg4 b.hi + PUSH edx // pass arg3 b.lo + PUSH ecx // pass arg2 a.hi + PUSH eax // pass arg1 a.lo + call SYMBOL(artLdiv) // (jlong a, jlong b) + addl LITERAL(28), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-28) ret END_FUNCTION art_quick_ldiv DEFINE_FUNCTION art_quick_lmod - subl LITERAL(12), %esp // alignment padding + subl LITERAL(12), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(12) - PUSH ebx // pass arg4 b.hi - PUSH edx // pass arg3 b.lo - PUSH ecx // pass arg2 a.hi - PUSH eax // pass arg1 a.lo - call SYMBOL(artLmod) // (jlong a, jlong b) - addl LITERAL(28), %esp // pop arguments + PUSH ebx // pass arg4 b.hi + PUSH edx // pass arg3 b.lo + PUSH ecx // pass arg2 a.hi + PUSH eax // pass arg1 a.lo + call SYMBOL(artLmod) // (jlong a, jlong b) + addl LITERAL(28), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-28) ret END_FUNCTION art_quick_lmod DEFINE_FUNCTION art_quick_lmul - imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx) - imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx) - mul %edx // edx:eax = a.lo(eax) * b.lo(edx) + imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx) + imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx) + mul %edx // edx:eax = a.lo(eax) * b.lo(edx) add %ebx, %ecx - add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi) + add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi) ret END_FUNCTION art_quick_lmul @@ -1415,7 +1467,7 @@ END_FUNCTION art_quick_proxy_invoke_handler */ DEFINE_FUNCTION art_quick_imt_conflict_trampoline movd %xmm7, %eax // get target method index stored in xmm7 - jmp SYMBOL(art_quick_invoke_interface_trampoline) + INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline END_FUNCTION art_quick_imt_conflict_trampoline DEFINE_FUNCTION art_quick_resolution_trampoline @@ -1429,6 +1481,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP) movl %eax, %edi // remember code pointer in EDI addl LITERAL(16), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-16) test %eax, %eax // if code pointer is null goto deliver pending exception jz 1f RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME_AND_JUMP @@ -1439,7 +1492,7 @@ END_FUNCTION art_quick_resolution_trampoline DEFINE_FUNCTION art_quick_generic_jni_trampoline SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX - movl %esp, %ebp // save SP at callee-save frame + movl %esp, %ebp // save SP at callee-save frame 
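art_quick_lmul above computes a 64-bit product from 32-bit halves with one mul and two imuls: only the low 32 bits of the cross products can ever reach the high word, so they are simply added into edx. The same decomposition in C, checked against a native 64-bit multiply:

#include <assert.h>
#include <stdint.h>

static uint64_t lmul(uint64_t a, uint64_t b) {
  uint32_t a_lo = (uint32_t)a, a_hi = (uint32_t)(a >> 32);
  uint32_t b_lo = (uint32_t)b, b_hi = (uint32_t)(b >> 32);
  uint64_t lo = (uint64_t)a_lo * b_lo;   /* mul: edx:eax = a.lo * b.lo */
  uint32_t hi = (uint32_t)(lo >> 32)
              + a_lo * b_hi              /* imul %eax, %ebx */
              + a_hi * b_lo;             /* imul %edx, %ecx */
  return ((uint64_t)hi << 32) | (uint32_t)lo;
}

int main(void) {
  /* Both sides wrap identically mod 2^64. */
  assert(lmul(0x123456789ULL, 0x0FEDCBA987ULL) ==
         0x123456789ULL * 0x0FEDCBA987ULL);
  assert(lmul(~0ULL, ~0ULL) == ~0ULL * ~0ULL);
  return 0;
}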
CFI_DEF_CFA_REGISTER(ebp) subl LITERAL(5120), %esp // prepare for artQuickGenericJniTrampoline call @@ -1474,7 +1527,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline // (esp) 4(esp) 12(esp) <= C calling convention // fs:... eax:edx fp0 <= where they are - subl LITERAL(20), %esp // Padding & pass float result. + subl LITERAL(20), %esp // Padding & pass float result. fstpl (%esp) pushl %edx // Pass int result. pushl %eax @@ -1497,7 +1550,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline CFI_ADJUST_CFA_OFFSET(-(4 + 4 * 8)) POP ecx - addl LITERAL(4), %esp // Avoid edx, as it may be part of the result. + addl LITERAL(4), %esp // Avoid edx, as it may be part of the result. CFI_ADJUST_CFA_OFFSET(-4) POP ebx POP ebp // Restore callee saves @@ -1536,7 +1589,7 @@ DEFINE_FUNCTION art_quick_to_interpreter_bridge addl LITERAL(48), %esp // Remove FPRs and EAX, ECX, EDX, EBX. CFI_ADJUST_CFA_OFFSET(-48) - POP ebp // Restore callee saves + POP ebp // Restore callee saves POP esi POP edi @@ -1559,6 +1612,7 @@ DEFINE_FUNCTION art_quick_instrumentation_entry PUSH eax // Pass Method*. call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, LR) addl LITERAL(28), %esp // Pop arguments up to saved Method*. + CFI_ADJUST_CFA_OFFSET(-28) movl 60(%esp), %edi // Restore edi. movl %eax, 60(%esp) // Place code* over edi, just under return pc. movl SYMBOL(art_quick_instrumentation_exit)@GOT(%ebx), %ebx @@ -1578,11 +1632,13 @@ DEFINE_FUNCTION art_quick_instrumentation_entry movl 52(%esp), %ebp // Restore ebp. movl 56(%esp), %esi // Restore esi. addl LITERAL(60), %esp // Wind stack back up to code*. + CFI_ADJUST_CFA_OFFSET(-60) ret // Call method (and pop). END_FUNCTION art_quick_instrumentation_entry DEFINE_FUNCTION art_quick_instrumentation_exit pushl LITERAL(0) // Push a fake return PC as there will be none on the stack. + CFI_ADJUST_CFA_OFFSET(4) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx mov %esp, %ecx // Remember SP subl LITERAL(8), %esp // Save float return value. @@ -1611,6 +1667,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit CFI_ADJUST_CFA_OFFSET(-8) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME addl LITERAL(4), %esp // Remove fake return pc. + CFI_ADJUST_CFA_OFFSET(-4) jmp *%ecx // Return. END_FUNCTION art_quick_instrumentation_exit @@ -1619,7 +1676,7 @@ END_FUNCTION art_quick_instrumentation_exit * will long jump to the upcall with a special exception of -1. */ DEFINE_FUNCTION art_quick_deoptimize - pushl %ebx // Entry point for a jump. Fake that we were called. + PUSH ebx // Entry point for a jump. Fake that we were called. .globl SYMBOL(art_quick_deoptimize_from_compiled_slow_path) // Entry point for real calls // from compiled slow paths. SYMBOL(art_quick_deoptimize_from_compiled_slow_path): @@ -1629,7 +1686,7 @@ SYMBOL(art_quick_deoptimize_from_compiled_slow_path): pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). CFI_ADJUST_CFA_OFFSET(4) call SYMBOL(artDeoptimize) // artDeoptimize(Thread*) - int3 // Unreachable.
+ UNREACHABLE END_FUNCTION art_quick_deoptimize /* @@ -1640,8 +1697,8 @@ END_FUNCTION art_quick_deoptimize * ecx: comp string object (known non-null) */ DEFINE_FUNCTION art_quick_string_compareto - PUSH esi // push callee save reg - PUSH edi // push callee save reg + PUSH esi // push callee save reg + PUSH edi // push callee save reg mov MIRROR_STRING_COUNT_OFFSET(%eax), %edx mov MIRROR_STRING_COUNT_OFFSET(%ecx), %ebx lea MIRROR_STRING_VALUE_OFFSET(%eax), %esi @@ -1682,11 +1739,21 @@ END_FUNCTION art_quick_string_compareto DEFINE_FUNCTION art_nested_signal_return SETUP_GOT_NOSAVE ebx // sets %ebx for call into PLT movl LITERAL(1), %ecx - pushl %ecx // second arg to longjmp (1) - pushl %eax // first arg to longjmp (jmp_buf) + PUSH ecx // second arg to longjmp (1) + PUSH eax // first arg to longjmp (jmp_buf) call PLT_SYMBOL(longjmp) - int3 // won't get here. + UNREACHABLE END_FUNCTION art_nested_signal_return +DEFINE_FUNCTION art_quick_read_barrier_slow + PUSH edx // pass arg3 - offset + PUSH ecx // pass arg2 - obj + PUSH eax // pass arg1 - ref + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset) + addl LITERAL(12), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-12) + ret +END_FUNCTION art_quick_read_barrier_slow + // TODO: implement these! UNIMPLEMENTED art_quick_memcmp16 diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S index b2b6c2dbfb..cf0039c84e 100644 --- a/runtime/arch/x86_64/asm_support_x86_64.S +++ b/runtime/arch/x86_64/asm_support_x86_64.S @@ -19,57 +19,50 @@ #include "asm_support_x86_64.h" -#if defined(__APPLE__) || (defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)) - // Clang's as(1) doesn't let you name macro parameters prior to 3.5. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name - #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name - #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name - #define END_MACRO .endmacro - - // Clang's as(1) uses $0, $1, and so on for macro arguments. - #define RAW_VAR(name,index) $index - #define VAR(name,index) SYMBOL($index) - #define PLT_VAR(name, index) PLT_SYMBOL($index) - #define REG_VAR(name,index) %$index - #define CALL_MACRO(name,index) $index - - // The use of $x for arguments mean that literals need to be represented with $$x in macros. - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $$value +// Regular gas(1) & current clang/llvm assembler support named macro parameters. +#define MACRO0(macro_name) .macro macro_name +#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 +#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 +#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 +#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4 +#define END_MACRO .endm + +#if defined(__clang__) + // Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't + // separate the backslash and parameter by a space. Everything just works. + #define RAW_VAR(name) \name + #define VAR(name) SYMBOL(\name) + #define PLT_VAR(name) \name@PLT + #define REG_VAR(name) %\name + #define CALL_MACRO(name) \name #else - // Regular gas(1) lets you name macro parameters. 
- #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 - #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 - #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 - #define END_MACRO .endm - // Regular gas(1) uses \argument_name for macro arguments. // We need to turn on alternate macro syntax so we can use & instead or the preprocessor // will screw us by inserting a space between the \ and the name. Even in this mode there's // no special meaning to $, so literals are still just $x. The use of altmacro means % is a - // special character meaning care needs to be taken when passing registers as macro arguments. + // special character meaning care needs to be taken when passing registers as macro + // arguments. .altmacro - #define RAW_VAR(name,index) name& - #define VAR(name,index) name& - #define PLT_VAR(name, index) name&@PLT - #define REG_VAR(name,index) %name - #define CALL_MACRO(name,index) name& + #define RAW_VAR(name) name& + #define VAR(name) name& + #define PLT_VAR(name) name&@PLT + #define REG_VAR(name) %name + #define CALL_MACRO(name) name& +#endif - #define LITERAL(value) $value +#define LITERAL(value) $value +#if defined(__APPLE__) + #define MACRO_LITERAL(value) $$(value) +#else #define MACRO_LITERAL(value) $value #endif #if defined(__APPLE__) - #define FUNCTION_TYPE(name,index) - #define SIZE(name,index) -#elif defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5) - #define FUNCTION_TYPE(name,index) .type $index, @function - #define SIZE(name,index) .size $index, .-$index + #define FUNCTION_TYPE(name) + #define SIZE(name) #else - #define FUNCTION_TYPE(name,index) .type name&, @function - #define SIZE(name,index) .size name, .-name + #define FUNCTION_TYPE(name) .type name, @function + #define SIZE(name) .size name, .-name #endif // CFI support. @@ -95,13 +88,7 @@ // Symbols. #if !defined(__APPLE__) #define SYMBOL(name) name - #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5) - // TODO: Disabled for old clang 3.3, this leads to text relocations and there should be a - // better fix. - #define PLT_SYMBOL(name) name // ## @PLT - #else - #define PLT_SYMBOL(name) name ## @PLT - #endif + #define PLT_SYMBOL(name) name ## @PLT #else #define SYMBOL(name) _ ## name #define PLT_SYMBOL(name) _ ## name @@ -122,11 +109,11 @@ END_MACRO // TODO: we might need to use SYMBOL() here to add the underscore prefix // for mac builds. MACRO1(DEFINE_FUNCTION, c_name) - FUNCTION_TYPE(\c_name, 0) - ASM_HIDDEN VAR(c_name, 0) - .globl VAR(c_name, 0) + FUNCTION_TYPE(SYMBOL(\c_name)) + ASM_HIDDEN SYMBOL(\c_name) + .globl VAR(c_name) ALIGN_FUNCTION_ENTRY -VAR(c_name, 0): +VAR(c_name): CFI_STARTPROC // Ensure we get a sane starting CFA. 
CFI_DEF_CFA(rsp, 8) @@ -134,32 +121,32 @@ END_MACRO MACRO1(END_FUNCTION, c_name) CFI_ENDPROC - SIZE(\c_name, 0) + SIZE(SYMBOL(\c_name)) END_MACRO MACRO1(PUSH, reg) - pushq REG_VAR(reg, 0) + pushq REG_VAR(reg) CFI_ADJUST_CFA_OFFSET(8) - CFI_REL_OFFSET(REG_VAR(reg, 0), 0) + CFI_REL_OFFSET(REG_VAR(reg), 0) END_MACRO MACRO1(POP, reg) - popq REG_VAR(reg,0) + popq REG_VAR(reg) CFI_ADJUST_CFA_OFFSET(-8) - CFI_RESTORE(REG_VAR(reg,0)) + CFI_RESTORE(REG_VAR(reg)) END_MACRO MACRO1(UNIMPLEMENTED,name) - FUNCTION_TYPE(\name, 0) - ASM_HIDDEN VAR(c_name, 0) - .globl VAR(name, 0) + FUNCTION_TYPE(SYMBOL(\name)) + ASM_HIDDEN VAR(name) + .globl VAR(name) ALIGN_FUNCTION_ENTRY -VAR(name, 0): +VAR(name): CFI_STARTPROC int3 int3 CFI_ENDPROC - SIZE(\name, 0) + SIZE(SYMBOL(\name)) END_MACRO MACRO0(UNREACHABLE) @@ -173,14 +160,14 @@ END_MACRO // Macros to poison (negate) the reference for heap poisoning. MACRO1(POISON_HEAP_REF, rRef) #ifdef USE_HEAP_POISONING - negl REG_VAR(rRef, 0) + negl REG_VAR(rRef) #endif // USE_HEAP_POISONING END_MACRO // Macros to unpoison (negate) the reference for heap poisoning. MACRO1(UNPOISON_HEAP_REF, rRef) #ifdef USE_HEAP_POISONING - negl REG_VAR(rRef, 0) + negl REG_VAR(rRef) #endif // USE_HEAP_POISONING END_MACRO diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h index c9b0ff6b72..30bb9ec362 100644 --- a/runtime/arch/x86_64/context_x86_64.h +++ b/runtime/arch/x86_64/context_x86_64.h @@ -34,7 +34,7 @@ class X86_64Context : public Context { void Reset() OVERRIDE; - void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void SetSP(uintptr_t new_sp) OVERRIDE { SetGPR(RSP, new_sp); diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index d0ab9d5d49..ef1bb5f9a7 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -29,6 +29,9 @@ namespace art { extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass, const mirror::Class* ref_class); +// Read barrier entrypoints. +extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t); + void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { #if defined(__APPLE__) @@ -145,6 +148,7 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, // Read barrier qpoints->pReadBarrierJni = ReadBarrierJni; + qpoints->pReadBarrierSlow = art_quick_read_barrier_slow; #endif // __APPLE__ }; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 7d86c3accd..f4c9488260 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -66,7 +66,6 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME) movq %xmm14, 24(%rsp) movq %xmm15, 32(%rsp) // R10 := ArtMethod* for save all callee save frame method. - THIS_LOAD_REQUIRES_READ_BARRIER movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Store ArtMethod* to bottom of stack. movq %r10, 0(%rsp) @@ -109,7 +108,6 @@ MACRO0(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME) movq %xmm14, 24(%rsp) movq %xmm15, 32(%rsp) // R10 := ArtMethod* for refs only callee save frame method. - THIS_LOAD_REQUIRES_READ_BARRIER movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Store ArtMethod* to bottom of stack. 
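The x86-64 PUSH/POP macros above pair every stack move with CFI_ADJUST_CFA_OFFSET so the unwinder's notion of the CFA tracks reality; the invariant is that the offset always equals the bytes pushed since entry, back to 8 (the return address slot) before ret. A toy C model of that bookkeeping, ours rather than ART's:

typedef struct { int cfa_offset; } Cfi;

static void push(Cfi* c) { c->cfa_offset += 8; }  /* CFI_ADJUST_CFA_OFFSET(8) */
static void pop(Cfi* c)  { c->cfa_offset -= 8; }  /* CFI_ADJUST_CFA_OFFSET(-8) */

int main(void) {
  Cfi c = { 8 };          /* CFI_DEF_CFA(rsp, 8): just the return address */
  push(&c); push(&c);     /* PUSH rdi; PUSH rsi */
  pop(&c); pop(&c);       /* POP rsi; POP rdi */
  return c.cfa_offset == 8 ? 0 : 1;  /* must hold before ret */
}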
movq %r10, 0(%rsp) @@ -168,7 +166,6 @@ MACRO0(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME) subq MACRO_LITERAL(80 + 4 * 8), %rsp CFI_ADJUST_CFA_OFFSET(80 + 4 * 8) // R10 := ArtMethod* for ref and args callee save frame method. - THIS_LOAD_REQUIRES_READ_BARRIER movq RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10 // Save FPRs. movq %xmm0, 16(%rsp) @@ -275,33 +272,33 @@ MACRO0(DELIVER_PENDING_EXCEPTION) END_MACRO MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + DEFINE_FUNCTION VAR(c_name) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(Thread*) + call VAR(cxx_name) // cxx_name(Thread*) UNREACHABLE - END_FUNCTION VAR(c_name, 0) + END_FUNCTION VAR(c_name) END_MACRO MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + DEFINE_FUNCTION VAR(c_name) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg1, Thread*) + call VAR(cxx_name) // cxx_name(arg1, Thread*) UNREACHABLE - END_FUNCTION VAR(c_name, 0) + END_FUNCTION VAR(c_name) END_MACRO MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context + DEFINE_FUNCTION VAR(c_name) + SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(Thread*) + call VAR(cxx_name) // cxx_name(arg1, arg2, Thread*) UNREACHABLE - END_FUNCTION VAR(c_name, 0) + END_FUNCTION VAR(c_name) END_MACRO /* @@ -353,8 +350,7 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromC * * Adapted from x86 code.
*/ -MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) - DEFINE_FUNCTION VAR(c_name, 0) +MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name) SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME // save callee saves in case allocation triggers GC // Helper signature is always // (method_idx, *this_object, *caller_method, *self, sp) @@ -362,7 +358,7 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread movq %rsp, %rcx // pass SP - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP) + call VAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP) // save the code pointer movq %rax, %rdi movq %rdx, %rax @@ -375,10 +371,13 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) jmp *%rax 1: DELIVER_PENDING_EXCEPTION - END_FUNCTION VAR(c_name, 0) +END_MACRO +MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name) + INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name) + END_FUNCTION VAR(c_name) END_MACRO -INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck @@ -397,7 +396,7 @@ MACRO2(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, finished) movb (%r10), %al // al := *shorty addq MACRO_LITERAL(1), %r10 // shorty++ cmpb MACRO_LITERAL(0), %al // if (al == '\0') goto xmm_setup_finished - je VAR(finished, 1) + je VAR(finished) cmpb MACRO_LITERAL(68), %al // if (al == 'D') goto FOUND_DOUBLE je 2f cmpb MACRO_LITERAL(70), %al // if (al == 'F') goto FOUND_FLOAT @@ -409,11 +408,11 @@ MACRO2(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, finished) addq MACRO_LITERAL(4), %r11 // arg_array++ jmp 1b // goto LOOP 2: // FOUND_DOUBLE - movsd (%r11), REG_VAR(xmm_reg, 0) + movsd (%r11), REG_VAR(xmm_reg) addq MACRO_LITERAL(8), %r11 // arg_array+=2 jmp 4f 3: // FOUND_FLOAT - movss (%r11), REG_VAR(xmm_reg, 0) + movss (%r11), REG_VAR(xmm_reg) addq MACRO_LITERAL(4), %r11 // arg_array++ 4: END_MACRO @@ -428,18 +427,18 @@ MACRO3(LOOP_OVER_SHORTY_LOADING_GPRS, gpr_reg64, gpr_reg32, finished) movb (%r10), %al // al := *shorty addq MACRO_LITERAL(1), %r10 // shorty++ cmpb MACRO_LITERAL(0), %al // if (al == '\0') goto gpr_setup_finished - je VAR(finished, 2) + je VAR(finished) cmpb MACRO_LITERAL(74), %al // if (al == 'J') goto FOUND_LONG je 2f cmpb MACRO_LITERAL(70), %al // if (al == 'F') goto SKIP_FLOAT je 3f cmpb MACRO_LITERAL(68), %al // if (al == 'D') goto SKIP_DOUBLE je 4f - movl (%r11), REG_VAR(gpr_reg32, 1) + movl (%r11), REG_VAR(gpr_reg32) addq MACRO_LITERAL(4), %r11 // arg_array++ jmp 5f 2: // FOUND_LONG - movq (%r11), REG_VAR(gpr_reg64, 0) + movq (%r11), REG_VAR(gpr_reg64) addq MACRO_LITERAL(8), %r11 // arg_array+=2 jmp 5f 3: // SKIP_FLOAT @@ -691,94 +690,94 @@ DEFINE_FUNCTION art_quick_do_long_jump END_FUNCTION art_quick_do_long_jump MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(Thread*) + call VAR(cxx_name) // cxx_name(Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO 
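The x86-64 downcall macros above all follow one pattern: build the refs-only frame, load %gs:THREAD_SELF_OFFSET into the next free argument register, call, restore, then run return_macro. In C terms the runtime helpers simply take Thread* as a trailing parameter; a sketch with stand-in names (example_helper, thread_self):

#include <stddef.h>

typedef struct Thread Thread;

/* Stands in for the %gs:THREAD_SELF_OFFSET slot. */
static _Thread_local Thread* thread_self;

/* Hypothetical runtime helper: managed args first, Thread* appended. */
static Thread* example_helper(int arg0, Thread* self) {
  (void)arg0;
  return self;
}

/* Shape of ONE_ARG_DOWNCALL: forward arg0, append Thread::Current(). */
static Thread* one_arg_downcall(int arg0) {
  return example_helper(arg0, thread_self);  /* movq %gs:..., %rsi; call */
}

int main(void) {
  return one_arg_downcall(7) == thread_self ? 0 : 1;
}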
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg0, Thread*) + call VAR(cxx_name) // cxx_name(arg0, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*) + call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*) + call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, arg4, Thread*) + call VAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) movq 8(%rsp), %rsi // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // arg0 is in rdi movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*) + call VAR(cxx_name) // cxx_name(arg0, referrer, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) + END_FUNCTION VAR(c_name) END_MACRO MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) movq 8(%rsp), %rdx // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // arg0 and arg1 are in rdi/rsi movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current() - call VAR(cxx_name, 1) // (arg0, arg1, 
referrer, Thread*) + call VAR(cxx_name) // (arg0, arg1, referrer, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) + END_FUNCTION VAR(c_name) END_MACRO MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro) - DEFINE_FUNCTION VAR(c_name, 0) + DEFINE_FUNCTION VAR(c_name) movq 8(%rsp), %rcx // pass referrer SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // arg0, arg1, and arg2 are in rdi/rsi/rdx movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current() - call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*) + call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address - CALL_MACRO(return_macro, 2) // return or deliver exception - END_FUNCTION VAR(c_name, 0) + CALL_MACRO(return_macro) // return or deliver exception + END_FUNCTION VAR(c_name) END_MACRO MACRO0(RETURN_IF_RESULT_IS_NON_ZERO) @@ -918,8 +917,12 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab // Fast path tlab allocation. // RDI: uint32_t type_idx, RSI: ArtMethod* // RDX, RCX, R8, R9: free. RAX: return val. + // TODO: Add read barrier when this function is used. + // Might need a special macro since rsi and edx is 32b/64b mismatched. movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array UNPOISON_HEAP_REF edx + // TODO: Add read barrier when this function is used. + // Might need to break down into multiple instructions to get the base address in a register. // Load the class movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx UNPOISON_HEAP_REF edx @@ -1125,28 +1128,86 @@ END_FUNCTION art_quick_unlock_object DEFINE_FUNCTION art_quick_check_cast PUSH rdi // Save args for exc PUSH rsi + subq LITERAL(8), %rsp // Alignment padding. + CFI_ADJUST_CFA_OFFSET(8) SETUP_FP_CALLEE_SAVE_FRAME call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass) testq %rax, %rax jz 1f // jump forward if not assignable RESTORE_FP_CALLEE_SAVE_FRAME - addq LITERAL(16), %rsp // pop arguments - CFI_ADJUST_CFA_OFFSET(-16) + addq LITERAL(24), %rsp // pop arguments + CFI_ADJUST_CFA_OFFSET(-24) ret - CFI_ADJUST_CFA_OFFSET(16 + 4 * 8) // Reset unwind info so following code unwinds. + CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds. 1: RESTORE_FP_CALLEE_SAVE_FRAME + addq LITERAL(8), %rsp // pop padding + CFI_ADJUST_CFA_OFFSET(-8) POP rsi // Pop arguments POP rdi SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*) - int3 // unreached + UNREACHABLE END_FUNCTION art_quick_check_cast +// Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack. +MACRO2(POP_REG_NE, reg, exclude_reg) + .ifc RAW_VAR(reg), RAW_VAR(exclude_reg) + addq MACRO_LITERAL(8), %rsp + CFI_ADJUST_CFA_OFFSET(-8) + .else + POP RAW_VAR(reg) + .endif +END_MACRO + + /* + * Macro to insert read barrier, used in art_quick_aput_obj and art_quick_alloc_object_tlab. + * obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as + * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between + * 64b PUSH/POP and 32b argument. + * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path. 
+ * + * As with art_quick_aput_obj* functions, the 64b versions are in comments. + */ +MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64) +#ifdef USE_READ_BARRIER + PUSH rax // save registers that might be used + PUSH rdi + PUSH rsi + PUSH rdx + PUSH rcx + SETUP_FP_CALLEE_SAVE_FRAME + // Outgoing argument set up + // movl %edi, %edi // pass ref, no-op for now since parameter ref is unused + // // movq %rdi, %rdi + movl REG_VAR(obj_reg), %esi // pass obj_reg + // movq REG_VAR(obj_reg), %rsi + movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary + // movq MACRO_LITERAL((RAW_VAR(offset))), %rdx + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset) + // No need to unpoison return value in rax, artReadBarrierSlow() would do the unpoisoning. + .ifnc RAW_VAR(dest_reg32), eax + // .ifnc RAW_VAR(dest_reg64), rax + movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg + // movq %rax, REG_VAR(dest_reg64) + .endif + RESTORE_FP_CALLEE_SAVE_FRAME + POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg + POP_REG_NE rdx, RAW_VAR(dest_reg64) + POP_REG_NE rsi, RAW_VAR(dest_reg64) + POP_REG_NE rdi, RAW_VAR(dest_reg64) + POP_REG_NE rax, RAW_VAR(dest_reg64) +#else + movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32) + // movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64) + UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register +#endif // USE_READ_BARRIER +END_MACRO + /* * Entry from managed code for array put operations of objects where the value being stored * needs to be checked for compatibility. @@ -1191,15 +1252,13 @@ DEFINE_FUNCTION art_quick_aput_obj testl %edx, %edx // store of null // test %rdx, %rdx jz .Ldo_aput_null - movl MIRROR_OBJECT_CLASS_OFFSET(%edi), %ecx -// movq MIRROR_OBJECT_CLASS_OFFSET(%rdi), %rcx - UNPOISON_HEAP_REF ecx - movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx -// movq MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx - UNPOISON_HEAP_REF ecx -#ifdef USE_HEAP_POISONING - movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // rax is free. - UNPOISON_HEAP_REF eax + READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx + // READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx + READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx + // READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx +#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER) + READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free. + // READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax cmpl %eax, %ecx // value's type == array's component type - trivial assignability #else cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability @@ -1224,13 +1283,16 @@ DEFINE_FUNCTION art_quick_aput_obj PUSH rdi PUSH rsi PUSH rdx - subq LITERAL(8), %rsp // Alignment padding. - CFI_ADJUST_CFA_OFFSET(8) SETUP_FP_CALLEE_SAVE_FRAME - // "Uncompress" = do nothing, as already zero-extended on load. - movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class. - UNPOISON_HEAP_REF esi +#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER) + // The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant, eax still holds the value. + movl %eax, %esi // Pass arg2 = value's class. + // movq %rax, %rsi +#else + // "Uncompress" = do nothing, as already zero-extended on load. + movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class. 
+#endif movq %rcx, %rdi // Pass arg1 = array's component type. call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b) @@ -1241,8 +1303,6 @@ DEFINE_FUNCTION art_quick_aput_obj RESTORE_FP_CALLEE_SAVE_FRAME // Restore arguments. - addq LITERAL(8), %rsp - CFI_ADJUST_CFA_OFFSET(-8) POP rdx POP rsi POP rdi @@ -1256,12 +1316,10 @@ DEFINE_FUNCTION art_quick_aput_obj movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero // movb %dl, (%rdx, %rdi) ret - CFI_ADJUST_CFA_OFFSET(32 + 4 * 8) // Reset unwind info so following code unwinds. + CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds. .Lthrow_array_store_exception: RESTORE_FP_CALLEE_SAVE_FRAME // Restore arguments. - addq LITERAL(8), %rsp - CFI_ADJUST_CFA_OFFSET(-8) POP rdx POP rsi POP rdi @@ -1273,7 +1331,7 @@ DEFINE_FUNCTION art_quick_aput_obj movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current(). // Pass arg 1 = array. call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*) - int3 // unreached + UNREACHABLE END_FUNCTION art_quick_aput_obj // TODO: This is quite silly on X86_64 now. @@ -1352,7 +1410,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline int3 #else movq %rax, %rdi - jmp art_quick_invoke_interface_trampoline + INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline #endif // __APPLE__ END_FUNCTION art_quick_imt_conflict_trampoline @@ -1670,7 +1728,7 @@ SYMBOL(art_quick_deoptimize_from_compiled_slow_path): // Stack should be aligned now. movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread. call SYMBOL(artDeoptimize) // artDeoptimize(Thread*) - int3 // Unreachable. + UNREACHABLE END_FUNCTION art_quick_deoptimize /* @@ -1715,7 +1773,11 @@ UNIMPLEMENTED art_quick_memcmp16 DEFINE_FUNCTION art_quick_assignable_from_code SETUP_FP_CALLEE_SAVE_FRAME + subq LITERAL(8), %rsp // Alignment padding. + CFI_ADJUST_CFA_OFFSET(8) call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*) + addq LITERAL(8), %rsp + CFI_ADJUST_CFA_OFFSET(-8) RESTORE_FP_CALLEE_SAVE_FRAME ret END_FUNCTION art_quick_assignable_from_code @@ -1729,5 +1791,16 @@ DEFINE_FUNCTION art_nested_signal_return // first arg to longjmp is already in correct register movq LITERAL(1), %rsi // second arg to longjmp (1) call PLT_SYMBOL(longjmp) - int3 // won't get here + UNREACHABLE END_FUNCTION art_nested_signal_return + +DEFINE_FUNCTION art_quick_read_barrier_slow + SETUP_FP_CALLEE_SAVE_FRAME + subq LITERAL(8), %rsp // Alignment padding. 
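The new READ_BARRIER macro above routes reference loads through artReadBarrierSlow(ref, obj, offset) when USE_READ_BARRIER is defined, saving the caller's scratch registers and using POP_REG_NE so the restore skips whichever register now holds the loaded reference; art_quick_read_barrier_slow (continued just below) exposes the same call as a standalone stub. A simplified C++ model of what the slow path consumes and returns; Forward() is a stand-in, since this diff does not show artReadBarrierSlow's body:

    #include <cstdint>
    #include <cstring>

    using CompressedRef = uint32_t;  // heap references are 32-bit even on 64-bit targets

    static CompressedRef Forward(CompressedRef ref) {
      return ref;  // real barrier: may return a forwarded (to-space) reference
    }

    // Load the 32-bit reference stored at `offset` inside `obj`, then let the
    // collector remap it. The 32-bit load is why READ_BARRIER takes both a 32b
    // and a 64b name for its destination register.
    CompressedRef ReadBarrierSlowModel(const void* obj, uint32_t offset) {
      CompressedRef ref;
      std::memcpy(&ref, static_cast<const char*>(obj) + offset, sizeof(ref));
      return Forward(ref);
    }

    int main() {
      uint32_t fake_obj[2] = {0x1234, 0x5678};
      return ReadBarrierSlowModel(fake_obj, 4) == 0x5678 ? 0 : 1;
    }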
+ CFI_ADJUST_CFA_OFFSET(8) + call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj, offset) + addq LITERAL(8), %rsp + CFI_ADJUST_CFA_OFFSET(-8) + RESTORE_FP_CALLEE_SAVE_FRAME + ret +END_FUNCTION art_quick_read_barrier_slow diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h index 73beb1f168..5138cc99bf 100644 --- a/runtime/art_field-inl.h +++ b/runtime/art_field-inl.h @@ -253,7 +253,7 @@ inline void ArtField::SetObject(mirror::Object* object, mirror::Object* l) { SetObj<kTransactionActive>(object, l); } -inline const char* ArtField::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t field_index = GetDexFieldIndex(); if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) { DCHECK(IsStatic()); @@ -264,7 +264,7 @@ inline const char* ArtField::GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock return dex_file->GetFieldName(dex_file->GetFieldId(field_index)); } -inline const char* ArtField::GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t field_index = GetDexFieldIndex(); if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) { DCHECK(IsStatic()); @@ -278,11 +278,11 @@ inline const char* ArtField::GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mu } inline Primitive::Type ArtField::GetTypeAsPrimitiveType() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Primitive::GetType(GetTypeDescriptor()[0]); } -inline bool ArtField::IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline bool ArtField::IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_) { return GetTypeAsPrimitiveType() != Primitive::kPrimNot; } @@ -304,15 +304,15 @@ inline mirror::Class* ArtField::GetType() { return type; } -inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline size_t ArtField::FieldSize() SHARED_REQUIRES(Locks::mutator_lock_) { return Primitive::ComponentSize(GetTypeAsPrimitiveType()); } -inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline mirror::DexCache* ArtField::GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_) { return GetDeclaringClass()->GetDexCache(); } -inline const DexFile* ArtField::GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline const DexFile* ArtField::GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_) { return GetDexCache()->GetDexFile(); } diff --git a/runtime/art_field.cc b/runtime/art_field.cc index e4a583404a..3737e0ddee 100644 --- a/runtime/art_field.cc +++ b/runtime/art_field.cc @@ -49,10 +49,9 @@ void ArtField::SetOffset(MemberOffset num_bytes) { ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) { DCHECK(klass != nullptr); - auto* instance_fields = klass->GetIFields(); - for (size_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) { - if (instance_fields[i].GetOffset().Uint32Value() == field_offset) { - return &instance_fields[i]; + for (ArtField& field : klass->GetIFields()) { + if (field.GetOffset().Uint32Value() == field_offset) { + return &field; } } // We did not find field in the class: look into superclass. 
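The art_field.cc hunk just above replaces an index loop with iteration by reference; the superclass fallback mentioned in the trailing comment stays as before. A compilable miniature of the resulting shape, with Klass and Field as simplified stand-ins for mirror::Class and ArtField:

    #include <cstdint>
    #include <vector>

    struct Field { uint32_t offset; };
    struct Klass {
      std::vector<Field> ifields;
      Klass* super = nullptr;
    };

    Field* FindInstanceFieldWithOffset(Klass* klass, uint32_t field_offset) {
      for (Field& field : klass->ifields) {  // range-for replaces the indexed loop
        if (field.offset == field_offset) {
          return &field;
        }
      }
      // Not declared here: look into the superclass, as the comment above says.
      return klass->super != nullptr
          ? FindInstanceFieldWithOffset(klass->super, field_offset)
          : nullptr;
    }

    int main() {
      Klass base;
      base.ifields = {{8}};
      Klass derived;
      derived.ifields = {{12}};
      derived.super = &base;
      return FindInstanceFieldWithOffset(&derived, 8) != nullptr ? 0 : 1;
    }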
@@ -62,10 +61,9 @@ ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t f ArtField* ArtField::FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset) { DCHECK(klass != nullptr); - auto* static_fields = klass->GetSFields(); - for (size_t i = 0, count = klass->NumStaticFields(); i < count; ++i) { - if (static_fields[i].GetOffset().Uint32Value() == field_offset) { - return &static_fields[i]; + for (ArtField& field : klass->GetSFields()) { + if (field.GetOffset().Uint32Value() == field_offset) { + return &field; } } return nullptr; diff --git a/runtime/art_field.h b/runtime/art_field.h index 7a03723d00..a943a34174 100644 --- a/runtime/art_field.h +++ b/runtime/art_field.h @@ -21,7 +21,6 @@ #include "gc_root.h" #include "modifiers.h" -#include "object_callbacks.h" #include "offsets.h" #include "primitive.h" #include "read_barrier_option.h" @@ -42,27 +41,27 @@ class ArtField FINAL { public: ArtField(); - mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); void SetDeclaringClass(mirror::Class *new_declaring_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_); - void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. access_flags_ = new_access_flags; } - bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPublic) != 0; } - bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccStatic) != 0; } - bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } @@ -76,115 +75,116 @@ class ArtField FINAL { } // Offset to field within an Object. 
- MemberOffset GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetOffset() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset OffsetOffset() { return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_)); } - MemberOffset GetOffsetDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetOffsetDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_); - void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetOffset(MemberOffset num_bytes) SHARED_REQUIRES(Locks::mutator_lock_); // field access, null object for static fields - uint8_t GetBoolean(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint8_t GetBoolean(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetBoolean(mirror::Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetBoolean(mirror::Object* object, uint8_t z) SHARED_REQUIRES(Locks::mutator_lock_); - int8_t GetByte(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int8_t GetByte(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetByte(mirror::Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetByte(mirror::Object* object, int8_t b) SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetChar(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetChar(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetChar(mirror::Object* object, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetChar(mirror::Object* object, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_); - int16_t GetShort(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int16_t GetShort(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetShort(mirror::Object* object, int16_t s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetShort(mirror::Object* object, int16_t s) SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetInt(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetInt(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetInt(mirror::Object* object, int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetInt(mirror::Object* object, int32_t i) SHARED_REQUIRES(Locks::mutator_lock_); - int64_t GetLong(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int64_t GetLong(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetLong(mirror::Object* object, int64_t j) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetLong(mirror::Object* object, int64_t j) SHARED_REQUIRES(Locks::mutator_lock_); - float GetFloat(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + float GetFloat(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void SetFloat(mirror::Object* object, float f) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetFloat(mirror::Object* object, float f) SHARED_REQUIRES(Locks::mutator_lock_); - double GetDouble(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + double GetDouble(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void 
SetDouble(mirror::Object* object, double d) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDouble(mirror::Object* object, double d) SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetObject(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> void SetObject(mirror::Object* object, mirror::Object* l) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Raw field accesses. - uint32_t Get32(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t Get32(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> void Set32(mirror::Object* object, uint32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - uint64_t Get64(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint64_t Get64(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void Set64(mirror::Object* object, uint64_t new_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Set64(mirror::Object* object, uint64_t new_value) SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetObj(mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetObj(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> void SetObj(mirror::Object* object, mirror::Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires. template<typename RootVisitorType> - void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS; - bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccVolatile) != 0; } // Returns an instance field with this offset in the given class or null if not found. static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a static field with this offset in the given class or null if not found. static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Resolves / returns the name from the dex cache. 
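Most of this art_field.h hunk is the mechanical SHARED_LOCKS_REQUIRED to SHARED_REQUIRES rename, tracking Clang's newer capability-style thread-safety vocabulary. For readers unfamiliar with the machinery, here is a standalone sketch of the raw attributes such macros wrap (the class below is illustrative, not ART's Mutex); compile with clang++ -Wthread-safety to have violations flagged:

    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Lock() __attribute__((acquire_capability())) {}
      void Unlock() __attribute__((release_capability())) {}
      void ReaderLock() __attribute__((acquire_shared_capability())) {}
      void ReaderUnlock() __attribute__((release_shared_capability())) {}
    };

    Mutex mutator_lock;
    int guarded_value __attribute__((guarded_by(mutator_lock)));

    // Shared (reader) access suffices, mirroring SHARED_REQUIRES(Locks::mutator_lock_).
    int ReadValue() __attribute__((requires_shared_capability(mutator_lock))) {
      return guarded_value;
    }

    int main() {
      mutator_lock.ReaderLock();
      int v = ReadValue();  // OK: the shared capability is held
      mutator_lock.ReaderUnlock();
      return v;
    }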
mirror::String* GetStringName(Thread* self, bool resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_); - Primitive::Type GetTypeAsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Primitive::Type GetTypeAsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_); template <bool kResolve> - mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_); - size_t FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t FieldSize() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_); GcRoot<mirror::Class>& DeclaringClassRoot() { return declaring_class_; @@ -192,11 +192,11 @@ class ArtField FINAL { private: mirror::Class* ProxyFindSystemClass(const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_); mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx, mirror::DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); GcRoot<mirror::Class> declaring_class_; diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index 8712bdbbf5..40bb9e1d9b 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -20,6 +20,7 @@ #include "art_method.h" #include "art_field.h" +#include "base/logging.h" #include "dex_file.h" #include "dex_file-inl.h" #include "gc_root-inl.h" @@ -62,6 +63,15 @@ inline void ArtMethod::SetDeclaringClass(mirror::Class* new_declaring_class) { declaring_class_ = GcRoot<mirror::Class>(new_declaring_class); } +inline bool ArtMethod::CASDeclaringClass(mirror::Class* expected_class, + mirror::Class* desired_class) { + GcRoot<mirror::Class> expected_root(expected_class); + GcRoot<mirror::Class> desired_root(desired_class); + return reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_)-> + CompareExchangeStrongSequentiallyConsistent( + expected_root, desired_root); +} + inline uint32_t ArtMethod::GetAccessFlags() { DCHECK(IsRuntimeMethod() || GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous()); @@ -317,7 +327,9 @@ inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) { inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) { DCHECK(code_pointer != nullptr); - DCHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*))); + if (kIsDebugBuild && !IsProxyMethod()) { + CHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*))); + } return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_; } @@ -494,7 +506,7 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor) { inline void ArtMethod::CopyFrom(const ArtMethod* src, size_t image_pointer_size) { 
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src), - ObjectSize(image_pointer_size)); + Size(image_pointer_size)); declaring_class_ = GcRoot<mirror::Class>(const_cast<ArtMethod*>(src)->GetDeclaringClass()); dex_cache_resolved_methods_ = GcRoot<mirror::PointerArray>( const_cast<ArtMethod*>(src)->GetDexCacheResolvedMethods()); diff --git a/runtime/art_method.cc b/runtime/art_method.cc index c78a851b0e..b9e13a4e81 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -20,6 +20,7 @@ #include "art_field-inl.h" #include "art_method-inl.h" #include "base/stringpiece.h" +#include "debugger.h" #include "dex_file-inl.h" #include "dex_instruction.h" #include "entrypoints/entrypoint_utils.h" @@ -35,6 +36,7 @@ #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "mirror/string.h" +#include "oat_file-inl.h" #include "scoped_thread_state_change.h" #include "well_known_classes.h" @@ -93,7 +95,7 @@ size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) { } static bool HasSameNameAndSignature(ArtMethod* method1, ArtMethod* method2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedAssertNoThreadSuspension ants(Thread::Current(), "HasSameNameAndSignature"); const DexFile* dex_file = method1->GetDexFile(); const DexFile::MethodId& mid = dex_file->GetMethodId(method1->GetDexMethodIndex()); @@ -454,7 +456,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* // Counts the number of references in the parameter list of the corresponding method. // Note: Thus does _not_ include "this" for non-static methods. static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t shorty_len; const char* shorty = method->GetShorty(&shorty_len); uint32_t refs = 0; @@ -561,4 +563,14 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param return true; } +const uint8_t* ArtMethod::GetQuickenedInfo() { + bool found = false; + OatFile::OatMethod oat_method = + Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found); + if (!found || (oat_method.GetQuickCode() != nullptr)) { + return nullptr; + } + return oat_method.GetVmapTable(); +} + } // namespace art diff --git a/runtime/art_method.h b/runtime/art_method.h index 4a1e2c4532..cec183789e 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -23,7 +23,6 @@ #include "method_reference.h" #include "modifiers.h" #include "mirror/object.h" -#include "object_callbacks.h" #include "quick/quick_method_frame_info.h" #include "read_barrier_option.h" #include "stack.h" @@ -43,9 +42,6 @@ class Class; class PointerArray; } // namespace mirror -typedef void (EntryPointFromInterpreter)(Thread* self, const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - class ArtMethod FINAL { public: ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0), @@ -57,24 +53,27 @@ class ArtMethod FINAL { static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier() - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetDeclaringClass(mirror::Class *new_declaring_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + + bool CASDeclaringClass(mirror::Class* expected_class, mirror::Class* desired_class) + SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DeclaringClassOffset() { return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_)); } - ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_); void SetAccessFlags(uint32_t new_access_flags) { // Not called within a transaction. @@ -82,35 +81,35 @@ class ArtMethod FINAL { } // Approximate what kind of method call would be used for this method. - InvokeType GetInvokeType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the method is declared public. - bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPublic) != 0; } // Returns true if the method is declared private. - bool IsPrivate() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrivate() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPrivate) != 0; } // Returns true if the method is declared static. - bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccStatic) != 0; } // Returns true if the method is a constructor. - bool IsConstructor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsConstructor() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccConstructor) != 0; } // Returns true if the method is a class initializer. - bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsClassInitializer() SHARED_REQUIRES(Locks::mutator_lock_) { return IsConstructor() && IsStatic(); } // Returns true if the method is static, private, or a constructor. - bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsDirect() SHARED_REQUIRES(Locks::mutator_lock_) { return IsDirect(GetAccessFlags()); } @@ -119,56 +118,56 @@ class ArtMethod FINAL { } // Returns true if the method is declared synchronized. 
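CASDeclaringClass, declared just above and implemented in the art_method-inl.h hunk earlier, retrofits an atomic compare-and-swap onto a plain GcRoot field by reinterpreting its storage as an Atomic slot. The std::atomic equivalent, with a raw pointer standing in for GcRoot<mirror::Class>:

    #include <atomic>

    struct Class;  // opaque stand-in for mirror::Class

    struct Method {
      std::atomic<Class*> declaring_class{nullptr};

      // compare_exchange_strong mirrors CompareExchangeStrongSequentiallyConsistent.
      bool CASDeclaringClass(Class* expected, Class* desired) {
        return declaring_class.compare_exchange_strong(expected, desired,
                                                       std::memory_order_seq_cst);
      }
    };

    int main() {
      Method m;
      Class* c = reinterpret_cast<Class*>(0x1000);  // dummy value, never dereferenced
      bool first = m.CASDeclaringClass(nullptr, c);   // succeeds: slot was null
      bool second = m.CASDeclaringClass(nullptr, c);  // fails: slot no longer null
      return (first && !second) ? 0 : 1;
    }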
- bool IsSynchronized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSynchronized() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized; return (GetAccessFlags() & synchonized) != 0; } - bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } - bool IsMiranda() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsMiranda() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccMiranda) != 0; } - bool IsNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsNative() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccNative) != 0; } - bool ShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccDontInline) != 0; } - void SetShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetShouldNotInline() SHARED_REQUIRES(Locks::mutator_lock_) { SetAccessFlags(GetAccessFlags() | kAccDontInline); } - bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFastNative() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t mask = kAccFastNative | kAccNative; return (GetAccessFlags() & mask) == mask; } - bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccAbstract) != 0; } - bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccSynthetic) != 0; } - bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPreverified) != 0; } - void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsPreverified()); SetAccessFlags(GetAccessFlags() | kAccPreverified); } - bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) { // Temporary solution for detecting if a method has been optimized: the compiler // does not create a GC map. Instead, the vmap table contains the stack map // (as in stack_map.h). @@ -178,18 +177,18 @@ class ArtMethod FINAL { && GetNativeGcMap(pointer_size) == nullptr; } - bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_); // Doesn't do erroneous / unresolved class checks. 
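The predicates above are single-mask bit tests, with IsFastNative as the one two-bit case: both kAccNative and kAccFastNative must be set, hence the comparison against the whole mask rather than against zero. A self-checking sketch (kAccFastNative's value is illustrative here, not the real modifiers.h constant):

    #include <cstdint>

    constexpr uint32_t kAccNative = 0x0100;          // the dex access flag
    constexpr uint32_t kAccFastNative = 0x00080000;  // illustrative runtime-only flag

    constexpr bool IsFastNative(uint32_t access_flags) {
      // == mask, not != 0: a method must be native AND marked fast.
      return (access_flags & (kAccFastNative | kAccNative)) ==
             (kAccFastNative | kAccNative);
    }

    static_assert(!IsFastNative(kAccFastNative), "fast bit alone is not enough");
    static_assert(IsFastNative(kAccFastNative | kAccNative), "both bits set");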
- uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetMethodIndexDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_); - size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetVtableIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetMethodIndex(); } - void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMethodIndex(uint16_t new_method_index) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. method_index_ = new_method_index; } @@ -214,7 +213,7 @@ class ArtMethod FINAL { // Number of 32bit registers that would be required to hold all the arguments static size_t NumArgRegisters(const StringPiece& shorty); - ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_); void SetDexMethodIndex(uint32_t new_idx) { // Not called within a transaction. @@ -230,36 +229,36 @@ class ArtMethod FINAL { } ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + bool HasDexCacheResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_); bool HasSameDexCacheResolvedMethods(ArtMethod* other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kWithCheck = true> mirror::Class* GetDexCacheResolvedType(uint32_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + bool HasDexCacheResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_); + bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_); bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the Class* from the type index into this method's dex cache. mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Find the method that this method overrides. 
- ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); // Find the method index for this method within other_dexfile. If this method isn't present then // return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same @@ -267,27 +266,10 @@ class ArtMethod FINAL { // in the other_dexfile. uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile, uint32_t name_and_signature_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - EntryPointFromInterpreter* GetEntryPointFromInterpreter() { - return GetEntryPointFromInterpreterPtrSize(sizeof(void*)); - } - EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size) { - return GetEntryPoint<EntryPointFromInterpreter*>( - EntryPointFromInterpreterOffset(pointer_size), pointer_size); - } - - void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) { - SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*)); - } - void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter, - size_t pointer_size) { - SetEntryPoint(EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, - pointer_size); - } + SHARED_REQUIRES(Locks::mutator_lock_); const void* GetEntryPointFromQuickCompiledCode() { return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*)); @@ -307,7 +289,7 @@ class ArtMethod FINAL { entry_point_from_quick_compiled_code, pointer_size); } - uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_); // Check whether the given PC is within the quick compiled code associated with this method's // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for @@ -317,12 +299,12 @@ class ArtMethod FINAL { reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc); } - void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the entrypoint points to the interpreter, as // opposed to the compiled code, that is, this method will be // interpretered on invocation. - bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetQuickOatCodeOffset(); void SetQuickOatCodeOffset(uint32_t code_offset); @@ -337,35 +319,37 @@ class ArtMethod FINAL { // Actual entry point pointer to compiled oat code or null. const void* GetQuickOatEntryPoint(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Actual pointer to compiled oat code or null. const void* GetQuickOatCodePointer(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size)); } // Callers should wrap the uint8_t* in a MappingTable instance for convenient access. 
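The PC-within-quick-code check referenced above boils down to a half-open range test against the method's entry point and code size. The usual idiom, sketched here (not the verbatim ART implementation), performs both bounds checks with one unsigned subtraction, since a pc below the entry point wraps around to a huge value:

    #include <cstdint>

    inline bool PcIsWithin(uintptr_t entry, uint32_t code_size, uintptr_t pc) {
      return pc - entry < code_size;  // wraps, so pc < entry also fails the test
    }

    int main() {
      const uintptr_t entry = 0x1000;
      bool inside = PcIsWithin(entry, 0x40, 0x1010);
      bool below = PcIsWithin(entry, 0x40, 0x0fff);
      return (inside && !below) ? 0 : 1;
    }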
const uint8_t* GetMappingTable(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Callers should wrap the uint8_t* in a VmapTable instance for convenient access. const uint8_t* GetVmapTable(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_); + + CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_); // Callers should wrap the uint8_t* in a GcMap instance for convenient access. const uint8_t* GetNativeGcMap(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kCheckFrameSize = true> - uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t result = GetQuickFrameInfo().FrameSizeInBytes(); if (kCheckFrameSize) { DCHECK_LE(static_cast<size_t>(kStackAlignment), result); @@ -373,35 +357,30 @@ class ArtMethod FINAL { return result; } - QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_); QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) { return GetReturnPcOffset(GetFrameSizeInBytes()); } FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes()); return FrameOffset(frame_size_in_bytes - sizeof(void*)); } - FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) { constexpr size_t handle_scope_offset = sizeof(ArtMethod*); DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes()); return FrameOffset(handle_scope_offset); } void RegisterNative(const void* native_method, bool is_fast) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) { - return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER( - PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size); - } + void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset EntryPointFromJniOffset(size_t pointer_size) { return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER( @@ -420,7 +399,7 @@ class ArtMethod FINAL { return 
GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size); } - void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetEntryPointFromJni(const void* entrypoint) SHARED_REQUIRES(Locks::mutator_lock_) { SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*)); } ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) { @@ -432,34 +411,34 @@ class ArtMethod FINAL { ALWAYS_INLINE bool IsRuntimeMethod(); // Is this a hand crafted method used for something like describing callee saves? - bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_); - uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_); #ifdef NDEBUG uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return pc - reinterpret_cast<uintptr_t>(quick_entry_point); } #else uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); #endif // Converts a native PC to a dex PC. uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Converts a dex PC to a native PC. uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - MethodReference ToMethodReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) { return MethodReference(GetDexFile(), GetDexMethodIndex()); } @@ -468,75 +447,83 @@ class ArtMethod FINAL { // a move-exception instruction is present. uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc, bool* has_no_move_exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + // NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires. 
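The NO_THREAD_SAFETY_ANALYSIS note just above (it applies to the VisitRoots template that follows) records a genuine limitation: Clang's analysis is per-function, so it cannot know which locks an arbitrary visitor callback will need, and the requirement has to live on the visitor instead. A tiny compilable illustration with simplified stand-in types:

    struct Root {};

    // Opt this function out of the analysis: whatever VisitRoot() requires is
    // the callback's business, not ours.
    template <typename RootVisitorType>
    void VisitRoots(RootVisitorType& visitor) __attribute__((no_thread_safety_analysis)) {
      Root root;
      visitor.VisitRoot(&root);
    }

    struct CountingVisitor {
      int count = 0;
      void VisitRoot(Root*) { ++count; }
    };

    int main() {
      CountingVisitor v;
      VisitRoots(v);
      return v.count == 1 ? 0 : 1;
    }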
template<typename RootVisitorType> - void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS; - const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetDeclaringClassDescriptor() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const char* GetShorty() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t unused_length; return GetShorty(&unused_length); } - const char* GetShorty(uint32_t* out_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetShorty(uint32_t* out_length) SHARED_REQUIRES(Locks::mutator_lock_); - const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const Signature GetSignature() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::String* GetNameAsString(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::ProtoId& GetPrototype() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ProtoId& GetPrototype() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::TypeList* GetParameterTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::TypeList* GetParameterTypeList() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetDeclaringClassSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetDeclaringClassSourceFile() SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::ClassDef& GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ClassDef& GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetReturnTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetReturnTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_); const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType this caused a large // number of bugs at call sites. 
- mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetReturnType(bool resolve = true) SHARED_REQUIRES(Locks::mutator_lock_); - mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // May cause thread suspension due to class resolution. bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - // Size of an instance of this object. - static size_t ObjectSize(size_t pointer_size) { + // Size of an instance of this native class. + static size_t Size(size_t pointer_size) { return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size) + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size; } + // Alignment of an instance of this native class. + static size_t Alignment(size_t pointer_size) { + // The ArtMethod alignment is the same as image pointer size. This differs from + // alignof(ArtMethod) if cross-compiling with pointer_size != sizeof(void*). + return pointer_size; + } + void CopyFrom(const ArtMethod* src, size_t image_pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". @@ -573,10 +560,6 @@ class ArtMethod FINAL { // PACKED(4) is necessary for the correctness of // RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size). struct PACKED(4) PtrSizedFields { - // Method dispatch from the interpreter invokes this pointer which may cause a bridge into - // compiled code. - void* entry_point_from_interpreter_; - // Pointer to JNI function registered to this method, or a function to resolve the JNI function. void* entry_point_from_jni_; diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 10ed0f4dfa..35acd424ba 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -89,7 +89,7 @@ ADD_TEST_EQ(THREAD_ID_OFFSET, art::Thread::ThinLockIdOffset<__SIZEOF_POINTER__>().Int32Value()) // Offset of field Thread::tlsPtr_.card_table. -#define THREAD_CARD_TABLE_OFFSET 128 +#define THREAD_CARD_TABLE_OFFSET 136 ADD_TEST_EQ(THREAD_CARD_TABLE_OFFSET, art::Thread::CardTableOffset<__SIZEOF_POINTER__>().Int32Value()) @@ -103,17 +103,20 @@ ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET, ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET, art::Thread::TopOfManagedStackOffset<__SIZEOF_POINTER__>().Int32Value()) -// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_. +// Offset of field Thread::tlsPtr_.self. #define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__)) ADD_TEST_EQ(THREAD_SELF_OFFSET, art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value()) -#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 147 * __SIZEOF_POINTER__) +// Offset of field Thread::tlsPtr_.thread_local_pos. 
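The asm_support.h hunks above and below pair every hand-written assembly constant with an ADD_TEST_EQ check against the real C++ layout; that is what lets renumberings such as THREAD_CARD_TABLE_OFFSET going from 128 to 136 land safely after Thread's layout changed elsewhere in this change. The same idea in miniature, with an invented struct and constant:

    #include <cstddef>
    #include <cstdint>

    struct DemoThread {
      uint64_t state_and_flags;
      void* card_table;  // sits at offset 8, after the 8-byte field above
    };

    #define DEMO_THREAD_CARD_TABLE_OFFSET 8
    static_assert(offsetof(DemoThread, card_table) == DEMO_THREAD_CARD_TABLE_OFFSET,
                  "assembly-side constant drifted from the C++ layout");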
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 151 * __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET, art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value()) +// Offset of field Thread::tlsPtr_.thread_local_end. #define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET, art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value()) +// Offset of field Thread::tlsPtr_.thread_local_objects. #define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET, art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value()) @@ -138,10 +141,10 @@ ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET, #define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (36 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET, art::mirror::Class::AccessFlagsOffset().Int32Value()) -#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE) +#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET, art::mirror::Class::ObjectSizeOffset().Int32Value()) -#define MIRROR_CLASS_STATUS_OFFSET (124 + MIRROR_OBJECT_HEADER_SIZE) +#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE) ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET, art::mirror::Class::StatusOffset().Int32Value()) @@ -189,11 +192,11 @@ ADD_TEST_EQ(ART_METHOD_DEX_CACHE_METHODS_OFFSET, ADD_TEST_EQ(ART_METHOD_DEX_CACHE_TYPES_OFFSET, art::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()) -#define ART_METHOD_QUICK_CODE_OFFSET_32 36 +#define ART_METHOD_QUICK_CODE_OFFSET_32 32 ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_32, art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value()) -#define ART_METHOD_QUICK_CODE_OFFSET_64 48 +#define ART_METHOD_QUICK_CODE_OFFSET_64 40 ADD_TEST_EQ(ART_METHOD_QUICK_CODE_OFFSET_64, art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value()) diff --git a/runtime/barrier.h b/runtime/barrier.h index 0e7f61ef71..94977fb741 100644 --- a/runtime/barrier.h +++ b/runtime/barrier.h @@ -39,10 +39,10 @@ class Barrier { virtual ~Barrier(); // Pass through the barrier, decrement the count but do not block. - void Pass(Thread* self); + void Pass(Thread* self) REQUIRES(!lock_); // Wait on the barrier, decrement the count. - void Wait(Thread* self); + void Wait(Thread* self) REQUIRES(!lock_); // The following three calls are only safe if we somehow know that no other thread both // - has been woken up, and @@ -51,18 +51,18 @@ class Barrier { // to sleep, resulting in a deadlock. // Increment the count by delta, wait on condition if count is non zero. - void Increment(Thread* self, int delta) LOCKS_EXCLUDED(lock_); + void Increment(Thread* self, int delta) REQUIRES(!lock_); // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns // true if time out occurred. - bool Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_); + bool Increment(Thread* self, int delta, uint32_t timeout_ms) REQUIRES(!lock_); // Set the count to a new value. This should only be used if there is no possibility that // another thread is still in Wait(). See above. 
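The barrier.h hunk above also upgrades LOCKS_EXCLUDED(lock_) to REQUIRES(!lock_), a negative capability: the caller must provably not hold the lock, which Clang can enforce at call sites when building with -Wthread-safety-negative. A minimal sketch of the pattern (illustrative types, not ART's Mutex or Barrier):

    class __attribute__((capability("mutex"))) Mutex {
     public:
      void Lock() __attribute__((acquire_capability())) {}
      void Unlock() __attribute__((release_capability())) {}
    };

    class Barrier {
     public:
      // Callers must not already hold lock_, because Pass() acquires it itself.
      void Pass() __attribute__((requires_capability(!lock_))) {
        lock_.Lock();
        --count_;
        lock_.Unlock();
      }

     private:
      Mutex lock_;
      int count_ __attribute__((guarded_by(lock_))) = 0;
    };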
-  void Init(Thread* self, int count);
+  void Init(Thread* self, int count) REQUIRES(!lock_);

 private:
-  void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  void SetCountLocked(Thread* self, int count) REQUIRES(lock_);

   // Counter, when this reaches 0 all people blocked on the barrier are signalled.
   int count_ GUARDED_BY(lock_);
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 07daa7e0fa..3422625282 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -50,6 +50,7 @@ enum AllocatorTag {
   kAllocatorTagMonitorList,
   kAllocatorTagClassTable,
   kAllocatorTagInternTable,
+  kAllocatorTagLambdaBoxTable,
   kAllocatorTagMaps,
   kAllocatorTagLOS,
   kAllocatorTagSafeMap,
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 8f2d94b564..e5832e151f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -23,11 +23,11 @@
 #include "mem_map.h"
 #include "mutex.h"
 #include "thread-inl.h"
-#include <memcheck/memcheck.h>
+#include "base/memory_tool.h"

 namespace art {

-static constexpr size_t kValgrindRedZoneBytes = 8;
+static constexpr size_t kMemoryToolRedZoneBytes = 8;
 constexpr size_t Arena::kDefaultSize;

 template <bool kCount>
@@ -217,9 +217,9 @@ size_t ArenaPool::GetBytesAllocated() const {
 }

 void ArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_VALGRIND > 0)) {
+  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
     for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
-      VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
     }
   }
   if (first != nullptr) {
@@ -255,7 +255,7 @@ ArenaAllocator::ArenaAllocator(ArenaPool* pool)
     end_(nullptr),
     ptr_(nullptr),
     arena_head_(nullptr),
-    running_on_valgrind_(RUNNING_ON_VALGRIND > 0) {
+    is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL) {
 }

 void ArenaAllocator::UpdateBytesAllocated() {
@@ -267,7 +267,7 @@ void ArenaAllocator::UpdateBytesAllocated() {
 }

 void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
-  size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8);
+  size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
   if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
     // Obtain a new block.
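AllocValgrind's sizing, split across the lines above and below, pads each request by the red-zone amount and rounds to 8 bytes; everything past the caller's bytes is later poisoned with MEMORY_TOOL_MAKE_NOACCESS so overruns trap under the memory tool. The arithmetic, modeled and self-checked (helper names are mine):

    #include <cstddef>

    constexpr size_t kRedZoneBytes = 8;  // mirrors kMemoryToolRedZoneBytes above

    constexpr size_t RoundUp8(size_t n) { return (n + 7) & ~size_t{7}; }

    // Bytes reserved beyond the caller's request, i.e. the poisoned tail:
    // rounded_bytes - bytes in the real code.
    constexpr size_t RedZoneTail(size_t requested) {
      return RoundUp8(requested + kRedZoneBytes) - requested;
    }

    static_assert(RedZoneTail(1) == 15, "1 byte requested: 16 reserved, 15 poisoned");
    static_assert(RedZoneTail(8) == 8, "8 bytes requested: 16 reserved, 8 poisoned");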
ObtainNewArenaForAllocation(rounded_bytes); @@ -282,7 +282,7 @@ void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) { for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) { CHECK_EQ(*ptr, 0U); } - VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes); + MEMORY_TOOL_MAKE_NOACCESS(ret + bytes, rounded_bytes - bytes); return ret; } diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h index d9723b57de..05c66f06ee 100644 --- a/runtime/base/arena_allocator.h +++ b/runtime/base/arena_allocator.h @@ -170,7 +170,7 @@ class MallocArena FINAL : public Arena { class MemMapArena FINAL : public Arena { public: - explicit MemMapArena(size_t size, bool low_4gb); + MemMapArena(size_t size, bool low_4gb); virtual ~MemMapArena(); void Release() OVERRIDE; @@ -182,12 +182,12 @@ class ArenaPool { public: explicit ArenaPool(bool use_malloc = true, bool low_4gb = false); ~ArenaPool(); - Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_); - void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_); - size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_); + Arena* AllocArena(size_t size) REQUIRES(!lock_); + void FreeArenaChain(Arena* first) REQUIRES(!lock_); + size_t GetBytesAllocated() const REQUIRES(!lock_); // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works // use_malloc is false. - void TrimMaps() LOCKS_EXCLUDED(lock_); + void TrimMaps() REQUIRES(!lock_); private: const bool use_malloc_; @@ -207,7 +207,7 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats // Returns zeroed memory. void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE { - if (UNLIKELY(running_on_valgrind_)) { + if (UNLIKELY(is_running_on_memory_tool_)) { return AllocValgrind(bytes, kind); } bytes = RoundUp(bytes, kAlignment); @@ -280,7 +280,7 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats uint8_t* end_; uint8_t* ptr_; Arena* arena_head_; - bool running_on_valgrind_; + bool is_running_on_memory_tool_; template <typename U> friend class ArenaAllocatorAdapter; diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h index d6c4a54b52..a7aafdf29e 100644 --- a/runtime/base/arena_containers.h +++ b/runtime/base/arena_containers.h @@ -134,7 +134,7 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind { typedef ArenaAllocatorAdapter<U> other; }; - explicit ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind) + ArenaAllocatorAdapter(ArenaAllocator* arena_allocator, ArenaAllocKind kind) : ArenaAllocatorAdapterKind(kind), arena_allocator_(arena_allocator) { } diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h index 797215822c..1b0d774419 100644 --- a/runtime/base/bit_utils.h +++ b/runtime/base/bit_utils.h @@ -29,21 +29,28 @@ namespace art { template<typename T> static constexpr int CLZ(T x) { static_assert(std::is_integral<T>::value, "T must be integral"); - // TODO: assert unsigned. There is currently many uses with signed values. + static_assert(std::is_unsigned<T>::value, "T must be unsigned"); static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4] "T too large, must be smaller than long long"); - return (sizeof(T) == sizeof(uint32_t)) - ? __builtin_clz(x) // TODO: __builtin_clz[ll] has undefined behavior for x=0 - : __builtin_clzll(x); + return + DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0)) + (sizeof(T) == sizeof(uint32_t)) + ? 
__builtin_clz(x) + : __builtin_clzll(x); } template<typename T> static constexpr int CTZ(T x) { static_assert(std::is_integral<T>::value, "T must be integral"); - // TODO: assert unsigned. There is currently many uses with signed values. - return (sizeof(T) == sizeof(uint32_t)) - ? __builtin_ctz(x) - : __builtin_ctzll(x); + // It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check + // that T is an unsigned type. + static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4] + "T too large, must be smaller than long long"); + return + DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0)) + (sizeof(T) == sizeof(uint32_t)) + ? __builtin_ctz(x) + : __builtin_ctzll(x); } template<typename T> @@ -137,7 +144,7 @@ static inline T* AlignUp(T* x, uintptr_t n) { } template<int n, typename T> -static inline bool IsAligned(T x) { +static constexpr bool IsAligned(T x) { static_assert((n & (n - 1)) == 0, "n is not a power of two"); return (x & (n - 1)) == 0; } @@ -158,6 +165,9 @@ static inline bool IsAlignedParam(T x, int n) { #define DCHECK_ALIGNED(value, alignment) \ DCHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value) +#define CHECK_ALIGNED_PARAM(value, alignment) \ + CHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value) + #define DCHECK_ALIGNED_PARAM(value, alignment) \ DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value) diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc index 39ce0d2cbe..cfd3d24aad 100644 --- a/runtime/base/bit_vector.cc +++ b/runtime/base/bit_vector.cc @@ -24,11 +24,7 @@ namespace art { -// TODO: replace excessive argument defaulting when we are at gcc 4.7 -// or later on host with delegating constructor support. Specifically, -// starts_bits and storage_size/storage are mutually exclusive. -BitVector::BitVector(uint32_t start_bits, - bool expandable, +BitVector::BitVector(bool expandable, Allocator* allocator, uint32_t storage_size, uint32_t* storage) @@ -36,12 +32,31 @@ BitVector::BitVector(uint32_t start_bits, storage_size_(storage_size), allocator_(allocator), expandable_(expandable) { + DCHECK(storage_ != nullptr); + static_assert(sizeof(*storage_) == kWordBytes, "word bytes"); static_assert(sizeof(*storage_) * 8u == kWordBits, "word bits"); - if (storage_ == nullptr) { - storage_size_ = BitsToWords(start_bits); - storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes)); - } +} + +BitVector::BitVector(uint32_t start_bits, + bool expandable, + Allocator* allocator) + : BitVector(expandable, + allocator, + BitsToWords(start_bits), + static_cast<uint32_t*>(allocator->Alloc(BitsToWords(start_bits) * kWordBytes))) { +} + + +BitVector::BitVector(const BitVector& src, + bool expandable, + Allocator* allocator) + : BitVector(expandable, + allocator, + src.storage_size_, + static_cast<uint32_t*>(allocator->Alloc(src.storage_size_ * kWordBytes))) { + // Direct memcpy would be faster, but this should be fine too and is cleaner. 
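The bit_utils.h hunks above tighten CLZ to unsigned operands while CTZ stays signed-friendly, and both now treat x == 0 as a caller error, since __builtin_clz[ll] and __builtin_ctz[ll] are undefined there. A compile-time sketch of the same dispatch, assuming C++11 and the GCC/Clang builtins:

    #include <cstdint>
    #include <type_traits>

    template <typename T>
    constexpr int Clz(T x) {
      static_assert(std::is_unsigned<T>::value, "T must be unsigned");
      static_assert(sizeof(T) <= sizeof(long long),  // NOLINT [runtime/int]
                    "T too large, must be smaller than long long");
      // Undefined for x == 0; the patch guards this with DCHECK_CONSTEXPR.
      return sizeof(T) == sizeof(uint32_t) ? __builtin_clz(x)
                                           : __builtin_clzll(x);
    }

    static_assert(Clz(0x80000000u) == 0, "top bit set: no leading zeros");
    static_assert(Clz(uint64_t(1)) == 63, "only bit 0 set in a 64-bit value");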
+ Copy(&src); } BitVector::~BitVector() { @@ -357,4 +372,8 @@ void BitVector::EnsureSize(uint32_t idx) { } } +Allocator* BitVector::GetAllocator() const { + return allocator_; +} + } // namespace art diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h index afa8dc187e..9b55e708c8 100644 --- a/runtime/base/bit_vector.h +++ b/runtime/base/bit_vector.h @@ -113,9 +113,16 @@ class BitVector { BitVector(uint32_t start_bits, bool expandable, + Allocator* allocator); + + BitVector(bool expandable, Allocator* allocator, - uint32_t storage_size = 0, - uint32_t* storage = nullptr); + uint32_t storage_size, + uint32_t* storage); + + BitVector(const BitVector& src, + bool expandable, + Allocator* allocator); virtual ~BitVector(); @@ -245,6 +252,8 @@ class BitVector { void Dump(std::ostream& os, const char* prefix) const; + Allocator* GetAllocator() const; + private: /** * @brief Dump the bitvector into buffer in a 00101..01 format. diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc index 19c01f20e7..0e3df76fe2 100644 --- a/runtime/base/bit_vector_test.cc +++ b/runtime/base/bit_vector_test.cc @@ -71,7 +71,7 @@ TEST(BitVector, NoopAllocator) { uint32_t bits[kWords]; memset(bits, 0, sizeof(bits)); - BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits); + BitVector bv(false, Allocator::GetNoopAllocator(), kWords, bits); EXPECT_EQ(kWords, bv.GetStorageSize()); EXPECT_EQ(kWords * sizeof(uint32_t), bv.GetSizeOf()); EXPECT_EQ(bits, bv.GetRawStorage()); @@ -128,7 +128,7 @@ TEST(BitVector, SetInitialBits) { uint32_t bits[kWords]; memset(bits, 0, sizeof(bits)); - BitVector bv(0U, false, Allocator::GetNoopAllocator(), kWords, bits); + BitVector bv(false, Allocator::GetNoopAllocator(), kWords, bits); bv.SetInitialBits(0u); EXPECT_EQ(0u, bv.NumSetBits()); bv.SetInitialBits(1u); diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h index f2c8355f53..d110fe30b7 100644 --- a/runtime/base/hash_set.h +++ b/runtime/base/hash_set.h @@ -231,19 +231,33 @@ class HashSet { return ret; } + // Lower case for c++11 for each. const version. + ConstIterator begin() const { + ConstIterator ret(this, 0); + if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) { + ++ret; // Skip all the empty slots. + } + return ret; + } + // Lower case for c++11 for each. Iterator end() { return Iterator(this, NumBuckets()); } + // Lower case for c++11 for each. const version. + ConstIterator end() const { + return ConstIterator(this, NumBuckets()); + } + bool Empty() { return Size() == 0; } // Erase algorithm: // Make an empty slot where the iterator is pointing. - // Scan fowards until we hit another empty slot. - // If an element inbetween doesn't rehash to the range from the current empty slot to the + // Scan forwards until we hit another empty slot. + // If an element in between doesn't rehash to the range from the current empty slot to the // iterator. It must be before the empty slot, in that case we can move it to the empty slot // and set the empty slot to be the location we just moved from. // Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an @@ -299,23 +313,23 @@ class HashSet { // Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy // object in the heap for performance solution. 
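The BitVector rework above replaces mutually exclusive defaulted arguments (start_bits vs. storage_size/storage) with three constructors that delegate to one canonical form, the C++11 idiom the removed TODO was waiting on. A reduced sketch of the pattern on a hypothetical BitBuffer, not the real class:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    class BitBuffer {
     public:
      // Canonical constructor: explicit word count and fill value.
      BitBuffer(uint32_t words, uint32_t fill) : storage_(words, fill) {}

      // Size-in-bits form: delegates instead of defaulting parameters and
      // branching inside a single shared body.
      explicit BitBuffer(uint32_t bits) : BitBuffer((bits + 31) / 32, 0u) {}

      // Copy form, mirroring the new BitVector copy constructor: delegate
      // for the allocation, then copy the payload.
      BitBuffer(const BitBuffer& src, uint32_t extra_bits)
          : BitBuffer(static_cast<uint32_t>(src.storage_.size()) * 32 + extra_bits) {
        std::copy(src.storage_.begin(), src.storage_.end(), storage_.begin());
      }

     private:
      std::vector<uint32_t> storage_;
    };

Each convenience constructor reduces to the canonical one, so invariants like the word-size static_asserts only need to live in one place.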
template <typename K> - Iterator Find(const K& element) { - return FindWithHash(element, hashfn_(element)); + Iterator Find(const K& key) { + return FindWithHash(key, hashfn_(key)); } template <typename K> - ConstIterator Find(const K& element) const { - return FindWithHash(element, hashfn_(element)); + ConstIterator Find(const K& key) const { + return FindWithHash(key, hashfn_(key)); } template <typename K> - Iterator FindWithHash(const K& element, size_t hash) { - return Iterator(this, FindIndex(element, hash)); + Iterator FindWithHash(const K& key, size_t hash) { + return Iterator(this, FindIndex(key, hash)); } template <typename K> - ConstIterator FindWithHash(const K& element, size_t hash) const { - return ConstIterator(this, FindIndex(element, hash)); + ConstIterator FindWithHash(const K& key, size_t hash) const { + return ConstIterator(this, FindIndex(key, hash)); } // Insert an element, allows duplicates. @@ -399,6 +413,10 @@ class HashSet { } size_t IndexForHash(size_t hash) const { + // Protect against undefined behavior (division by zero). + if (UNLIKELY(num_buckets_ == 0)) { + return 0; + } return hash % num_buckets_; } @@ -414,6 +432,10 @@ class HashSet { // This value for not found is important so that Iterator(this, FindIndex(...)) == end(). template <typename K> size_t FindIndex(const K& element, size_t hash) const { + // Guard against failing to get an element for a non-existing index. + if (UNLIKELY(NumBuckets() == 0)) { + return 0; + } DCHECK_EQ(hashfn_(element), hash); size_t index = IndexForHash(hash); while (true) { @@ -448,31 +470,31 @@ class HashSet { } void DeallocateStorage() { - if (num_buckets_ != 0) { - if (owns_data_) { - for (size_t i = 0; i < NumBuckets(); ++i) { - allocfn_.destroy(allocfn_.address(data_[i])); - } + if (owns_data_) { + for (size_t i = 0; i < NumBuckets(); ++i) { + allocfn_.destroy(allocfn_.address(data_[i])); + } + if (data_ != nullptr) { allocfn_.deallocate(data_, NumBuckets()); - owns_data_ = false; } - data_ = nullptr; - num_buckets_ = 0; + owns_data_ = false; } + data_ = nullptr; + num_buckets_ = 0; } // Expand the set based on the load factors. void Expand() { size_t min_index = static_cast<size_t>(Size() / min_load_factor_); - if (min_index < kMinBuckets) { - min_index = kMinBuckets; - } // Resize based on the minimum load factor. Resize(min_index); } // Expand / shrink the table to the new specified size. void Resize(size_t new_size) { + if (new_size < kMinBuckets) { + new_size = kMinBuckets; + } DCHECK_GE(new_size, Size()); T* const old_data = data_; size_t old_num_buckets = num_buckets_; diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc index fd9eb45e3f..4ef1f9e8c9 100644 --- a/runtime/base/hash_set_test.cc +++ b/runtime/base/hash_set_test.cc @@ -186,6 +186,12 @@ TEST_F(HashSetTest, TestShrink) { // Shrink again, the load factor should be good again. 
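Two of the HashSet guards above handle num_buckets_ == 0: IndexForHash would otherwise compute hash % 0 (undefined behavior), and FindIndex must return an index for which Iterator(this, index) == end(). A sketch of the lookup path with the guard in place, assuming a toy open-addressed table where an 'empty' sentinel marks free slots:

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Linear-probe lookup that tolerates a completely empty table.
    // Termination relies on the usual invariant that the load factor stays
    // below 1, so a free slot always exists in a non-empty table.
    template <typename T>
    size_t FindIndex(const std::vector<T>& buckets, const T& key, const T& empty) {
      if (buckets.empty()) {
        return 0;  // guard: '% 0' is UB, and 0 == NumBuckets() == end() here
      }
      size_t index = std::hash<T>()(key) % buckets.size();
      while (true) {
        if (buckets[index] == empty) {
          return buckets.size();  // not found: index equals end()
        }
        if (buckets[index] == key) {
          return index;
        }
        index = (index + 1) % buckets.size();  // probe the next slot
      }
    }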
hash_set.ShrinkToMaximumLoad(); EXPECT_DOUBLE_EQ(initial_load, hash_set.CalculateLoadFactor()); + + // Make sure all the initial elements we had are still there + for (const std::string& initial_string : strings) { + EXPECT_NE(hash_set.end(), hash_set.Find(initial_string)) + << "expected to find " << initial_string; + } } TEST_F(HashSetTest, TestStress) { diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h index aba376287e..03980e3273 100644 --- a/runtime/base/histogram-inl.h +++ b/runtime/base/histogram-inl.h @@ -66,7 +66,7 @@ inline void Histogram<Value>::GrowBuckets(Value new_max) { while (max_ < new_max) { // If we have reached the maximum number of buckets, merge buckets together. if (frequency_.size() >= max_buckets_) { - CHECK(IsAligned<2>(frequency_.size())); + CHECK_ALIGNED(frequency_.size(), 2); // We double the width of each bucket to reduce the number of buckets by a factor of 2. bucket_width_ *= 2; const size_t limit = frequency_.size() / 2; diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h index 6a0ef1f585..cf02d32d03 100644 --- a/runtime/base/iteration_range.h +++ b/runtime/base/iteration_range.h @@ -49,6 +49,11 @@ static inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, cons return IterationRange<Iter>(begin_it, end_it); } +template <typename Iter> +static inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) { + return IterationRange<Iter>(it, it); +} + } // namespace art #endif // ART_RUNTIME_BASE_ITERATION_RANGE_H_ diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 859de4bd5b..7a620e375b 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -26,7 +26,7 @@ #include "utils.h" // Headers for LogMessage::LogLine. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/log.h" #else #include <sys/types.h> @@ -47,7 +47,7 @@ static std::unique_ptr<std::string> gProgramInvocationShortName; // Print INTERNAL_FATAL messages directly instead of at destruction time. This only works on the // host right now: for the device, a stream buf collating output into lines and calling LogLine or // lower-level logging is necessary. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ static constexpr bool kPrintInternalFatalDirectly = false; #else static constexpr bool kPrintInternalFatalDirectly = !kIsTargetBuild; @@ -234,7 +234,7 @@ std::ostream& LogMessage::stream() { return data_->GetBuffer(); } -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ static const android_LogPriority kLogSeverityToAndroidLogPriority[] = { ANDROID_LOG_VERBOSE, ANDROID_LOG_DEBUG, ANDROID_LOG_INFO, ANDROID_LOG_WARN, ANDROID_LOG_ERROR, ANDROID_LOG_FATAL, ANDROID_LOG_FATAL @@ -245,7 +245,7 @@ static_assert(arraysize(kLogSeverityToAndroidLogPriority) == INTERNAL_FATAL + 1, void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_severity, const char* message) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ const char* tag = ProgramInvocationShortName(); int priority = kLogSeverityToAndroidLogPriority[log_severity]; if (priority == ANDROID_LOG_FATAL) { @@ -264,7 +264,7 @@ void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity log_se void LogMessage::LogLineLowStack(const char* file, unsigned int line, LogSeverity log_severity, const char* message) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Use android_writeLog() to avoid stack-based buffers used by android_printLog(). 
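MakeEmptyIterationRange above builds a range whose begin and end are the same iterator, so a range-based for over it runs zero times; that is useful when an API must return an IterationRange but has nothing to yield. A self-contained sketch of the helper and a caller:

    #include <iostream>
    #include <vector>

    template <typename Iter>
    struct IterationRange {
      IterationRange(Iter b, Iter e) : begin_(b), end_(e) {}
      Iter begin() const { return begin_; }
      Iter end() const { return end_; }
      Iter begin_, end_;
    };

    template <typename Iter>
    IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) {
      return IterationRange<Iter>(it, it);  // begin == end: an empty range
    }

    int main() {
      std::vector<int> v = {1, 2, 3};
      for (int x : MakeEmptyIterationRange(v.begin())) {
        std::cout << x;  // never executes
      }
      return 0;
    }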
const char* tag = ProgramInvocationShortName(); int priority = kLogSeverityToAndroidLogPriority[log_severity]; diff --git a/runtime/base/logging.h b/runtime/base/logging.h index 35b50d1e45..2cd1a4de9f 100644 --- a/runtime/base/logging.h +++ b/runtime/base/logging.h @@ -17,7 +17,6 @@ #ifndef ART_RUNTIME_BASE_LOGGING_H_ #define ART_RUNTIME_BASE_LOGGING_H_ -#include <memory> #include <ostream> #include "base/macros.h" @@ -238,7 +237,7 @@ class LogMessage { public: LogMessage(const char* file, unsigned int line, LogSeverity severity, int error); - ~LogMessage(); // TODO: enable LOCKS_EXCLUDED(Locks::logging_lock_). + ~LogMessage(); // TODO: enable REQUIRES(!Locks::logging_lock_). // Returns the stream associated with the message, the LogMessage performs output when it goes // out of scope. diff --git a/runtime/base/macros.h b/runtime/base/macros.h index 5c596471c2..1d5dee23f8 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -244,18 +244,14 @@ template<typename... T> void UNUSED(const T&...) {} #define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) #define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) -#define EXCLUSIVE_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) #define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded) -#define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable) #define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) -#define LOCKS_EXCLUDED(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) #define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) #define PT_GUARDED_BY(x) // THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x)) #define PT_GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded) #define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) -#define SHARED_LOCKS_REQUIRED(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) #if defined(__clang__) #define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) @@ -263,12 +259,43 @@ template<typename... T> void UNUSED(const T&...) {} #define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) #define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) +#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) +#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) +#define CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(capability(__VA_ARGS__)) +#define SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_capability(__VA_ARGS__)) +#define ASSERT_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(__VA_ARGS__)) +#define ASSERT_SHARED_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(__VA_ARGS__)) +#define RETURN_CAPABILITY(...) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(__VA_ARGS__)) +#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) +#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) +#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) +#define ACQUIRE_SHARED(...) 
THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) +#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) +#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) +#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) #else #define EXCLUSIVE_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(__VA_ARGS__)) #define EXCLUSIVE_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(__VA_ARGS__)) #define SHARED_LOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(__VA_ARGS__)) #define SHARED_TRYLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(__VA_ARGS__)) #define UNLOCK_FUNCTION(...) THREAD_ANNOTATION_ATTRIBUTE__(unlock(__VA_ARGS__)) +#define REQUIRES(...) +#define SHARED_REQUIRES(...) +#define CAPABILITY(...) +#define SHARED_CAPABILITY(...) +#define ASSERT_CAPABILITY(...) +#define ASSERT_SHARED_CAPABILITY(...) +#define RETURN_CAPABILITY(...) +#define TRY_ACQUIRE(...) +#define TRY_ACQUIRE_SHARED(...) +#define ACQUIRE(...) +#define ACQUIRE_SHARED(...) +#define RELEASE(...) +#define RELEASE_SHARED(...) +#define SCOPED_CAPABILITY #endif +#define LOCKABLE CAPABILITY("mutex") +#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex") + #endif // ART_RUNTIME_BASE_MACROS_H_ diff --git a/runtime/base/memory_tool.h b/runtime/base/memory_tool.h new file mode 100644 index 0000000000..e0bdcfeced --- /dev/null +++ b/runtime/base/memory_tool.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
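The macros.h rewrite above retires the old exclusive_locks_required/locks_excluded spellings for clang's capability vocabulary: REQUIRES(m) asserts the caller already holds m, REQUIRES(!m) that it must not (a negative capability), and ACQUIRE/RELEASE annotate the lock and unlock functions themselves. A minimal example of how such annotations are consumed, meant for clang -Wthread-safety; the Mutex wrapper and Counter are illustrative, not ART code:

    #include <mutex>

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { mu_.lock(); }
      void Unlock() RELEASE() { mu_.unlock(); }
      // Mirrors the patch: lets '!lock_' parse as a negative capability.
      const Mutex& operator!() const { return *this; }
     private:
      std::mutex mu_;
    };

    class Counter {
     public:
      void Increment() REQUIRES(!lock_) {  // caller must NOT hold lock_
        lock_.Lock();
        IncrementLocked();
        lock_.Unlock();
      }
     private:
      void IncrementLocked() REQUIRES(lock_) { ++count_; }  // holds lock_
      Mutex lock_;
      int count_ GUARDED_BY(lock_) = 0;
    };

Calling IncrementLocked without the lock, or Increment while already holding it, becomes a compile-time warning instead of a latent deadlock or race.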
+ */ + +#ifndef ART_RUNTIME_BASE_MEMORY_TOOL_H_ +#define ART_RUNTIME_BASE_MEMORY_TOOL_H_ + +#include <stddef.h> + +#if !defined(__has_feature) +#define __has_feature(x) 0 +#endif + +#if __has_feature(address_sanitizer) + +#include <sanitizer/asan_interface.h> +#define ADDRESS_SANITIZER + +#ifdef ART_ENABLE_ADDRESS_SANITIZER +#define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s) +#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s) +#define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s) +#else +#define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0) +#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0) +#define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0) +#endif + +#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#define RUNNING_ON_MEMORY_TOOL 1U +constexpr bool kMemoryToolIsValgrind = false; +constexpr bool kMemoryToolDetectsLeaks = true; +constexpr bool kMemoryToolAddsRedzones = true; +constexpr size_t kMemoryToolStackGuardSizeScale = 2; + +#else + +#include <valgrind.h> +#include <memcheck/memcheck.h> +#define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s) +#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s) +#define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s) +#define ATTRIBUTE_NO_SANITIZE_ADDRESS +#define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND +constexpr bool kMemoryToolIsValgrind = true; +constexpr bool kMemoryToolDetectsLeaks = true; +constexpr bool kMemoryToolAddsRedzones = true; +constexpr size_t kMemoryToolStackGuardSizeScale = 1; + +#endif + +#endif // ART_RUNTIME_BASE_MEMORY_TOOL_H_ diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h index 87840e7a08..bd8de877e0 100644 --- a/runtime/base/mutex-inl.h +++ b/runtime/base/mutex-inl.h @@ -218,6 +218,16 @@ inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const { #endif } +inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) { + AssertSharedHeld(self); + RegisterAsUnlocked(self); +} + +inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) { + RegisterAsLocked(self); + AssertSharedHeld(self); +} + } // namespace art #endif // ART_RUNTIME_BASE_MUTEX_INL_H_ diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index 5c6065dcb3..62cfb5243c 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -47,7 +47,7 @@ Mutex* Locks::jni_libraries_lock_ = nullptr; Mutex* Locks::logging_lock_ = nullptr; Mutex* Locks::mem_maps_lock_ = nullptr; Mutex* Locks::modify_ldt_lock_ = nullptr; -ReaderWriterMutex* Locks::mutator_lock_ = nullptr; +MutatorMutex* Locks::mutator_lock_ = nullptr; Mutex* Locks::profiler_lock_ = nullptr; Mutex* Locks::reference_processor_lock_ = nullptr; Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr; @@ -61,6 +61,8 @@ ConditionVariable* Locks::thread_exit_cond_ = nullptr; Mutex* Locks::thread_suspend_count_lock_ = nullptr; Mutex* Locks::trace_lock_ = nullptr; Mutex* Locks::unexpected_signal_lock_ = nullptr; +Mutex* Locks::lambda_table_lock_ = nullptr; +Uninterruptible Roles::uninterruptible_; struct AllMutexData { // A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait). 
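The new memory_tool.h centralizes tool detection: under __has_feature(address_sanitizer) the MEMORY_TOOL_* macros compile to __asan_(un)poison_memory_region, otherwise to the Valgrind memcheck client requests, and callers never name a specific tool again. A sketch of the detection pattern; the no-op branch stands in for the Valgrind half so this builds without the valgrind headers:

    #include <cstdio>

    #if !defined(__has_feature)
    #define __has_feature(x) 0  // portability: only clang defines this
    #endif

    #if __has_feature(address_sanitizer)
    #include <sanitizer/asan_interface.h>
    #define MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
    #define MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
    #else
    // The real header uses VALGRIND_MAKE_MEM_* here; no-ops keep this buildable.
    #define MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
    #define MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
    #endif

    int main() {
      char buf[16];
      MAKE_NOACCESS(buf + 8, 8);  // under ASan, touching buf[8..15] now reports
      MAKE_DEFINED(buf + 8, 8);   // and this lifts the poisoning again
      std::printf("memory tool hooks wired\n");
      return 0;
    }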
@@ -738,6 +740,11 @@ std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) { return os; } +std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) { + mu.Dump(os); + return os; +} + ConditionVariable::ConditionVariable(const char* name, Mutex& guard) : name_(name), guard_(guard) { #if ART_USE_FUTEXES @@ -941,6 +948,7 @@ void Locks::Init() { DCHECK(thread_suspend_count_lock_ != nullptr); DCHECK(trace_lock_ != nullptr); DCHECK(unexpected_signal_lock_ != nullptr); + DCHECK(lambda_table_lock_ != nullptr); } else { // Create global locks in level order from highest lock level to lowest. LockLevel current_lock_level = kInstrumentEntrypointsLock; @@ -958,7 +966,7 @@ void Locks::Init() { UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock); DCHECK(mutator_lock_ == nullptr); - mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level); + mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level); UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock); DCHECK(heap_bitmap_lock_ == nullptr); @@ -1043,6 +1051,10 @@ void Locks::Init() { DCHECK(reference_queue_soft_references_lock_ == nullptr); reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level); + UPDATE_CURRENT_LOCK_LEVEL(kLambdaTableLock); + DCHECK(lambda_table_lock_ == nullptr); + lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level); + UPDATE_CURRENT_LOCK_LEVEL(kAbortLock); DCHECK(abort_lock_ == nullptr); abort_lock_ = new Mutex("abort lock", current_lock_level, true); diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 678d55bddd..848c904fe6 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -43,7 +43,8 @@ namespace art { -class LOCKABLE ReaderWriterMutex; +class SHARED_LOCKABLE ReaderWriterMutex; +class SHARED_LOCKABLE MutatorMutex; class ScopedContentionRecorder; class Thread; @@ -59,6 +60,7 @@ enum LockLevel { kUnexpectedSignalLock, kThreadSuspendCountLock, kAbortLock, + kLambdaTableLock, kJdwpSocketLock, kRegionSpaceRegionLock, kTransactionLogLock, @@ -138,6 +140,7 @@ class BaseMutex { virtual bool IsMutex() const { return false; } virtual bool IsReaderWriterMutex() const { return false; } + virtual bool IsMutatorMutex() const { return false; } virtual void Dump(std::ostream& os) const = 0; @@ -211,35 +214,37 @@ class LOCKABLE Mutex : public BaseMutex { virtual bool IsMutex() const { return true; } // Block until mutex is free then acquire exclusive access. - void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(); - void Lock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); } + void ExclusiveLock(Thread* self) ACQUIRE(); + void Lock(Thread* self) ACQUIRE() { ExclusiveLock(self); } // Returns true if acquires exclusive access, false otherwise. - bool ExclusiveTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true); - bool TryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true) { return ExclusiveTryLock(self); } + bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true); + bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); } // Release exclusive access. - void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION(); - void Unlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); } + void ExclusiveUnlock(Thread* self) RELEASE(); + void Unlock(Thread* self) RELEASE() { ExclusiveUnlock(self); } // Is the current thread the exclusive holder of the Mutex. bool IsExclusiveHeld(const Thread* self) const; // Assert that the Mutex is exclusively held by the current thread. 
- void AssertExclusiveHeld(const Thread* self) { + void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) { if (kDebugLocking && (gAborting == 0)) { CHECK(IsExclusiveHeld(self)) << *this; } } - void AssertHeld(const Thread* self) { AssertExclusiveHeld(self); } + void AssertHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); } // Assert that the Mutex is not held by the current thread. - void AssertNotHeldExclusive(const Thread* self) { + void AssertNotHeldExclusive(const Thread* self) ASSERT_CAPABILITY(!*this) { if (kDebugLocking && (gAborting == 0)) { CHECK(!IsExclusiveHeld(self)) << *this; } } - void AssertNotHeld(const Thread* self) { AssertNotHeldExclusive(self); } + void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!*this) { + AssertNotHeldExclusive(self); + } // Id associated with exclusive owner. No memory ordering semantics if called from a thread other // than the owner. @@ -252,6 +257,9 @@ class LOCKABLE Mutex : public BaseMutex { virtual void Dump(std::ostream& os) const; + // For negative capabilities in clang annotations. + const Mutex& operator!() const { return *this; } + private: #if ART_USE_FUTEXES // 0 is unheld, 1 is held. @@ -287,7 +295,7 @@ class LOCKABLE Mutex : public BaseMutex { // Shared(n) | Block | error | SharedLock(n+1)* | Shared(n-1) or Free // * for large values of n the SharedLock may block. std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu); -class LOCKABLE ReaderWriterMutex : public BaseMutex { +class SHARED_LOCKABLE ReaderWriterMutex : public BaseMutex { public: explicit ReaderWriterMutex(const char* name, LockLevel level = kDefaultMutexLevel); ~ReaderWriterMutex(); @@ -295,12 +303,12 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { virtual bool IsReaderWriterMutex() const { return true; } // Block until ReaderWriterMutex is free then acquire exclusive access. - void ExclusiveLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION(); - void WriterLock(Thread* self) EXCLUSIVE_LOCK_FUNCTION() { ExclusiveLock(self); } + void ExclusiveLock(Thread* self) ACQUIRE(); + void WriterLock(Thread* self) ACQUIRE() { ExclusiveLock(self); } // Release exclusive access. - void ExclusiveUnlock(Thread* self) UNLOCK_FUNCTION(); - void WriterUnlock(Thread* self) UNLOCK_FUNCTION() { ExclusiveUnlock(self); } + void ExclusiveUnlock(Thread* self) RELEASE(); + void WriterUnlock(Thread* self) RELEASE() { ExclusiveUnlock(self); } // Block until ReaderWriterMutex is free and acquire exclusive access. Returns true on success // or false if timeout is reached. @@ -310,50 +318,54 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { #endif // Block until ReaderWriterMutex is shared or free then acquire a share on the access. - void SharedLock(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE; - void ReaderLock(Thread* self) SHARED_LOCK_FUNCTION() { SharedLock(self); } + void SharedLock(Thread* self) ACQUIRE_SHARED() ALWAYS_INLINE; + void ReaderLock(Thread* self) ACQUIRE_SHARED() { SharedLock(self); } // Try to acquire share of ReaderWriterMutex. - bool SharedTryLock(Thread* self) EXCLUSIVE_TRYLOCK_FUNCTION(true); + bool SharedTryLock(Thread* self) SHARED_TRYLOCK_FUNCTION(true); // Release a share of the access. 
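The ReaderWriterMutex hunks above re-express the same semantics in capability terms: SharedLock is ACQUIRE_SHARED (many readers may hold it at once), ExclusiveLock is plain ACQUIRE (one writer, no readers). The behavior itself is standard reader-writer locking; a sketch using C++17's std::shared_mutex as a stand-in for the futex-based implementation:

    #include <mutex>
    #include <shared_mutex>

    class Table {
     public:
      int Get(int i) const {  // index assumed in range for this sketch
        std::shared_lock<std::shared_mutex> r(mu_);  // SharedLock / ReaderLock
        return data_[i];
      }
      void Set(int i, int v) {
        std::unique_lock<std::shared_mutex> w(mu_);  // ExclusiveLock / WriterLock
        data_[i] = v;
      }
     private:
      mutable std::shared_mutex mu_;
      int data_[16] = {};
    };

Readers proceed concurrently, while a writer waits for all shares to drain; that is exactly the property the MutatorMutex subclass later in the patch leans on for thread suspension.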
- void SharedUnlock(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE; - void ReaderUnlock(Thread* self) UNLOCK_FUNCTION() { SharedUnlock(self); } + void SharedUnlock(Thread* self) RELEASE_SHARED() ALWAYS_INLINE; + void ReaderUnlock(Thread* self) RELEASE_SHARED() { SharedUnlock(self); } // Is the current thread the exclusive holder of the ReaderWriterMutex. bool IsExclusiveHeld(const Thread* self) const; // Assert the current thread has exclusive access to the ReaderWriterMutex. - void AssertExclusiveHeld(const Thread* self) { + void AssertExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(this) { if (kDebugLocking && (gAborting == 0)) { CHECK(IsExclusiveHeld(self)) << *this; } } - void AssertWriterHeld(const Thread* self) { AssertExclusiveHeld(self); } + void AssertWriterHeld(const Thread* self) ASSERT_CAPABILITY(this) { AssertExclusiveHeld(self); } // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex. - void AssertNotExclusiveHeld(const Thread* self) { + void AssertNotExclusiveHeld(const Thread* self) ASSERT_CAPABILITY(!this) { if (kDebugLocking && (gAborting == 0)) { CHECK(!IsExclusiveHeld(self)) << *this; } } - void AssertNotWriterHeld(const Thread* self) { AssertNotExclusiveHeld(self); } + void AssertNotWriterHeld(const Thread* self) ASSERT_CAPABILITY(!this) { + AssertNotExclusiveHeld(self); + } // Is the current thread a shared holder of the ReaderWriterMutex. bool IsSharedHeld(const Thread* self) const; // Assert the current thread has shared access to the ReaderWriterMutex. - void AssertSharedHeld(const Thread* self) { + void AssertSharedHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) { if (kDebugLocking && (gAborting == 0)) { // TODO: we can only assert this well when self != null. CHECK(IsSharedHeld(self) || self == nullptr) << *this; } } - void AssertReaderHeld(const Thread* self) { AssertSharedHeld(self); } + void AssertReaderHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(this) { + AssertSharedHeld(self); + } // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive // mode. - void AssertNotHeld(const Thread* self) { + void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) { if (kDebugLocking && (gAborting == 0)) { CHECK(!IsSharedHeld(self)) << *this; } @@ -365,6 +377,9 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { virtual void Dump(std::ostream& os) const; + // For negative capabilities in clang annotations. + const ReaderWriterMutex& operator!() const { return *this; } + private: #if ART_USE_FUTEXES // Out-of-inline path for handling contention for a SharedLock. @@ -385,11 +400,44 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex { DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex); }; +// MutatorMutex is a special kind of ReaderWriterMutex created specifically for the +// Locks::mutator_lock_ mutex. The behaviour is identical to the ReaderWriterMutex except that +// thread state changes also play a part in lock ownership. The mutator_lock_ will not be truly +// held by any mutator threads. However, a thread in the kRunnable state is considered to have +// shared ownership of the mutator lock and therefore transitions in and out of the kRunnable +// state have associated implications on lock ownership. Extra methods to handle the state +// transitions have been added to the interface but are only accessible to the methods dealing +// with state transitions. 
The thread state and flags attributes are used to ensure thread state +// transitions are consistent with the permitted behaviour of the mutex. +// +// *) The most important consequence of this behaviour is that all threads must be in one of the +// suspended states before exclusive ownership of the mutator mutex is sought. +// +std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu); +class SHARED_LOCKABLE MutatorMutex : public ReaderWriterMutex { + public: + explicit MutatorMutex(const char* name, LockLevel level = kDefaultMutexLevel) + : ReaderWriterMutex(name, level) {} + ~MutatorMutex() {} + + virtual bool IsMutatorMutex() const { return true; } + + // For negative capabilities in clang annotations. + const MutatorMutex& operator!() const { return *this; } + + private: + friend class Thread; + void TransitionFromRunnableToSuspended(Thread* self) UNLOCK_FUNCTION() ALWAYS_INLINE; + void TransitionFromSuspendedToRunnable(Thread* self) SHARED_LOCK_FUNCTION() ALWAYS_INLINE; + + DISALLOW_COPY_AND_ASSIGN(MutatorMutex); +}; + // ConditionVariables allow threads to queue and sleep. Threads may then be resumed individually // (Signal) or all at once (Broadcast). class ConditionVariable { public: - explicit ConditionVariable(const char* name, Mutex& mutex); + ConditionVariable(const char* name, Mutex& mutex); ~ConditionVariable(); void Broadcast(Thread* self); @@ -425,13 +473,13 @@ class ConditionVariable { // Scoped locker/unlocker for a regular Mutex that acquires mu upon construction and releases it // upon destruction. -class SCOPED_LOCKABLE MutexLock { +class SCOPED_CAPABILITY MutexLock { public: - explicit MutexLock(Thread* self, Mutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) { + MutexLock(Thread* self, Mutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) { mu_.ExclusiveLock(self_); } - ~MutexLock() UNLOCK_FUNCTION() { + ~MutexLock() RELEASE() { mu_.ExclusiveUnlock(self_); } @@ -445,14 +493,14 @@ class SCOPED_LOCKABLE MutexLock { // Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon // construction and releases it upon destruction. -class SCOPED_LOCKABLE ReaderMutexLock { +class SCOPED_CAPABILITY ReaderMutexLock { public: - explicit ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : + ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) : self_(self), mu_(mu) { mu_.SharedLock(self_); } - ~ReaderMutexLock() UNLOCK_FUNCTION() { + ~ReaderMutexLock() RELEASE() { mu_.SharedUnlock(self_); } @@ -467,9 +515,9 @@ class SCOPED_LOCKABLE ReaderMutexLock { // Scoped locker/unlocker for a ReaderWriterMutex that acquires write access to mu upon // construction and releases it upon destruction. -class SCOPED_LOCKABLE WriterMutexLock { +class SCOPED_CAPABILITY WriterMutexLock { public: - explicit WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : + WriterMutexLock(Thread* self, ReaderWriterMutex& mu) EXCLUSIVE_LOCK_FUNCTION(mu) : self_(self), mu_(mu) { mu_.ExclusiveLock(self_); } @@ -487,6 +535,17 @@ class SCOPED_LOCKABLE WriterMutexLock { // "WriterMutexLock mu(lock)". #define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name") +// For StartNoThreadSuspension and EndNoThreadSuspension. +class CAPABILITY("role") Role { + public: + void Acquire() ACQUIRE() {} + void Release() RELEASE() {} + const Role& operator!() const { return *this; } +}; + +class Uninterruptible : public Role { +}; + // Global mutexes corresponding to the levels above. 
class Locks { public: @@ -495,35 +554,28 @@ class Locks { // Guards allocation entrypoint instrumenting. static Mutex* instrument_entrypoints_lock_; - // The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block - // mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds - // a share on the mutator_lock_. The garbage collector may also execute with shared access but - // at times requires exclusive access to the heap (not to be confused with the heap meta-data - // guarded by the heap_lock_ below). When the garbage collector requires exclusive access it asks - // the mutators to suspend themselves which also involves usage of the thread_suspend_count_lock_ - // to cover weaknesses in using ReaderWriterMutexes with ConditionVariables. We use a condition - // variable to wait upon in the suspension logic as releasing and then re-acquiring a share on - // the mutator lock doesn't necessarily allow the exclusive user (e.g the garbage collector) - // chance to acquire the lock. + // A barrier is used to synchronize the GC/Debugger thread with mutator threads. When GC/Debugger + // thread wants to suspend all mutator threads, it needs to wait for all mutator threads to pass + // a barrier. Threads that are already suspended will get their barrier passed by the GC/Debugger + // thread; threads in the runnable state will pass the barrier when they transit to the suspended + // state. GC/Debugger thread will be woken up when all mutator threads are suspended. // // Thread suspension: - // Shared users | Exclusive user - // (holding mutator lock and in kRunnable state) | .. running .. + // mutator thread | GC/Debugger + // .. running .. | .. running .. // .. running .. | Request thread suspension by: // .. running .. | - acquiring thread_suspend_count_lock_ // .. running .. | - incrementing Thread::suspend_count_ on // .. running .. | all mutator threads // .. running .. | - releasing thread_suspend_count_lock_ - // .. running .. | Block trying to acquire exclusive mutator lock + // .. running .. | Block wait for all threads to pass a barrier // Poll Thread::suspend_count_ and enter full | .. blocked .. // suspend code. | .. blocked .. - // Change state to kSuspended | .. blocked .. - // x: Release share on mutator_lock_ | Carry out exclusive access - // Acquire thread_suspend_count_lock_ | .. exclusive .. - // while Thread::suspend_count_ > 0 | .. exclusive .. - // - wait on Thread::resume_cond_ | .. exclusive .. - // (releases thread_suspend_count_lock_) | .. exclusive .. - // .. waiting .. | Release mutator_lock_ + // Change state to kSuspended (pass the barrier) | Wake up when all threads pass the barrier + // x: Acquire thread_suspend_count_lock_ | .. running .. + // while Thread::suspend_count_ > 0 | .. running .. + // - wait on Thread::resume_cond_ | .. running .. + // (releases thread_suspend_count_lock_) | .. running .. // .. waiting .. | Request thread resumption by: // .. waiting .. | - acquiring thread_suspend_count_lock_ // .. waiting .. | - decrementing Thread::suspend_count_ on @@ -531,29 +583,13 @@ class Locks { // .. waiting .. | - notifying on Thread::resume_cond_ // - re-acquire thread_suspend_count_lock_ | - releasing thread_suspend_count_lock_ // Release thread_suspend_count_lock_ | .. running .. - // Acquire share on mutator_lock_ | .. running .. - // - This could block but the thread still | .. running .. - // has a state of kSuspended and so this | .. running .. - // isn't an issue. | .. running .. 
- // Acquire thread_suspend_count_lock_ | .. running .. - // - we poll here as we're transitioning into | .. running .. - // kRunnable and an individual thread suspend | .. running .. - // request (e.g for debugging) won't try | .. running .. - // to acquire the mutator lock (which would | .. running .. - // block as we hold the mutator lock). This | .. running .. - // poll ensures that if the suspender thought | .. running .. - // we were suspended by incrementing our | .. running .. - // Thread::suspend_count_ and then reading | .. running .. - // our state we go back to waiting on | .. running .. - // Thread::resume_cond_. | .. running .. - // can_go_runnable = Thread::suspend_count_ == 0 | .. running .. - // Release thread_suspend_count_lock_ | .. running .. - // if can_go_runnable | .. running .. - // Change state to kRunnable | .. running .. - // else | .. running .. - // Goto x | .. running .. + // Change to kRunnable | .. running .. + // - this uses a CAS operation to ensure the | .. running .. + // suspend request flag isn't raised as the | .. running .. + // state is changed | .. running .. + // - if the CAS operation fails then goto x | .. running .. // .. running .. | .. running .. - static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_); + static MutatorMutex* mutator_lock_ ACQUIRED_AFTER(instrument_entrypoints_lock_); // Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap. static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_); @@ -639,6 +675,16 @@ class Locks { // Have an exclusive logging thread. static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_); + + // Allow reader-writer mutual exclusion on the boxed table of lambda objects. + // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it. + static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_); +}; + +class Roles { + public: + // Uninterruptible means that the thread may not become suspended. + static Uninterruptible uninterruptible_; }; } // namespace art diff --git a/runtime/base/mutex_test.cc b/runtime/base/mutex_test.cc index 3750c815e9..340550f02e 100644 --- a/runtime/base/mutex_test.cc +++ b/runtime/base/mutex_test.cc @@ -101,18 +101,18 @@ struct RecursiveLockWait { : mu("test mutex", kDefaultMutexLevel, true), cv("test condition variable", mu) { } - static void* Callback(void* arg) { - RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg); - state->mu.Lock(Thread::Current()); - state->cv.Signal(Thread::Current()); - state->mu.Unlock(Thread::Current()); - return nullptr; - } - Mutex mu; ConditionVariable cv; }; +static void* RecursiveLockWaitCallback(void* arg) { + RecursiveLockWait* state = reinterpret_cast<RecursiveLockWait*>(arg); + state->mu.Lock(Thread::Current()); + state->cv.Signal(Thread::Current()); + state->mu.Unlock(Thread::Current()); + return nullptr; +} + // GCC has trouble with our mutex tests, so we have to turn off thread safety analysis. 
static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS { RecursiveLockWait state; @@ -120,8 +120,7 @@ static void RecursiveLockWaitTest() NO_THREAD_SAFETY_ANALYSIS { state.mu.Lock(Thread::Current()); pthread_t pthread; - int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWait::Callback, - &state); + int pthread_create_result = pthread_create(&pthread, nullptr, RecursiveLockWaitCallback, &state); ASSERT_EQ(0, pthread_create_result); state.cv.Wait(Thread::Current()); diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc index 4a7be384b1..d823edd6d2 100644 --- a/runtime/base/scoped_arena_allocator.cc +++ b/runtime/base/scoped_arena_allocator.cc @@ -17,11 +17,11 @@ #include "scoped_arena_allocator.h" #include "arena_allocator.h" -#include <memcheck/memcheck.h> +#include "base/memory_tool.h" namespace art { -static constexpr size_t kValgrindRedZoneBytes = 8; +static constexpr size_t kMemoryToolRedZoneBytes = 8; ArenaStack::ArenaStack(ArenaPool* arena_pool) : DebugStackRefCounter(), @@ -30,7 +30,7 @@ ArenaStack::ArenaStack(ArenaPool* arena_pool) top_arena_(nullptr), top_ptr_(nullptr), top_end_(nullptr), - running_on_valgrind_(RUNNING_ON_VALGRIND > 0) { + is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL > 0) { } ArenaStack::~ArenaStack() { @@ -92,7 +92,7 @@ void ArenaStack::UpdateBytesAllocated() { } void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) { - size_t rounded_bytes = RoundUp(bytes + kValgrindRedZoneBytes, 8); + size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8); uint8_t* ptr = top_ptr_; if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) { ptr = AllocateFromNextArena(rounded_bytes); @@ -100,8 +100,8 @@ void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) { } CurrentStats()->RecordAlloc(bytes, kind); top_ptr_ = ptr + rounded_bytes; - VALGRIND_MAKE_MEM_UNDEFINED(ptr, bytes); - VALGRIND_MAKE_MEM_NOACCESS(ptr + bytes, rounded_bytes - bytes); + MEMORY_TOOL_MAKE_UNDEFINED(ptr, bytes); + MEMORY_TOOL_MAKE_NOACCESS(ptr + bytes, rounded_bytes - bytes); return ptr; } diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h index bbedeac3b0..ca514e411c 100644 --- a/runtime/base/scoped_arena_allocator.h +++ b/runtime/base/scoped_arena_allocator.h @@ -64,7 +64,7 @@ class ArenaStack : private DebugStackRefCounter { // Private - access via ScopedArenaAllocator or ScopedArenaAllocatorAdapter. 
void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE { - if (UNLIKELY(running_on_valgrind_)) { + if (UNLIKELY(is_running_on_memory_tool_)) { return AllocValgrind(bytes, kind); } size_t rounded_bytes = RoundUp(bytes, 8); @@ -88,7 +88,7 @@ class ArenaStack : private DebugStackRefCounter { uint8_t* top_ptr_; uint8_t* top_end_; - const bool running_on_valgrind_; + const bool is_running_on_memory_tool_; friend class ScopedArenaAllocator; template <typename T> diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h index b300109e31..a5344dbd31 100644 --- a/runtime/base/timing_logger.h +++ b/runtime/base/timing_logger.h @@ -33,17 +33,17 @@ class CumulativeLogger { explicit CumulativeLogger(const std::string& name); ~CumulativeLogger(); void Start(); - void End() LOCKS_EXCLUDED(lock_); - void Reset() LOCKS_EXCLUDED(lock_); - void Dump(std::ostream& os) const LOCKS_EXCLUDED(lock_); + void End() REQUIRES(!lock_); + void Reset() REQUIRES(!lock_); + void Dump(std::ostream& os) const REQUIRES(!lock_); uint64_t GetTotalNs() const { return GetTotalTime() * kAdjust; } // Allow the name to be modified, particularly when the cumulative logger is a field within a // parent class that is unable to determine the "name" of a sub-class. - void SetName(const std::string& name) LOCKS_EXCLUDED(lock_); - void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_); - size_t GetIterations() const; + void SetName(const std::string& name) REQUIRES(!lock_); + void AddLogger(const TimingLogger& logger) REQUIRES(!lock_); + size_t GetIterations() const REQUIRES(!lock_); private: class HistogramComparator { @@ -58,8 +58,8 @@ class CumulativeLogger { static constexpr size_t kInitialBucketSize = 50; // 50 microseconds. void AddPair(const std::string &label, uint64_t delta_time) - EXCLUSIVE_LOCKS_REQUIRED(lock_); - void DumpHistogram(std::ostream &os) const EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); + void DumpHistogram(std::ostream &os) const REQUIRES(lock_); uint64_t GetTotalTime() const { return total_time_; } @@ -131,7 +131,7 @@ class TimingLogger { friend class TimingLogger; }; - explicit TimingLogger(const char* name, bool precise, bool verbose); + TimingLogger(const char* name, bool precise, bool verbose); ~TimingLogger(); // Verify that all open timings have related closed timings. void Verify(); @@ -156,7 +156,7 @@ class TimingLogger { // starts and ends. class ScopedTiming { public: - explicit ScopedTiming(const char* label, TimingLogger* logger) : logger_(logger) { + ScopedTiming(const char* label, TimingLogger* logger) : logger_(logger) { logger_->StartTiming(label); } ~ScopedTiming() { diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h index d51fbd68a7..f47368b180 100644 --- a/runtime/base/unix_file/fd_file.h +++ b/runtime/base/unix_file/fd_file.h @@ -35,8 +35,8 @@ class FdFile : public RandomAccessFile { FdFile(); // Creates an FdFile using the given file descriptor. Takes ownership of the // file descriptor. (Use DisableAutoClose to retain ownership.) - explicit FdFile(int fd, bool checkUsage); - explicit FdFile(int fd, const std::string& path, bool checkUsage); + FdFile(int fd, bool checkUsage); + FdFile(int fd, const std::string& path, bool checkUsage); // Destroys an FdFile, closing the file descriptor if Close hasn't already // been called. 
(If you care about the return value of Close, call it diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index 45fb9c4b8d..4172b89244 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -16,6 +16,7 @@ #include "check_jni.h" +#include <iomanip> #include <sys/mman.h> #include <zlib.h> @@ -129,7 +130,7 @@ union JniValueType { class ScopedCheck { public: - explicit ScopedCheck(int flags, const char* functionName, bool has_method = true) + ScopedCheck(int flags, const char* functionName, bool has_method = true) : function_name_(functionName), flags_(flags), indent_(0), has_method_(has_method) { } @@ -155,7 +156,7 @@ class ScopedCheck { * Assumes "jobj" has already been validated. */ bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* o = soa.Decode<mirror::Object*>(java_object); if (o == nullptr) { AbortF("field operation on NULL object: %p", java_object); @@ -199,7 +200,7 @@ class ScopedCheck { */ bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc, jmethodID mid, Primitive::Type type, InvokeType invoke) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; @@ -246,7 +247,7 @@ class ScopedCheck { * Assumes "java_class" has already been validated. */ bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(java_class); ArtField* f = CheckFieldID(soa, fid); if (f == nullptr) { @@ -269,7 +270,7 @@ class ScopedCheck { * Instances of "java_class" must be instances of the method's declaring class. */ bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; @@ -290,7 +291,7 @@ class ScopedCheck { * will be handled automatically by the instanceof check.) */ bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = CheckMethodID(soa, mid); if (m == nullptr) { return false; @@ -343,7 +344,7 @@ class ScopedCheck { * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable. 
*/ bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* traceMethod = nullptr; if (has_method_ && soa.Vm()->IsTracingEnabled()) { // We need to guard some of the invocation interface's calls: a bad caller might @@ -443,7 +444,7 @@ class ScopedCheck { } bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* method = soa.Decode<mirror::Object*>(jmethod); if (method == nullptr) { AbortF("expected non-null method"); @@ -461,7 +462,7 @@ class ScopedCheck { } bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = soa.DecodeMethod(mid); if (method == nullptr) { AbortF("expected non-null constructor"); @@ -475,7 +476,7 @@ class ScopedCheck { } bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* field = soa.Decode<mirror::Object*>(jfield); if (field == nullptr) { AbortF("expected non-null java.lang.reflect.Field"); @@ -491,7 +492,7 @@ class ScopedCheck { } bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* obj = soa.Decode<mirror::Object*>(jobj); if (!obj->GetClass()->IsThrowableClass()) { AbortF("expected java.lang.Throwable but got object of type " @@ -502,7 +503,7 @@ class ScopedCheck { } bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(jc); if (!c->IsThrowableClass()) { AbortF("expected java.lang.Throwable class but got object of " @@ -533,7 +534,7 @@ class ScopedCheck { } bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(jc); if (!c->IsInstantiableNonArray()) { AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c); @@ -543,7 +544,7 @@ class ScopedCheck { } bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!CheckArray(soa, array)) { return false; } @@ -558,7 +559,7 @@ class ScopedCheck { bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) { return false; } @@ -619,7 +620,7 @@ class ScopedCheck { * to "running" mode before doing the checks. 
*/ bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const char* what = nullptr; switch (kind) { case kClass: @@ -715,7 +716,7 @@ class ScopedCheck { } bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (fmt) { case 'a': // jarray return CheckArray(soa, arg.a); @@ -785,7 +786,7 @@ class ScopedCheck { void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg, std::string* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (fmt) { case 'L': // jobject fall-through. case 'a': // jarray fall-through. @@ -946,7 +947,7 @@ class ScopedCheck { * Since we're dealing with objects, switch to "running" mode. */ bool CheckArray(ScopedObjectAccess& soa, jarray java_array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(java_array == nullptr)) { AbortF("jarray was NULL"); return false; @@ -983,7 +984,7 @@ class ScopedCheck { } ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (fid == nullptr) { AbortF("jfieldID was NULL"); return nullptr; @@ -999,7 +1000,7 @@ class ScopedCheck { } ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (mid == nullptr) { AbortF("jmethodID was NULL"); return nullptr; @@ -1014,7 +1015,7 @@ class ScopedCheck { return m; } - bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool CheckThread(JNIEnv* env) SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); if (self == nullptr) { AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid()); @@ -1083,10 +1084,29 @@ class ScopedCheck { } const char* errorKind = nullptr; - uint8_t utf8 = CheckUtfBytes(bytes, &errorKind); + const uint8_t* utf8 = CheckUtfBytes(bytes, &errorKind); if (errorKind != nullptr) { + // This is an expensive loop that will resize often, but this isn't supposed to hit in + // practice anyways. + std::ostringstream oss; + oss << std::hex; + const uint8_t* tmp = reinterpret_cast<const uint8_t*>(bytes); + while (*tmp != 0) { + if (tmp == utf8) { + oss << "<"; + } + oss << "0x" << std::setfill('0') << std::setw(2) << static_cast<uint32_t>(*tmp); + if (tmp == utf8) { + oss << '>'; + } + tmp++; + if (*tmp != 0) { + oss << ' '; + } + } + AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n" - " string: '%s'", errorKind, utf8, bytes); + " string: '%s'\n input: '%s'", errorKind, *utf8, bytes, oss.str().c_str()); return false; } return true; @@ -1094,11 +1114,11 @@ class ScopedCheck { // Checks whether |bytes| is valid modified UTF-8. We also accept 4 byte UTF // sequences in place of encoded surrogate pairs. - static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) { + static const uint8_t* CheckUtfBytes(const char* bytes, const char** errorKind) { while (*bytes != '\0') { - uint8_t utf8 = *(bytes++); + const uint8_t* utf8 = reinterpret_cast<const uint8_t*>(bytes++); // Switch on the high four bits. 
- switch (utf8 >> 4) { + switch (*utf8 >> 4) { case 0x00: case 0x01: case 0x02: @@ -1118,11 +1138,11 @@ class ScopedCheck { return utf8; case 0x0f: // Bit pattern 1111, which might be the start of a 4 byte sequence. - if ((utf8 & 0x08) == 0) { + if ((*utf8 & 0x08) == 0) { // Bit pattern 1111 0xxx, which is the start of a 4 byte sequence. // We consume one continuation byte here, and fall through to consume two more. - utf8 = *(bytes++); - if ((utf8 & 0xc0) != 0x80) { + utf8 = reinterpret_cast<const uint8_t*>(bytes++); + if ((*utf8 & 0xc0) != 0x80) { *errorKind = "continuation"; return utf8; } @@ -1135,8 +1155,8 @@ class ScopedCheck { FALLTHROUGH_INTENDED; case 0x0e: // Bit pattern 1110, so there are two additional bytes. - utf8 = *(bytes++); - if ((utf8 & 0xc0) != 0x80) { + utf8 = reinterpret_cast<const uint8_t*>(bytes++); + if ((*utf8 & 0xc0) != 0x80) { *errorKind = "continuation"; return utf8; } @@ -1146,8 +1166,8 @@ class ScopedCheck { case 0x0c: case 0x0d: // Bit pattern 110x, so there is one additional byte. - utf8 = *(bytes++); - if ((utf8 & 0xc0) != 0x80) { + utf8 = reinterpret_cast<const uint8_t*>(bytes++); + if ((*utf8 & 0xc0) != 0x80) { *errorKind = "continuation"; return utf8; } @@ -1206,6 +1226,8 @@ class GuardedCopy { const_cast<char*>(copy->StartRedZone())[i] = kCanary[j]; if (kCanary[j] == '\0') { j = 0; + } else { + j++; } } @@ -1217,6 +1239,8 @@ class GuardedCopy { const_cast<char*>(copy->EndRedZone())[i] = kCanary[j]; if (kCanary[j] == '\0') { j = 0; + } else { + j++; } } @@ -1367,6 +1391,8 @@ class GuardedCopy { } if (kCanary[j] == '\0') { j = 0; + } else { + j++; } } @@ -1381,6 +1407,8 @@ class GuardedCopy { } if (kCanary[j] == '\0') { j = 0; + } else { + j++; } } return true; @@ -2662,7 +2690,7 @@ class CheckJNI { static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj, jclass c, jmethodID mid, InvokeType invoke) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { bool checked; switch (invoke) { case kVirtual: { diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h index 504b7536f6..3155b518a8 100644 --- a/runtime/check_reference_map_visitor.h +++ b/runtime/check_reference_map_visitor.h @@ -28,10 +28,10 @@ namespace art { // holding references. 
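The GuardedCopy hunks above fix a subtle canary bug: the red-zone fill and check loops reset j at the pattern's NUL but never advanced it otherwise, so only the first canary character was ever written and verified. A reduced sketch of the corrected cycling (the kCanary value is made up):

```c++
#include <cassert>
#include <cstddef>
#include <iostream>

static const char kCanary[] = "JNI BOOM";  // Illustrative; ART defines its own.

// Fills |zone| by cycling through the canary pattern, NUL included. The bug
// fixed above: without the else branch, j stayed at 0 forever.
void FillRedZone(char* zone, size_t len) {
  for (size_t i = 0, j = 0; i < len; ++i) {
    zone[i] = kCanary[j];
    if (kCanary[j] == '\0') {
      j = 0;
    } else {
      j++;
    }
  }
}

// Verifies the same cycling pattern; returns true if the red zone is intact.
bool CheckRedZone(const char* zone, size_t len) {
  for (size_t i = 0, j = 0; i < len; ++i) {
    if (zone[i] != kCanary[j]) {
      return false;
    }
    if (kCanary[j] == '\0') {
      j = 0;
    } else {
      j++;
    }
  }
  return true;
}

int main() {
  char zone[32];
  FillRedZone(zone, sizeof(zone));
  assert(CheckRedZone(zone, sizeof(zone)));
  zone[5] = 'X';  // Simulate an overrun scribbling into the red zone.
  assert(!CheckRedZone(zone, sizeof(zone)));
  std::cout << "canary catches the overrun\n";
}
```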
class CheckReferenceMapVisitor : public StackVisitor { public: - explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit CheckReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsCalleeSaveMethod() || m->IsNative()) { CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex); @@ -52,7 +52,7 @@ class CheckReferenceMapVisitor : public StackVisitor { } void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (GetMethod()->IsOptimized(sizeof(void*))) { CheckOptimizedMethod(registers, number_of_references, native_pc_offset); } else { @@ -62,7 +62,7 @@ class CheckReferenceMapVisitor : public StackVisitor { private: void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); CodeInfo code_info = m->GetOptimizedCodeInfo(); StackMapEncoding encoding = code_info.ExtractEncoding(); @@ -104,7 +104,7 @@ class CheckReferenceMapVisitor : public StackVisitor { } void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*))); const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset); diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index 21b63c61a2..11901b3bef 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -187,7 +187,7 @@ inline mirror::IfTable* ClassLinker::AllocIfTable(Thread* self, size_t ifcount) } inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!class_roots_.IsNull()); mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read(); mirror::Class* klass = class_roots->Get(class_root); diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 23c59422c4..c179c64491 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -37,6 +37,7 @@ #include "base/unix_file/fd_file.h" #include "base/value_object.h" #include "class_linker-inl.h" +#include "class_table-inl.h" #include "compiler_callbacks.h" #include "debugger.h" #include "dex_file-inl.h" @@ -55,6 +56,7 @@ #include "linear_alloc.h" #include "oat.h" #include "oat_file.h" +#include "oat_file-inl.h" #include "oat_file_assistant.h" #include "object_lock.h" #include "mirror/class.h" @@ -90,7 +92,7 @@ static constexpr bool kDuplicateClassesCheck = false; static void ThrowNoClassDefFoundError(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void ThrowNoClassDefFoundError(const char* fmt, ...) { va_list args; va_start(args, fmt); @@ -99,14 +101,12 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) 
{ va_end(args); } -bool ClassLinker::HasInitWithString( - Thread* self, ClassLinker* class_linker, const char* descriptor) { +bool ClassLinker::HasInitWithString(Thread* self, const char* descriptor) { ArtMethod* method = self->GetCurrentMethod(nullptr); StackHandleScope<1> hs(self); Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ? - method->GetDeclaringClass()->GetClassLoader() - : nullptr)); - mirror::Class* exception_class = class_linker->FindClass(self, descriptor, class_loader); + method->GetDeclaringClass()->GetClassLoader() : nullptr)); + mirror::Class* exception_class = FindClass(self, descriptor, class_loader); if (exception_class == nullptr) { // No exc class ~ no <init>-with-string. @@ -143,7 +143,7 @@ void ClassLinker::ThrowEarlierClassFailure(mirror::Class* c) { std::string temp; const char* descriptor = c->GetVerifyErrorClass()->GetDescriptor(&temp); - if (HasInitWithString(self, this, descriptor)) { + if (HasInitWithString(self, descriptor)) { self->ThrowNewException(descriptor, PrettyDescriptor(c).c_str()); } else { self->ThrowNewException(descriptor, nullptr); @@ -156,7 +156,7 @@ void ClassLinker::ThrowEarlierClassFailure(mirror::Class* c) { } static void VlogClassInitializationFailure(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (VLOG_IS_ON(class_linker)) { std::string temp; LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from " @@ -165,7 +165,7 @@ static void VlogClassInitializationFailure(Handle<mirror::Class> klass) } static void WrapExceptionInInitializer(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); JNIEnv* env = self->GetJniEnv(); @@ -194,7 +194,9 @@ struct FieldGapsComparator { bool operator() (const FieldGap& lhs, const FieldGap& rhs) NO_THREAD_SAFETY_ANALYSIS { // Sort by gap size, largest first. Secondary sort by starting offset. - return lhs.size > rhs.size || (lhs.size == rhs.size && lhs.start_offset < rhs.start_offset); + // Note that the priority queue returns the largest element, so operator() + // should return true if lhs is less than rhs. 
+ return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset); } }; typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps; @@ -225,7 +227,7 @@ static void ShuffleForward(size_t* current_field_idx, MemberOffset* field_offset, std::deque<ArtField*>* grouped_and_sorted_fields, FieldGaps* gaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(current_field_idx != nullptr); DCHECK(grouped_and_sorted_fields != nullptr); DCHECK(gaps != nullptr); @@ -248,13 +250,13 @@ static void ShuffleForward(size_t* current_field_idx, if (!gaps->empty() && gaps->top().size >= n) { FieldGap gap = gaps->top(); gaps->pop(); - DCHECK(IsAligned<n>(gap.start_offset)); + DCHECK_ALIGNED(gap.start_offset, n); field->SetOffset(MemberOffset(gap.start_offset)); if (gap.size > n) { AddFieldGap(gap.start_offset + n, gap.start_offset + gap.size, gaps); } } else { - DCHECK(IsAligned<n>(field_offset->Uint32Value())); + DCHECK_ALIGNED(field_offset->Uint32Value(), n); field->SetOffset(*field_offset); *field_offset = MemberOffset(field_offset->Uint32Value() + n); } @@ -581,6 +583,7 @@ void ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Setup the ClassLoader, verifying the object_size_. class_root = FindSystemClass(self, "Ljava/lang/ClassLoader;"); + class_root->SetClassLoaderClass(); CHECK_EQ(class_root->GetObjectSize(), mirror::ClassLoader::InstanceSize()); SetClassRoot(kJavaLangClassLoader, class_root); @@ -1018,7 +1021,7 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class, gc::space::ImageSpace* space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (m->IsRuntimeMethod()) { CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m); } else if (m->IsMiranda()) { @@ -1036,7 +1039,7 @@ static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class, static void SanityCheckArtMethodPointerArray( mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size, - gc::space::ImageSpace* space) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + gc::space::ImageSpace* space) SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(arr != nullptr); for (int32_t j = 0; j < arr->GetLength(); ++j) { auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size); @@ -1051,18 +1054,17 @@ static void SanityCheckArtMethodPointerArray( } static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj; CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj; if (obj->IsClass()) { auto klass = obj->AsClass(); - ArtField* fields[2] = { klass->GetSFields(), klass->GetIFields() }; - size_t num_fields[2] = { klass->NumStaticFields(), klass->NumInstanceFields() }; - for (size_t i = 0; i < 2; ++i) { - for (size_t j = 0; j < num_fields[i]; ++j) { - CHECK_EQ(fields[i][j].GetDeclaringClass(), klass); - } + for (ArtField& field : klass->GetIFields()) { + CHECK_EQ(field.GetDeclaringClass(), klass); + } + for (ArtField& field : klass->GetSFields()) { + CHECK_EQ(field.GetDeclaringClass(), klass); } auto* runtime = Runtime::Current(); auto* image_space = runtime->GetHeap()->GetImageSpace(); @@ -1097,6 +1099,28 @@ static void 
SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_ } } +// Set image methods' entry point to interpreter. +class SetInterpreterEntrypointArtMethodVisitor : public ArtMethodVisitor { + public: + explicit SetInterpreterEntrypointArtMethodVisitor(size_t image_pointer_size) + : image_pointer_size_(image_pointer_size) {} + + void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (kIsDebugBuild && !method->IsRuntimeMethod()) { + CHECK(method->GetDeclaringClass() != nullptr); + } + if (!method->IsNative() && !method->IsRuntimeMethod() && !method->IsResolutionMethod()) { + method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), + image_pointer_size_); + } + } + + private: + const size_t image_pointer_size_; + + DISALLOW_COPY_AND_ASSIGN(SetInterpreterEntrypointArtMethodVisitor); +}; + void ClassLinker::InitFromImage() { VLOG(startup) << "ClassLinker::InitFromImage entering"; CHECK(!init_done_); @@ -1187,23 +1211,10 @@ void ClassLinker::InitFromImage() { // Set entry point to interpreter if in InterpretOnly mode. if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) { - const auto& header = space->GetImageHeader(); - const auto& methods = header.GetMethodsSection(); - const auto art_method_size = ArtMethod::ObjectSize(image_pointer_size_); - for (uintptr_t pos = 0; pos < methods.Size(); pos += art_method_size) { - auto* method = reinterpret_cast<ArtMethod*>(space->Begin() + pos + methods.Offset()); - if (kIsDebugBuild && !method->IsRuntimeMethod()) { - CHECK(method->GetDeclaringClass() != nullptr); - } - if (!method->IsNative()) { - method->SetEntryPointFromInterpreterPtrSize( - artInterpreterToInterpreterBridge, image_pointer_size_); - if (!method->IsRuntimeMethod() && method != runtime->GetResolutionMethod()) { - method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), - image_pointer_size_); - } - } - } + const ImageHeader& header = space->GetImageHeader(); + const ImageSection& methods = header.GetMethodsSection(); + SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_); + methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_); } // reinit class_roots_ @@ -1239,11 +1250,8 @@ void ClassLinker::InitFromImage() { bool ClassLinker::ClassInClassTable(mirror::Class* klass) { ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - auto it = class_table_.Find(GcRoot<mirror::Class>(klass)); - if (it == class_table_.end()) { - return false; - } - return it->Read() == klass; + ClassTable* const class_table = ClassTableForClassLoader(klass->GetClassLoader()); + return class_table != nullptr && class_table->Contains(klass); } void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { @@ -1255,8 +1263,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { // There is 3 GC cases to handle: // Non moving concurrent: // This case is easy to handle since the reference members of ArtMethod and ArtFields are held - // live by the class and class roots. In this case we probably don't even need to call - // VisitNativeRoots. + // live by the class and class roots. // // Moving non-concurrent: // This case needs to call visit VisitNativeRoots in case the classes or dex cache arrays move. 
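The new SetInterpreterEntrypointArtMethodVisitor replaces an open-coded walk over the image's packed method section, so the stride arithmetic now lives behind VisitPackedArtMethods instead of at every call site. A minimal sketch of the same visitor-over-packed-records pattern (the Method struct and helper names are illustrative):

```c++
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct Method {        // Stand-in for ArtMethod: a fixed-size packed record.
  uint32_t dex_method_index;
  bool is_native;
};

class MethodVisitor {  // Analog of ArtMethodVisitor.
 public:
  virtual ~MethodVisitor() {}
  virtual void Visit(Method* method) = 0;
};

// Analog of the VisitPackedArtMethods call above: one place owns the layout.
void VisitPackedMethods(MethodVisitor* visitor, uint8_t* base, size_t count,
                        size_t record_size) {
  for (size_t pos = 0; pos < count * record_size; pos += record_size) {
    visitor->Visit(reinterpret_cast<Method*>(base + pos));
  }
}

// Shaped like SetInterpreterEntrypointArtMethodVisitor: act on a subset.
class CountInterpretedVisitor : public MethodVisitor {
 public:
  void Visit(Method* method) override {
    if (!method->is_native) {
      ++patched_;
    }
  }
  size_t patched_ = 0;
};

int main() {
  std::vector<Method> methods = {{0, false}, {1, true}, {2, false}};
  CountInterpretedVisitor visitor;
  VisitPackedMethods(&visitor, reinterpret_cast<uint8_t*>(methods.data()),
                     methods.size(), sizeof(Method));
  std::cout << "would patch " << visitor.patched_ << " methods\n";  // 2
}
```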
@@ -1267,35 +1274,18 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { // Moving concurrent: // Need to make sure to not copy ArtMethods without doing read barriers since the roots are // marked concurrently and we don't hold the classlinker_classes_lock_ when we do the copy. - for (GcRoot<mirror::Class>& root : class_table_) { - buffered_visitor.VisitRoot(root); - if ((flags & kVisitRootFlagNonMoving) == 0) { - // Don't bother visiting ArtField and ArtMethod if kVisitRootFlagNonMoving is set since - // these roots are all reachable from the class or dex cache. - root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_); - } - } - // PreZygote classes can't move so we won't need to update fields' declaring classes. - for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) { - buffered_visitor.VisitRoot(root); - if ((flags & kVisitRootFlagNonMoving) == 0) { - root.Read()->VisitNativeRoots(buffered_visitor, image_pointer_size_); - } + boot_class_table_.VisitRoots(buffered_visitor); + for (GcRoot<mirror::ClassLoader>& root : class_loaders_) { + // May be null for boot ClassLoader. + root.VisitRoot(visitor, RootInfo(kRootVMInternal)); } } else if ((flags & kVisitRootFlagNewRoots) != 0) { for (auto& root : new_class_roots_) { mirror::Class* old_ref = root.Read<kWithoutReadBarrier>(); - old_ref->VisitNativeRoots(buffered_visitor, image_pointer_size_); root.VisitRoot(visitor, RootInfo(kRootStickyClass)); mirror::Class* new_ref = root.Read<kWithoutReadBarrier>(); - if (UNLIKELY(new_ref != old_ref)) { - // Uh ohes, GC moved a root in the log. Need to search the class_table and update the - // corresponding object. This is slow, but luckily for us, this may only happen with a - // concurrent moving GC. - auto it = class_table_.Find(GcRoot<mirror::Class>(old_ref)); - DCHECK(it != class_table_.end()); - *it = GcRoot<mirror::Class>(new_ref); - } + // Concurrent moving GC marked new roots through the to-space invariant. + CHECK_EQ(new_ref, old_ref); } } buffered_visitor.Flush(); // Flush before clearing new_class_roots_. @@ -1344,91 +1334,105 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) { } } -void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) { - if (dex_cache_image_class_lookup_required_) { - MoveImageClassesToClassTable(); - } - // TODO: why isn't this a ReaderMutexLock? - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - for (GcRoot<mirror::Class>& root : class_table_) { - if (!visitor(root.Read(), arg)) { - return; - } - } - for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) { - if (!visitor(root.Read(), arg)) { - return; +void ClassLinker::VisitClassesInternal(ClassVisitor* visitor) { + if (boot_class_table_.Visit(visitor)) { + for (GcRoot<mirror::ClassLoader>& root : class_loaders_) { + ClassTable* const class_table = root.Read()->GetClassTable(); + if (class_table != nullptr && !class_table->Visit(visitor)) { + return; + } } } } -static bool GetClassesVisitorSet(mirror::Class* c, void* arg) { - std::set<mirror::Class*>* classes = reinterpret_cast<std::set<mirror::Class*>*>(arg); - classes->insert(c); - return true; +void ClassLinker::VisitClasses(ClassVisitor* visitor) { + if (dex_cache_image_class_lookup_required_) { + MoveImageClassesToClassTable(); + } + Thread* const self = Thread::Current(); + ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); + // Not safe to have thread suspension when we are holding a lock. 
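One earlier hunk deserves a second look: std::priority_queue interprets its comparator as "less than" and pops the largest element, so the old `lhs.size > rhs.size` body silently built a min-heap and handed out the smallest gap first. The struct below reproduces the fixed FieldGapsComparator verbatim; only the driver around it is invented:

```c++
#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

struct FieldGap {
  uint32_t start_offset;
  uint32_t size;
};

struct FieldGapsComparator {
  // std::priority_queue pops the *largest* element under this "less than"
  // relation, so returning true when lhs is smaller yields largest-gap-first.
  bool operator()(const FieldGap& lhs, const FieldGap& rhs) const {
    return lhs.size < rhs.size ||
           (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset);
  }
};

int main() {
  std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> gaps;
  gaps.push({16, 2});
  gaps.push({8, 4});
  gaps.push({24, 4});
  // Largest gap first; ties broken by smallest start offset.
  while (!gaps.empty()) {
    std::cout << "size=" << gaps.top().size
              << " at offset " << gaps.top().start_offset << '\n';
    gaps.pop();
  }
  // Prints: size=4 at offset 8, size=4 at offset 24, size=2 at offset 16.
}
```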
+ if (self != nullptr) { + ScopedAssertNoThreadSuspension nts(self, __FUNCTION__); + VisitClassesInternal(visitor); + } else { + VisitClassesInternal(visitor); + } } -struct GetClassesVisitorArrayArg { - Handle<mirror::ObjectArray<mirror::Class>>* classes; - int32_t index; - bool success; +class GetClassesInToVector : public ClassVisitor { + public: + bool Visit(mirror::Class* klass) OVERRIDE { + classes_.push_back(klass); + return true; + } + std::vector<mirror::Class*> classes_; }; -static bool GetClassesVisitorArray(mirror::Class* c, void* varg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - GetClassesVisitorArrayArg* arg = reinterpret_cast<GetClassesVisitorArrayArg*>(varg); - if (arg->index < (*arg->classes)->GetLength()) { - (*arg->classes)->Set(arg->index, c); - arg->index++; - return true; - } else { - arg->success = false; +class GetClassInToObjectArray : public ClassVisitor { + public: + explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr) + : arr_(arr), index_(0) {} + + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ++index_; + if (index_ <= arr_->GetLength()) { + arr_->Set(index_ - 1, klass); + return true; + } return false; } -} -void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) { + bool Succeeded() const SHARED_REQUIRES(Locks::mutator_lock_) { + return index_ <= arr_->GetLength(); + } + + private: + mirror::ObjectArray<mirror::Class>* const arr_; + int32_t index_; +}; + +void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) { // TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem // is avoiding duplicates. if (!kMovingClasses) { - std::set<mirror::Class*> classes; - VisitClasses(GetClassesVisitorSet, &classes); - for (mirror::Class* klass : classes) { - if (!visitor(klass, arg)) { + GetClassesInToVector accumulator; + VisitClasses(&accumulator); + for (mirror::Class* klass : accumulator.classes_) { + if (!visitor->Visit(klass)) { return; } } } else { - Thread* self = Thread::Current(); + Thread* const self = Thread::Current(); StackHandleScope<1> hs(self); - MutableHandle<mirror::ObjectArray<mirror::Class>> classes = - hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr); - GetClassesVisitorArrayArg local_arg; - local_arg.classes = &classes; - local_arg.success = false; + auto classes = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr); // We size the array assuming classes won't be added to the class table during the visit. // If this assumption fails we iterate again. - while (!local_arg.success) { + while (true) { size_t class_table_size; { ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - class_table_size = class_table_.Size() + pre_zygote_class_table_.Size(); + // Add 100 in case new classes get loaded when we are filling in the object array. + class_table_size = NumZygoteClasses() + NumNonZygoteClasses() + 100; } mirror::Class* class_type = mirror::Class::GetJavaLangClass(); mirror::Class* array_of_class = FindArrayClass(self, &class_type); classes.Assign( mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, class_table_size)); CHECK(classes.Get() != nullptr); // OOME. 
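The retry loop here (continuing below) sizes the array from a padded count taken under the lock, fills it through a visitor, and starts over if classes were loaded concurrently in the meantime. The shape of that loop, reduced to plain containers (every name in this sketch is illustrative):

```c++
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for the class table; in ART this is only readable under a lock.
std::vector<int> g_classes = {1, 2, 3};

// Copies a snapshot of g_classes into a fixed-capacity buffer. Returns false
// if the buffer was too small (the table grew concurrently).
bool FillSnapshot(std::vector<int>* out) {
  size_t index = 0;
  for (int klass : g_classes) {
    if (index >= out->size()) {
      return false;  // Overflowed: caller must re-size and retry.
    }
    (*out)[index++] = klass;
  }
  out->resize(index);  // Drop the unused slack.
  return true;
}

int main() {
  std::vector<int> snapshot;
  while (true) {
    // Pad the size guess so concurrent additions usually still fit.
    snapshot.assign(g_classes.size() + 100, 0);
    if (FillSnapshot(&snapshot)) {
      break;
    }
  }
  std::cout << "snapshot of " << snapshot.size() << " classes\n";
}
```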
- local_arg.index = 0; - local_arg.success = true; - VisitClasses(GetClassesVisitorArray, &local_arg); + GetClassInToObjectArray accumulator(classes.Get()); + VisitClasses(&accumulator); + if (accumulator.Succeeded()) { + break; + } } for (int32_t i = 0; i < classes->GetLength(); ++i) { // If the class table shrank during creation of the clases array we expect null elements. If // the class table grew then the loop repeats. If classes are created after the loop has // finished then we don't visit. mirror::Class* klass = classes->Get(i); - if (klass != nullptr && !visitor(klass, arg)) { + if (klass != nullptr && !visitor->Visit(klass)) { return; } } @@ -1456,6 +1460,10 @@ ClassLinker::~ClassLinker() { mirror::LongArray::ResetArrayClass(); mirror::ShortArray::ResetArrayClass(); STLDeleteElements(&oat_files_); + for (GcRoot<mirror::ClassLoader>& root : class_loaders_) { + ClassTable* const class_table = root.Read()->GetClassTable(); + delete class_table; + } } mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) { @@ -1598,7 +1606,7 @@ ClassPathEntry FindInClassPath(const char* descriptor, static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return class_loader == nullptr || class_loader->GetClass() == soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_BootClassLoader); @@ -2119,7 +2127,7 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl // Returns true if the method must run with interpreter, false otherwise. static bool NeedsInterpreter(ArtMethod* method, const void* quick_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (quick_code == nullptr) { // No code: need interpreter. // May return true for native code, in the case of generic JNI @@ -2206,11 +2214,6 @@ void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class // Install entry point from interpreter. bool enter_interpreter = NeedsInterpreter(method, method->GetEntryPointFromQuickCompiledCode()); - if (enter_interpreter && !method->IsNative()) { - method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge); - } else { - method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); - } if (method->IsAbstract()) { method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge()); @@ -2286,23 +2289,35 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file, } } -ArtField* ClassLinker::AllocArtFieldArray(Thread* self, size_t length) { - auto* const la = Runtime::Current()->GetLinearAlloc(); - auto* ptr = reinterpret_cast<ArtField*>(la->AllocArray<ArtField>(self, length)); - CHECK(ptr!= nullptr); - std::uninitialized_fill_n(ptr, length, ArtField()); - return ptr; +LengthPrefixedArray<ArtField>* ClassLinker::AllocArtFieldArray(Thread* self, size_t length) { + if (length == 0) { + return nullptr; + } + // If the ArtField alignment changes, review all uses of LengthPrefixedArray<ArtField>. 
+ static_assert(alignof(ArtField) == 4, "ArtField alignment is expected to be 4."); + size_t storage_size = LengthPrefixedArray<ArtField>::ComputeSize(length); + void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size); + auto* ret = new(array_storage) LengthPrefixedArray<ArtField>(length); + CHECK(ret != nullptr); + std::uninitialized_fill_n(&ret->At(0), length, ArtField()); + return ret; } -ArtMethod* ClassLinker::AllocArtMethodArray(Thread* self, size_t length) { - const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_); - uintptr_t ptr = reinterpret_cast<uintptr_t>( - Runtime::Current()->GetLinearAlloc()->Alloc(self, method_size * length)); - CHECK_NE(ptr, 0u); +LengthPrefixedArray<ArtMethod>* ClassLinker::AllocArtMethodArray(Thread* self, size_t length) { + if (length == 0) { + return nullptr; + } + const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_); + const size_t method_size = ArtMethod::Size(image_pointer_size_); + const size_t storage_size = + LengthPrefixedArray<ArtMethod>::ComputeSize(length, method_size, method_alignment); + void* array_storage = Runtime::Current()->GetLinearAlloc()->Alloc(self, storage_size); + auto* ret = new (array_storage) LengthPrefixedArray<ArtMethod>(length); + CHECK(ret != nullptr); for (size_t i = 0; i < length; ++i) { - new(reinterpret_cast<void*>(ptr + i * method_size)) ArtMethod; + new(reinterpret_cast<void*>(&ret->At(i, method_size, method_alignment))) ArtMethod; } - return reinterpret_cast<ArtMethod*>(ptr); + return ret; } void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, @@ -2317,8 +2332,7 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, // We allow duplicate definitions of the same field in a class_data_item // but ignore the repeated indexes here, b/21868015. ClassDataItemIterator it(dex_file, class_data); - ArtField* sfields = - it.NumStaticFields() != 0 ? AllocArtFieldArray(self, it.NumStaticFields()) : nullptr; + LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, it.NumStaticFields()); size_t num_sfields = 0; uint32_t last_field_idx = 0u; for (; it.HasNextStaticField(); it.Next()) { @@ -2326,17 +2340,15 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, DCHECK_GE(field_idx, last_field_idx); // Ordering enforced by DexFileVerifier. if (num_sfields == 0 || LIKELY(field_idx > last_field_idx)) { DCHECK_LT(num_sfields, it.NumStaticFields()); - LoadField(it, klass, &sfields[num_sfields]); + LoadField(it, klass, &sfields->At(num_sfields)); ++num_sfields; last_field_idx = field_idx; } } - klass->SetSFields(sfields); - klass->SetNumStaticFields(num_sfields); + klass->SetSFieldsPtr(sfields); DCHECK_EQ(klass->NumStaticFields(), num_sfields); // Load instance fields. - ArtField* ifields = - it.NumInstanceFields() != 0 ? AllocArtFieldArray(self, it.NumInstanceFields()) : nullptr; + LengthPrefixedArray<ArtField>* ifields = AllocArtFieldArray(self, it.NumInstanceFields()); size_t num_ifields = 0u; last_field_idx = 0u; for (; it.HasNextInstanceField(); it.Next()) { @@ -2344,7 +2356,7 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, DCHECK_GE(field_idx, last_field_idx); // Ordering enforced by DexFileVerifier. 
if (num_ifields == 0 || LIKELY(field_idx > last_field_idx)) { DCHECK_LT(num_ifields, it.NumInstanceFields()); - LoadField(it, klass, &ifields[num_ifields]); + LoadField(it, klass, &ifields->At(num_ifields)); ++num_ifields; last_field_idx = field_idx; } @@ -2356,18 +2368,11 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, << ", unique instance fields: " << num_ifields << "/" << it.NumInstanceFields() << ")"; // NOTE: Not shrinking the over-allocated sfields/ifields. } - klass->SetIFields(ifields); - klass->SetNumInstanceFields(num_ifields); + klass->SetIFieldsPtr(ifields); DCHECK_EQ(klass->NumInstanceFields(), num_ifields); // Load methods. - if (it.NumDirectMethods() != 0) { - klass->SetDirectMethodsPtr(AllocArtMethodArray(self, it.NumDirectMethods())); - } - klass->SetNumDirectMethods(it.NumDirectMethods()); - if (it.NumVirtualMethods() != 0) { - klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, it.NumVirtualMethods())); - } - klass->SetNumVirtualMethods(it.NumVirtualMethods()); + klass->SetDirectMethodsPtr(AllocArtMethodArray(self, it.NumDirectMethods())); + klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, it.NumVirtualMethods())); size_t class_def_method_index = 0; uint32_t last_dex_method_index = DexFile::kDexNoIndex; size_t last_class_def_method_index = 0; @@ -2395,6 +2400,8 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file, } DCHECK(!it.HasNext()); } + // Ensure that the card is marked so that remembered sets pick up native roots. + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass.Get()); self->AllowThreadSuspension(); } @@ -2476,8 +2483,8 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, bool ClassLinker::IsDexFileRegisteredLocked(const DexFile& dex_file) { dex_lock_.AssertSharedHeld(Thread::Current()); - for (size_t i = 0; i != dex_caches_.size(); ++i) { - mirror::DexCache* dex_cache = GetDexCache(i); + for (GcRoot<mirror::DexCache>& root : dex_caches_) { + mirror::DexCache* dex_cache = root.Read(); if (dex_cache->GetDexFile() == &dex_file) { return true; } @@ -2775,8 +2782,7 @@ mirror::Class* ClassLinker::FindPrimitiveClass(char type) { return nullptr; } -mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass, - size_t hash) { +mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) { if (VLOG_IS_ON(class_linker)) { mirror::DexCache* dex_cache = klass->GetDexCache(); std::string source; @@ -2787,11 +2793,13 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k LOG(INFO) << "Loaded class " << descriptor << source; } WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - mirror::Class* existing = LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash); + mirror::ClassLoader* const class_loader = klass->GetClassLoader(); + ClassTable* const class_table = InsertClassTableForClassLoader(class_loader); + mirror::Class* existing = class_table->Lookup(descriptor, hash); if (existing != nullptr) { return existing; } - if (kIsDebugBuild && !klass->IsTemp() && klass->GetClassLoader() == nullptr && + if (kIsDebugBuild && !klass->IsTemp() && class_loader == nullptr && dex_cache_image_class_lookup_required_) { // Check a class loaded with the system class loader matches one in the image if the class // is in the image. 
@@ -2801,118 +2809,63 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k } } VerifyObject(klass); - class_table_.InsertWithHash(GcRoot<mirror::Class>(klass), hash); + class_table->InsertWithHash(klass, hash); + if (class_loader != nullptr) { + // This is necessary because we need to have the card dirtied for remembered sets. + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader); + } if (log_new_class_table_roots_) { new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); } return nullptr; } -void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods, - size_t new_num_methods) { - // classlinker_classes_lock_ is used to guard against races between root marking and changing the - // direct and virtual method pointers. - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - klass->SetNumVirtualMethods(new_num_methods); +void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass, + LengthPrefixedArray<ArtMethod>* new_methods) { klass->SetVirtualMethodsPtr(new_methods); - if (log_new_class_table_roots_) { - new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); - } -} - -mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* klass, - size_t hash) { - WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - auto existing_it = class_table_.FindWithHash(std::make_pair(descriptor, klass->GetClassLoader()), - hash); - CHECK(existing_it != class_table_.end()); - mirror::Class* existing = existing_it->Read(); - CHECK_NE(existing, klass) << descriptor; - CHECK(!existing->IsResolved()) << descriptor; - CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor; - - CHECK(!klass->IsTemp()) << descriptor; - if (kIsDebugBuild && klass->GetClassLoader() == nullptr && - dex_cache_image_class_lookup_required_) { - // Check a class loaded with the system class loader matches one in the image if the class - // is in the image. - existing = LookupClassFromImage(descriptor); - if (existing != nullptr) { - CHECK_EQ(klass, existing) << descriptor; - } - } - VerifyObject(klass); - - // Update the element in the hash set. - *existing_it = GcRoot<mirror::Class>(klass); - if (log_new_class_table_roots_) { - new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); - } - - return existing; + // Need to mark the card so that the remembered sets and mod union tables get updated. 
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass); } bool ClassLinker::RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) { WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - auto pair = std::make_pair(descriptor, class_loader); - auto it = class_table_.Find(pair); - if (it != class_table_.end()) { - class_table_.Erase(it); - return true; - } - it = pre_zygote_class_table_.Find(pair); - if (it != pre_zygote_class_table_.end()) { - pre_zygote_class_table_.Erase(it); - return true; - } - return false; + ClassTable* const class_table = ClassTableForClassLoader(class_loader); + return class_table != nullptr && class_table->Remove(descriptor); } mirror::Class* ClassLinker::LookupClass(Thread* self, const char* descriptor, size_t hash, mirror::ClassLoader* class_loader) { { ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - mirror::Class* result = LookupClassFromTableLocked(descriptor, class_loader, hash); - if (result != nullptr) { - return result; + ClassTable* const class_table = ClassTableForClassLoader(class_loader); + if (class_table != nullptr) { + mirror::Class* result = class_table->Lookup(descriptor, hash); + if (result != nullptr) { + return result; + } } } if (class_loader != nullptr || !dex_cache_image_class_lookup_required_) { return nullptr; - } else { - // Lookup failed but need to search dex_caches_. - mirror::Class* result = LookupClassFromImage(descriptor); - if (result != nullptr) { - InsertClass(descriptor, result, hash); - } else { - // Searching the image dex files/caches failed, we don't want to get into this situation - // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image - // classes into the class table. - constexpr uint32_t kMaxFailedDexCacheLookups = 1000; - if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) { - MoveImageClassesToClassTable(); - } - } - return result; } -} - -mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor, - mirror::ClassLoader* class_loader, - size_t hash) { - auto descriptor_pair = std::make_pair(descriptor, class_loader); - auto it = pre_zygote_class_table_.FindWithHash(descriptor_pair, hash); - if (it == pre_zygote_class_table_.end()) { - it = class_table_.FindWithHash(descriptor_pair, hash); - if (it == class_table_.end()) { - return nullptr; + // Lookup failed but need to search dex_caches_. + mirror::Class* result = LookupClassFromImage(descriptor); + if (result != nullptr) { + result = InsertClass(descriptor, result, hash); + } else { + // Searching the image dex files/caches failed, we don't want to get into this situation + // often as map searches are faster, so after kMaxFailedDexCacheLookups move all image + // classes into the class table. 
+ constexpr uint32_t kMaxFailedDexCacheLookups = 1000; + if (++failed_dex_cache_class_lookups_ > kMaxFailedDexCacheLookups) { + MoveImageClassesToClassTable(); } } - return it->Read(); + return result; } static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetImageSpace(); CHECK(image != nullptr); mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches); @@ -2928,6 +2881,7 @@ void ClassLinker::MoveImageClassesToClassTable() { ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table"); mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(); std::string temp; + ClassTable* const class_table = InsertClassTableForClassLoader(nullptr); for (int32_t i = 0; i < dex_caches->GetLength(); i++) { mirror::DexCache* dex_cache = dex_caches->Get(i); mirror::ObjectArray<mirror::Class>* types = dex_cache->GetResolvedTypes(); @@ -2937,12 +2891,12 @@ void ClassLinker::MoveImageClassesToClassTable() { DCHECK(klass->GetClassLoader() == nullptr); const char* descriptor = klass->GetDescriptor(&temp); size_t hash = ComputeModifiedUtf8Hash(descriptor); - mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash); + mirror::Class* existing = class_table->Lookup(descriptor, hash); if (existing != nullptr) { CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != " << PrettyClassAndClassLoader(klass); } else { - class_table_.Insert(GcRoot<mirror::Class>(klass)); + class_table->Insert(klass); if (log_new_class_table_roots_) { new_class_roots_.push_back(GcRoot<mirror::Class>(klass)); } @@ -2955,9 +2909,13 @@ void ClassLinker::MoveImageClassesToClassTable() { void ClassLinker::MoveClassTableToPreZygote() { WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - DCHECK(pre_zygote_class_table_.Empty()); - pre_zygote_class_table_ = std::move(class_table_); - class_table_.Clear(); + boot_class_table_.FreezeSnapshot(); + for (GcRoot<mirror::ClassLoader>& root : class_loaders_) { + ClassTable* const class_table = root.Read()->GetClassTable(); + if (class_table != nullptr) { + class_table->FreezeSnapshot(); + } + } } mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) { @@ -2989,31 +2947,18 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas MoveImageClassesToClassTable(); } WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - while (true) { - auto it = class_table_.Find(descriptor); - if (it == class_table_.end()) { - break; - } - result.push_back(it->Read()); - class_table_.Erase(it); - } - for (mirror::Class* k : result) { - class_table_.Insert(GcRoot<mirror::Class>(k)); + const size_t hash = ComputeModifiedUtf8Hash(descriptor); + mirror::Class* klass = boot_class_table_.Lookup(descriptor, hash); + if (klass != nullptr) { + result.push_back(klass); } - size_t pre_zygote_start = result.size(); - // Now handle the pre zygote table. - // Note: This dirties the pre-zygote table but shouldn't be an issue since LookupClasses is only - // called from the debugger. - while (true) { - auto it = pre_zygote_class_table_.Find(descriptor); - if (it == pre_zygote_class_table_.end()) { - break; + for (GcRoot<mirror::ClassLoader>& root : class_loaders_) { + // There can only be one class with the same descriptor per class loader. 
+ ClassTable* const class_table = root.Read()->GetClassTable(); + klass = class_table->Lookup(descriptor, hash); + if (klass != nullptr) { + result.push_back(klass); } - result.push_back(it->Read()); - pre_zygote_class_table_.Erase(it); - } - for (size_t i = pre_zygote_start; i < result.size(); ++i) { - pre_zygote_class_table_.Insert(GcRoot<mirror::Class>(result[i])); } } @@ -3046,6 +2991,18 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) { mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifyingAtRuntime, self); } + // Skip verification if we are forcing a soft fail. + // This has to be before the normal verification enabled check, + // since technically verification is disabled in this mode. + if (UNLIKELY(Runtime::Current()->IsVerificationSoftFail())) { + // Force verification to be a 'soft failure'. + mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self); + // As this is a fake verified status, make sure the methods are _not_ marked preverified + // later. + klass->SetPreverified(); + return; + } + // Skip verification if disabled. if (!Runtime::Current()->IsVerificationEnabled()) { mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self); @@ -3271,14 +3228,13 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, } const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(*code_item, 0); uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr); - ClassLinker* linker = Runtime::Current()->GetClassLinker(); for (uint32_t idx = 0; idx < handlers_size; idx++) { CatchHandlerIterator iterator(handlers_ptr); for (; iterator.HasNext(); iterator.Next()) { // Ensure exception types are resolved so that they don't need resolution to be delivered, // unresolved exception types will be ignored by exception delivery if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) { - mirror::Class* exception_type = linker->ResolveType(iterator.GetHandlerTypeIndex(), method); + mirror::Class* exception_type = ResolveType(iterator.GetHandlerTypeIndex(), method); if (exception_type == nullptr) { DCHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); @@ -3310,7 +3266,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache()); mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self); std::string descriptor(GetDescriptorForProxy(klass.Get())); - size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str()); + const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str()); // Insert the class before loading the fields as the field roots // (ArtField::declaring_class_) are only visited from the class @@ -3321,25 +3277,24 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& // Instance fields are inherited, but we add a couple of static fields... const size_t num_fields = 2; - ArtField* sfields = AllocArtFieldArray(self, num_fields); - klass->SetSFields(sfields); - klass->SetNumStaticFields(num_fields); + LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, num_fields); + klass->SetSFieldsPtr(sfields); // 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by // our proxy, so Class.getInterfaces doesn't return the flattened set. 
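Back in the VerifyClass hunk above, the soft-fail gate has to precede the "verification disabled" early-out because soft-fail mode also reports verification as disabled. A condensed model of just that control flow (flags reduced to booleans):

```c++
#include <iostream>

// Reduced model of the two early-outs in ClassLinker::VerifyClass. In
// soft-fail mode the runtime also reports verification as disabled, which is
// why the soft-fail check must come first.
const char* VerifyClass(bool soft_fail, bool verification_enabled) {
  if (soft_fail) {
    // Force a 'soft failure': the class is marked verified, but flagged so
    // its methods still get runtime re-checks.
    return "soft-fail: verified, with runtime re-checks";
  }
  if (!verification_enabled) {
    return "verification disabled: trusted as verified";
  }
  return "run the real verifier";
}

int main() {
  // Soft-fail mode: enabled=false as well, so only the check order decides.
  std::cout << VerifyClass(true, false) << '\n';  // Soft-fail branch wins.
  std::cout << VerifyClass(false, true) << '\n';  // Normal verification.
}
```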
- ArtField* interfaces_sfield = &sfields[0]; - interfaces_sfield->SetDexFieldIndex(0); - interfaces_sfield->SetDeclaringClass(klass.Get()); - interfaces_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); + ArtField& interfaces_sfield = sfields->At(0); + interfaces_sfield.SetDexFieldIndex(0); + interfaces_sfield.SetDeclaringClass(klass.Get()); + interfaces_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); // 2. Create a static field 'throws' that holds exceptions thrown by our methods. - ArtField* throws_sfield = &sfields[1]; - throws_sfield->SetDexFieldIndex(1); - throws_sfield->SetDeclaringClass(klass.Get()); - throws_sfield->SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); + ArtField& throws_sfield = sfields->At(1); + throws_sfield.SetDexFieldIndex(1); + throws_sfield.SetDeclaringClass(klass.Get()); + throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal); // Proxies have 1 direct method, the constructor - auto* directs = AllocArtMethodArray(self, 1); + LengthPrefixedArray<ArtMethod>* directs = AllocArtMethodArray(self, 1); // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we // want to throw OOM in the future. if (UNLIKELY(directs == nullptr)) { @@ -3347,13 +3302,12 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& return nullptr; } klass->SetDirectMethodsPtr(directs); - klass->SetNumDirectMethods(1u); CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_)); // Create virtual method using specified prototypes. auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods)); DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass()) - << PrettyClass(h_methods->GetClass()); + << PrettyClass(h_methods->GetClass()); const size_t num_virtual_methods = h_methods->GetLength(); auto* virtuals = AllocArtMethodArray(self, num_virtual_methods); // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we @@ -3363,7 +3317,6 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& return nullptr; } klass->SetVirtualMethodsPtr(virtuals); - klass->SetNumVirtualMethods(num_virtual_methods); for (size_t i = 0; i < num_virtual_methods; ++i) { auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); auto* prototype = h_methods->Get(i)->GetArtMethod(); @@ -3395,12 +3348,12 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& CHECK_NE(klass.Get(), new_class.Get()); klass.Assign(new_class.Get()); - CHECK_EQ(interfaces_sfield->GetDeclaringClass(), klass.Get()); - interfaces_sfield->SetObject<false>(klass.Get(), - soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)); - CHECK_EQ(throws_sfield->GetDeclaringClass(), klass.Get()); - throws_sfield->SetObject<false>(klass.Get(), - soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws)); + CHECK_EQ(interfaces_sfield.GetDeclaringClass(), klass.Get()); + interfaces_sfield.SetObject<false>(klass.Get(), + soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)); + CHECK_EQ(throws_sfield.GetDeclaringClass(), klass.Get()); + throws_sfield.SetObject<false>( + klass.Get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws)); { // Lock on klass is released. Lock new class object. 

@@ -3410,7 +3363,7 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& // sanity checks if (kIsDebugBuild) { - CHECK(klass->GetIFields() == nullptr); + CHECK(klass->GetIFieldsPtr() == nullptr); CheckProxyConstructor(klass->GetDirectMethod(0, image_pointer_size_)); for (size_t i = 0; i < num_virtual_methods; ++i) { @@ -3444,8 +3397,7 @@ std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) { return DotToDescriptor(name->ToModifiedUtf8().c_str()); } -ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, - ArtMethod* proxy_method) { +ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) { DCHECK(proxy_class->IsProxyClass()); DCHECK(proxy_method->IsProxyMethod()); { @@ -3516,7 +3468,6 @@ void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prot // At runtime the method looks like a reference and argument saving method, clone the code // related parameters from this method. out->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler()); - out->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); } void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const { @@ -3775,7 +3726,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self, ObjectLock<mirror::Class>& lock) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { while (true) { self->AssertNoPendingException(); CHECK(!klass->IsInitialized()); @@ -3819,7 +3770,7 @@ static void ThrowSignatureCheckResolveReturnTypeException(Handle<mirror::Class> Handle<mirror::Class> super_klass, ArtMethod* method, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(Thread::Current()->IsExceptionPending()); DCHECK(!m->IsProxyMethod()); const DexFile* dex_file = m->GetDexFile(); @@ -3843,7 +3794,7 @@ static void ThrowSignatureCheckResolveArgException(Handle<mirror::Class> klass, ArtMethod* method, ArtMethod* m, uint32_t index, uint32_t arg_type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(Thread::Current()->IsExceptionPending()); DCHECK(!m->IsProxyMethod()); const DexFile* dex_file = m->GetDexFile(); @@ -3863,7 +3814,7 @@ static void ThrowSignatureMismatch(Handle<mirror::Class> klass, Handle<mirror::Class> super_klass, ArtMethod* method, const std::string& error_msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ThrowLinkageError(klass.Get(), "Class %s method %s resolves differently in %s %s: %s", PrettyDescriptor(klass.Get()).c_str(), @@ -3878,7 +3829,7 @@ static bool HasSameSignatureWithDifferentClassLoaders(Thread* self, Handle<mirror::Class> super_klass, ArtMethod* method1, ArtMethod* method2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { { StackHandleScope<1> hs(self); Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType())); @@ -4024,35 +3975,55 @@ bool ClassLinker::EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) { - ArtField* fields = new_class->GetIFields(); - DCHECK_EQ(temp_class->NumInstanceFields(), new_class->NumInstanceFields()); - for (size_t i = 0, count = new_class->NumInstanceFields(); i < count; i++) 
{ - if (fields[i].GetDeclaringClass() == temp_class) { - fields[i].SetDeclaringClass(new_class); + DCHECK_EQ(temp_class->NumInstanceFields(), 0u); + for (ArtField& field : new_class->GetIFields()) { + if (field.GetDeclaringClass() == temp_class) { + field.SetDeclaringClass(new_class); } } - fields = new_class->GetSFields(); - DCHECK_EQ(temp_class->NumStaticFields(), new_class->NumStaticFields()); - for (size_t i = 0, count = new_class->NumStaticFields(); i < count; i++) { - if (fields[i].GetDeclaringClass() == temp_class) { - fields[i].SetDeclaringClass(new_class); + DCHECK_EQ(temp_class->NumStaticFields(), 0u); + for (ArtField& field : new_class->GetSFields()) { + if (field.GetDeclaringClass() == temp_class) { + field.SetDeclaringClass(new_class); } } - DCHECK_EQ(temp_class->NumDirectMethods(), new_class->NumDirectMethods()); + DCHECK_EQ(temp_class->NumDirectMethods(), 0u); for (auto& method : new_class->GetDirectMethods(image_pointer_size_)) { if (method.GetDeclaringClass() == temp_class) { method.SetDeclaringClass(new_class); } } - DCHECK_EQ(temp_class->NumVirtualMethods(), new_class->NumVirtualMethods()); + DCHECK_EQ(temp_class->NumVirtualMethods(), 0u); for (auto& method : new_class->GetVirtualMethods(image_pointer_size_)) { if (method.GetDeclaringClass() == temp_class) { method.SetDeclaringClass(new_class); } } + + // Make sure the remembered set and mod-union tables know that we updated some of the native + // roots. + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class); +} + +ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) { + if (class_loader == nullptr) { + return &boot_class_table_; + } + ClassTable* class_table = class_loader->GetClassTable(); + if (class_table == nullptr) { + class_table = new ClassTable; + class_loaders_.push_back(class_loader); + // Don't already have a class table, add it to the class loader. + class_loader->SetClassTable(class_table); + } + return class_table; +} + +ClassTable* ClassLinker::ClassTableForClassLoader(mirror::ClassLoader* class_loader) { + return class_loader == nullptr ? &boot_class_table_ : class_loader->GetClassTable(); } bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass, @@ -4096,6 +4067,14 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: // Retire the temporary class and create the correctly sized resolved class. StackHandleScope<1> hs(self); auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_)); + // Set arrays to null since we don't want to have multiple classes with the same ArtField or + // ArtMethod array pointers. If this occurs, it causes bugs in remembered sets since the GC + // may not see any references to the target space and clean the card for a class if another + // class had the same array pointer. 
+ klass->SetDirectMethodsPtrUnchecked(nullptr); + klass->SetVirtualMethodsPtr(nullptr); + klass->SetSFieldsPtrUnchecked(nullptr); + klass->SetIFieldsPtrUnchecked(nullptr); if (UNLIKELY(h_new_class.Get() == nullptr)) { self->AssertPendingOOMException(); mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self); @@ -4105,9 +4084,26 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror: CHECK_EQ(h_new_class->GetClassSize(), class_size); ObjectLock<mirror::Class> lock(self, h_new_class); FixupTemporaryDeclaringClass(klass.Get(), h_new_class.Get()); - mirror::Class* existing = UpdateClass(descriptor, h_new_class.Get(), - ComputeModifiedUtf8Hash(descriptor)); - CHECK(existing == nullptr || existing == klass.Get()); + + { + WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); + mirror::ClassLoader* const class_loader = h_new_class.Get()->GetClassLoader(); + ClassTable* const table = InsertClassTableForClassLoader(class_loader); + mirror::Class* existing = table->UpdateClass(descriptor, h_new_class.Get(), + ComputeModifiedUtf8Hash(descriptor)); + CHECK_EQ(existing, klass.Get()); + if (kIsDebugBuild && class_loader == nullptr && dex_cache_image_class_lookup_required_) { + // Check a class loaded with the system class loader matches one in the image if the class + // is in the image. + mirror::Class* const image_class = LookupClassFromImage(descriptor); + if (image_class != nullptr) { + CHECK_EQ(klass.Get(), existing) << descriptor; + } + } + if (log_new_class_table_roots_) { + new_class_roots_.push_back(GcRoot<mirror::Class>(h_new_class.Get())); + } + } // This will notify waiters on temp class that saw the not yet resolved class in the // class_table_ during EnsureResolved. @@ -4240,7 +4236,7 @@ static bool CheckSuperClassChange(Handle<mirror::Class> klass, const DexFile& dex_file, const DexFile::ClassDef& class_def, mirror::Class* super_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { // Check for unexpected changes in the superclass. // Quick check 1) is the super_class class-loader the boot class loader? This always has // precedence. @@ -4383,6 +4379,11 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) { klass->SetFinalizable(); } + // Inherit class loader flag from super class. + if (super->IsClassLoaderClass()) { + klass->SetClassLoaderClass(); + } + // Inherit reference flags (if any) from the superclass.
int reference_flags = (super->GetAccessFlags() & kAccReferenceFlagsMask); if (reference_flags != 0) { @@ -4433,7 +4434,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass, class MethodNameAndSignatureComparator FINAL : public ValueObject { public: explicit MethodNameAndSignatureComparator(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + SHARED_REQUIRES(Locks::mutator_lock_) : dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())), name_(nullptr), name_len_(0) { DCHECK(!method->IsProxyMethod()) << PrettyMethod(method); @@ -4447,7 +4448,7 @@ class MethodNameAndSignatureComparator FINAL : public ValueObject { } bool HasSameNameAndSignature(ArtMethod* other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!other->IsProxyMethod()) << PrettyMethod(other); const DexFile* other_dex_file = other->GetDexFile(); const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex()); @@ -4483,7 +4484,7 @@ class LinkVirtualHashTable { image_pointer_size_(image_pointer_size) { std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_); } - void Add(uint32_t virtual_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Add(uint32_t virtual_method_index) SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking( virtual_method_index, image_pointer_size_); const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName(); @@ -4498,7 +4499,7 @@ class LinkVirtualHashTable { hash_table_[index] = virtual_method_index; } uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const char* name = comparator->GetName(); uint32_t hash = ComputeModifiedUtf8Hash(name); size_t index = hash % hash_size_; @@ -4691,7 +4692,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass const bool have_interfaces = interfaces.Get() != nullptr; const size_t num_interfaces = have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces(); - const size_t method_size = ArtMethod::ObjectSize(image_pointer_size_); + const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_); + const size_t method_size = ArtMethod::Size(image_pointer_size_); if (num_interfaces == 0) { if (super_ifcount == 0) { // Class implements no interfaces. @@ -4881,7 +4883,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass const bool super_interface = is_super && extend_super_iftable; auto method_array(hs2.NewHandle(iftable->GetMethodArray(i))); - ArtMethod* input_virtual_methods = nullptr; + LengthPrefixedArray<ArtMethod>* input_virtual_methods = nullptr; Handle<mirror::PointerArray> input_vtable_array = NullHandle<mirror::PointerArray>(); int32_t input_array_length = 0; if (super_interface) { @@ -4916,8 +4918,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass // matter which direction we go. We walk it backward anyway.) for (k = input_array_length - 1; k >= 0; --k) { ArtMethod* vtable_method = input_virtual_methods != nullptr ? 
- reinterpret_cast<ArtMethod*>( - reinterpret_cast<uintptr_t>(input_virtual_methods) + method_size * k) : + &input_virtual_methods->At(k, method_size, method_alignment) : input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_); ArtMethod* vtable_method_for_name_comparison = vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
@@ -4973,21 +4974,30 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass const size_t old_method_count = klass->NumVirtualMethods(); const size_t new_method_count = old_method_count + miranda_methods.size(); // Attempt to realloc to save RAM if possible. - ArtMethod* old_virtuals = klass->GetVirtualMethodsPtr(); + LengthPrefixedArray<ArtMethod>* old_virtuals = klass->GetVirtualMethodsPtr(); // The Realloced virtual methods aren't visible from the class roots, so there is no issue // where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the // realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since // CopyFrom has internal read barriers. - auto* virtuals = reinterpret_cast<ArtMethod*>(runtime->GetLinearAlloc()->Realloc( - self, old_virtuals, old_method_count * method_size, new_method_count * method_size)); + const size_t old_size = old_virtuals != nullptr + ? LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count, + method_size, + method_alignment) + : 0u; + const size_t new_size = LengthPrefixedArray<ArtMethod>::ComputeSize(new_method_count, + method_size, + method_alignment); + auto* virtuals = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>( + runtime->GetLinearAlloc()->Realloc(self, old_virtuals, old_size, new_size)); if (UNLIKELY(virtuals == nullptr)) { self->AssertPendingOOMException(); + self->EndAssertNoThreadSuspension(old_cause); return false; } ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter()); if (virtuals != old_virtuals) { // Maps from heap allocated miranda method to linear alloc miranda method. - StrideIterator<ArtMethod> out(reinterpret_cast<uintptr_t>(virtuals), method_size); + StrideIterator<ArtMethod> out = virtuals->Begin(method_size, method_alignment); // Copy over the old methods + miranda methods. for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) { move_table.emplace(&m, &*out);
@@ -4997,8 +5007,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass ++out; } } - StrideIterator<ArtMethod> out( - reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size); + StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment) + old_method_count); // Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and // we want the roots of the miranda methods to get visited. for (ArtMethod* mir_method : miranda_methods) {
@@ -5007,7 +5016,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass move_table.emplace(mir_method, &*out); ++out; } - UpdateClassVirtualMethods(klass.Get(), virtuals, new_method_count); + virtuals->SetLength(new_method_count); + UpdateClassVirtualMethods(klass.Get(), virtuals); // Done copying methods, they are all roots in the class now, so we can end the no thread // suspension assert.
self->EndAssertNoThreadSuspension(old_cause); @@ -5020,8 +5030,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass self->AssertPendingOOMException(); return false; } - out = StrideIterator<ArtMethod>( - reinterpret_cast<uintptr_t>(virtuals) + old_method_count * method_size, method_size); + out = virtuals->Begin(method_size, method_alignment) + old_method_count; size_t vtable_pos = old_vtable_count; for (size_t i = old_method_count; i < new_method_count; ++i) { // Leave the declaring class alone as type indices are relative to it @@ -5075,7 +5084,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass } // Put some random garbage in old virtuals to help find stale pointers. if (virtuals != old_virtuals) { - memset(old_virtuals, 0xFEu, ArtMethod::ObjectSize(image_pointer_size_) * old_method_count); + memset(old_virtuals, 0xFEu, old_size); } } else { self->EndAssertNoThreadSuspension(old_cause); @@ -5100,7 +5109,7 @@ bool ClassLinker::LinkStaticFields(Thread* self, Handle<mirror::Class> klass, si } struct LinkFieldsComparator { - explicit LinkFieldsComparator() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + explicit LinkFieldsComparator() SHARED_REQUIRES(Locks::mutator_lock_) { } // No thread safety analysis as will be called from STL. Checked lock held in constructor. bool operator()(ArtField* field1, ArtField* field2) @@ -5137,7 +5146,8 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ size_t* class_size) { self->AllowThreadSuspension(); const size_t num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields(); - ArtField* const fields = is_static ? klass->GetSFields() : klass->GetIFields(); + LengthPrefixedArray<ArtField>* const fields = is_static ? klass->GetSFieldsPtr() : + klass->GetIFieldsPtr(); // Initialize field_offset MemberOffset field_offset(0); @@ -5160,7 +5170,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ const char* old_no_suspend_cause = self->StartAssertNoThreadSuspension( "Naked ArtField references in deque"); for (size_t i = 0; i < num_fields; i++) { - grouped_and_sorted_fields.push_back(&fields[i]); + grouped_and_sorted_fields.push_back(&fields->At(i)); } std::sort(grouped_and_sorted_fields.begin(), grouped_and_sorted_fields.end(), LinkFieldsComparator()); @@ -5183,7 +5193,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4)); AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps); } - DCHECK(IsAligned<sizeof(mirror::HeapReference<mirror::Object>)>(field_offset.Uint32Value())); + DCHECK_ALIGNED(field_offset.Uint32Value(), sizeof(mirror::HeapReference<mirror::Object>)); grouped_and_sorted_fields.pop_front(); num_reference_fields++; field->SetOffset(field_offset); @@ -5205,7 +5215,8 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ // We know there are no non-reference fields in the Reference classes, and we know // that 'referent' is alphabetically last, so this is easy... 
CHECK_EQ(num_reference_fields, num_fields) << PrettyClass(klass.Get()); - CHECK_STREQ(fields[num_fields - 1].GetName(), "referent") << PrettyClass(klass.Get()); + CHECK_STREQ(fields->At(num_fields - 1).GetName(), "referent") + << PrettyClass(klass.Get()); --num_reference_fields; } @@ -5239,15 +5250,15 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ sizeof(mirror::HeapReference<mirror::Object>)); MemberOffset current_ref_offset = start_ref_offset; for (size_t i = 0; i < num_fields; i++) { - ArtField* field = &fields[i]; + ArtField* field = &fields->At(i); VLOG(class_linker) << "LinkFields: " << (is_static ? "static" : "instance") << " class=" << PrettyClass(klass.Get()) << " field=" << PrettyField(field) << " offset=" << field->GetOffsetDuringLinking(); if (i != 0) { - ArtField* const prev_field = &fields[i - 1]; + ArtField* const prev_field = &fields->At(i - 1); // NOTE: The field names can be the same. This is not possible in the Java language // but it's valid Java/dex bytecode and for example proguard can generate such bytecode. - CHECK_LE(strcmp(prev_field->GetName(), field->GetName()), 0); + DCHECK_LE(strcmp(prev_field->GetName(), field->GetName()), 0); } Primitive::Type type = field->GetTypeAsPrimitiveType(); bool is_primitive = type != Primitive::kPrimNot; @@ -5597,23 +5608,22 @@ const char* ClassLinker::MethodShorty(uint32_t method_idx, ArtMethod* referrer, return dex_file.GetMethodShorty(method_id, length); } -void ClassLinker::DumpAllClasses(int flags) { - if (dex_cache_image_class_lookup_required_) { - MoveImageClassesToClassTable(); - } - // TODO: at the time this was written, it wasn't safe to call PrettyField with the ClassLinker - // lock held, because it might need to resolve a field's type, which would try to take the lock. 
- std::vector<mirror::Class*> all_classes; - { - ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - for (GcRoot<mirror::Class>& it : class_table_) { - all_classes.push_back(it.Read()); - } - } +class DumpClassVisitor : public ClassVisitor { + public: + explicit DumpClassVisitor(int flags) : flags_(flags) {} - for (size_t i = 0; i < all_classes.size(); ++i) { - all_classes[i]->DumpClass(std::cerr, flags); + bool Visit(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + klass->DumpClass(LOG(ERROR), flags_); + return true; } + + private: + const int flags_; +}; + +void ClassLinker::DumpAllClasses(int flags) { + DumpClassVisitor visitor(flags); + VisitClasses(&visitor); } static OatFile::OatMethod CreateOatMethod(const void* code) {
@@ -5647,30 +5657,48 @@ void ClassLinker::SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const { OatFile::OatMethod oat_method = CreateOatMethod(method_code); oat_method.LinkMethod(method); - method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); } void ClassLinker::SetEntryPointsToInterpreter(ArtMethod* method) const { if (!method->IsNative()) { - method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge); method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge()); } else { const void* quick_method_code = GetQuickGenericJniStub(); OatFile::OatMethod oat_method = CreateOatMethod(quick_method_code); oat_method.LinkMethod(method); - method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); } } void ClassLinker::DumpForSigQuit(std::ostream& os) { - Thread* self = Thread::Current(); + ScopedObjectAccess soa(Thread::Current()); if (dex_cache_image_class_lookup_required_) { - ScopedObjectAccess soa(self); MoveImageClassesToClassTable(); } - ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes=" - << class_table_.Size() << "\n"; + ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_); + os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes=" + << NumNonZygoteClasses() << "\n"; +} + +size_t ClassLinker::NumZygoteClasses() const { + size_t sum = boot_class_table_.NumZygoteClasses(); + for (const GcRoot<mirror::ClassLoader>& root : class_loaders_) { + ClassTable* const class_table = root.Read()->GetClassTable(); + if (class_table != nullptr) { + sum += class_table->NumZygoteClasses(); + } + } + return sum; +} + +size_t ClassLinker::NumNonZygoteClasses() const { + size_t sum = boot_class_table_.NumNonZygoteClasses(); + for (const GcRoot<mirror::ClassLoader>& root : class_loaders_) { + ClassTable* const class_table = root.Read()->GetClassTable(); + if (class_table != nullptr) { + sum += class_table->NumNonZygoteClasses(); + } + } + return sum; } size_t ClassLinker::NumLoadedClasses() {
@@ -5679,7 +5707,7 @@ size_t ClassLinker::NumLoadedClasses() { } ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); // Only return non zygote classes since these are the ones which apps care about.
- return class_table_.Size(); + return NumNonZygoteClasses(); } pid_t ClassLinker::GetClassesLockOwner() {
@@ -5750,43 +5778,6 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) { return descriptor; } -std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root) - const { - std::string temp; - return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp)); -} - -bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, - const GcRoot<mirror::Class>& b) const { - if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) { - return false; - } - std::string temp; - return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp)); -} - -std::size_t ClassLinker::ClassDescriptorHashEquals::operator()( - const std::pair<const char*, mirror::ClassLoader*>& element) const { - return ComputeModifiedUtf8Hash(element.first); -} - -bool ClassLinker::ClassDescriptorHashEquals::operator()( - const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) const { - if (a.Read()->GetClassLoader() != b.second) { - return false; - } - return a.Read()->DescriptorEquals(b.first); -} - -bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, - const char* descriptor) const { - return a.Read()->DescriptorEquals(descriptor); -} - -std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descriptor) const { - return ComputeModifiedUtf8Hash(descriptor); -} - bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) { if (Runtime::Current()->UseJit()) { // JIT can have direct code pointers from any method to any other method.
@@ -5910,7 +5901,10 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi } ArtMethod* ClassLinker::CreateRuntimeMethod() { - ArtMethod* method = AllocArtMethodArray(Thread::Current(), 1); + const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_); + const size_t method_size = ArtMethod::Size(image_pointer_size_); + LengthPrefixedArray<ArtMethod>* method_array = AllocArtMethodArray(Thread::Current(), 1); + ArtMethod* method = &method_array->At(0, method_size, method_alignment); CHECK(method != nullptr); method->SetDexMethodIndex(DexFile::kDexNoIndex); CHECK(method->IsRuntimeMethod());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h index d9935cbfda..7243a25a48 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h
@@ -17,7 +17,6 @@ #ifndef ART_RUNTIME_CLASS_LINKER_H_ #define ART_RUNTIME_CLASS_LINKER_H_ -#include <deque> #include <string> #include <utility> #include <vector>
@@ -26,6 +25,7 @@ #include "base/hash_set.h" #include "base/macros.h" #include "base/mutex.h" +#include "class_table.h" #include "dex_file.h" #include "gc_root.h" #include "jni.h"
@@ -57,8 +57,6 @@ class Runtime; class ScopedObjectAccessAlreadyRunnable; template<size_t kNumReferences> class PACKED(4) StackHandleScope; -typedef bool (ClassVisitor)(mirror::Class* c, void* arg); - enum VisitRootFlags : uint8_t; class ClassLinker {
@@ -110,16 +108,16 @@ class ClassLinker { // Initialize class linker by bootstrapping from dex files. void InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Initialize class linker from one or more images.
- void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void InitFromImage() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. mirror::Class* FindClass(Thread* self, const char* descriptor, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Finds a class in the path class loader, loading it if necessary without using JNI. Hash // function is supposed to be ComputeModifiedUtf8Hash(descriptor). Returns true if the
@@ -130,16 +128,16 @@ class ClassLinker { Thread* self, const char* descriptor, size_t hash, Handle<mirror::ClassLoader> class_loader, mirror::Class** result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Finds a class by its descriptor using the "system" class loader, i.e. by searching the // boot_class_path_. mirror::Class* FindSystemClass(Thread* self, const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Finds the array class given for the element class. mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Returns true if the class linker is initialized. bool IsInitialized() const {
@@ -150,65 +148,62 @@ class ClassLinker { mirror::Class* DefineClass(Thread* self, const char* descriptor, size_t hash, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Finds a class by its descriptor, returning null if it wasn't loaded // by the given 'class_loader'. mirror::Class* LookupClass(Thread* self, const char* descriptor, size_t hash, mirror::ClassLoader* class_loader) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* FindPrimitiveClass(char type) SHARED_REQUIRES(Locks::mutator_lock_); // General class unloading is not supported; this is used to prune // unwanted classes during image writing.
bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void DumpAllClasses(int flags) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); + REQUIRES(!Locks::classlinker_classes_lock_); size_t NumLoadedClasses() - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx, Handle<mirror::DexCache> dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a type with the given ID from the DexFile, storing the // result in DexCache. The ClassLoader is used to search for the
@@ -217,7 +212,7 @@ class ClassLinker { mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache.
The ClassLinker and ClassLoader are used as @@ -228,31 +223,29 @@ class ClassLinker { Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, ArtMethod* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. What is unique is the is_static argument which is // used to determine if we are resolving a static or non-static // field. - ArtField* ResolveField(const DexFile& dex_file, - uint32_t field_idx, - Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, - bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* ResolveField(const DexFile& dex_file, uint32_t field_idx, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, bool is_static) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as @@ -261,32 +254,31 @@ class ClassLinker { ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Get shorty from method index without resolution. Used to do handlerization. const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. // can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no <clinit> execution is possible. bool EnsureInitialized(Thread* self, Handle<mirror::Class> c, bool can_init_fields, bool can_init_parents) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); // Initializes classes that have instances in the image but that have // <clinit> methods so they could not be initialized by the compiler. 
- void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void RunRootClinits() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); void RegisterDexFile(const DexFile& dex_file) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); const OatFile* RegisterOatFile(const OatFile* oat_file) - LOCKS_EXCLUDED(dex_lock_); + REQUIRES(!dex_lock_); const std::vector<const DexFile*>& GetBootClassPath() { return boot_class_path_;
@@ -294,34 +286,29 @@ class ClassLinker { // Returns the first non-image oat file in the class path. const OatFile* GetPrimaryOatFile() - LOCKS_EXCLUDED(dex_lock_); + REQUIRES(!dex_lock_); - void VisitClasses(ClassVisitor* visitor, void* arg) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitClasses(ClassVisitor* visitor) + REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Less efficient variant of VisitClasses that copies the class_table_ into secondary storage // so that it can visit individual classes without holding the // Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code // can race with insertion and deletion of classes while the visitor is being called. - void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitClassesWithoutClassesLock(ClassVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); mirror::DexCache* FindDexCache(const DexFile& dex_file) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) - LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void FixupDexCaches(ArtMethod* resolution_method) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Finds or creates the oat file holding dex_location. Then loads and returns // all corresponding dex files (there may be more than one dex file loaded
@@ -337,80 +324,83 @@ class ClassLinker { // This method should not be called with the mutator_lock_ held, because it // could end up starving GC if we need to generate or relocate any oat // files.
- std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat( + std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat( const char* dex_location, const char* oat_location, std::vector<std::string>* error_msgs) - LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_); + REQUIRES(!dex_lock_, !Locks::mutator_lock_); // Allocate an instance of a java.lang.Object. - mirror::Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* AllocObject(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // TODO: replace this with multiple methods that allocate the correct managed type. template <class T> mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); + + LengthPrefixedArray<ArtField>* AllocArtFieldArray(Thread* self, size_t length); - ArtMethod* AllocArtMethodArray(Thread* self, size_t length); + LengthPrefixedArray<ArtMethod>* AllocArtMethodArray(Thread* self, size_t length); mirror::PointerArray* AllocPointerArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - ArtField* AllocArtFieldArray(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self, - size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray( + Thread* self, size_t length) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); void VerifyClass(Thread* self, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass, mirror::Class::Status& oat_file_class_status) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, ArtMethod* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name, jobjectArray interfaces, jobject loader, jobjectArray methods, jobjectArray throws) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string GetDescriptorForProxy(mirror::Class* proxy_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* 
FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized. const void* GetQuickOatCodeFor(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the oat code for a method from a method index. const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get compiled code for a method, return null if no code // exists. This is unlike Get..OatCodeFor which will return a bridge // or interpreter entrypoint. const void* GetOatMethodQuickCodeFor(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + + const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found) + SHARED_REQUIRES(Locks::mutator_lock_); pid_t GetClassesLockOwner(); // For SignalCatcher. pid_t GetDexLockOwner(); // For SignalCatcher. - mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_REQUIRES(Locks::mutator_lock_); static const char* GetClassRootDescriptor(ClassRoot class_root);
@@ -429,20 +419,20 @@ class ClassLinker { // Set the entrypoints up for method to the given code. void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Set the entrypoints up for method to enter the interpreter. void SetEntryPointsToInterpreter(ArtMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Attempts to insert a class into a class table. Returns null if // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_REQUIRES(Locks::mutator_lock_) { mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read(); DCHECK(class_roots != nullptr); return class_roots;
@@ -450,23 +440,23 @@ class ClassLinker { // Move all of the image classes into the class table for faster lookups. void MoveImageClassesToClassTable() - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring // that no more classes are ever added to the pre zygote table, which ensures that the pages // always remain shared dirty instead of private dirty.
void MoveClassTableToPreZygote() - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the method can be called with its direct code pointer, false otherwise. bool MayBeCalledWithDirectCodePointer(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files. // Note: the objects are not completely set up. Do not use this outside of tests and the compiler. jobject CreatePathClassLoader(Thread* self, std::vector<const DexFile*>& dex_files) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); size_t GetImagePointerSize() const { DCHECK(ValidPointerSize(image_pointer_size_)) << image_pointer_size_;
@@ -475,51 +465,60 @@ class ClassLinker { // Used by image writer for checking. bool ClassInClassTable(mirror::Class* klass) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* CreateRuntimeMethod(); // Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache // entries are roots, but potentially not image classes. - void DropFindArrayClassCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_); private: - const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitClassesInternal(ClassVisitor* visitor) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns the number of zygote and image classes. + size_t NumZygoteClasses() const + REQUIRES(Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns the number of classes that are neither zygote nor image classes. + size_t NumNonZygoteClasses() const + REQUIRES(Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); OatFile& GetImageOatFile(gc::space::ImageSpace* space) - LOCKS_EXCLUDED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - void FinishInit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FinishInit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_, !Roles::uninterruptible_); // For early bootstrapping by Init mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // Alloc* convenience functions to avoid needing to pass in mirror::Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc.
mirror::Class* AllocClass(Thread* self, uint32_t class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); mirror::Class* CreateArrayClass(Thread* self, const char* descriptor, size_t hash, Handle<mirror::ClassLoader> class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_, !Roles::uninterruptible_); void AppendToBootClassPath(Thread* self, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); // Precomputes size needed for Class, in the case of a non-temporary class this size must be // sufficient to hold all static fields. @@ -530,131 +529,129 @@ class ClassLinker { // table. void SetupClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, Handle<mirror::Class> klass, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void LoadClass(Thread* self, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data, Handle<mirror::Class> klass, const OatFile::OatClass* oat_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtField* dst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtMethod* dst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupStaticTrampolines(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on // error and sets found to false. 
OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) - EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(dex_lock_, Locks::mutator_lock_); bool InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_run_clinit, bool can_init_parents) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self, ObjectLock<mirror::Class>& lock); bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor, Handle<mirror::ClassLoader> class_loader1, Handle<mirror::ClassLoader> class_loader2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, ArtMethod* method, mirror::Class* klass1, mirror::Class* klass2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, MutableHandle<mirror::Class>* h_new_class_out) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::classlinker_classes_lock_); bool LinkSuperClass(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); - bool LinkMethods(Thread* self, Handle<mirror::Class> klass, + bool LinkMethods(Thread* self, + Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, ArtMethod** out_imt) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass, Handle<mirror::ObjectArray<mirror::Class>> interfaces, ArtMethod** out_imt) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class, uint32_t class_def_method_index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); 
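The LinkVirtualMethods/LinkInterfaceMethods declarations above are backed by the LinkVirtualHashTable seen earlier in this change: a fixed-size, linearly probed table that stores indices into the class's virtual method array, so a match can be handed back to the vtable-linking loop and its slot recycled. The following is a self-contained sketch of that idea only; the class name and the std::string stand-in for ART's name-and-signature comparison are hypothetical, not the runtime's actual types.

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// Toy model of LinkVirtualHashTable: stores method *indices* rather than
// pointers, probes linearly on collision, and tombstones removed entries so
// later probe chains stay intact.
class IndexHashTable {
 public:
  static constexpr uint32_t kInvalid = 0xFFFFFFFFu;  // Empty slot.
  static constexpr uint32_t kRemoved = 0xFFFFFFFEu;  // Tombstone.

  // Capacity must exceed the number of Add() calls so probing terminates.
  IndexHashTable(const std::vector<std::string>& methods, size_t capacity)
      : methods_(methods), slots_(capacity, kInvalid) {}

  void Add(uint32_t method_index) {
    size_t i = std::hash<std::string>()(methods_[method_index]) % slots_.size();
    while (slots_[i] != kInvalid && slots_[i] != kRemoved) {
      i = (i + 1) % slots_.size();  // Linear probe past live entries.
    }
    slots_[i] = method_index;
  }

  // Returns the stored index of a matching method and frees its slot,
  // or kInvalid when no live entry matches.
  uint32_t FindAndRemove(const std::string& name_and_signature) {
    size_t i = std::hash<std::string>()(name_and_signature) % slots_.size();
    while (slots_[i] != kInvalid) {
      if (slots_[i] != kRemoved && methods_[slots_[i]] == name_and_signature) {
        const uint32_t found = slots_[i];
        slots_[i] = kRemoved;  // Keep the probe chain walkable.
        return found;
      }
      i = (i + 1) % slots_.size();
    }
    return kInvalid;
  }

 private:
  const std::vector<std::string>& methods_;
  std::vector<uint32_t> slots_;
};

In this model, the subclass's virtual methods are Add()ed once; each superclass vtable entry then calls FindAndRemove(), where a hit means an override of that slot and a miss leaves the inherited method in place.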
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckProxyConstructor(ArtMethod* constructor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots ReaderWriterMutex* DexLock() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) LOCK_RETURNED(dex_lock_) { return &dex_lock_; } - size_t GetDexCacheCount() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_) { + size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) { return dex_caches_.size(); } - mirror::DexCache* GetDexCache(size_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_); + mirror::DexCache* GetDexCache(size_t idx) SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_); const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) - LOCKS_EXCLUDED(dex_lock_); + REQUIRES(!dex_lock_); // Returns the boot image oat file. - const OatFile* GetBootOatFile() SHARED_LOCKS_REQUIRED(dex_lock_); + const OatFile* GetBootOatFile() SHARED_REQUIRES(dex_lock_); void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverified bit on the // class access flags to determine whether this has been done before. void EnsurePreverifiedMethods(Handle<mirror::Class> c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - mirror::Class* LookupClassFromTableLocked(const char* descriptor, - mirror::ClassLoader* class_loader, - size_t hash) - SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_); - - mirror::Class* UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* LookupClassFromImage(const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns null if not found. + ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_); + // Insert a new class table if not found. + ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::classlinker_classes_lock_); // EnsureResolved is called to make sure that a class in the class_table_ has been resolved // before returning it to the caller. It's the responsibility of the thread that placed the class
@@ -663,13 +660,13 @@ class ClassLinker { // retire a class, the version of the class in the table is returned and this may differ from // the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass) - WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + WARN_UNUSED SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetClassRoot(ClassRoot class_root, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return the quick generic JNI stub for testing. const void* GetRuntimeQuickGenericJniStub() const;
@@ -678,20 +675,22 @@ class ClassLinker { // class. // Note: Currently we only store the descriptor, so we cannot throw the exact throwable, only // a recreation with a custom string. - void ThrowEarlierClassFailure(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ThrowEarlierClassFailure(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!dex_lock_); // Check for duplicate class definitions of the given oat file against all open oat files. - bool HasCollisions(const OatFile* oat_file, std::string* error_msg) LOCKS_EXCLUDED(dex_lock_); + bool HasCollisions(const OatFile* oat_file, std::string* error_msg) REQUIRES(!dex_lock_); - bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool HasInitWithString(Thread* self, const char* descriptor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void UpdateClassVirtualMethods(mirror::Class* klass, ArtMethod* new_methods, - size_t new_num_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_); + void UpdateClassVirtualMethods(mirror::Class* klass, + LengthPrefixedArray<ArtMethod>* new_methods) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::classlinker_classes_lock_); std::vector<const DexFile*> boot_class_path_; std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
@@ -701,43 +700,16 @@ class ClassLinker { std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_); std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_); - class ClassDescriptorHashEquals { - public: - // Same class loader and descriptor. - std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS; - bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const - NO_THREAD_SAFETY_ANALYSIS; - // Same class loader and descriptor. - std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const - NO_THREAD_SAFETY_ANALYSIS; - bool operator()(const GcRoot<mirror::Class>& a, - const std::pair<const char*, mirror::ClassLoader*>& b) const - NO_THREAD_SAFETY_ANALYSIS; - // Same descriptor. - bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const - NO_THREAD_SAFETY_ANALYSIS; - std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS; - }; - class GcRootEmptyFn { - public: - void MakeEmpty(GcRoot<mirror::Class>& item) const { - item = GcRoot<mirror::Class>(); - } - bool IsEmpty(const GcRoot<mirror::Class>& item) const { - return item.IsNull(); - } - }; + // This contains the class loaders which have class tables.
It is populated by + InsertClassTableForClassLoader. + std::vector<GcRoot<mirror::ClassLoader>> class_loaders_ + GUARDED_BY(Locks::classlinker_classes_lock_); + + // Boot class path table, needed because the boot class path's class loader is null. + ClassTable boot_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_); - // hash set which hashes class descriptor, and compares descriptors nad class loaders. Results - // should be compared for a matching Class descriptor and class loader. - typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals, - ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> - Table; - // This contains strong roots. To enable concurrent root scanning of - // the class table, be careful to use a read barrier when accessing this. - Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_); - Table pre_zygote_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_); - std::vector<GcRoot<mirror::Class>> new_class_roots_; + // New class roots, only used by CMS since the GC needs to mark these in the pause. + std::vector<GcRoot<mirror::Class>> new_class_roots_ GUARDED_BY(Locks::classlinker_classes_lock_); // Do we need to search dex caches to find image classes? bool dex_cache_image_class_lookup_required_;
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index a4e0227a6b..3c84d8fc0a 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc
@@ -46,7 +46,7 @@ namespace art { class ClassLinkerTest : public CommonRuntimeTest { protected: void AssertNonExistentClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr); EXPECT_TRUE(self->IsExceptionPending());
@@ -58,13 +58,13 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertPrimitiveClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(self, descriptor.c_str())); } void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(primitive != nullptr); ASSERT_TRUE(primitive->GetClass() != nullptr); ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
@@ -102,7 +102,7 @@ class ClassLinkerTest : public CommonRuntimeTest { void AssertArrayClass(const std::string& array_descriptor, const std::string& component_type, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); StackHandleScope<2> hs(self); Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
@@ -116,7 +116,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(array.Get() != nullptr); ASSERT_TRUE(array->GetClass() != nullptr); ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
@@ -159,7 +159,7 @@ class ClassLinkerTest : public CommonRuntimeTest { EXPECT_EQ(class_linker_->FindArrayClass(self, &array_ptr), array.Get()); } - void AssertMethod(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AssertMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) { EXPECT_TRUE(method != nullptr); EXPECT_TRUE(method->GetDeclaringClass() != nullptr); EXPECT_TRUE(method->GetName() != nullptr); @@ -174,7 +174,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertField(mirror::Class* klass, ArtField* field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { EXPECT_TRUE(field != nullptr); EXPECT_EQ(klass, field->GetDeclaringClass()); EXPECT_TRUE(field->GetName() != nullptr); @@ -182,7 +182,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string temp; EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp)); if (descriptor == "Ljava/lang/Object;") { @@ -319,7 +319,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != nullptr); Thread* self = Thread::Current(); StackHandleScope<1> hs(self); @@ -339,7 +339,7 @@ class ClassLinkerTest : public CommonRuntimeTest { } void AssertDexFile(const DexFile& dex, mirror::ClassLoader* class_loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Verify all the classes defined in this file for (size_t i = 0; i < dex.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex.GetClassDef(i); @@ -385,7 +385,7 @@ struct CheckOffsets { std::string class_descriptor; std::vector<CheckOffset> offsets; - bool Check() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Check() SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); mirror::Class* klass = Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str()); @@ -500,14 +500,10 @@ struct ClassOffsets : public CheckOffsets<mirror::Class> { addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable"); addOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name"); - addOffset(OFFSETOF_MEMBER(mirror::Class, num_direct_methods_), "numDirectMethods"); - addOffset(OFFSETOF_MEMBER(mirror::Class, num_instance_fields_), "numInstanceFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_), "numReferenceInstanceFields"); addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_), "numReferenceStaticFields"); - addOffset(OFFSETOF_MEMBER(mirror::Class, num_static_fields_), "numStaticFields"); - addOffset(OFFSETOF_MEMBER(mirror::Class, num_virtual_methods_), "numVirtualMethods"); addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize"); addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType"); addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_), @@ -550,6 +546,7 @@ struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement> struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> { ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") { + addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, class_table_), "classTable"); addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages"); 
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent"); addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache"); @@ -841,21 +838,21 @@ TEST_F(ClassLinkerTest, ValidateBoxedTypes) { NullHandle<mirror::ClassLoader> class_loader; mirror::Class* c; c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Byte;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Character;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Double;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Float;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Integer;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Long;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Short;", class_loader); - EXPECT_STREQ("value", c->GetIFields()[0].GetName()); + EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName()); } TEST_F(ClassLinkerTest, TwoClassLoadersOneClass) { @@ -1107,7 +1104,7 @@ TEST_F(ClassLinkerTest, ValidatePredefinedClassSizes) { } static void CheckMethod(ArtMethod* method, bool verified) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!method->IsNative() && !method->IsAbstract()) { EXPECT_EQ((method->GetAccessFlags() & kAccPreverified) != 0U, verified) << PrettyMethod(method, true); @@ -1115,7 +1112,7 @@ static void CheckMethod(ArtMethod* method, bool verified) } static void CheckPreverified(mirror::Class* c, bool preverified) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified) << "Class " << PrettyClass(c) << " not as expected"; for (auto& m : c->GetDirectMethods(sizeof(void*))) { diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h new file mode 100644 index 0000000000..dc60a2c239 --- /dev/null +++ b/runtime/class_table-inl.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_CLASS_TABLE_INL_H_ +#define ART_RUNTIME_CLASS_TABLE_INL_H_ + +#include "class_table.h" + +namespace art { + +template<class Visitor> +void ClassTable::VisitRoots(Visitor& visitor) { + for (ClassSet& class_set : classes_) { + for (GcRoot<mirror::Class>& root : class_set) { + visitor.VisitRoot(root.AddressWithoutBarrier()); + } + } +} + +template<class Visitor> +void ClassTable::VisitRoots(const Visitor& visitor) { + for (ClassSet& class_set : classes_) { + for (GcRoot<mirror::Class>& root : class_set) { + visitor.VisitRoot(root.AddressWithoutBarrier()); + } + } +} + +} // namespace art + +#endif // ART_RUNTIME_CLASS_TABLE_INL_H_ diff --git a/runtime/class_table.cc b/runtime/class_table.cc new file mode 100644 index 0000000000..fc8e6c49da --- /dev/null +++ b/runtime/class_table.cc @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "class_table.h" + +#include "mirror/class-inl.h" + +namespace art { + +ClassTable::ClassTable() { + classes_.push_back(ClassSet()); +} + +void ClassTable::FreezeSnapshot() { + classes_.push_back(ClassSet()); +} + +bool ClassTable::Contains(mirror::Class* klass) { + for (ClassSet& class_set : classes_) { + auto it = class_set.Find(GcRoot<mirror::Class>(klass)); + if (it != class_set.end()) { + return it->Read() == klass; + } + } + return false; +} + +mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) { + // Should only be updating latest table. + auto existing_it = classes_.back().FindWithHash(descriptor, hash); + if (kIsDebugBuild && existing_it == classes_.back().end()) { + for (const ClassSet& class_set : classes_) { + if (class_set.FindWithHash(descriptor, hash) != class_set.end()) { + LOG(FATAL) << "Updating class found in frozen table " << descriptor; + } + } + LOG(FATAL) << "Updating class not found " << descriptor; + } + mirror::Class* const existing = existing_it->Read(); + CHECK_NE(existing, klass) << descriptor; + CHECK(!existing->IsResolved()) << descriptor; + CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor; + CHECK(!klass->IsTemp()) << descriptor; + VerifyObject(klass); + // Update the element in the hash set with the new class. This is safe to do since the descriptor + // doesn't change. 
+ *existing_it = GcRoot<mirror::Class>(klass); + return existing; +} + +bool ClassTable::Visit(ClassVisitor* visitor) { + for (ClassSet& class_set : classes_) { + for (GcRoot<mirror::Class>& root : class_set) { + if (!visitor->Visit(root.Read())) { + return false; + } + } + } + return true; +} + +size_t ClassTable::NumZygoteClasses() const { + size_t sum = 0; + for (size_t i = 0; i < classes_.size() - 1; ++i) { + sum += classes_[i].Size(); + } + return sum; +} + +size_t ClassTable::NumNonZygoteClasses() const { + return classes_.back().Size(); +} + +mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) { + for (ClassSet& class_set : classes_) { + auto it = class_set.FindWithHash(descriptor, hash); + if (it != class_set.end()) { + return it->Read(); + } + } + return nullptr; +} + +void ClassTable::Insert(mirror::Class* klass) { + classes_.back().Insert(GcRoot<mirror::Class>(klass)); +} + +void ClassTable::InsertWithHash(mirror::Class* klass, size_t hash) { + classes_.back().InsertWithHash(GcRoot<mirror::Class>(klass), hash); +} + +bool ClassTable::Remove(const char* descriptor) { + for (ClassSet& class_set : classes_) { + auto it = class_set.Find(descriptor); + if (it != class_set.end()) { + class_set.Erase(it); + return true; + } + } + return false; +} + +std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root) + const { + std::string temp; + return ComputeModifiedUtf8Hash(root.Read()->GetDescriptor(&temp)); +} + +bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, + const GcRoot<mirror::Class>& b) const { + DCHECK_EQ(a.Read()->GetClassLoader(), b.Read()->GetClassLoader()); + std::string temp; + return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp)); +} + +bool ClassTable::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a, + const char* descriptor) const { + return a.Read()->DescriptorEquals(descriptor); +} + +std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const char* descriptor) const { + return ComputeModifiedUtf8Hash(descriptor); +} + +} // namespace art diff --git a/runtime/class_table.h b/runtime/class_table.h new file mode 100644 index 0000000000..6b18d9009d --- /dev/null +++ b/runtime/class_table.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_CLASS_TABLE_H_ +#define ART_RUNTIME_CLASS_TABLE_H_ + +#include <string> +#include <utility> +#include <vector> + +#include "base/allocator.h" +#include "base/hash_set.h" +#include "base/macros.h" +#include "base/mutex.h" +#include "dex_file.h" +#include "gc_root.h" +#include "object_callbacks.h" +#include "runtime.h" + +namespace art { + +namespace mirror { + class ClassLoader; +} // namespace mirror + +class ClassVisitor { + public: + virtual ~ClassVisitor() {} + // Return true to continue visiting. 
+ virtual bool Visit(mirror::Class* klass) = 0; +}; + +// Each loader has a ClassTable. +class ClassTable { + public: + ClassTable(); + + // Used by image writer for checking. + bool Contains(mirror::Class* klass) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Freeze the current class tables by allocating a new table and never updating or modifying the + // existing table. This helps prevent dirty pages caused by inserting after the zygote fork. + void FreezeSnapshot() + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns the number of classes in previous snapshots. + size_t NumZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_); + + // Returns the number of classes in the latest snapshot. + size_t NumNonZygoteClasses() const REQUIRES(Locks::classlinker_classes_lock_); + + // Update a class in the table with the new class. Returns the existing class which was replaced. + mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // NO_THREAD_SAFETY_ANALYSIS for object marking requiring heap bitmap lock. + template<class Visitor> + void VisitRoots(Visitor& visitor) + SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS; + template<class Visitor> + void VisitRoots(const Visitor& visitor) + SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS; + + // Return false if the callback told us to exit. + bool Visit(ClassVisitor* visitor) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + mirror::Class* Lookup(const char* descriptor, size_t hash) + SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_); + + void Insert(mirror::Class* klass) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + void InsertWithHash(mirror::Class* klass, size_t hash) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Returns true if the class was found and removed, false otherwise. + bool Remove(const char* descriptor) + REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + private: + class ClassDescriptorHashEquals { + public: + // Same class loader and descriptor. + std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS; + bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const + NO_THREAD_SAFETY_ANALYSIS; + // Same descriptor. + bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const + NO_THREAD_SAFETY_ANALYSIS; + std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS; + }; + class GcRootEmptyFn { + public: + void MakeEmpty(GcRoot<mirror::Class>& item) const { + item = GcRoot<mirror::Class>(); + } + bool IsEmpty(const GcRoot<mirror::Class>& item) const { + return item.IsNull(); + } + }; + // Hash set which hashes class descriptors, and compares descriptors and class loaders. Results + // should be compared for a matching Class descriptor and class loader. + typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals, + ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> + ClassSet; + + // TODO: shard lock to have one per class loader.
+ // We have a vector of tables so that calling FreezeSnapshot after the zygote forks helps prevent dirty pages. + std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_); +}; + +} // namespace art + +#endif // ART_RUNTIME_CLASS_TABLE_H_ diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h index 2332f97895..a474ae6361 100644 --- a/runtime/common_runtime_test.h +++ b/runtime/common_runtime_test.h @@ -122,12 +122,12 @@ class CommonRuntimeTest : public testing::Test { std::string GetTestDexFileName(const char* name); std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::unique_ptr<const DexFile> OpenTestDexFile(const char* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - jobject LoadDex(const char* dex_name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject LoadDex(const char* dex_name) SHARED_REQUIRES(Locks::mutator_lock_); std::string android_data_; std::string dalvik_cache_; diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 3acd366cd2..de692d1368 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -34,7 +34,7 @@ namespace art { static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (referrer != nullptr) { std::string location(referrer->GetLocation()); if (!location.empty()) { @@ -46,7 +46,7 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer) static void ThrowException(const char* exception_descriptor, mirror::Class* referrer, const char* fmt, va_list* args = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream msg; if (args != nullptr) { std::string vmsg; @@ -62,7 +62,7 @@ static void ThrowException(const char* exception_descriptor, static void ThrowWrappedException(const char* exception_descriptor, mirror::Class* referrer, const char* fmt, va_list* args = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream msg; if (args != nullptr) { std::string vmsg; @@ -336,7 +336,7 @@ void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) { static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx, const DexFile& dex_file, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream msg; msg << "Attempt to invoke " << type << " method '" << PrettyMethod(method_idx, dex_file, true) << "' on a null object reference"; diff --git a/runtime/common_throws.h b/runtime/common_throws.h index b391c5b92e..2402e6f7a0 100644 --- a/runtime/common_throws.h +++ b/runtime/common_throws.h @@ -33,169 +33,169 @@ class StringPiece; // AbstractMethodError void ThrowAbstractMethodError(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ArithmeticException -void ThrowArithmeticExceptionDivideByZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; +void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ArrayIndexOutOfBoundsException void ThrowArrayIndexOutOfBoundsException(int index, int length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_)
COLD_ATTR; // ArrayStoreException void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ClassCircularityError void ThrowClassCircularityError(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ClassCastException void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowClassCastException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // ClassFormatError void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IllegalAccessError void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed, ArtMethod* called, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IllegalAccessException void ThrowIllegalAccessException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IllegalArgumentException void ThrowIllegalArgumentException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IncompatibleClassChangeError void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type, ArtMethod* method, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method, mirror::Object* this_object, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static, ArtMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) 
__attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // IOException void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // LinkageError void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NegativeArraySizeException void ThrowNegativeArraySizeException(int size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNegativeArraySizeException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NoSuchFieldError void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, const StringPiece& type, const StringPiece& name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // NoSuchMethodError void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name, const Signature& signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNoSuchMethodError(uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NullPointerException void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerExceptionFromDexPC() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNullPointerException(const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // RuntimeException void ThrowRuntimeException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // VerifyError void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) 
__attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR; + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; } // namespace art diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h index b296e39c5e..af7b04f62e 100644 --- a/runtime/compiler_callbacks.h +++ b/runtime/compiler_callbacks.h @@ -38,7 +38,7 @@ class CompilerCallbacks { virtual ~CompilerCallbacks() { } virtual bool MethodVerified(verifier::MethodVerifier* verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual void ClassRejected(ClassReference ref) = 0; // Return true if we should attempt to relocate to a random base address if we have not already diff --git a/runtime/debugger.cc b/runtime/debugger.cc index ddbbeacbf8..0cbbb79767 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -72,7 +72,7 @@ class Breakpoint { public: Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) { CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing || deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization || @@ -81,14 +81,14 @@ class Breakpoint { method_ = soa.EncodeMethod(method); } - Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_) : method_(nullptr), dex_pc_(other.dex_pc_), deoptimization_kind_(other.deoptimization_kind_) { ScopedObjectAccessUnchecked soa(Thread::Current()); method_ = soa.EncodeMethod(other.Method()); } - ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) { ScopedObjectAccessUnchecked soa(Thread::Current()); return soa.DecodeMethod(method_); } @@ -111,7 +111,7 @@ class Breakpoint { }; static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc()); return os; } @@ -123,7 +123,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (method->IsNative()) { // TODO: post location events is a suspension point and native method entry stubs aren't. return; @@ -149,7 +149,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (method->IsNative()) { // TODO: post location events is a suspension point and native method entry stubs aren't. 
return; @@ -166,7 +166,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method, uint32_t dex_pc) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { // We're not recorded to listen to this kind of event, so complain. LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method) << " " << dex_pc; @@ -174,7 +174,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) { // We also listen to kMethodExited instrumentation event and the current instruction is a // RETURN so we know the MethodExited method is going to be called right after us. Like in @@ -195,47 +195,47 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field); } void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value); } void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::PostException(exception_object); } // We only care about how many backward branches were executed in the Jit. 
void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method) << " " << dex_pc_offset; } private: static bool IsReturn(ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = method->GetCodeItem(); const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]); return instruction->IsReturn(); } - static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) { return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved); } - static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) { return IsListeningTo(instrumentation::Instrumentation::kMethodExited); } static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return (Dbg::GetInstrumentationEvents() & event) != 0; } @@ -298,8 +298,8 @@ bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const { } static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(!Locks::breakpoint_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) { @@ -311,7 +311,7 @@ static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc) } static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) { + REQUIRES(!Locks::thread_suspend_count_lock_) { MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); // A thread may be suspended for GC; in this code, we really want to know whether // there's a debugger suspension active. 
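The bulk of the churn in common_throws.h and debugger.cc above is a mechanical rename to capability-style thread-safety annotations: SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, and LOCKS_EXCLUDED(x) becomes the negative-capability form REQUIRES(!x). As a rough sketch of what such macros typically expand to under Clang's -Wthread-safety analysis (illustrative only, not ART's actual base/macros.h; the three declarations at the end are hypothetical examples):

#if defined(__clang__)
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define REQUIRES(...)
#define SHARED_REQUIRES(...)
#endif

// Exclusive hold, shared hold, and "must not hold" -- the REQUIRES(!x) form
// that replaces LOCKS_EXCLUDED(x). Enforcing the negated form additionally
// requires building with -Wthread-safety-negative.
void Suspend() REQUIRES(Locks::thread_suspend_count_lock_);
void Walk() SHARED_REQUIRES(Locks::mutator_lock_);
void Disconnect() REQUIRES(!Locks::deoptimization_lock_);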
@@ -319,7 +319,7 @@ static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thr } static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error); if (o == nullptr) { *error = JDWP::ERR_INVALID_OBJECT; @@ -334,7 +334,7 @@ static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* er } static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error); if (o == nullptr) { *error = JDWP::ERR_INVALID_OBJECT; @@ -350,8 +350,8 @@ static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error) static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) { mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error); if (thread_peer == nullptr) { // This isn't even an object. @@ -381,14 +381,14 @@ static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { } static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string temp; const char* descriptor = klass->GetDescriptor(&temp); return BasicTagFromDescriptor(descriptor); } static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(c != nullptr); if (c->IsArrayClass()) { return JDWP::JT_ARRAY; @@ -764,7 +764,7 @@ JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id, OwnedMonitorVisitor(Thread* thread, Context* context, std::vector<JDWP::ObjectId>* monitor_vector, std::vector<uint32_t>* stack_depth_vector) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), current_stack_depth(0), monitors(monitor_vector), @@ -781,7 +781,7 @@ JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id, } static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg); visitor->monitors->push_back(gRegistry->Add(owned_monitor)); visitor->stack_depths->push_back(visitor->current_stack_depth); @@ -948,33 +948,27 @@ JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* return JDWP::ERR_NONE; } -void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) { - // Get the complete list of reference classes (i.e. all classes except - // the primitive types). - // Returns a newly-allocated buffer full of RefTypeId values. - struct ClassListCreator { - explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) { - } +// Get the complete list of reference classes (i.e. all classes except +// the primitive types). 
+// Returns a newly-allocated buffer full of RefTypeId values. +class ClassListCreator : public ClassVisitor { + public: + explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {} - static bool Visit(mirror::Class* c, void* arg) { - return reinterpret_cast<ClassListCreator*>(arg)->Visit(c); - } - - // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses - // annotalysis. - bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS { - if (!c->IsPrimitive()) { - classes->push_back(gRegistry->AddRefType(c)); - } - return true; + bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (!c->IsPrimitive()) { + classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c)); } + return true; + } - std::vector<JDWP::RefTypeId>* const classes; - }; + private: + std::vector<JDWP::RefTypeId>* const classes_; +}; +void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) { ClassListCreator clc(classes); - Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit, - &clc); + Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc); } JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, @@ -1270,17 +1264,17 @@ JDWP::FieldId Dbg::ToFieldId(const ArtField* f) { } static JDWP::MethodId ToMethodId(const ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m)); } static ArtField* FromFieldId(JDWP::FieldId fid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid)); } static ArtMethod* FromMethodId(JDWP::MethodId mid) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid)); } @@ -1326,10 +1320,7 @@ bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* eve return modifier_instance == event_instance; } -void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) { +void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) { if (m == nullptr) { memset(location, 0, sizeof(*location)); } else { @@ -1376,7 +1367,7 @@ static uint32_t MangleAccessFlags(uint32_t accessFlags) { * the end. */ static uint16_t MangleSlot(uint16_t slot, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // We should not get here for a method without code (native, proxy or abstract). Log it and @@ -1398,7 +1389,7 @@ static uint16_t MangleSlot(uint16_t slot, ArtMethod* m) * slots to dex style argument placement. */ static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // We should not get here for a method without code (native, proxy or abstract). 
Log it and @@ -1424,7 +1415,8 @@ static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error return DexFile::kDexNoIndex16; } -JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) { +JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, + JDWP::ExpandBuf* pReply) { JDWP::JdwpError error; mirror::Class* c = DecodeClass(class_id, &error); if (c == nullptr) { @@ -1437,7 +1429,8 @@ JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_ge expandBufAdd4BE(pReply, instance_field_count + static_field_count); for (size_t i = 0; i < instance_field_count + static_field_count; ++i) { - ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count); + ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : + c->GetStaticField(i - instance_field_count); expandBufAddFieldId(pReply, ToFieldId(f)); expandBufAddUtf8String(pReply, f->GetName()); expandBufAddUtf8String(pReply, f->GetTypeDescriptor()); @@ -1553,7 +1546,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", @@ -1641,7 +1634,7 @@ JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) { } static JValue GetArtFieldValue(ArtField* f, mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Primitive::Type fieldType = f->GetTypeAsPrimitiveType(); JValue field_value; switch (fieldType) { @@ -1688,7 +1681,7 @@ static JValue GetArtFieldValue(ArtField* f, mirror::Object* o) static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JDWP::JdwpError error; mirror::Class* c = DecodeClass(ref_type_id, &error); if (ref_type_id != 0 && c == nullptr) { @@ -1744,7 +1737,7 @@ JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::Fiel } static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Primitive::Type fieldType = f->GetTypeAsPrimitiveType(); // Debugging only happens at runtime so we know we are not running in a transaction. 
static constexpr bool kNoTransactionMode = false; @@ -1815,7 +1808,7 @@ static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, int width, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JDWP::JdwpError error; mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error); if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { @@ -1945,7 +1938,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_group_id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id, error); if (*error != JDWP::ERR_NONE) { @@ -2004,7 +1997,7 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP:: static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group, std::vector<JDWP::ObjectId>* child_thread_group_ids) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(thread_group != nullptr); // Get the ArrayList<ThreadGroup> "groups" out of this thread group... @@ -2100,6 +2093,7 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) { case kWaitingInMainDebuggerLoop: case kWaitingInMainSignalCatcherLoop: case kWaitingPerformingGc: + case kWaitingWeakGcRootRead: case kWaiting: return JDWP::TS_WAIT; // Don't add a 'default' here so the compiler can spot incompatible enum changes. @@ -2157,7 +2151,7 @@ JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group, mirror::Object* peer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Do we want threads from all thread groups? if (desired_thread_group == nullptr) { return true; @@ -2201,7 +2195,7 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* } } -static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) { struct CountStackDepthVisitor : public StackVisitor { explicit CountStackDepthVisitor(Thread* thread_in) : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), @@ -2244,7 +2238,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram public: GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in, JDWP::ExpandBuf* buf_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), depth_(0), start_frame_(start_frame_in), @@ -2253,7 +2247,7 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram expandBufAdd4BE(buf_, frame_count_); } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (GetMethod()->IsRuntimeMethod()) { return true; // The debugger can't do anything useful with a frame that has no Method*. 
} @@ -2365,7 +2359,7 @@ void Dbg::SuspendSelf() { struct GetThisVisitor : public StackVisitor { GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_object(nullptr), frame_id(frame_id_in) {} @@ -2407,7 +2401,7 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame class FindFrameVisitor FINAL : public StackVisitor { public: FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {} @@ -2481,14 +2475,14 @@ JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pRe constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION; static std::string GetStackContextAsString(const StackVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false), PrettyMethod(visitor.GetMethod()).c_str()); } static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg, JDWP::JdwpTag tag) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg << GetStackContextAsString(visitor); return kStackFrameLocalAccessError; @@ -2650,7 +2644,7 @@ JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) { template<typename T> static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg, JDWP::JdwpTag tag, T value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Failed to write " << tag << " local " << value << " (0x" << std::hex << value << ") into register v" << vreg << GetStackContextAsString(visitor); @@ -2735,7 +2729,7 @@ JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTa } static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(location != nullptr); if (m == nullptr) { memset(location, 0, sizeof(*location)); @@ -2813,7 +2807,7 @@ void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc, class CatchLocationFinder : public StackVisitor { public: CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), exception_(exception), @@ -2825,7 +2819,7 @@ class CatchLocationFinder : public StackVisitor { throw_dex_pc_(DexFile::kDexNoIndex) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = GetMethod(); DCHECK(method != nullptr); if (method->IsRuntimeMethod()) { @@ -2859,15 +2853,15 @@ class CatchLocationFinder : public StackVisitor { return true; // Continue stack walk. 
} - ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) { return catch_method_; } - ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) { return throw_method_; } - mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) { return this_at_throw_.Get(); } @@ -3169,7 +3163,7 @@ void Dbg::ManageDeoptimization() { } static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); if (code_item == nullptr) { // TODO We should not be asked to watch location in a native or abstract method so the code item @@ -3190,7 +3184,7 @@ static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m) } static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) { for (Breakpoint& breakpoint : gBreakpoints) { if (breakpoint.Method() == m) { return &breakpoint; @@ -3207,7 +3201,7 @@ bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) { // Sanity checks all existing breakpoints on the same method. static void SanityCheckExistingBreakpoints(ArtMethod* m, DeoptimizationRequest::Kind deoptimization_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) { for (const Breakpoint& breakpoint : gBreakpoints) { if (breakpoint.Method() == m) { CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind()); @@ -3236,7 +3230,7 @@ static void SanityCheckExistingBreakpoints(ArtMethod* m, static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self, ArtMethod* m, const Breakpoint** existing_brkpt) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!Dbg::RequiresDeoptimization()) { // We already run in interpreter-only mode so we don't need to deoptimize anything. VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method " @@ -3497,8 +3491,8 @@ bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) { class ScopedThreadSuspension { public: ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + REQUIRES(!Locks::thread_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : thread_(nullptr), error_(JDWP::ERR_NONE), self_suspend_(false), @@ -3559,7 +3553,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently // is for step-out. 
struct SingleStepStackVisitor : public StackVisitor { - explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), stack_depth(0), method(nullptr), @@ -3594,10 +3588,10 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize // Find the dex_pc values that correspond to the current line, for line-based single-stepping. struct DebugCallbackContext { - explicit DebugCallbackContext(SingleStepControl* single_step_control_cb, - int32_t line_number_cb, const DexFile::CodeItem* code_item) - : single_step_control_(single_step_control_cb), line_number_(line_number_cb), - code_item_(code_item), last_pc_valid(false), last_pc(0) { + DebugCallbackContext(SingleStepControl* single_step_control_cb, + int32_t line_number_cb, const DexFile::CodeItem* code_item) + : single_step_control_(single_step_control_cb), line_number_(line_number_cb), + code_item_(code_item), last_pc_valid(false), last_pc(0) { } static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) { @@ -4423,7 +4417,7 @@ class HeapChunkContext { needHeader_ = false; } - void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Flush() SHARED_REQUIRES(Locks::mutator_lock_) { if (pieceLenField_ == nullptr) { // Flush immediately post Reset (maybe back-to-back Flush). Ignore. CHECK(needHeader_); @@ -4439,13 +4433,13 @@ class HeapChunkContext { } static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes); } static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes); } @@ -4465,7 +4459,7 @@ class HeapChunkContext { } // Returns true if the object is not an empty chunk. - bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) { // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken // in the following code not to allocate memory, by ensuring buf_ is of the correct size if (used_bytes == 0) { @@ -4502,7 +4496,7 @@ class HeapChunkContext { } void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (ProcessRecord(start, used_bytes)) { uint8_t state = ExamineNativeObject(start); AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/); @@ -4511,7 +4505,7 @@ class HeapChunkContext { } void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { if (ProcessRecord(start, used_bytes)) { // Determine the type of this chunk. // OLD-TODO: if context.merge, see if this chunk is different from the last chunk. 
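The next hunk reaches HeapChunkContext::AppendChunk, whose buffer check relies on a worst-case bound: two bytes for every (possibly fractional) run of 256 allocation units in the chunk, plus up to 17 bytes of header. A back-of-the-envelope sketch of that arithmetic, assuming an 8-byte allocation unit (the helper below is illustrative, not ART code):

#include <cstddef>

// Illustrative worst-case size for one HPSG chunk record: each run of up to
// 256 allocation units costs 2 bytes (state byte + run-length byte), and a
// header costs at most 17 bytes.
constexpr std::size_t kAllocUnit = 8;  // assumed allocation-unit size in bytes

std::size_t WorstCaseRecordBytes(std::size_t chunk_bytes) {
  const std::size_t units = (chunk_bytes + kAllocUnit - 1) / kAllocUnit;  // ceil
  const std::size_t runs = (units + 255) / 256;                           // ceil
  return 17 + 2 * runs;
}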
@@ -4523,7 +4517,7 @@ class HeapChunkContext { } void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Make sure there's enough room left in the buffer. // We need to use two bytes for every fractional 256 allocation units used by the chunk plus // 17 bytes for any header. @@ -4556,12 +4550,12 @@ class HeapChunkContext { *p_++ = length - 1; } - uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) { return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); } uint8_t ExamineJavaObject(mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { if (o == nullptr) { return HPSG_STATE(SOLIDITY_FREE, 0); } @@ -4611,7 +4605,7 @@ class HeapChunkContext { }; static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment); HeapChunkContext::HeapChunkJavaCallback( obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg); @@ -4637,7 +4631,7 @@ void Dbg::DdmSendHeapSegments(bool native) { // Send a series of heap segment chunks. HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native); if (native) { -#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC) +#if defined(__ANDROID__) && defined(USE_DLMALLOC) dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context); HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context); // Indicate end of a space. #else @@ -4776,7 +4770,7 @@ class StringTable { }; static const char* GetMethodSourceFile(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(method != nullptr); const char* source_file = method->GetDeclaringClassSourceFile(); return (source_file != nullptr) ? source_file : ""; @@ -4841,6 +4835,9 @@ jbyteArray Dbg::GetRecentAllocations() { CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()); records = &dummy; } + // We don't need to wait on the condition variable records->new_record_condition_, because this + // function only reads the class objects, which are already marked so it doesn't change their + // reachability. // // Part 1: generate string tables. 
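"Part 1" interns every class name, method name, and source-file name into string tables so that each allocation record can later be serialized as small indices instead of repeated strings; the hunks below call class_names.Add(...) and class_names.IndexOf(...) for exactly that. A minimal, hypothetical sketch of the intern-then-index pattern (not ART's actual StringTable):

#include <cstddef>
#include <map>
#include <string>

class InternTable {
 public:
  // Registers a string; a no-op if it was added before.
  void Add(const std::string& s) { table_.emplace(s, table_.size()); }
  // Returns the stable index assigned when the string was first added.
  std::size_t IndexOf(const std::string& s) const { return table_.at(s); }

 private:
  std::map<std::string, std::size_t> table_;
};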
@@ -4855,7 +4852,7 @@ jbyteArray Dbg::GetRecentAllocations() { count > 0 && it != end; count--, it++) { const gc::AllocRecord* record = it->second; std::string temp; - class_names.Add(record->GetClass()->GetDescriptor(&temp)); + class_names.Add(record->GetClassDescriptor(&temp)); for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) { ArtMethod* m = record->StackElement(i).GetMethod(); class_names.Add(m->GetDeclaringClassDescriptor()); @@ -4907,7 +4904,7 @@ jbyteArray Dbg::GetRecentAllocations() { const gc::AllocRecord* record = it->second; size_t stack_depth = record->GetDepth(); size_t allocated_object_class_name_index = - class_names.IndexOf(record->GetClass()->GetDescriptor(&temp)); + class_names.IndexOf(record->GetClassDescriptor(&temp)); JDWP::Append4BE(bytes, record->ByteCount()); JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid())); JDWP::Append2BE(bytes, allocated_object_class_name_index); diff --git a/runtime/debugger.h b/runtime/debugger.h index fd7d46c37e..a9fa6ce8cb 100644 --- a/runtime/debugger.h +++ b/runtime/debugger.h @@ -79,7 +79,7 @@ struct DebugInvokeReq { JDWP::ExpandBuf* const reply; void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq); @@ -155,15 +155,15 @@ class DeoptimizationRequest { DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {} DeoptimizationRequest(const DeoptimizationRequest& other) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) { // Create a new JNI global reference for the method. SetMethod(other.Method()); } - ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_); - void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_); // Name 'Kind()' would collide with the above enum name. Kind GetKind() const { @@ -205,7 +205,7 @@ class Dbg { static void StopJdwp(); // Invoked by the GC in case we need to keep DDMS informed. - static void GcDidFinish() LOCKS_EXCLUDED(Locks::mutator_lock_); + static void GcDidFinish() REQUIRES(!Locks::mutator_lock_); // Return the DebugInvokeReq for the current thread. static DebugInvokeReq* GetInvokeReq(); @@ -219,8 +219,8 @@ class Dbg { */ static void Connected(); static void GoActive() - LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_); - static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_, !Locks::deoptimization_lock_, !Locks::mutator_lock_); + static void Disconnected() REQUIRES(!Locks::deoptimization_lock_, !Locks::mutator_lock_); static void Dispose() { gDisposed = true; } @@ -239,8 +239,7 @@ class Dbg { // Returns true if a method has any breakpoints. 
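GetRecentAllocations builds its reply in two passes: pass one interns every class descriptor, method name, and source file into string tables; pass two serializes each record as indices into those tables. A minimal sketch of that interning pattern follows; an ordered set addressed by rank is one simple way to obtain stable indices and is assumed here rather than taken from the actual StringTable:

    #include <cstddef>
    #include <iterator>
    #include <set>
    #include <string>

    // Two-pass interning table: Add() everything first; IndexOf() is only
    // meaningful once the table stops changing, because an index is just
    // the entry's rank in sorted order.
    class InternTable {
     public:
      void Add(const std::string& s) { entries_.insert(s); }

      size_t IndexOf(const std::string& s) const {
        return static_cast<size_t>(
            std::distance(entries_.begin(), entries_.find(s)));
      }

      size_t Size() const { return entries_.size(); }

     private:
      std::set<std::string> entries_;
    };

The two-pass shape is what lets the reply header state the table sizes up front: no new strings can appear while the records themselves are being emitted.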
static bool MethodHasAnyBreakpoints(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::breakpoint_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_); static bool IsDisposed() { return gDisposed; @@ -254,248 +253,233 @@ class Dbg { static int64_t LastDebuggerActivity(); static void UndoDebuggerSuspensions() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); /* * Class, Object, Array */ static std::string GetClassName(JDWP::RefTypeId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static std::string GetClassName(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void GetClassList(std::vector<JDWP::RefTypeId>* classes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static size_t GetTagWidth(JDWP::JdwpTag tag); static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count, JDWP::Request* request) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + 
SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length, JDWP::ObjectId* new_array_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // // Event filtering. // static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchLocation(const JDWP::JdwpLocation& expected_location, const JDWP::EventLocation& event_location) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id, ArtField* event_field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // // Monitors. // static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id, std::vector<JDWP::ObjectId>* monitors, std::vector<uint32_t>* stack_depths) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectId* contended_monitor) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // // Heap. 
// static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids, std::vector<uint64_t>* counts) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>* instances) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count, std::vector<JDWP::ObjectId>* referring_objects) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // // Methods and fields. // static std::string GetMethodName(JDWP::MethodId method_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id, std::vector<uint8_t>* bytecodes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static std::string GetFieldName(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError 
GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Thread, ThreadGroup, Frame */ static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::ObjectId GetSystemThreadGroupId() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state); static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // static void WaitForSuspend(JDWP::ObjectId thread_id); // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0, // returns all threads. 
static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - static JDWP::ObjectId GetThreadSelfId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_); + static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_); static void SuspendVM() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static void ResumeVM() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); static void ResumeThread(JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static void SuspendSelf(); static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, JDWP::ObjectId* result) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetLocalValues(JDWP::Request* request) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); /* * Debugger notification @@ -508,47 +492,42 @@ class Dbg { }; static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostFieldModificationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object, ArtField* f, const JValue* field_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void 
PostException(mirror::Throwable* exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostThreadStart(Thread* t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostThreadDeath(Thread* t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void PostClassPrepare(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void UpdateDebugger(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc, int event_flags, const JValue* return_value) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Indicates whether we need deoptimization for debugging. static bool RequiresDeoptimization(); // Records deoptimization request in the queue. static void RequestDeoptimization(const DeoptimizationRequest& req) - LOCKS_EXCLUDED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each // request and finally resumes all threads. static void ManageDeoptimization() - LOCKS_EXCLUDED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Breakpoints. static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req) - LOCKS_EXCLUDED(Locks::breakpoint_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * Forced interpreter checkers for single-step and continue support. @@ -557,7 +536,7 @@ class Dbg { // Indicates whether we need to force the use of interpreter to invoke a method. // This allows to single-step or continue into the called method. static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -568,7 +547,7 @@ class Dbg { // method through the resolution trampoline. This allows to single-step or continue into // the called method. static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -579,7 +558,7 @@ class Dbg { // a method through the resolution trampoline. This allows to deoptimize the stack for // debugging when we returned from the called method. static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -590,7 +569,7 @@ class Dbg { // interpreter into the runtime. This allows to deoptimize the stack and continue // execution with interpreter for debugging. 
static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsDebuggerActive()) { return false; } @@ -600,10 +579,9 @@ class Dbg { // Single-stepping. static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size, JDWP::JdwpStepDepth depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void UnconfigureStep(JDWP::ObjectId thread_id) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * Invoke support @@ -623,9 +601,8 @@ class Dbg { JDWP::MethodId method_id, uint32_t arg_count, uint64_t arg_values[], JDWP::JdwpTag* arg_types, uint32_t options) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Called by the event thread to execute a method prepared by the JDWP thread in the given // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply @@ -642,30 +619,29 @@ class Dbg { * DDM support. */ static void DdmSendThreadNotification(Thread* t, uint32_t type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSetThreadNotification(bool enable) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen); - static void DdmConnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void DdmDisconnected() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_); + static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Allocation tracking support. 
*/ - static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_); static jbyteArray GetRecentAllocations() - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void DumpRecentAllocations() LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_); enum HpifWhen { HPIF_WHEN_NEVER = 0, @@ -674,7 +650,7 @@ class Dbg { HPIF_WHEN_EVERY_GC = 3 }; static int DdmHandleHpifChunk(HpifWhen when) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); enum HpsgWhen { HPSG_WHEN_NEVER = 0, @@ -687,78 +663,76 @@ class Dbg { static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native); static void DdmSendHeapInfo(HpifWhen reason) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void DdmSendHeapSegments(bool native) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static ObjectRegistry* GetObjectRegistry() { return gRegistry; } static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::FieldId ToFieldId(const ArtField* f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static JDWP::JdwpState* GetJdwpState(); - static uint32_t GetInstrumentationEvents() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) { return instrumentation_events_; } private: static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag, uint64_t result_value, JDWP::ObjectId exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static JDWP::JdwpError SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag, uint64_t value, size_t width) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_); static void PostThreadStartOrStop(Thread*, uint32_t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + 
SHARED_REQUIRES(Locks::mutator_lock_); static void PostLocationEvent(ArtMethod* method, int pcOffset, mirror::Object* thisPtr, int eventFlags, const JValue* return_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); static void RequestDeoptimizationLocked(const DeoptimizationRequest& req) - EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Indicates whether the debugger is making requests. static bool gDebuggerActive; diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index d30fac4cde..52590a5970 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -29,6 +29,7 @@ #include "art_field-inl.h" #include "art_method-inl.h" +#include "base/hash_map.h" #include "base/logging.h" #include "base/stringprintf.h" #include "class_linker.h" diff --git a/runtime/dex_file.h b/runtime/dex_file.h index 7ac264a0c5..a1ddbc7d61 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -22,7 +22,6 @@ #include <unordered_map> #include <vector> -#include "base/hash_map.h" #include "base/logging.h" #include "base/mutex.h" // For Locks::mutator_lock_. #include "base/value_object.h" @@ -43,6 +42,8 @@ namespace mirror { class ArtField; class ArtMethod; class ClassLinker; +template <class Key, class Value, class EmptyFn, class HashFn, class Pred, class Alloc> +class HashMap; class MemMap; class OatDexFile; class Signature; @@ -786,7 +787,10 @@ class DexFile { // Get the pointer to the start of the debugging data const uint8_t* GetDebugInfoStream(const CodeItem* code_item) const { - if (code_item->debug_info_off_ == 0) { + // Check that the offset is in bounds. + // Note that although the specification says that 0 should be used if there + // is no debug information, some applications incorrectly use 0xFFFFFFFF. + if (code_item->debug_info_off_ == 0 || code_item->debug_info_off_ >= size_) { return nullptr; } else { return begin_ + code_item->debug_info_off_; @@ -867,7 +871,7 @@ class DexFile { // // This is used by runtime; therefore use art::Method not art::DexFile::Method. 
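The GetDebugInfoStream change above is a general defensive-parsing move: never turn an offset read from an untrusted file into a pointer without first checking it against the mapped size. The same idea in isolation, with hypothetical names rather than dex-file API:

    #include <cstddef>
    #include <cstdint>

    // Resolve a file-relative offset into a pointer, treating both 0
    // ("absent") and out-of-range values (such as the bogus 0xFFFFFFFF some
    // writers emit) as "no data" instead of producing a wild pointer.
    const uint8_t* ResolveOffset(const uint8_t* begin, size_t size,
                                 uint32_t off) {
      if (off == 0 || off >= size) {
        return nullptr;  // caller must handle the absent case
      }
      return begin + off;
    }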
int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DecodeDebugInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, DexDebugNewPositionCb position_cb, DexDebugNewLocalCb local_cb, @@ -1048,7 +1052,12 @@ class DexFile { return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(a, b) == 0; } }; - typedef HashMap<const char*, const ClassDef*, UTF16EmptyFn, UTF16HashCmp, UTF16HashCmp> Index; + using Index = HashMap<const char*, + const ClassDef*, + UTF16EmptyFn, + UTF16HashCmp, + UTF16HashCmp, + std::allocator<std::pair<const char*, const ClassDef*>>>; mutable Atomic<Index*> class_def_index_; // If this dex file was loaded from an oat file, oat_dex_file_ contains a @@ -1311,10 +1320,10 @@ class EncodedStaticFieldValueIterator { EncodedStaticFieldValueIterator(const DexFile& dex_file, Handle<mirror::DexCache>* dex_cache, Handle<mirror::ClassLoader>* class_loader, ClassLinker* linker, const DexFile::ClassDef& class_def) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive> - void ReadValueToField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ReadValueToField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_); bool HasNext() const { return pos_ < array_size_; } diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc index 90b8fdbadc..eec49839ef 100644 --- a/runtime/dex_file_verifier.cc +++ b/runtime/dex_file_verifier.cc @@ -794,13 +794,13 @@ bool DexFileVerifier::CheckIntraCodeItem() { } const DexFile::TryItem* try_items = DexFile::GetTryItems(*code_item, 0); - ptr_ = DexFile::GetCatchHandlerData(*code_item, 0); - uint32_t handlers_size = DecodeUnsignedLeb128(&ptr_); - if (!CheckListSize(try_items, try_items_size, sizeof(DexFile::TryItem), "try_items size")) { return false; } + ptr_ = DexFile::GetCatchHandlerData(*code_item, 0); + uint32_t handlers_size = DecodeUnsignedLeb128(&ptr_); + if (UNLIKELY((handlers_size == 0) || (handlers_size >= 65536))) { ErrorStringPrintf("Invalid handlers_size: %ud", handlers_size); return false; diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h index 0ddbf7c7a8..df2d3799ab 100644 --- a/runtime/dex_instruction.h +++ b/runtime/dex_instruction.h @@ -488,6 +488,12 @@ class Instruction { // Returns true if the instruction allows control flow to go to the following instruction. bool CanFlowThrough() const; + // Returns true if the instruction is a quickened instruction. + bool IsQuickened() const { + return (kInstructionIndexTypes[Opcode()] == kIndexFieldOffset) || + (kInstructionIndexTypes[Opcode()] == kIndexVtableOffset); + } + // Returns true if this instruction is a switch. bool IsSwitch() const { return (kInstructionFlags[Opcode()] & kSwitch) != 0; diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h index f892f980a1..1ae2b1b108 100644 --- a/runtime/dex_instruction_utils.h +++ b/runtime/dex_instruction_utils.h @@ -144,49 +144,49 @@ constexpr DexInvokeType InvokeInstructionType(Instruction::Code opcode) { constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. 
- DCHECK(IsInstructionIGet(opcode)); + DCHECK(IsInstructionIGet(code)); #endif return static_cast<DexMemAccessType>(code - Instruction::IGET); } constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionIPut(opcode)); + DCHECK(IsInstructionIPut(code)); #endif return static_cast<DexMemAccessType>(code - Instruction::IPUT); } constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionSGet(opcode)); + DCHECK(IsInstructionSGet(code)); #endif return static_cast<DexMemAccessType>(code - Instruction::SGET); } constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionSPut(opcode)); + DCHECK(IsInstructionSPut(code)); #endif return static_cast<DexMemAccessType>(code - Instruction::SPUT); } constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionAGet(opcode)); + DCHECK(IsInstructionAGet(code)); #endif return static_cast<DexMemAccessType>(code - Instruction::AGET); } constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionAPut(opcode)); + DCHECK(IsInstructionAPut(code)); #endif return static_cast<DexMemAccessType>(code - Instruction::APUT); } constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionIGetOrIPut(opcode)); + DCHECK(IsInstructionIGetOrIPut(code)); #endif return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code); } @@ -216,14 +216,14 @@ static inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Co constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionSGetOrSPut(opcode)); + DCHECK(IsInstructionSGetOrSPut(code)); #endif return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code); } constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) { #if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions. - DCHECK(IsInstructionAGetOrAPut(opcode)); + DCHECK(IsInstructionAGetOrAPut(code)); #endif return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code); } diff --git a/runtime/elf.h b/runtime/elf.h index 4514bb2688..d1efc92c30 100644 --- a/runtime/elf.h +++ b/runtime/elf.h @@ -42,7 +42,7 @@ typedef uint64_t Elf64_Xword; typedef int64_t Elf64_Sxword; // Object file magic string. -static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' }; +static constexpr char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' }; // e_ident size and indices. 
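The opcode -> code renames above are genuine compile fixes, not cosmetics: those DCHECKs sit behind an __cplusplus >= 201402 guard, so in a C++11 build the stale identifier was never even parsed and the breakage stayed invisible. A condensed illustration of the pattern, with assert standing in for DCHECK:

    #include <cassert>

    // C++11 restricts constexpr functions to a single return statement;
    // C++14 relaxes this, so the assert below only exists in C++14 builds.
    // A typo inside the guard therefore compiles cleanly under C++11.
    constexpr int TwiceNonNegative(int value) {
    #if __cplusplus >= 201402  // C++14 allows statements in constexpr functions.
      assert(value >= 0);
    #endif
      return value * 2;
    }

    static_assert(TwiceNonNegative(21) == 42, "folds in both language modes");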
enum { @@ -60,10 +60,10 @@ enum { }; // BEGIN android-added for <elf.h> compat -const char ELFMAG0 = ElfMagic[EI_MAG0]; -const char ELFMAG1 = ElfMagic[EI_MAG1]; -const char ELFMAG2 = ElfMagic[EI_MAG2]; -const char ELFMAG3 = ElfMagic[EI_MAG3]; +constexpr char ELFMAG0 = ElfMagic[EI_MAG0]; +constexpr char ELFMAG1 = ElfMagic[EI_MAG1]; +constexpr char ELFMAG2 = ElfMagic[EI_MAG2]; +constexpr char ELFMAG3 = ElfMagic[EI_MAG3]; // END android-added for <elf.h> compat struct Elf32_Ehdr { diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index 9fd8c87435..723ee74eb6 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -1868,7 +1868,8 @@ const File& ElfFile::GetFile() const { DELEGATE_TO_IMPL(GetFile); } -bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) { +bool ElfFile::GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, + uint64_t* size) const { if (elf32_.get() == nullptr) { CHECK(elf64_.get() != nullptr); diff --git a/runtime/elf_file.h b/runtime/elf_file.h index 48cb4b8b2e..1188c97658 100644 --- a/runtime/elf_file.h +++ b/runtime/elf_file.h @@ -60,7 +60,7 @@ class ElfFile { const File& GetFile() const; - bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size); + bool GetSectionOffsetAndSize(const char* section_name, uint64_t* offset, uint64_t* size) const; uint64_t FindSymbolAddress(unsigned section_type, const std::string& symbol_name, diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index de925b7e8c..66e88ba885 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -41,7 +41,7 @@ namespace art { inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, const InlineInfo& inline_info, uint8_t inlining_depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t method_index = inline_info.GetMethodIndexAtDepth(inlining_depth); InvokeType invoke_type = static_cast<InvokeType>( inline_info.GetInvokeTypeAtDepth(inlining_depth)); @@ -74,7 +74,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, Runtime::CalleeSaveType type, bool do_caller_check = false) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type)); const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type); @@ -110,7 +110,7 @@ inline ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, } inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetCalleeSaveMethodCaller( self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */); } @@ -403,7 +403,7 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx, ArtMethod* referrer, // Explicit template declarations of FindFieldFromCode for all field access types. 
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \ +template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \ ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \ ArtMethod* referrer, \ Thread* self, size_t expected_size) \ @@ -426,15 +426,15 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite); template<InvokeType type, bool access_check> inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_object, - ArtMethod** referrer, Thread* self) { + ArtMethod* referrer, Thread* self) { ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); - ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer); + ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, referrer); if (resolved_method == nullptr) { StackHandleScope<1> hs(self); mirror::Object* null_this = nullptr; HandleWrapper<mirror::Object> h_this( hs.NewHandleWrapper(type == kStatic ? &null_this : this_object)); - resolved_method = class_linker->ResolveMethod(self, method_idx, *referrer, type); + resolved_method = class_linker->ResolveMethod(self, method_idx, referrer, type); } if (UNLIKELY(resolved_method == nullptr)) { DCHECK(self->IsExceptionPending()); // Throw exception and unwind. @@ -448,11 +448,11 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ // Incompatible class change should have been handled in resolve method. if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, - *referrer); + referrer); return nullptr; // Failure. } mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = (*referrer)->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); bool can_access_resolved_method = referring_class->CheckResolvedMethodAccess<type>(methods_class, resolved_method, method_idx); @@ -480,7 +480,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize()); } case kSuper: { - mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass(); + mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass(); uint16_t vtable_index = resolved_method->GetMethodIndex(); if (access_check) { // Check existence of super class. @@ -517,7 +517,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ resolved_method, class_linker->GetImagePointerSize()); if (UNLIKELY(interface_method == nullptr)) { ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, - *this_object, *referrer); + *this_object, referrer); return nullptr; // Failure. } return interface_method; @@ -531,10 +531,10 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ // Explicit template declarations of FindMethodFromCode for all invoke types. 
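The #define that follows (like the FindFieldFromCode one above) stamps out explicit instantiations so the template definitions can stay in one translation unit while every (type, access-check) combination is still emitted for other files to link against. A stripped-down sketch of the technique, with a hypothetical Lookup template standing in for FindMethodFromCode:

    // Definition visible in this translation unit only.
    template <bool kAccessCheck>
    int Lookup(int idx) {
      return kAccessCheck ? idx + 1 : idx;  // placeholder body
    }

    // Explicit instantiation: emits code for each supported combination;
    // a macro keeps the list short and consistent.
    #define EXPLICIT_LOOKUP_DECL(_access_check) \
      template int Lookup<_access_check>(int idx)
    EXPLICIT_LOOKUP_DECL(false);
    EXPLICIT_LOOKUP_DECL(true);
    #undef EXPLICIT_LOOKUP_DECL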
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \ + template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \ ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \ mirror::Object** this_object, \ - ArtMethod** referrer, \ + ArtMethod* referrer, \ Thread* self) #define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \ EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \ diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index fc7f8b782a..eaf33f6b7f 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -38,7 +38,7 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, ArtMethod* referrer, Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(component_count < 0)) { ThrowNegativeArraySizeException(component_count); return nullptr; // Failure @@ -298,7 +298,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons interface_method->GetArtMethod(), sizeof(void*)); auto* virtual_methods = proxy_class->GetVirtualMethodsPtr(); size_t num_virtuals = proxy_class->NumVirtualMethods(); - size_t method_size = ArtMethod::ObjectSize(sizeof(void*)); + size_t method_size = ArtMethod::Size(sizeof(void*)); int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) - reinterpret_cast<uintptr_t>(virtual_methods)) / method_size; CHECK_LT(throws_index, static_cast<int>(num_virtuals)); diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 47865a2a80..53f2677e7e 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -45,12 +45,12 @@ template <const bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, ArtMethod* method, Thread* self, bool* slow_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, Thread* self, bool* slow_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. @@ -61,21 +61,21 @@ ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method and a resolved class, create an instance. template <bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method and an initialized class, create an instance. 
template <bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kAccessCheck> @@ -83,7 +83,7 @@ ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, int32_t component_count, ArtMethod* method, bool* slow_path) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If // it cannot be resolved, throw an error. If it can, use it to create an array. @@ -95,7 +95,7 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, @@ -103,13 +103,13 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* kl ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self, bool access_check, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, int32_t component_count, @@ -117,7 +117,7 @@ extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Thread* self, bool access_check, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Type of find field operation for fast and slow case. enum FindFieldType { @@ -134,47 +134,47 @@ enum FindFieldType { template<FindFieldType type, bool access_check> inline ArtField* FindFieldFromCode( uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<InvokeType type, bool access_check> inline ArtMethod* FindMethodFromCode( - uint32_t method_idx, mirror::Object** this_object, ArtMethod** referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t method_idx, mirror::Object** this_object, ArtMethod* referrer, Thread* self) + SHARED_REQUIRES(Locks::mutator_lock_); // Fast path field resolution that can't initialize classes or throw exceptions. inline ArtField* FindFieldFast( uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Fast path method resolution that can't throw exceptions. 
inline ArtMethod* FindMethodFast( uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); inline mirror::Class* ResolveVerifyAndClinit( uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); -extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // TODO: annotalysis disabled as monitor semantics are maintained in Java code. inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) NO_THREAD_SAFETY_ANALYSIS; void CheckReferenceResult(mirror::Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty, jobject rcvr_jobj, jobject interface_art_method_jobj, std::vector<jvalue>& args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <typename INT_TYPE, typename FLOAT_TYPE> inline INT_TYPE art_float_to_integral(FLOAT_TYPE f); diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h index 7a4415846c..331de91a40 100644 --- a/runtime/entrypoints/quick/callee_save_frame.h +++ b/runtime/entrypoints/quick/callee_save_frame.h @@ -39,32 +39,32 @@ class ScopedQuickEntrypointChecks { explicit ScopedQuickEntrypointChecks(Thread *self, bool entry_check = kIsDebugBuild, bool exit_check = kIsDebugBuild) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) { + SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) { if (entry_check) { TestsOnEntry(); } } - ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) : self_(kIsDebugBuild ? 
Thread::Current() : nullptr), exit_check_(kIsDebugBuild) { if (kIsDebugBuild) { TestsOnEntry(); } } - ~ScopedQuickEntrypointChecks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) { if (exit_check_) { TestsOnExit(); } } private: - void TestsOnEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(self_); self_->VerifyStack(); } - void TestsOnExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(self_); self_->VerifyStack(); } diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index f56b5e45b6..9311791a42 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -30,7 +30,7 @@ static constexpr bool kUseTlabFastPath = true; #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ uint32_t type_idx, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \ @@ -57,7 +57,7 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \ } \ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ mirror::Class* klass, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ UNUSED(method); \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ @@ -84,7 +84,7 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \ } \ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ mirror::Class* klass, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ UNUSED(method); \ ScopedQuickEntrypointChecks sqec(self); \ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \ @@ -109,34 +109,34 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \ } \ extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \ mirror::Class* klass, int32_t component_count, ArtMethod* method, 
Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \ allocator_type); \ } \ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (!instrumented_bool) { \ return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \ @@ -146,7 +146,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \ } \ extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ if (!instrumented_bool) { \ return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \ @@ -157,7 +157,7 @@ extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix## extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \ mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \ Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ ScopedQuickEntrypointChecks sqec(self); \ StackHandleScope<1> hs(self); \ Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \ @@ -166,7 +166,7 @@ extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \ } \ extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \ int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ StackHandleScope<1> hs(self); \ Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \ return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \ @@ -174,7 +174,7 @@ extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \ } \ extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( \ mirror::String* string, Thread* self) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ StackHandleScope<1> hs(self); \ Handle<mirror::String> handle_string(hs.NewHandle(string)); \ return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \ diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.h b/runtime/entrypoints/quick/quick_alloc_entrypoints.h index ec0aef57a7..14a8e0428b 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.h +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.h @@ -31,10 +31,10 @@ void 
ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints); // holding the runtime shutdown lock and the mutator lock when we update the entrypoints. void SetQuickAllocEntryPointsAllocator(gc::AllocatorType allocator) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); + REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); void SetQuickAllocEntryPointsInstrumented(bool instrumented) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); + REQUIRES(Locks::mutator_lock_, Locks::runtime_shutdown_lock_); } // namespace art diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc index 37de380151..968ac534b3 100644 --- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc @@ -21,7 +21,7 @@ namespace art { // Assignable test for code, won't throw. Null and equality tests already performed extern "C" uint32_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(klass != nullptr); DCHECK(ref_class != nullptr); return klass->IsAssignableFrom(ref_class) ? 1 : 0; diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc index f1b54459df..a4feac1ea1 100644 --- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc @@ -28,7 +28,7 @@ namespace art { -extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); if (VLOG_IS_ON(deopt)) { diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc index 3cefc47fd2..b12b1189c2 100644 --- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -26,7 +26,7 @@ namespace art { extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Called to ensure static storage base is initialized for direct static field reads and writes. // A class may be accessing another class' fields when it doesn't have access, as access has been // given by inheritance. @@ -36,7 +36,7 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, } extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Called when method->dex_cache_resolved_types_[] misses. ScopedQuickEntrypointChecks sqec(self); auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly); @@ -44,7 +44,7 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* s } extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Called when caller isn't guaranteed to have access to a type and the dex cache may be // unpopulated. 
ScopedQuickEntrypointChecks sqec(self); @@ -53,7 +53,7 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type } extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kRefsOnly); return ResolveStringFromCode(caller, string_idx); diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h index cef2510451..3d3f7a1bdb 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -20,6 +20,7 @@ #include <jni.h> #include "base/macros.h" +#include "base/mutex.h" #include "offsets.h" #define QUICK_ENTRYPOINT_OFFSET(ptr_size, x) \ @@ -71,6 +72,16 @@ extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_o Thread* self) NO_THREAD_SAFETY_ANALYSIS HOT_ATTR; +// Read barrier entrypoints. +// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to this function directly. +// For x86 and x86_64, compilers need a wrapper assembly function, to handle mismatch in ABI. +// This is the read barrier slow path for instance and static fields and reference-type arrays. +// TODO: Currently the read barrier does not have a fast path for compilers to directly generate. +// Ideally the slow path should only take one parameter "ref". +extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref, mirror::Object* obj, + uint32_t offset) + SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR; + } // namespace art #endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index 60bbf4ac82..73d8ae76ae 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -145,7 +145,8 @@ V(NewStringFromStringBuffer, void) \ V(NewStringFromStringBuilder, void) \ \ - V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) + V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \ + V(ReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t) #endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ #undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint. 
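The artReadBarrierSlow entrypoint declared above is the slow path that compiled code invokes when a reference load must pass through the read barrier. As a rough illustration only — the Obj type and the forwarding-pointer scheme below are invented stand-ins, not ART's mirror::Object or ReadBarrier::Barrier<> (the real implementation appears in quick_field_entrypoints.cc below) — here is a self-contained C++ sketch of what such a slow path conceptually does:

#include <atomic>
#include <cstdint>

// Hypothetical object header: a moved object publishes its new location here;
// nullptr means "not moved". ART's real layout differs.
struct Obj {
  std::atomic<Obj*> forwarding_address{nullptr};
};

// Slow-path sketch: reload the reference stored at (holder + offset) and, if
// the referent was moved by a concurrent copying GC, heal the field and return
// the to-space address instead of the stale from-space one.
Obj* ReadBarrierSlow(Obj* /* ref */, Obj* holder, uint32_t offset) {
  auto* field = reinterpret_cast<std::atomic<Obj*>*>(
      reinterpret_cast<uint8_t*>(holder) + offset);
  Obj* referent = field->load(std::memory_order_acquire);
  if (referent != nullptr) {
    Obj* to_space = referent->forwarding_address.load(std::memory_order_acquire);
    if (to_space != nullptr) {
      field->store(to_space, std::memory_order_release);  // fix up the field
      return to_space;
    }
  }
  return referent;
}

This also motivates the three-parameter signature the TODO apologizes for: with no compiler fast path yet, the slow path has to recompute the field address from obj and offset rather than simply mapping an already-loaded ref to its to-space copy.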
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index 871cf3c256..0a1d80648d 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -27,7 +27,7 @@ namespace art { extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr)) { @@ -42,7 +42,7 @@ extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referr extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr)) { @@ -57,7 +57,7 @@ extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* re extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr)) { @@ -73,7 +73,7 @@ extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* refe extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr)) { @@ -89,7 +89,7 @@ extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx, extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t)); if (LIKELY(field != nullptr)) { @@ -105,7 +105,7 @@ extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t)); if (LIKELY(field != nullptr)) { @@ -121,7 +121,7 @@ extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(mirror::HeapReference<mirror::Object>)); @@ -138,7 +138,7 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx, extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -158,7 +158,7 @@ extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -177,7 +177,7 @@ extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Obj } extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -197,7 +197,7 @@ extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Objec extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -217,7 +217,7 @@ extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Objec extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -237,7 +237,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -258,7 +258,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object* extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(mirror::HeapReference<mirror::Object>)); @@ -279,7 +279,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror: extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { 
ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t)); if (LIKELY(field != nullptr)) { @@ -310,7 +310,7 @@ extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value, extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t)); if (LIKELY(field != nullptr)) { @@ -341,7 +341,7 @@ extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value, extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != nullptr)) { @@ -360,7 +360,7 @@ extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value, extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, uint64_t new_value, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != nullptr)) { @@ -379,7 +379,7 @@ extern "C" int artSet64StaticFromCode(uint32_t field_idx, ArtMethod* referrer, extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(mirror::HeapReference<mirror::Object>)); @@ -402,7 +402,7 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -441,7 +441,7 @@ extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -481,7 +481,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t)); if (LIKELY(field != nullptr && obj != 
nullptr)) { @@ -509,7 +509,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint64_t new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t)); if (LIKELY(field != nullptr && obj != nullptr)) { @@ -534,7 +534,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj, extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj, mirror::Object* new_value, ArtMethod* referrer, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(mirror::HeapReference<mirror::Object>)); @@ -557,4 +557,16 @@ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj return -1; // failure } +// TODO: Currently the read barrier does not have a fast path. Ideally the slow path should only +// take one parameter "ref", which is generated by the fast path. +extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED, + mirror::Object* obj, uint32_t offset) { + DCHECK(kUseReadBarrier); + uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset; + mirror::HeapReference<mirror::Object>* ref_addr = + reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr); + return ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, true>(obj, MemberOffset(offset), + ref_addr); +} + } // namespace art diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc index d3991cdb78..22b2fa3f45 100644 --- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc @@ -26,7 +26,7 @@ namespace art { */ extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array, ArtMethod* method, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); const uint16_t* const insns = method->GetCodeItem()->insns_; const Instruction::ArrayDataPayload* payload = diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc index 2b5c15bcbd..ad5ee8475e 100644 --- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc @@ -28,7 +28,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, mirror::Object* this_object, Thread* self, uintptr_t lr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip // that part. 
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false); @@ -50,7 +50,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method, extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp, uint64_t gpr_result, uint64_t fpr_result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Compute address of return PC and sanity check that it currently holds 0. size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, Runtime::kRefsOnly); uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) + diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index de225ad8e8..f69c39e8bc 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -63,7 +63,7 @@ static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS { } static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JNIEnvExt* env = self->GetJniEnv(); env->locals.SetSegmentState(env->local_ref_cookie); env->local_ref_cookie = saved_local_ref_cookie; diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc index 4423c08288..3bf001e249 100644 --- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc @@ -21,7 +21,7 @@ namespace art { extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ { ScopedQuickEntrypointChecks sqec(self); if (UNLIKELY(obj == nullptr)) { @@ -41,7 +41,7 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self) } extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ { ScopedQuickEntrypointChecks sqec(self); if (UNLIKELY(obj == nullptr)) { diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc index 87e0c6eecd..47b3eff40d 100644 --- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc @@ -19,7 +19,7 @@ namespace art { -extern "C" void artTestSuspendFromCode(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { // Called when suspend count check value is 0 and thread->suspend_count_ != 0 ScopedQuickEntrypointChecks sqec(self); self->CheckSuspend(); diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index f22edc1b9e..5a82b3ae2e 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -25,14 +25,14 @@ namespace art { // Deliver an exception that's pending on thread helping set up a callee save frame on the way. 
extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); self->QuickDeliverException(); } // Called by generated call to throw an exception. extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { /* * exception may be null, in which case this routine should * throw NPE. NOTE: this is a convenience for generated code, @@ -51,7 +51,7 @@ extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* excepti // Called by generated call to throw a NPE exception. extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); self->NoteSignalBeingHandled(); ThrowNullPointerExceptionFromDexPC(); @@ -61,7 +61,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self) // Called by generated call to throw an arithmetic divide by zero exception. extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowArithmeticExceptionDivideByZero(); self->QuickDeliverException(); @@ -69,14 +69,14 @@ extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self) // Called by generated call to throw an array index out of bounds exception. extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowArrayIndexOutOfBoundsException(index, length); self->QuickDeliverException(); } extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); self->NoteSignalBeingHandled(); ThrowStackOverflowError(self); @@ -85,7 +85,7 @@ extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self) } extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowNoSuchMethodError(method_idx); self->QuickDeliverException(); @@ -94,7 +94,7 @@ extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Threa extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); DCHECK(!dest_type->IsAssignableFrom(src_type)); ThrowClassCastException(dest_type, src_type); @@ -103,7 +103,7 @@ extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type, extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); ThrowArrayStoreException(value->GetClass(), array->GetClass()); self->QuickDeliverException(); diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc 
b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 4f76ebdd40..da4b82c43b 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -280,7 +280,7 @@ class QuickArgumentVisitor { // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the // 1st GPR. static mirror::Object* GetProxyThisObject(ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK((*sp)->IsProxyMethod()); CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes()); CHECK_GT(kNumQuickGprArgs, 0u); @@ -291,19 +291,19 @@ class QuickArgumentVisitor { return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr(); } - static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); return GetCalleeSaveMethodCaller(sp, Runtime::kRefsAndArgs); } - static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); uint8_t* previous_sp = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize; return *reinterpret_cast<ArtMethod**>(previous_sp); } - static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs); ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>( @@ -329,14 +329,14 @@ class QuickArgumentVisitor { } // For the given quick ref and args quick frame, return the caller's PC. - static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK((*sp)->IsCalleeSaveMethod()); uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset; return *reinterpret_cast<uintptr_t*>(lr); } QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty, - uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) : is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset), fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset), @@ -421,7 +421,7 @@ class QuickArgumentVisitor { } } - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) { // (a) 'stack_args_' should point to the method's first argument // (b) whatever the argument type it is, the 'stack_index_' should // be moved forward with every visit. @@ -571,7 +571,7 @@ class QuickArgumentVisitor { // Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It // allows using the QuickArgumentVisitor constants without moving all the code into its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return QuickArgumentVisitor::GetProxyThisObject(sp); } @@ -582,7 +582,7 @@ class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor { uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; private: ShadowFrame* const sf_; @@ -625,7 +625,7 @@ void BuildQuickShadowFrameVisitor::Visit() { } extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Ensure we don't get thread suspension until the object arguments are safely in the shadow // frame. ScopedQuickEntrypointChecks sqec(self); @@ -692,9 +692,9 @@ class BuildQuickArgumentVisitor FINAL : public QuickArgumentVisitor { ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; - void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_); private: ScopedObjectAccessUnchecked* const soa_; @@ -753,7 +753,7 @@ void BuildQuickArgumentVisitor::FixupReferences() { // field within the proxy object, which will box the primitive arguments and deal with error cases. extern "C" uint64_t artQuickProxyInvokeHandler( ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method); DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method); // Ensure we don't get thread suspension until the object arguments are safely in jobjects. @@ -809,9 +809,9 @@ class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor { uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; - void FixupReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_); private: ScopedObjectAccessUnchecked* const soa_; @@ -842,7 +842,7 @@ void RememberForGcArgumentVisitor::FixupReferences() { // Lazily resolve a method for quick. Called by stub code. extern "C" const void* artQuickResolutionTrampoline( ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // The resolution trampoline stashes the resolved method into the callee-save frame to transport // it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely // does not have the same stack layout as the callee-save method). 
@@ -1196,7 +1196,7 @@ template<class T> class BuildNativeCallFrameStateMachine { return gpr_index_ > 0; } - void AdvanceHandleScope(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) { uintptr_t handle = PushHandle(ptr); if (HaveHandleScopeGpr()) { gpr_index_--; @@ -1384,7 +1384,7 @@ template<class T> class BuildNativeCallFrameStateMachine { void PushStack(uintptr_t val) { delegate_->PushStack(val); } - uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) { return delegate_->PushHandle(ref); } @@ -1443,11 +1443,11 @@ class ComputeNativeCallFrameSize { } virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(sm); } - void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) { BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this); WalkHeader(&sm); @@ -1519,7 +1519,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // // Note: assumes ComputeAll() has been run before. void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = **m; DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); @@ -1560,7 +1560,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie. // Returns the new bottom. Note: this may be unaligned. uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // First, fix up the layout of the callee-save frame. // We have to squeeze in the HandleScope, and relocate the method pointer. LayoutCalleeSaveFrame(self, m, sp, handle_scope); @@ -1578,7 +1578,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len, HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Walk(shorty, shorty_len); // JNI part. @@ -1594,7 +1594,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize { // Add JNIEnv* and jobj/jclass before the shorty-derived elements. 
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: uint32_t num_handle_scope_references_; @@ -1650,7 +1650,7 @@ class FillNativeCall { cur_stack_arg_++; } - virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) { LOG(FATAL) << "(Non-JNI) Native call does not use handles."; UNREACHABLE(); } @@ -1688,16 +1688,16 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { } } - void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; - void FinalizeHandleScope(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); StackReference<mirror::Object>* GetFirstHandleScopeEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return handle_scope_->GetHandle(0).GetReference(); } - jobject GetFirstHandleScopeJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) { return handle_scope_->GetHandle(0).ToJObject(); } @@ -1713,7 +1713,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args), handle_scope_(handle_scope), cur_entry_(0) {} - uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) { FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args); @@ -1721,7 +1721,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor { cur_entry_ = 0U; } - void ResetRemainingScopeSlots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) { // Initialize padding entries. size_t expected_slots = handle_scope_->NumberOfReferences(); while (cur_entry_ < expected_slots) { @@ -1841,7 +1841,7 @@ void artQuickGenericJniEndJNINonRef(Thread* self, uint32_t cookie, jobject lock) * 2) An error, if the value is negative. */ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* called = *sp; DCHECK(called->IsNative()) << PrettyMethod(called, true); uint32_t shorty_len = 0; @@ -1914,7 +1914,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** * unlocking. */ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, uint64_t result_f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame(); uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp); ArtMethod* called = *sp; @@ -1971,7 +1971,7 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self, jvalue result, // for the method pointer. // // It is valid to use this, as at the usage points here (returns from C functions) we are assuming -// to hold the mutator lock (see SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) annotations). 
+// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations). template<InvokeType type, bool access_check> static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self, @@ -1989,7 +1989,7 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o ScopedObjectAccessUnchecked soa(self->GetJniEnv()); RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa); visitor.VisitArguments(); - method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method, + method = FindMethodFromCode<type, access_check>(method_idx, &this_object, caller_method, self); visitor.FixupReferences(); } @@ -2013,7 +2013,7 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_o // Explicit artInvokeCommon template function declarations to please analysis tool. #define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ TwoWordReturn artInvokeCommon<type, access_check>( \ uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) @@ -2032,31 +2032,31 @@ EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(kSuper, true); // See comments in runtime_support_asm.S extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp); } extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp); } @@ -2064,7 +2064,7 @@ extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck( extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); // The optimizing compiler currently does not inline methods that have an interface // invocation. 
We use the outer method directly to avoid fetching a stack map, which is @@ -2112,7 +2112,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t dex_method_idx, ScopedObjectAccessUnchecked soa(self->GetJniEnv()); RememberForGcArgumentVisitor visitor(sp, false, shorty, shorty_len, &soa); visitor.VisitArguments(); - method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, &caller_method, + method = FindMethodFromCode<kInterface, false>(dex_method_idx, &this_object, caller_method, self); visitor.FixupReferences(); } diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc index 0a5ebfa81b..f7a3cd53cd 100644 --- a/runtime/entrypoints_order_test.cc +++ b/runtime/entrypoints_order_test.cc @@ -116,7 +116,7 @@ class EntrypointsOrderTest : public CommonRuntimeTest { EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_functions, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, interpreter_entrypoints, - sizeof(void*) * 3); + sizeof(void*) * 6); // Skip across the entrypoints structures. @@ -133,7 +133,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest { sizeof(void*) * kLockLevelCount); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, nested_signal_state, flip_function, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*)); - EXPECT_OFFSET_DIFF(Thread, tlsPtr_.method_verifier, Thread, wait_mutex_, sizeof(void*), + EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*)); + EXPECT_OFFSET_DIFF(Thread, tlsPtr_.thread_local_mark_stack, Thread, wait_mutex_, sizeof(void*), thread_tlsptr_end); } @@ -310,8 +311,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest { sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni, sizeof(void*)); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierSlow, sizeof(void*)); - CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierJni) + CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pReadBarrierSlow) + sizeof(void*) == sizeof(QuickEntryPoints), QuickEntryPoints_all); } }; diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc index bc3ba216e9..de4b3f437a 100644 --- a/runtime/exception_test.cc +++ b/runtime/exception_test.cc @@ -93,7 +93,7 @@ class ExceptionTest : public CommonRuntimeTest { // NOTE: Don't align the code (it will not be executed) but check that the Thumb2 // adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer(). - CHECK_EQ(mapping_table_offset & 1u, 0u); + CHECK_ALIGNED(mapping_table_offset, 2); const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset]; method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*)); diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc index 762f061ce2..c3a962737f 100644 --- a/runtime/fault_handler.cc +++ b/runtime/fault_handler.cc @@ -320,7 +320,7 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che return false; } - ArtMethod* method_obj = 0; + ArtMethod* method_obj = nullptr; uintptr_t return_pc = 0; uintptr_t sp = 0; @@ -331,7 +331,9 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che // If we don't have a potential method, we're outta here. VLOG(signals) << "potential method: " << method_obj; // TODO: Check linear alloc and image. 
- if (method_obj == 0 || !IsAligned<kObjectAlignment>(method_obj)) { + DCHECK_ALIGNED(ArtMethod::Size(sizeof(void*)), sizeof(void*)) + << "ArtMethod is not pointer aligned"; + if (method_obj == nullptr || !IsAligned<sizeof(void*)>(method_obj)) { VLOG(signals) << "no method"; return false; } diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index ac716eaabc..45db50010c 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -22,7 +22,6 @@ #include <string> #include "atomic.h" -#include "base/bit_utils.h" #include "base/logging.h" #include "base/macros.h" #include "mem_map.h" @@ -74,12 +73,12 @@ class AtomicStack { // Beware: Mixing atomic pushes and atomic pops will cause ABA problem. // Returns false if we overflowed the stack. - bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) { return AtomicPushBackInternal(value, capacity_); } // Returns false if we overflowed the stack. - bool AtomicPushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) { return AtomicPushBackInternal(value, growth_limit_); } @@ -87,7 +86,7 @@ class AtomicStack { // slots. Returns false if we overflowed the stack. bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address, StackReference<T>** end_address) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { debug_is_sorted_ = false; } @@ -113,7 +112,7 @@ class AtomicStack { return true; } - void AssertAllZero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { for (size_t i = 0; i < capacity_; ++i) { DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i; @@ -121,7 +120,7 @@ class AtomicStack { } } - void PushBack(T* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { debug_is_sorted_ = false; } @@ -131,7 +130,7 @@ class AtomicStack { begin_[index].Assign(value); } - T* PopBack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed()); // Decrement the back index non atomically. back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1); @@ -156,6 +155,10 @@ class AtomicStack { return Size() == 0; } + bool IsFull() const { + return Size() == growth_limit_; + } + size_t Size() const { DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed()); return back_index_.LoadRelaxed() - front_index_.LoadRelaxed(); @@ -190,12 +193,12 @@ class AtomicStack { } } - bool ContainsSorted(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(debug_is_sorted_); return std::binary_search(Begin(), End(), value, ObjectComparator()); } - bool Contains(const T* value) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) { for (auto cur = Begin(), end = End(); cur != end; ++cur) { if (cur->AsMirrorPtr() == value) { return true; @@ -217,7 +220,7 @@ class AtomicStack { // Returns false if we overflowed the stack. 
bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsDebugBuild) { debug_is_sorted_ = false; } diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h index 34e6aa31f2..88a6c6c6e4 100644 --- a/runtime/gc/accounting/card_table.h +++ b/runtime/gc/accounting/card_table.h @@ -107,8 +107,8 @@ class CardTable { size_t Scan(SpaceBitmap<kObjectAlignment>* bitmap, uint8_t* scan_begin, uint8_t* scan_end, const Visitor& visitor, const uint8_t minimum_age = kCardDirty) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Assertion used to check that the given address is covered by the card table. void CheckAddrIsInCardTable(const uint8_t* addr) const; diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h index 1648aef51f..0b96979a30 100644 --- a/runtime/gc/accounting/heap_bitmap.h +++ b/runtime/gc/accounting/heap_bitmap.h @@ -35,34 +35,34 @@ namespace accounting { class HeapBitmap { public: - bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + bool Test(const mirror::Object* obj) SHARED_REQUIRES(Locks::heap_bitmap_lock_); + void Clear(const mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_); template<typename LargeObjectSetVisitor> bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE; + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE; template<typename LargeObjectSetVisitor> bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE; + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE; ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const; LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const; void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); template <typename Visitor> void Visit(const Visitor& visitor) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Find and replace a bitmap pointer; this is used for the bitmap swapping in the GC. void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); // Find and replace an object set pointer; this is used for the bitmap swapping in the GC.
void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); explicit HeapBitmap(Heap* heap) : heap_(heap) {} diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index cd3f910d95..5151819d96 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -21,16 +21,11 @@ #include "base/stl_util.h" #include "bitmap-inl.h" #include "card_table-inl.h" -#include "heap_bitmap.h" #include "gc/accounting/space_bitmap-inl.h" -#include "gc/collector/mark_sweep.h" -#include "gc/collector/mark_sweep-inl.h" #include "gc/heap.h" -#include "gc/space/space.h" #include "gc/space/image_space.h" +#include "gc/space/space.h" #include "mirror/object-inl.h" -#include "mirror/class-inl.h" -#include "mirror/object_array-inl.h" #include "space_bitmap-inl.h" #include "thread.h" @@ -59,8 +54,7 @@ class ModUnionAddToCardSetVisitor { class ModUnionAddToCardBitmapVisitor { public: - explicit ModUnionAddToCardBitmapVisitor(ModUnionTable::CardBitmap* bitmap, - CardTable* card_table) + ModUnionAddToCardBitmapVisitor(ModUnionTable::CardBitmap* bitmap, CardTable* card_table) : bitmap_(bitmap), card_table_(card_table) { } @@ -95,29 +89,46 @@ class ModUnionAddToCardVectorVisitor { class ModUnionUpdateObjectReferencesVisitor { public: - ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg, + ModUnionUpdateObjectReferencesVisitor(MarkObjectVisitor* visitor, space::ContinuousSpace* from_space, space::ContinuousSpace* immune_space, bool* contains_reference_to_other_space) - : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space), + : visitor_(visitor), from_space_(from_space), immune_space_(immune_space), contains_reference_to_other_space_(contains_reference_to_other_space) { } // Extra parameters are required since we use this same visitor signature for checking objects. void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { + MarkReference(obj->GetFieldObjectReferenceAddr(offset)); + } + + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + VisitRoot(root); + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + MarkReference(root); + } + + private: + template<bool kPoisonReferences> + void MarkReference(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) const + SHARED_REQUIRES(Locks::mutator_lock_) { // Only add the reference if it is non null and fits our criteria. 
- mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset); mirror::Object* ref = obj_ptr->AsMirrorPtr(); if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) { *contains_reference_to_other_space_ = true; - callback_(obj_ptr, arg_); + mirror::Object* new_object = visitor_->MarkObject(ref); + if (ref != new_object) { + obj_ptr->Assign(new_object); + } } } - private: - MarkHeapReferenceCallback* const callback_; - void* const arg_; + MarkObjectVisitor* const visitor_; // Space which we are scanning space::ContinuousSpace* const from_space_; space::ContinuousSpace* const immune_space_; @@ -129,25 +140,24 @@ class ModUnionScanImageRootVisitor { public: // Immune space is any other space which we don't care about references to. Currently this is // the image space in the case of the zygote mod union table. - ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg, + ModUnionScanImageRootVisitor(MarkObjectVisitor* visitor, space::ContinuousSpace* from_space, space::ContinuousSpace* immune_space, bool* contains_reference_to_other_space) - : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space), + : visitor_(visitor), from_space_(from_space), immune_space_(immune_space), contains_reference_to_other_space_(contains_reference_to_other_space) {} void operator()(Object* root) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(root != nullptr); - ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, immune_space_, + ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_, contains_reference_to_other_space_); - root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor()); + root->VisitReferences(ref_visitor, VoidFunctor()); } private: - MarkHeapReferenceCallback* const callback_; - void* const arg_; + MarkObjectVisitor* const visitor_; // Space which we are scanning space::ContinuousSpace* const from_space_; space::ContinuousSpace* const immune_space_; @@ -164,14 +174,18 @@ void ModUnionTableReferenceCache::ClearCards() { class AddToReferenceArrayVisitor { public: - explicit AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table, - std::vector<mirror::HeapReference<Object>*>* references) - : mod_union_table_(mod_union_table), references_(references) { - } + AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table, + MarkObjectVisitor* visitor, + std::vector<mirror::HeapReference<Object>*>* references, + bool* has_target_reference) + : mod_union_table_(mod_union_table), + visitor_(visitor), + references_(references), + has_target_reference_(has_target_reference) {} // Extra parameters are required since we use this same visitor signature for checking objects. - void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset); mirror::Object* ref = ref_ptr->AsMirrorPtr(); // Only add the reference if it is non null and fits our criteria. 
@@ -181,42 +195,69 @@ class AddToReferenceArrayVisitor { } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (mod_union_table_->ShouldAddReference(root->AsMirrorPtr())) { + *has_target_reference_ = true; + // TODO: Add MarkCompressedReference callback here. + root->Assign(visitor_->MarkObject(root->AsMirrorPtr())); + } + } + private: ModUnionTableReferenceCache* const mod_union_table_; + MarkObjectVisitor* const visitor_; std::vector<mirror::HeapReference<Object>*>* const references_; + bool* const has_target_reference_; }; class ModUnionReferenceVisitor { public: - explicit ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table, - std::vector<mirror::HeapReference<Object>*>* references) - : mod_union_table_(mod_union_table), - references_(references) { - } + ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table, + MarkObjectVisitor* visitor, + std::vector<mirror::HeapReference<Object>*>* references, + bool* has_target_reference) + : mod_union_table_(mod_union_table), + visitor_(visitor), + references_(references), + has_target_reference_(has_target_reference) {} void operator()(Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { // We don't have an early exit since we use the visitor pattern; an early // exit would significantly speed this up. - AddToReferenceArrayVisitor visitor(mod_union_table_, references_); - obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor()); + AddToReferenceArrayVisitor visitor(mod_union_table_, + visitor_, + references_, + has_target_reference_); + obj->VisitReferences(visitor, VoidFunctor()); } + private: ModUnionTableReferenceCache* const mod_union_table_; + MarkObjectVisitor* const visitor_; std::vector<mirror::HeapReference<Object>*>* const references_; + bool* const has_target_reference_; }; class CheckReferenceVisitor { public: - explicit CheckReferenceVisitor(ModUnionTableReferenceCache* mod_union_table, - const std::set<const Object*>& references) - : mod_union_table_(mod_union_table), - references_(references) { + CheckReferenceVisitor(ModUnionTableReferenceCache* mod_union_table, + const std::set<const Object*>& references) + : mod_union_table_(mod_union_table), + references_(references) { } // Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) && references_.find(ref) == references_.end()) { @@ -235,6 +276,18 @@ class CheckReferenceVisitor { } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (kIsDebugBuild && !root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr())); + } + private: ModUnionTableReferenceCache* const mod_union_table_; const std::set<const Object*>& references_; @@ -242,16 +295,16 @@ class CheckReferenceVisitor { class ModUnionCheckReferences { public: - explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table, - const std::set<const Object*>& references) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) + ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table, + const std::set<const Object*>& references) + REQUIRES(Locks::heap_bitmap_lock_) : mod_union_table_(mod_union_table), references_(references) { } void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS { Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current()); CheckReferenceVisitor visitor(mod_union_table_, references_); - obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor()); + obj->VisitReferences(visitor, VoidFunctor()); } private: @@ -305,42 +358,68 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) { } } -void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, - void* arg) { - CardTable* card_table = heap_->GetCardTable(); - +void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) { + CardTable* const card_table = heap_->GetCardTable(); std::vector<mirror::HeapReference<Object>*> cards_references; - ModUnionReferenceVisitor add_visitor(this, &cards_references); - - for (const auto& card : cleared_cards_) { + // If has_target_reference is true then there was a GcRoot compressed reference which wasn't + // added. In this case we need to keep the card dirty. + // We don't know if the GcRoot addresses will remain constant, for example, classloaders have a + // hash set of GcRoot which may be resized or modified. + bool has_target_reference; + ModUnionReferenceVisitor add_visitor(this, visitor, &cards_references, &has_target_reference); + CardSet new_cleared_cards; + for (uint8_t* card : cleared_cards_) { // Clear and re-compute alloc space references associated with this card. 
cards_references.clear(); + has_target_reference = false; uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card)); uintptr_t end = start + CardTable::kCardSize; - auto* space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false); + space::ContinuousSpace* space = + heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false); DCHECK(space != nullptr); ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); live_bitmap->VisitMarkedRange(start, end, add_visitor); - // Update the corresponding references for the card. auto found = references_.find(card); if (found == references_.end()) { - if (cards_references.empty()) { - // No reason to add empty array. - continue; + // Don't add card for an empty reference array. + if (!cards_references.empty()) { + references_.Put(card, cards_references); } - references_.Put(card, cards_references); } else { - found->second = cards_references; + if (cards_references.empty()) { + references_.erase(found); + } else { + found->second = cards_references; + } + } + if (has_target_reference) { + // Keep this card for next time since it contains a GcRoot which matches the + // ShouldAddReference criteria. This usually occurs for class loaders. + new_cleared_cards.insert(card); } } - cleared_cards_.clear(); + cleared_cards_ = std::move(new_cleared_cards); size_t count = 0; - for (const auto& ref : references_) { - for (mirror::HeapReference<Object>* obj_ptr : ref.second) { - callback(obj_ptr, arg); + for (auto it = references_.begin(); it != references_.end();) { + std::vector<mirror::HeapReference<Object>*>& references = it->second; + // Since there is no card mark for setting a reference to null, we check each reference. + // If all of the references of a card are null then we can remove that card. This is racy + // with the mutators, but handled by rescanning dirty cards. + bool all_null = true; + for (mirror::HeapReference<Object>* obj_ptr : references) { + if (obj_ptr->AsMirrorPtr() != nullptr) { + all_null = false; + visitor->MarkHeapReference(obj_ptr); + } + } + count += references.size(); + if (!all_null) { + ++it; + } else { + // All null references, erase the array from the set. 
+ it = references_.erase(it); } - count += ref.second.size(); } if (VLOG_IS_ON(heap)) { VLOG(gc) << "Marked " << count << " references in mod union table"; @@ -362,9 +441,9 @@ ModUnionTableCardCache::ModUnionTableCardCache(const std::string& name, Heap* he class CardBitVisitor { public: - CardBitVisitor(MarkHeapReferenceCallback* callback, void* arg, space::ContinuousSpace* space, + CardBitVisitor(MarkObjectVisitor* visitor, space::ContinuousSpace* space, space::ContinuousSpace* immune_space, ModUnionTable::CardBitmap* card_bitmap) - : callback_(callback), arg_(arg), space_(space), immune_space_(immune_space), + : visitor_(visitor), space_(space), immune_space_(immune_space), bitmap_(space->GetLiveBitmap()), card_bitmap_(card_bitmap) { DCHECK(immune_space_ != nullptr); } @@ -374,7 +453,7 @@ class CardBitVisitor { DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start))) << start << " " << *space_; bool reference_to_other_space = false; - ModUnionScanImageRootVisitor scan_visitor(callback_, arg_, space_, immune_space_, + ModUnionScanImageRootVisitor scan_visitor(visitor_, space_, immune_space_, &reference_to_other_space); bitmap_->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor); if (!reference_to_other_space) { @@ -384,8 +463,7 @@ } private: - MarkHeapReferenceCallback* const callback_; - void* const arg_; + MarkObjectVisitor* const visitor_; space::ContinuousSpace* const space_; space::ContinuousSpace* const immune_space_; ContinuousSpaceBitmap* const bitmap_; @@ -400,15 +478,14 @@ void ModUnionTableCardCache::ClearCards() { } // Mark all references to the alloc space(s). -void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, - void* arg) { +void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) { auto* image_space = heap_->GetImageSpace(); // If we don't have an image space, just pass in space_ as the immune space. Pass in the same // space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor. - CardBitVisitor visitor(callback, arg, space_, image_space != nullptr ? image_space : space_, + CardBitVisitor bit_visitor(visitor, space_, image_space != nullptr ? image_space : space_, card_bitmap_.get()); card_bitmap_->VisitSetBits( - 0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, visitor); + 0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor); } void ModUnionTableCardCache::Dump(std::ostream& os) { diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h index 2e232ca6a8..5888193e7b 100644 --- a/runtime/gc/accounting/mod_union_table.h +++ b/runtime/gc/accounting/mod_union_table.h @@ -76,13 +76,13 @@ class ModUnionTable { // Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards // before a call to update, for example, back-to-back sticky GCs. Also mark references to other // spaces which are stored in the mod-union table. - virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) = 0; + virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0; // Verification: sanity checks that we don't have clean cards which conflict with our cached data // for said cards. Exclusive lock is required since verify sometimes uses // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the // bitmap or not.
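The rewritten ModUnionTableReferenceCache::UpdateAndMarkReferences above both re-dirties cards that held uncacheable GcRoots and drops cached reference arrays that became all null. The container hygiene it relies on, erase() returning the successor iterator, is easy to get wrong, so here is a minimal standalone sketch of the same prune shape (a generic std::map stands in for the real references_ type):

    #include <iterator>
    #include <map>
    #include <vector>

    // Drop map entries whose cached reference vectors are entirely null,
    // advancing safely whether or not the current entry is erased.
    void PruneAllNullEntries(std::map<const void*, std::vector<void*>>* refs) {
      for (auto it = refs->begin(); it != refs->end();) {
        bool all_null = true;
        for (void* p : it->second) {
          if (p != nullptr) {
            all_null = false;
            break;
          }
        }
        // std::map::erase returns the iterator following the erased element.
        it = all_null ? refs->erase(it) : std::next(it);
      }
    }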
- virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0; + virtual void Verify() REQUIRES(Locks::heap_bitmap_lock_) = 0; // Returns true if a card is marked inside the mod union table. Used for testing. The address // doesn't need to be aligned. @@ -117,22 +117,22 @@ class ModUnionTableReferenceCache : public ModUnionTable { void ClearCards() OVERRIDE; // Update table based on cleared cards and mark all references to the other spaces. - void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and // VisitMarkedRange can't know if the callback will modify the bitmap or not. void Verify() OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Function that tells whether or not to add a reference to the table. virtual bool ShouldAddReference(const mirror::Object* ref) const = 0; virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE; - virtual void Dump(std::ostream& os) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void Dump(std::ostream& os) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); virtual void SetCards() OVERRIDE; @@ -157,9 +157,9 @@ class ModUnionTableCardCache : public ModUnionTable { virtual void ClearCards() OVERRIDE; // Mark all references to the alloc space(s). - virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Nothing to verify. virtual void Verify() OVERRIDE {} diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc index 363b76afd5..edab1b0a60 100644 --- a/runtime/gc/accounting/mod_union_table_test.cc +++ b/runtime/gc/accounting/mod_union_table_test.cc @@ -46,7 +46,7 @@ class ModUnionTableTest : public CommonRuntimeTest { } mirror::ObjectArray<mirror::Object>* AllocObjectArray( Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto* klass = GetObjectArrayClass(self, space); const size_t size = mirror::ComputeArraySize(component_count, 2); size_t bytes_allocated = 0, bytes_tl_bulk_allocated; @@ -67,7 +67,7 @@ class ModUnionTableTest : public CommonRuntimeTest { private: mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (java_lang_object_array_ == nullptr) { java_lang_object_array_ = Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass); @@ -93,12 +93,24 @@ class ModUnionTableTest : public CommonRuntimeTest { }; // Collect visited objects into container. 
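Most of the mechanical churn in these headers is the annotation rename: EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, and LOCKS_EXCLUDED(x) becomes REQUIRES(!x) via Clang's negative capabilities. A hedged sketch of the raw attributes such macros typically wrap follows; the real macro definitions in base/macros.h are not shown in this diff, so treat these spellings as an approximation:

    // Compile with: clang++ -std=c++11 -Wthread-safety -Wthread-safety-negative
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE();
      void Unlock() RELEASE();
    };

    Mutex heap_lock;

    void MutateHeap() REQUIRES(heap_lock);       // Caller holds it exclusively.
    void ReadHeap() SHARED_REQUIRES(heap_lock);  // Caller holds it at least shared.
    void SlowPath() REQUIRES(!heap_lock);        // Caller must NOT hold it.

The REQUIRES(!lock) form is stronger than the old LOCKS_EXCLUDED: the analysis can prove the caller does not hold the lock, rather than merely warn on obvious self-deadlock.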
-static void CollectVisitedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(ref != nullptr); - DCHECK(arg != nullptr); - reinterpret_cast<std::set<mirror::Object*>*>(arg)->insert(ref->AsMirrorPtr()); -} +class CollectVisitedVisitor : public MarkObjectVisitor { + public: + explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {} + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(ref != nullptr); + MarkObject(ref->AsMirrorPtr()); + } + virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(obj != nullptr); + out_->insert(obj); + return obj; + } + + private: + std::set<mirror::Object*>* const out_; +}; // A mod union table that only holds references to a specified target space. class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache { @@ -199,7 +211,8 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) { obj2->Set(3, other_space_ref2); table->ClearCards(); std::set<mirror::Object*> visited_before; - table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_before); + CollectVisitedVisitor collector_before(&visited_before); + table->UpdateAndMarkReferences(&collector_before); // Check that we visited all the references in other spaces only. ASSERT_GE(visited_before.size(), 2u); ASSERT_TRUE(visited_before.find(other_space_ref1) != visited_before.end()); @@ -230,7 +243,8 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) { } // Visit again and make sure the cards got cleared back to their sane state. std::set<mirror::Object*> visited_after; - table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_after); + CollectVisitedVisitor collector_after(&visited_after); + table->UpdateAndMarkReferences(&collector_after); // Check that we visited a superset after. 
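The test migration above shows the payoff of the visitor interface: state that used to travel through a void* arg now lives in a small visitor object. As a usage illustration only (the shape of MarkObjectVisitor is reduced as in the earlier note, and the table type is left abstract), a collecting test double looks like this:

    #include <set>

    struct Object {};

    class MarkObjectVisitor {
     public:
      virtual ~MarkObjectVisitor() {}
      virtual Object* MarkObject(Object* obj) = 0;
    };

    // Test double: records every object handed to it and moves nothing.
    class CollectingVisitor : public MarkObjectVisitor {
     public:
      explicit CollectingVisitor(std::set<Object*>* out) : out_(out) {}
      Object* MarkObject(Object* obj) override {
        out_->insert(obj);
        return obj;  // Identity: objects do not move in this test.
      }

     private:
      std::set<Object*>* const out_;
    };

    // Usage sketch, mirroring the test above:
    //   std::set<Object*> visited;
    //   CollectingVisitor collector(&visited);
    //   table->UpdateAndMarkReferences(&collector);
    //   // ...assert on the contents of 'visited'.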
for (auto* obj : visited_before) { ASSERT_TRUE(visited_after.find(obj) != visited_after.end()) << obj; diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h index 436df923c5..86266e2500 100644 --- a/runtime/gc/accounting/read_barrier_table.h +++ b/runtime/gc/accounting/read_barrier_table.h @@ -51,8 +51,8 @@ class ReadBarrierTable { void Clear(uint8_t* start_addr, uint8_t* end_addr) { DCHECK(IsValidHeapAddr(start_addr)) << start_addr; DCHECK(IsValidHeapAddr(end_addr)) << end_addr; - DCHECK(IsAligned<kRegionSize>(start_addr)); - DCHECK(IsAligned<kRegionSize>(end_addr)); + DCHECK_ALIGNED(start_addr, kRegionSize); + DCHECK_ALIGNED(end_addr, kRegionSize); uint8_t* entry_start = EntryFromAddr(start_addr); uint8_t* entry_end = EntryFromAddr(end_addr); memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start); diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc index eeb385e957..277d319035 100644 --- a/runtime/gc/accounting/remembered_set.cc +++ b/runtime/gc/accounting/remembered_set.cc @@ -61,72 +61,80 @@ void RememberedSet::ClearCards() { class RememberedSetReferenceVisitor { public: - RememberedSetReferenceVisitor(MarkHeapReferenceCallback* callback, - DelayReferenceReferentCallback* ref_callback, - space::ContinuousSpace* target_space, - bool* const contains_reference_to_target_space, void* arg) - : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg), + RememberedSetReferenceVisitor(space::ContinuousSpace* target_space, + bool* const contains_reference_to_target_space, + collector::GarbageCollector* collector) + : collector_(collector), target_space_(target_space), contains_reference_to_target_space_(contains_reference_to_target_space) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset); if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) { *contains_reference_to_target_space_ = true; - callback_(ref_ptr, arg_); + collector_->MarkHeapReference(ref_ptr); DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr())); } } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { if (target_space_->HasAddress(ref->GetReferent())) { *contains_reference_to_target_space_ = true; - ref_callback_(klass, ref, arg_); + collector_->DelayReferenceReferent(klass, ref); + } + } + + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (target_space_->HasAddress(root->AsMirrorPtr())) { + *contains_reference_to_target_space_ = true; + root->Assign(collector_->MarkObject(root->AsMirrorPtr())); + DCHECK(!target_space_->HasAddress(root->AsMirrorPtr())); } } private: - MarkHeapReferenceCallback* const callback_; - DelayReferenceReferentCallback* const ref_callback_; + collector::GarbageCollector* const collector_; 
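A side note on the IsAligned<kN>(p) to DCHECK_ALIGNED(p, kN) swap seen in this and several later files: the macro form can report the offending value on failure rather than just a boolean. A tiny sketch of what such a check reduces to for power-of-two alignments (simplified; the real macros live in ART's logging headers and print richer context):

    #include <cassert>
    #include <cstdint>

    // True iff addr is aligned to 'alignment', which must be a power of two.
    inline bool IsAlignedTo(const void* addr, uintptr_t alignment) {
      return (reinterpret_cast<uintptr_t>(addr) & (alignment - 1)) == 0;
    }

    // Debug-only stand-in for DCHECK_ALIGNED(ptr, alignment).
    #define SKETCH_DCHECK_ALIGNED(ptr, alignment) \
      assert(IsAlignedTo((ptr), (alignment)))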
space::ContinuousSpace* const target_space_; - void* const arg_; bool* const contains_reference_to_target_space_; }; class RememberedSetObjectVisitor { public: - RememberedSetObjectVisitor(MarkHeapReferenceCallback* callback, - DelayReferenceReferentCallback* ref_callback, - space::ContinuousSpace* target_space, - bool* const contains_reference_to_target_space, void* arg) - : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg), + RememberedSetObjectVisitor(space::ContinuousSpace* target_space, + bool* const contains_reference_to_target_space, + collector::GarbageCollector* collector) + : collector_(collector), target_space_(target_space), contains_reference_to_target_space_(contains_reference_to_target_space) {} - void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - RememberedSetReferenceVisitor visitor(callback_, ref_callback_, target_space_, - contains_reference_to_target_space_, arg_); - obj->VisitReferences<kMovingClasses>(visitor, visitor); + void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { + RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_, + collector_); + obj->VisitReferences(visitor, visitor); } private: - MarkHeapReferenceCallback* const callback_; - DelayReferenceReferentCallback* const ref_callback_; + collector::GarbageCollector* const collector_; space::ContinuousSpace* const target_space_; - void* const arg_; bool* const contains_reference_to_target_space_; }; -void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, - DelayReferenceReferentCallback* ref_callback, - space::ContinuousSpace* target_space, void* arg) { +void RememberedSet::UpdateAndMarkReferences(space::ContinuousSpace* target_space, + collector::GarbageCollector* collector) { CardTable* card_table = heap_->GetCardTable(); bool contains_reference_to_target_space = false; - RememberedSetObjectVisitor obj_visitor(callback, ref_callback, target_space, - &contains_reference_to_target_space, arg); + RememberedSetObjectVisitor obj_visitor(target_space, &contains_reference_to_target_space, + collector); ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap(); CardSet remove_card_set; for (uint8_t* const card_addr : dirty_cards_) { diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h index c51e26db07..3a0dcf798d 100644 --- a/runtime/gc/accounting/remembered_set.h +++ b/runtime/gc/accounting/remembered_set.h @@ -29,6 +29,7 @@ namespace art { namespace gc { namespace collector { + class GarbageCollector; class MarkSweep; } // namespace collector namespace space { @@ -53,11 +54,10 @@ class RememberedSet { void ClearCards(); // Mark through all references to the target space. 
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, - DelayReferenceReferentCallback* ref_callback, - space::ContinuousSpace* target_space, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UpdateAndMarkReferences(space::ContinuousSpace* target_space, + collector::GarbageCollector* collector) + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void Dump(std::ostream& os); diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 6546eb4245..b43f77f6b7 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -79,7 +79,7 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create( template<size_t kAlignment> void SpaceBitmap<kAlignment>::SetHeapLimit(uintptr_t new_end) { - DCHECK(IsAligned<kBitsPerIntPtrT * kAlignment>(new_end)); + DCHECK_ALIGNED(new_end, kBitsPerIntPtrT * kAlignment); size_t new_size = OffsetToIndex(new_end - heap_begin_) * sizeof(intptr_t); if (new_size < bitmap_size_) { bitmap_size_ = new_size; @@ -188,18 +188,16 @@ template<size_t kAlignment> void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback, mirror::Object* obj, mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Visit fields of parent classes first. mirror::Class* super = klass->GetSuperClass(); if (super != nullptr) { WalkInstanceFields(visited, callback, obj, super, arg); } // Walk instance fields - auto* fields = klass->GetIFields(); - for (size_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) { - ArtField* field = &fields[i]; - if (!field->IsPrimitiveType()) { - mirror::Object* value = field->GetObj(obj); + for (ArtField& field : klass->GetIFields()) { + if (!field.IsPrimitiveType()) { + mirror::Object* value = field.GetObj(obj); if (value != nullptr) { WalkFieldsInOrder(visited, callback, value, arg); } @@ -222,11 +220,9 @@ void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited WalkInstanceFields(visited, callback, obj, klass, arg); // Walk static fields of a Class if (obj->IsClass()) { - auto* sfields = klass->GetSFields(); - for (size_t i = 0, count = klass->NumStaticFields(); i < count; ++i) { - ArtField* field = &sfields[i]; - if (!field->IsPrimitiveType()) { - mirror::Object* value = field->GetObj(nullptr); + for (ArtField& field : klass->GetSFields()) { + if (!field.IsPrimitiveType()) { + mirror::Object* value = field.GetObj(nullptr); if (value != nullptr) { WalkFieldsInOrder(visited, callback, value, arg); } diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h index 35faff3774..b8ff471c69 100644 --- a/runtime/gc/accounting/space_bitmap.h +++ b/runtime/gc/accounting/space_bitmap.h @@ -30,6 +30,7 @@ namespace art { namespace mirror { + class Class; class Object; } // namespace mirror class MemMap; @@ -122,7 +123,7 @@ class SpaceBitmap { // Visit the live objects in the range [visit_begin, visit_end). // TODO: Use lock annotations when clang is fixed. 
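Worth highlighting in the space_bitmap.cc hunk above: GetIFields()/GetSFields() now return iterable ranges of ArtField rather than a raw pointer the caller indexes up to NumInstanceFields()/NumStaticFields(), so the field walkers can use range-based for. A sketch of the minimal range shape that makes such a loop work (a hypothetical Span type, not ART's actual one):

    #include <cstddef>

    // Minimal contiguous-range view: begin()/end() are all a range-based
    // for loop needs.
    template <typename T>
    class Span {
     public:
      Span(T* data, size_t size) : data_(data), size_(size) {}
      T* begin() const { return data_; }
      T* end() const { return data_ + size_; }

     private:
      T* const data_;
      const size_t size_;
    };

    struct ArtFieldLike {
      bool primitive;
      bool IsPrimitiveType() const { return primitive; }
    };

    // The loop style the diff adopts: no index, no manual field count.
    inline int CountReferenceFields(Span<ArtFieldLike> fields) {
      int count = 0;
      for (ArtFieldLike& field : fields) {
        if (!field.IsPrimitiveType()) {
          ++count;
        }
      }
      return count;
    }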
- // EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // REQUIRES(Locks::heap_bitmap_lock_) SHARED_REQUIRES(Locks::mutator_lock_); template <typename Visitor> void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const NO_THREAD_SAFETY_ANALYSIS; @@ -130,12 +131,12 @@ class SpaceBitmap { // Visits set bits in address order. The callback is not permitted to change the bitmap bits or // max during the traversal. void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Visits set bits with an in order traversal. The callback is not permitted to change the bitmap // bits or max during the traversal. void InOrderWalk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Walk through the bitmaps in increasing address order, and find the object pointers that // correspond to garbage objects. Call <callback> zero or more times with lists of these object @@ -203,12 +204,12 @@ class SpaceBitmap { // For an unvisited object, visit it then all its children found via fields. static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj, - void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void* arg) SHARED_REQUIRES(Locks::mutator_lock_); // Walk instance fields of the given Class. Separate function to allow recursion on the super // class. static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback, mirror::Object* obj, mirror::Class* klass, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Backing storage for bitmap. std::unique_ptr<MemMap> mem_map_; diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc index 11921f44d3..16c9354137 100644 --- a/runtime/gc/allocation_record.cc +++ b/runtime/gc/allocation_record.cc @@ -20,7 +20,7 @@ #include "base/stl_util.h" #include "stack.h" -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/properties.h" #endif @@ -32,8 +32,17 @@ int32_t AllocRecordStackTraceElement::ComputeLineNumber() const { return method_->GetLineNumFromDexPC(dex_pc_); } +const char* AllocRecord::GetClassDescriptor(std::string* storage) const { + // klass_ could contain null only if we implement class unloading. + if (UNLIKELY(klass_.IsNull())) { + return "null"; + } else { + return klass_.Read()->GetDescriptor(storage); + } +} + void AllocRecordObjectMap::SetProperties() { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Check whether there's a system property overriding the max number of records. const char* propertyName = "dalvik.vm.allocTrackerMax"; char allocMaxString[PROPERTY_VALUE_MAX]; @@ -97,25 +106,28 @@ void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) { // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the // klass_ fields as strong roots. 
for (auto it = entries_.rbegin(), end = entries_.rend(); count > 0 && it != end; count--, ++it) { - buffered_visitor.VisitRoot(it->second->GetClassGcRoot()); + buffered_visitor.VisitRootIfNonNull(it->second->GetClassGcRoot()); } } -static inline void SweepClassObject(AllocRecord* record, IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { +static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { GcRoot<mirror::Class>& klass = record->GetClassGcRoot(); // This does not need a read barrier because this is called by GC. mirror::Object* old_object = klass.Read<kWithoutReadBarrier>(); - mirror::Object* new_object = UNLIKELY(old_object == nullptr) ? - nullptr : callback(old_object, arg); - if (UNLIKELY(old_object != new_object)) { - mirror::Class* new_klass = UNLIKELY(new_object == nullptr) ? nullptr : new_object->AsClass(); - klass = GcRoot<mirror::Class>(new_klass); + if (old_object != nullptr) { + // The class object can become null if we implement class unloading. + // In that case we might still want to keep the class name string (not implemented). + mirror::Object* new_object = visitor->IsMarked(old_object); + DCHECK(new_object != nullptr); + if (UNLIKELY(old_object != new_object)) { + klass = GcRoot<mirror::Class>(new_object->AsClass()); + } } } -void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, void* arg) { +void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) { VLOG(heap) << "Start SweepAllocationRecords()"; size_t count_deleted = 0, count_moved = 0, count = 0; // Only the first (size - recent_record_max_) number of records can be deleted. @@ -130,11 +142,11 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, vo // This does not need a read barrier because this is called by GC. mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>(); AllocRecord* record = it->second; - mirror::Object* new_object = old_object == nullptr ? nullptr : callback(old_object, arg); + mirror::Object* new_object = old_object == nullptr ? 
nullptr : visitor->IsMarked(old_object); if (new_object == nullptr) { if (count > delete_bound) { it->first = GcRoot<mirror::Object>(nullptr); - SweepClassObject(record, callback, arg); + SweepClassObject(record, visitor); ++it; } else { delete record; @@ -146,7 +158,7 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, vo it->first = GcRoot<mirror::Object>(new_object); ++count_moved; } - SweepClassObject(record, callback, arg); + SweepClassObject(record, visitor); ++it; } } @@ -163,14 +175,9 @@ void AllocRecordObjectMap::DisallowNewAllocationRecords() { allow_new_record_ = false; } -void AllocRecordObjectMap::EnsureNewAllocationRecordsDisallowed() { - CHECK(!allow_new_record_); -} - - struct AllocRecordStackVisitor : public StackVisitor { AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), trace(trace_in), depth(0), @@ -259,7 +266,8 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, m } // Wait for GC's sweeping to complete and allow new records - while (UNLIKELY(!records->allow_new_record_)) { + while (UNLIKELY((!kUseReadBarrier && !records->allow_new_record_) || + (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { records->new_record_condition_.WaitHoldingLocks(self); } diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h index f5671537b7..0a4f53226d 100644 --- a/runtime/gc/allocation_record.h +++ b/runtime/gc/allocation_record.h @@ -39,7 +39,7 @@ class AllocRecordStackTraceElement { public: AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {} - int32_t ComputeLineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetMethod() const { return method_; @@ -184,11 +184,14 @@ class AllocRecord { return trace_->GetTid(); } - mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) { return klass_.Read(); } - GcRoot<mirror::Class>& GetClassGcRoot() { + const char* GetClassDescriptor(std::string* storage) const + SHARED_REQUIRES(Locks::mutator_lock_); + + GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) { return klass_; } @@ -218,12 +221,12 @@ class AllocRecordObjectMap { // in order to make sure the AllocRecordObjectMap object is not null. 
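The SweepAllocationRecords/SweepClassObject rewrite above is the standard weak-reference sweep: ask the collector whether each weakly-held object is still marked, clear the slot if the referent died, and rewrite it if the referent moved. A condensed sketch of that decision tree, with the IsMarkedVisitor shape taken from the hunks and the slot type simplified:

    struct Object {};

    // Shape used by the diff: IsMarked returns nullptr if the object is
    // dead, otherwise its (possibly moved) address.
    class IsMarkedVisitor {
     public:
      virtual ~IsMarkedVisitor() {}
      virtual Object* IsMarked(Object* obj) = 0;
    };

    // Sweep one weak slot: clear it if dead, fix it up if moved.
    inline void SweepWeakSlot(Object** slot, IsMarkedVisitor* visitor) {
      Object* old_obj = *slot;
      if (old_obj == nullptr) {
        return;  // Already cleared; nothing to do.
      }
      Object* new_obj = visitor->IsMarked(old_obj);
      if (new_obj == nullptr) {
        *slot = nullptr;  // Referent is garbage: break the weak edge.
      } else if (new_obj != old_obj) {
        *slot = new_obj;  // Referent moved: update the weak edge.
      }
    }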
static void RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass, size_t byte_count) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::alloc_tracker_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_); - AllocRecordObjectMap() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) + AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_) : alloc_record_max_(kDefaultNumAllocRecords), recent_record_max_(kDefaultNumRecentRecords), max_stack_depth_(kDefaultAllocStackDepth), @@ -235,8 +238,8 @@ class AllocRecordObjectMap { ~AllocRecordObjectMap(); void Put(mirror::Object* obj, AllocRecord* record) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { if (entries_.size() == alloc_record_max_) { delete entries_.front().second; entries_.pop_front(); @@ -244,56 +247,59 @@ class AllocRecordObjectMap { entries_.emplace_back(GcRoot<mirror::Object>(obj), record); } - size_t Size() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) { return entries_.size(); } - size_t GetRecentAllocationSize() const SHARED_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) { CHECK_LE(recent_record_max_, alloc_record_max_); size_t sz = entries_.size(); return std::min(recent_record_max_, sz); } void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); - - void SweepAllocationRecords(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); - + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); + + void SweepAllocationRecords(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); + + // Allocation tracking could be enabled by user in between DisallowNewAllocationRecords() and + // AllowNewAllocationRecords(), in which case new allocation records can be added although they + // should be disallowed. However, this is GC-safe because new objects are not processed in this GC + // cycle. The only downside of not handling this case is that such new allocation records can be + // swept from the list. But missing the first few records is acceptable for using the button to + // enable allocation tracking. void DisallowNewAllocationRecords() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); void AllowNewAllocationRecords() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); - void EnsureNewAllocationRecordsDisallowed() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_); // TODO: Is there a better way to hide the entries_'s type? 
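The RecordAllocation change earlier in this file, together with the Disallow/AllowNewAllocationRecords comment above, gates new weak records on GC state: under read barriers the gate is the per-thread weak-ref-access flag, otherwise it is the map-wide allow_new_record_ flag. A standalone sketch of the non-read-barrier half of that condition-variable gate, using std::mutex and std::condition_variable in place of ART's lock types (an assumption for illustration):

    #include <condition_variable>
    #include <mutex>

    // Gate modelling allow_new_record_: Disallow() before sweeping weak
    // records, Allow() plus notify when the sweep is done. (Under read
    // barriers the real code consults a per-thread weak-ref-access flag
    // instead.)
    class RecordGate {
     public:
      void WaitUntilAllowed() {
        std::unique_lock<std::mutex> lock(mu_);
        cond_.wait(lock, [this] { return allow_new_record_; });
      }

      void Disallow() {
        std::lock_guard<std::mutex> lock(mu_);
        allow_new_record_ = false;
      }

      void Allow() {
        {
          std::lock_guard<std::mutex> lock(mu_);
          allow_new_record_ = true;
        }
        cond_.notify_all();
      }

     private:
      std::mutex mu_;
      std::condition_variable cond_;
      bool allow_new_record_ = true;
    };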
EntryList::iterator Begin() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.begin(); } EntryList::iterator End() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.end(); } EntryList::reverse_iterator RBegin() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.rbegin(); } EntryList::reverse_iterator REnd() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::alloc_tracker_lock_) { return entries_.rend(); } @@ -312,7 +318,7 @@ class AllocRecordObjectMap { // see the comment in typedef of EntryList EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_); - void SetProperties() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + void SetProperties() REQUIRES(Locks::alloc_tracker_lock_); }; } // namespace gc diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h index 0e91a4372c..0558921a56 100644 --- a/runtime/gc/allocator/dlmalloc.h +++ b/runtime/gc/allocator/dlmalloc.h @@ -35,7 +35,7 @@ #include "../../bionic/libc/upstream-dlmalloc/malloc.h" #pragma GCC diagnostic pop -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Define dlmalloc routines from bionic that cannot be included directly because of redefining // symbols from the include above. extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg); diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h index bba92a1f40..25fdd7cbc9 100644 --- a/runtime/gc/allocator/rosalloc-inl.h +++ b/runtime/gc/allocator/rosalloc-inl.h @@ -24,7 +24,7 @@ namespace gc { namespace allocator { inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() { - return kCheckZeroMemory && !running_on_valgrind_; + return kCheckZeroMemory && !is_running_on_memory_tool_; } template<bool kThreadSafe> diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc index 49c7fda2e1..470bc1cb22 100644 --- a/runtime/gc/allocator/rosalloc.cc +++ b/runtime/gc/allocator/rosalloc.cc @@ -16,8 +16,9 @@ #include "rosalloc.h" +#include "base/memory_tool.h" #include "base/mutex-inl.h" -#include "gc/space/valgrind_settings.h" +#include "gc/space/memory_tool_settings.h" #include "mem_map.h" #include "mirror/class-inl.h" #include "mirror/object.h" @@ -50,7 +51,7 @@ RosAlloc::Run* RosAlloc::dedicated_full_run_ = reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_); RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity, - PageReleaseMode page_release_mode, bool running_on_valgrind, + PageReleaseMode page_release_mode, bool running_on_memory_tool, size_t page_release_size_threshold) : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity), capacity_(capacity), max_capacity_(max_capacity), @@ -58,11 +59,11 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity, bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock), page_release_mode_(page_release_mode), page_release_size_threshold_(page_release_size_threshold), - running_on_valgrind_(running_on_valgrind) { + 
is_running_on_memory_tool_(running_on_memory_tool) { DCHECK_EQ(RoundUp(capacity, kPageSize), capacity); DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity); CHECK_LE(capacity, max_capacity); - CHECK(IsAligned<kPageSize>(page_release_size_threshold_)); + CHECK_ALIGNED(page_release_size_threshold_, kPageSize); if (!initialized_) { Initialize(); } @@ -110,6 +111,9 @@ RosAlloc::~RosAlloc() { for (size_t i = 0; i < kNumOfSizeBrackets; i++) { delete size_bracket_locks_[i]; } + if (is_running_on_memory_tool_) { + MEMORY_TOOL_MAKE_DEFINED(base_, capacity_); + } } void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) { @@ -345,7 +349,7 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) { fpr->magic_num_ = kMagicNumFree; } fpr->SetByteSize(this, byte_size); - DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this))); + DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize); DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end()); if (!free_page_runs_.empty()) { @@ -1166,7 +1170,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { // First mark slots to free in the bulk free bit map without locking the // size bracket locks. On host, unordered_set is faster than vector + flag. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ std::vector<Run*> runs; #else std::unordered_set<Run*, hash_run, eq_run> runs; @@ -1233,7 +1237,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { DCHECK_EQ(run->magic_num_, kMagicNum); // Set the bit in the bulk free bit map. freed_bytes += run->MarkBulkFreeBitMap(ptr); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ if (!run->to_be_bulk_freed_) { run->to_be_bulk_freed_ = true; runs.push_back(run); @@ -1248,7 +1252,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { // union the bulk free bit map into the thread-local free bit map // (for thread-local runs.) for (Run* run : runs) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ DCHECK(run->to_be_bulk_freed_); run->to_be_bulk_freed_ = false; #endif @@ -1563,7 +1567,7 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize); DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end()); size_t fpr_size = fpr->ByteSize(this); - DCHECK(IsAligned<kPageSize>(fpr_size)); + DCHECK_ALIGNED(fpr_size, kPageSize); void* start = fpr; if (kIsDebugBuild) { // In the debug build, the first page of a free page run @@ -1897,8 +1901,8 @@ void RosAlloc::Verify() { MutexLock lock_mu(self, lock_); size_t pm_end = page_map_size_; size_t i = 0; - size_t valgrind_modifier = running_on_valgrind_ ? - 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes : // Redzones before and after. + size_t memory_tool_modifier = is_running_on_memory_tool_ ? + 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes : // Redzones before and after. 
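The Valgrind-to-memory-tool generalization running through rosalloc.cc also changes size accounting: when a tool is active, each allocation is padded with a red zone on both sides, so a verifier comparing object sizes against slot sizes must add twice the red zone. A small sketch of that bookkeeping (the constant's value here is an assumption; the real one lives in gc/space/memory_tool_settings.h):

    #include <cstddef>

    // Assumed value for illustration only.
    static const size_t kDefaultMemoryToolRedZoneBytes = 8;

    // The usable payload starts after the leading red zone...
    inline void* PayloadFromSlot(void* slot, bool running_on_memory_tool) {
      char* p = static_cast<char*>(slot);
      return running_on_memory_tool ? p + kDefaultMemoryToolRedZoneBytes : p;
    }

    // ...and slot-size checks must account for red zones on both sides,
    // mirroring the memory_tool_modifier computed in the Verify() hunks.
    inline size_t ExpectedSlotBytes(size_t obj_size, bool running_on_memory_tool) {
      const size_t modifier =
          running_on_memory_tool ? 2 * kDefaultMemoryToolRedZoneBytes : 0;
      return obj_size + modifier;
    }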
0; while (i < pm_end) { uint8_t pm = page_map_[i]; @@ -1912,7 +1916,7 @@ void RosAlloc::Verify() { CHECK(free_page_runs_.find(fpr) != free_page_runs_.end()) << "An empty page must belong to the free page run set"; size_t fpr_size = fpr->ByteSize(this); - CHECK(IsAligned<kPageSize>(fpr_size)) + CHECK_ALIGNED(fpr_size, kPageSize) << "A free page run size isn't page-aligned : " << fpr_size; size_t num_pages = fpr_size / kPageSize; CHECK_GT(num_pages, static_cast<uintptr_t>(0)) @@ -1938,15 +1942,15 @@ void RosAlloc::Verify() { idx++; } uint8_t* start = base_ + i * kPageSize; - if (running_on_valgrind_) { - start += ::art::gc::space::kDefaultValgrindRedZoneBytes; + if (is_running_on_memory_tool_) { + start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes; } mirror::Object* obj = reinterpret_cast<mirror::Object*>(start); size_t obj_size = obj->SizeOf(); - CHECK_GT(obj_size + valgrind_modifier, kLargeSizeThreshold) + CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold) << "A rosalloc large object size must be > " << kLargeSizeThreshold; - CHECK_EQ(num_pages, RoundUp(obj_size + valgrind_modifier, kPageSize) / kPageSize) - << "A rosalloc large object size " << obj_size + valgrind_modifier + CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize) + << "A rosalloc large object size " << obj_size + memory_tool_modifier << " does not match the page map table " << (num_pages * kPageSize) << std::endl << DumpPageMap(); i += num_pages; @@ -2011,11 +2015,11 @@ void RosAlloc::Verify() { } // Call Verify() here for the lock order. for (auto& run : runs) { - run->Verify(self, this, running_on_valgrind_); + run->Verify(self, this, is_running_on_memory_tool_); } } -void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) { +void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) { DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump(); const size_t idx = size_bracket_idx_; CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump(); @@ -2098,8 +2102,8 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_val } // Check each slot. size_t slots = 0; - size_t valgrind_modifier = running_on_valgrind ? - 2 * ::art::gc::space::kDefaultValgrindRedZoneBytes : + size_t memory_tool_modifier = running_on_memory_tool ? 
+ 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes : 0U; for (size_t v = 0; v < num_vec; v++, slots += 32) { DCHECK_GE(num_slots, slots) << "Out of bounds"; @@ -2113,16 +2117,16 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_val bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0; if (is_allocated && !is_thread_local_freed) { uint8_t* slot_addr = slot_base + (slots + i) * bracket_size; - if (running_on_valgrind) { - slot_addr += ::art::gc::space::kDefaultValgrindRedZoneBytes; + if (running_on_memory_tool) { + slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes; } mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr); size_t obj_size = obj->SizeOf(); - CHECK_LE(obj_size + valgrind_modifier, kLargeSizeThreshold) + CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold) << "A run slot contains a large object " << Dump(); - CHECK_EQ(SizeToIndex(obj_size + valgrind_modifier), idx) + CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx) << PrettyTypeOf(obj) << " " - << "obj_size=" << obj_size << "(" << obj_size + valgrind_modifier << "), idx=" << idx + << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx << " A run slot contains an object with wrong size " << Dump(); } } @@ -2159,7 +2163,7 @@ size_t RosAlloc::ReleasePages() { // to the next page. if (free_page_runs_.find(fpr) != free_page_runs_.end()) { size_t fpr_size = fpr->ByteSize(this); - DCHECK(IsAligned<kPageSize>(fpr_size)); + DCHECK_ALIGNED(fpr_size, kPageSize); uint8_t* start = reinterpret_cast<uint8_t*>(fpr); reclaimed_bytes += ReleasePageRange(start, start + fpr_size); size_t pages = fpr_size / kPageSize; diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h index 0fcfe72b06..a7f29af274 100644 --- a/runtime/gc/allocator/rosalloc.h +++ b/runtime/gc/allocator/rosalloc.h @@ -51,7 +51,7 @@ class RosAlloc { bool IsFree() const { return !kIsDebugBuild || magic_num_ == kMagicNumFree; } - size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) { const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this); size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base); size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx]; @@ -60,7 +60,7 @@ class RosAlloc { return byte_size; } void SetByteSize(RosAlloc* rosalloc, size_t byte_size) - EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + REQUIRES(rosalloc->lock_) { DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0)); uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this); size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base); @@ -69,20 +69,20 @@ class RosAlloc { void* Begin() { return reinterpret_cast<void*>(this); } - void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) { uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this); uint8_t* end = fpr_base + ByteSize(rosalloc); return end; } bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc) - EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + REQUIRES(rosalloc->lock_) { return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_; } bool IsAtEndOfSpace(RosAlloc* rosalloc) - EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + REQUIRES(rosalloc->lock_) { return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_; } - bool ShouldReleasePages(RosAlloc* rosalloc) 
EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) { switch (rosalloc->page_release_mode_) { case kPageReleaseModeNone: return false; @@ -99,7 +99,7 @@ class RosAlloc { return false; } } - void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) { + void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) { uint8_t* start = reinterpret_cast<uint8_t*>(this); size_t byte_size = ByteSize(rosalloc); DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0)); @@ -253,9 +253,9 @@ class RosAlloc { // Dump the run metadata for debugging. std::string Dump(); // Verify for debugging. - void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_valgrind) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) + REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::thread_list_lock_); private: // The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the bracket @@ -503,7 +503,7 @@ class RosAlloc { const size_t page_release_size_threshold_; // Whether this allocator is running under Valgrind. - bool running_on_valgrind_; + bool is_running_on_memory_tool_; // The base address of the memory region that's managed by this allocator. uint8_t* Begin() { return base_; } @@ -512,56 +512,56 @@ class RosAlloc { // Page-granularity alloc/free void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); // Returns how many bytes were freed. - size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_); + size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_); // Allocate/free a run slot. void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Allocate/free a run slot without acquiring locks. - // TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) + // TODO: REQUIRES(Locks::mutator_lock_) void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); - void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx); + REQUIRES(!lock_); + void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_); // Returns the bracket size. size_t FreeFromRun(Thread* self, void* ptr, Run* run) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Used to allocate a new thread local run for a size bracket. - Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_); + Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_); // Used to acquire a new/reused run for a size bracket. Used when a // thread-local or current run gets full. - Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_); + Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_); // The internal of non-bulk Free(). - size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_); + size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_); // Allocates large objects. void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Revoke a run by adding it to non_full_runs_ or freeing the pages. 
- void RevokeRun(Thread* self, size_t idx, Run* run); + void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_); // Revoke the current runs which share an index with the thread local runs. - void RevokeThreadUnsafeCurrentRuns(); + void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_); // Release a range of pages. - size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_); + size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_); // Dumps the page map for debugging. - std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_); + std::string DumpPageMap() REQUIRES(lock_); public: RosAlloc(void* base, size_t capacity, size_t max_capacity, PageReleaseMode page_release_mode, - bool running_on_valgrind, + bool running_on_memory_tool, size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold); ~RosAlloc(); @@ -570,11 +570,11 @@ class RosAlloc { template<bool kThreadSafe = true> void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); size_t Free(Thread* self, void* ptr) - LOCKS_EXCLUDED(bulk_free_lock_); + REQUIRES(!bulk_free_lock_, !lock_); size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs) - LOCKS_EXCLUDED(bulk_free_lock_); + REQUIRES(!bulk_free_lock_, !lock_); // Returns true if the given allocation request can be allocated in // an existing thread local run without allocating a new run. @@ -589,7 +589,7 @@ class RosAlloc { ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size); // Returns the size of the allocated slot for a given allocated memory chunk. - size_t UsableSize(const void* ptr); + size_t UsableSize(const void* ptr) REQUIRES(!lock_); // Returns the size of the allocated slot for a given size. size_t UsableSize(size_t bytes) { if (UNLIKELY(bytes > kLargeSizeThreshold)) { @@ -600,33 +600,33 @@ class RosAlloc { } // Try to reduce the current footprint by releasing the free page // run at the end of the memory region, if any. - bool Trim(); + bool Trim() REQUIRES(!lock_); // Iterates over all the memory slots and apply the given function. void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Release empty pages. - size_t ReleasePages() LOCKS_EXCLUDED(lock_); + size_t ReleasePages() REQUIRES(!lock_); // Returns the current footprint. - size_t Footprint() LOCKS_EXCLUDED(lock_); + size_t Footprint() REQUIRES(!lock_); // Returns the current capacity, maximum footprint. - size_t FootprintLimit() LOCKS_EXCLUDED(lock_); + size_t FootprintLimit() REQUIRES(!lock_); // Update the current capacity. - void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_); + void SetFootprintLimit(size_t bytes) REQUIRES(!lock_); // Releases the thread-local runs assigned to the given thread back to the common set of runs. // Returns the total bytes of free slots in the revoked thread local runs. This is to be // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting. - size_t RevokeThreadLocalRuns(Thread* thread); + size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_); // Releases the thread-local runs assigned to all the threads back to the common set of runs. // Returns the total bytes of free slots in the revoked thread local runs. This is to be // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting. 
- size_t RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_); + size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_); // Assert the thread local runs of a thread are revoked. - void AssertThreadLocalRunsAreRevoked(Thread* thread); + void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_); // Assert all the thread local runs are revoked. - void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_); + void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_); static Run* GetDedicatedFullRun() { return dedicated_full_run_; @@ -647,9 +647,11 @@ class RosAlloc { } // Verify for debugging. - void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_, + !lock_); - void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes); + void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) + REQUIRES(!bulk_free_lock_, !lock_); private: friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs); diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h index f9a2ff6bc8..185a9b7fac 100644 --- a/runtime/gc/allocator_type.h +++ b/runtime/gc/allocator_type.h @@ -17,7 +17,7 @@ #ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_ #define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_ -#include <ostream> +#include <iosfwd> namespace art { namespace gc { diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index c7d2e9f2c9..263e67879b 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -17,8 +17,10 @@ #include "concurrent_copying.h" #include "art_field-inl.h" +#include "base/stl_util.h" #include "gc/accounting/heap_bitmap-inl.h" #include "gc/accounting/space_bitmap-inl.h" +#include "gc/reference_processor.h" #include "gc/space/image_space.h" #include "gc/space/space.h" #include "intern_table.h" @@ -37,17 +39,22 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix) : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? 
"" : " ") + "concurrent copying + mark sweep"), - region_space_(nullptr), gc_barrier_(new Barrier(0)), mark_queue_(2 * MB), + region_space_(nullptr), gc_barrier_(new Barrier(0)), + gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack", + 2 * MB, 2 * MB)), + mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock), + thread_running_gc_(nullptr), is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false), - heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), + heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff), + weak_ref_access_enabled_(true), skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock), rb_table_(heap_->GetReadBarrierTable()), force_evacuate_all_(false) { static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize, "The region space size and the read barrier table region size must match"); cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap)); + Thread* self = Thread::Current(); { - Thread* self = Thread::Current(); ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); // Cache this so that we won't have to lock heap_bitmap_lock_ in // Mark() which could cause a nested lock on heap_bitmap_lock_ @@ -55,9 +62,27 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix) // (class_linker_lock_ and heap_bitmap_lock_). heap_mark_bitmap_ = heap->GetMarkBitmap(); } + { + MutexLock mu(self, mark_stack_lock_); + for (size_t i = 0; i < kMarkStackPoolSize; ++i) { + accounting::AtomicStack<mirror::Object>* mark_stack = + accounting::AtomicStack<mirror::Object>::Create( + "thread local mark stack", kMarkStackSize, kMarkStackSize); + pooled_mark_stacks_.push_back(mark_stack); + } + } +} + +void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) { + // Used for preserving soft references, should be OK to not have a CAS here since there should be + // no other threads which can trigger read barriers on the same referent during reference + // processing. 
+ from_ref->Assign(Mark(from_ref->AsMirrorPtr())); + DCHECK(!from_ref->IsNull()); } ConcurrentCopying::~ConcurrentCopying() { + STLDeleteElements(&pooled_mark_stacks_); } void ConcurrentCopying::RunPhases() { @@ -65,6 +90,7 @@ void ConcurrentCopying::RunPhases() { CHECK(!is_active_); is_active_ = true; Thread* self = Thread::Current(); + thread_running_gc_ = self; Locks::mutator_lock_->AssertNotHeld(self); { ReaderMutexLock mu(self, *Locks::mutator_lock_); @@ -79,7 +105,7 @@ void ConcurrentCopying::RunPhases() { if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) { TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings()); ScopedPause pause(this); - CheckEmptyMarkQueue(); + CheckEmptyMarkStack(); if (kVerboseMode) { LOG(INFO) << "Verifying no from-space refs"; } @@ -87,7 +113,7 @@ void ConcurrentCopying::RunPhases() { if (kVerboseMode) { LOG(INFO) << "Done verifying no from-space refs"; } - CheckEmptyMarkQueue(); + CheckEmptyMarkStack(); } { ReaderMutexLock mu(self, *Locks::mutator_lock_); @@ -96,6 +122,7 @@ void ConcurrentCopying::RunPhases() { FinishPhase(); CHECK(is_active_); is_active_ = false; + thread_running_gc_ = nullptr; } void ConcurrentCopying::BindBitmaps() { @@ -132,7 +159,7 @@ void ConcurrentCopying::InitializePhase() { LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-" << reinterpret_cast<void*>(region_space_->Limit()); } - CHECK(mark_queue_.IsEmpty()); + CheckEmptyMarkStack(); immune_region_.Reset(); bytes_moved_.StoreRelaxed(0); objects_moved_.StoreRelaxed(0); @@ -154,11 +181,11 @@ void ConcurrentCopying::InitializePhase() { // Used to switch the thread roots of a thread from from-space refs to to-space refs. class ThreadFlipVisitor : public Closure { public: - explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab) + ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab) : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) { } - virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { // Note: self is not necessarily equal to thread since thread may be suspended. Thread* self = Thread::Current(); CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) @@ -194,7 +221,7 @@ class FlipCallback : public Closure { : concurrent_copying_(concurrent_copying) { } - virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) { ConcurrentCopying* cc = concurrent_copying_; TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings()); // Note: self is not necessarily equal to thread since thread may be suspended. 
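A note on the annotation churn visible throughout these hunks: the patch migrates from the old-style Clang thread-safety macros (EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED) to the capability-based spellings (REQUIRES, SHARED_REQUIRES, and the negative REQUIRES(!lock_)). The sketch below is not ART code; it is a minimal, self-contained illustration of the Clang attributes these macros expand to, with a hypothetical Allocator/lock_ standing in for RosAlloc:

// Minimal sketch (not ART code). Compile with:
//   clang++ -c -Wthread-safety -Wthread-safety-negative sketch.cc
#include <cstddef>

#define CAPABILITY(x)  __attribute__((capability(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE(this) {}
  void Unlock() RELEASE(this) {}
};

class Allocator {  // Hypothetical stand-in for RosAlloc.
 public:
  // Old spelling: EXCLUSIVE_LOCKS_REQUIRED(lock_). The caller must hold lock_.
  void ReleasePagesLocked() REQUIRES(lock_) { ++released_; }
  // Old spelling: LOCKS_EXCLUDED(lock_). The negative capability is checked
  // transitively through callers, which the old macro was not; this is what
  // lets the REQUIRES(!lock_) annotations in this patch flag self-deadlock.
  void Trim() REQUIRES(!lock_) {
    lock_.Lock();
    ReleasePagesLocked();
    lock_.Unlock();
  }
 private:
  Mutex lock_;
  size_t released_ GUARDED_BY(lock_) = 0;
};

Under -Wthread-safety-negative, a caller that already holds lock_ and calls Trim() gets a compile-time warning instead of deadlocking at runtime.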
@@ -202,13 +229,14 @@ class FlipCallback : public Closure { CHECK(thread == self); Locks::mutator_lock_->AssertExclusiveHeld(self); cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_); - cc->SwapStacks(self); + cc->SwapStacks(); if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { cc->RecordLiveStackFreezeSize(self); cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated(); cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated(); } cc->is_marking_ = true; + cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal); if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) { CHECK(Runtime::Current()->IsAotCompiler()); TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings()); @@ -247,8 +275,8 @@ void ConcurrentCopying::FlipThreadRoots() { } } -void ConcurrentCopying::SwapStacks(Thread* self) { - heap_->SwapStacks(self); +void ConcurrentCopying::SwapStacks() { + heap_->SwapStacks(); } void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) { @@ -262,8 +290,8 @@ class ConcurrentCopyingImmuneSpaceObjVisitor { explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {} - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { DCHECK(obj != nullptr); DCHECK(collector_->immune_region_.ContainsObject(obj)); accounting::ContinuousSpaceBitmap* cc_bitmap = @@ -283,12 +311,12 @@ class ConcurrentCopyingImmuneSpaceObjVisitor { } else { // Newly marked. Set the gray bit and push it onto the mark stack. CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); - collector_->PushOntoMarkStack<true>(obj); + collector_->PushOntoMarkStack(obj); } } private: - ConcurrentCopying* collector_; + ConcurrentCopying* const collector_; }; class EmptyCheckpoint : public Closure { @@ -319,6 +347,7 @@ void ConcurrentCopying::MarkingPhase() { if (kVerboseMode) { LOG(INFO) << "GC MarkingPhase"; } + CHECK(weak_ref_access_enabled_); { // Mark the image root. The WB-based collectors do not need to // scan the image objects from roots by relying on the card table, @@ -334,6 +363,8 @@ void ConcurrentCopying::MarkingPhase() { } } } + // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part + // to also use the same function. { TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings()); Runtime::Current()->VisitConstantRoots(this); @@ -351,6 +382,7 @@ void ConcurrentCopying::MarkingPhase() { TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings()); Runtime::Current()->VisitNonThreadRoots(this); } + Runtime::Current()->GetHeap()->VisitAllocationRecords(this); // Immune spaces. for (auto& space : heap_->GetContinuousSpaces()) { @@ -367,37 +399,47 @@ void ConcurrentCopying::MarkingPhase() { Thread* self = Thread::Current(); { TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings()); - // Process the mark stack and issue an empty check point. If the - // mark stack is still empty after the check point, we're - // done. Otherwise, repeat. + // We transition through three mark stack modes (thread-local, shared, GC-exclusive).
The + // primary reasons are the fact that we need to use a checkpoint to process thread-local mark + // stacks, but after we disable weak refs accesses, we can't use a checkpoint due to a deadlock + // issue because running threads potentially blocking at WaitHoldingLocks, and that once we + // reach the point where we process weak references, we can avoid using a lock when accessing + // the GC mark stack, which makes mark stack processing more efficient. + + // Process the mark stack once in the thread local stack mode. This marks most of the live + // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and system + // weaks) that may happen concurrently while we processing the mark stack and newly mark/gray + // objects and push refs on the mark stack. ProcessMarkStack(); - size_t count = 0; - while (!ProcessMarkStack()) { - ++count; - if (kVerboseMode) { - LOG(INFO) << "Issue an empty check point. " << count; - } - IssueEmptyCheckpoint(); - } - // Need to ensure the mark stack is empty before reference - // processing to get rid of non-reference gray objects. - CheckEmptyMarkQueue(); - // Enable the GetReference slow path and disallow access to the system weaks. - GetHeap()->GetReferenceProcessor()->EnableSlowPath(); - Runtime::Current()->DisallowNewSystemWeaks(); - QuasiAtomic::ThreadFenceForConstructor(); - // Lock-unlock the system weak locks so that there's no thread in - // the middle of accessing system weaks. - Runtime::Current()->EnsureNewSystemWeaksDisallowed(); - // Note: Do not issue a checkpoint from here to the - // SweepSystemWeaks call or else a deadlock due to - // WaitHoldingLocks() would occur. + // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks + // for the last time before transitioning to the shared mark stack mode, which would process new + // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack() + // call above. At the same time, disable weak ref accesses using a per-thread flag. It's + // important to do these together in a single checkpoint so that we can ensure that mutators + // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and + // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on + // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref + // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones. + SwitchToSharedMarkStackMode(); + CHECK(!self->GetWeakRefAccessEnabled()); + // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here + // (which may be non-empty if there were refs found on thread-local mark stacks during the above + // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators + // (via read barriers) have no way to produce any more refs to process. Marking converges once + // before we process weak refs below. + ProcessMarkStack(); + CheckEmptyMarkStack(); + // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a + // lock from this point on. + SwitchToGcExclusiveMarkStackMode(); + CheckEmptyMarkStack(); if (kVerboseMode) { - LOG(INFO) << "Enabled the ref proc slow path & disabled access to system weaks."; LOG(INFO) << "ProcessReferences"; } - ProcessReferences(self, true); - CheckEmptyMarkQueue(); + // Process weak references. 
This may produce new refs to process and have them processed via + ProcessMarkStack (in the GC exclusive mark stack mode). + ProcessReferences(self); + CheckEmptyMarkStack(); if (kVerboseMode) { LOG(INFO) << "SweepSystemWeaks"; } @@ -405,33 +447,52 @@ void ConcurrentCopying::MarkingPhase() { if (kVerboseMode) { LOG(INFO) << "SweepSystemWeaks done"; } - // Because hash_set::Erase() can call the hash function for - // arbitrary elements in the weak intern table in - // InternTable::Table::SweepWeaks(), the above SweepSystemWeaks() - // call may have marked some objects (strings) alive. So process - // the mark stack here once again. + // Process the mark stack here one last time because the above SweepSystemWeaks() call may have + marked some objects (strings) alive, as hash_set::Erase() can call the hash function for + arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks(). ProcessMarkStack(); - CheckEmptyMarkQueue(); - if (kVerboseMode) { - LOG(INFO) << "AllowNewSystemWeaks"; - } - Runtime::Current()->AllowNewSystemWeaks(); + CheckEmptyMarkStack(); + // Re-enable weak ref accesses. + ReenableWeakRefAccess(self); + // Issue an empty checkpoint to ensure no threads are still in the middle of a read barrier + which may have a from-space ref cached in a local variable. IssueEmptyCheckpoint(); - // Disable marking. + // Marking is done. Disable marking. if (kUseTableLookupReadBarrier) { heap_->rb_table_->ClearAll(); DCHECK(heap_->rb_table_->IsAllCleared()); } - is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(1); - is_marking_ = false; - CheckEmptyMarkQueue(); + is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1); + is_marking_ = false; // This disables the read barrier/marking of weak roots. + mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff); + CheckEmptyMarkStack(); } + CHECK(weak_ref_access_enabled_); if (kVerboseMode) { LOG(INFO) << "GC end of MarkingPhase"; } } +void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) { + if (kVerboseMode) { + LOG(INFO) << "ReenableWeakRefAccess"; + } + weak_ref_access_enabled_.StoreRelaxed(true); // This is for new threads. + QuasiAtomic::ThreadFenceForConstructor(); + // Iterate over all threads (we don't need to, and can't, use a checkpoint here) and re-enable weak ref access. + { + MutexLock mu(self, *Locks::thread_list_lock_); + std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); + for (Thread* thread : thread_list) { + thread->SetWeakRefAccessEnabled(true); + } + } + // Unblock blocking threads. + GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self); + Runtime::Current()->BroadcastForNewSystemWeaks(); +} + void ConcurrentCopying::IssueEmptyCheckpoint() { Thread* self = Thread::Current(); EmptyCheckpoint check_point(this); @@ -452,18 +513,61 @@ void ConcurrentCopying::IssueEmptyCheckpoint() { Locks::mutator_lock_->SharedLock(self); } -mirror::Object* ConcurrentCopying::PopOffMarkStack() { - return mark_queue_.Dequeue(); - } - -template<bool kThreadSafe> void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { - CHECK_EQ(is_mark_queue_push_disallowed_.LoadRelaxed(), 0) + CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0) << " " << to_ref << " " << PrettyTypeOf(to_ref); - if (kThreadSafe) { - CHECK(mark_queue_.Enqueue(to_ref)) << "Mark queue overflow"; + Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites?
+ CHECK(thread_running_gc_ != nullptr); + MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + if (mark_stack_mode == kMarkStackModeThreadLocal) { + if (self == thread_running_gc_) { + // If GC-running thread, use the GC mark stack instead of a thread-local mark stack. + CHECK(self->GetThreadLocalMarkStack() == nullptr); + CHECK(!gc_mark_stack_->IsFull()); + gc_mark_stack_->PushBack(to_ref); + } else { + // Otherwise, use a thread-local mark stack. + accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack(); + if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) { + MutexLock mu(self, mark_stack_lock_); + // Get a new thread local mark stack. + accounting::AtomicStack<mirror::Object>* new_tl_mark_stack; + if (!pooled_mark_stacks_.empty()) { + // Use a pooled mark stack. + new_tl_mark_stack = pooled_mark_stacks_.back(); + pooled_mark_stacks_.pop_back(); + } else { + // None pooled. Create a new one. + new_tl_mark_stack = + accounting::AtomicStack<mirror::Object>::Create( + "thread local mark stack", 4 * KB, 4 * KB); + } + DCHECK(new_tl_mark_stack != nullptr); + DCHECK(new_tl_mark_stack->IsEmpty()); + new_tl_mark_stack->PushBack(to_ref); + self->SetThreadLocalMarkStack(new_tl_mark_stack); + if (tl_mark_stack != nullptr) { + // Store the old full stack into a vector. + revoked_mark_stacks_.push_back(tl_mark_stack); + } + } else { + tl_mark_stack->PushBack(to_ref); + } + } + } else if (mark_stack_mode == kMarkStackModeShared) { + // Access the shared GC mark stack with a lock. + MutexLock mu(self, mark_stack_lock_); + CHECK(!gc_mark_stack_->IsFull()); + gc_mark_stack_->PushBack(to_ref); } else { - CHECK(mark_queue_.EnqueueThreadUnsafe(to_ref)) << "Mark queue overflow"; + CHECK_EQ(static_cast<uint32_t>(mark_stack_mode), + static_cast<uint32_t>(kMarkStackModeGcExclusive)); + CHECK(self == thread_running_gc_) + << "Only GC-running thread should access the mark stack " + << "in the GC exclusive mark stack mode"; + // Access the GC mark stack without a lock. + CHECK(!gc_mark_stack_->IsFull()); + gc_mark_stack_->PushBack(to_ref); } } @@ -495,7 +599,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor { : collector_(collector) {} void operator()(mirror::Object* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { if (ref == nullptr) { // OK. 
return; @@ -520,7 +624,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor { } void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(root != nullptr); operator()(root); } @@ -534,21 +638,34 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor { explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector) : collector_(collector) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { mirror::Object* ref = obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); visitor(ref); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { CHECK(klass->IsTypeOfReferenceClass()); this->operator()(ref, mirror::Reference::ReferentOffset(), false); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_); + visitor(root->AsMirrorPtr()); + } + private: - ConcurrentCopying* collector_; + ConcurrentCopying* const collector_; }; class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor { @@ -556,17 +673,17 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor { explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector) : collector_(collector) {} void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectCallback(obj, collector_); } static void ObjectCallback(mirror::Object* obj, void *arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(obj != nullptr); ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); space::RegionSpace* region_space = collector->RegionSpace(); CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector); - obj->VisitReferences<true>(visitor, visitor); + obj->VisitReferences(visitor, visitor); if (kUseBakerReadBarrier) { if (collector->RegionSpace()->IsInToSpace(obj)) { CHECK(obj->GetReadBarrierPointer() == nullptr) @@ -629,23 +746,16 @@ class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor { : collector_(collector) {} void operator()(mirror::Object* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { if (ref == nullptr) { // OK. 
return; } collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); } - static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); - ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector); - DCHECK(root != nullptr); - visitor(*root); - } private: - ConcurrentCopying* collector_; + ConcurrentCopying* const collector_; }; class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor { @@ -653,20 +763,33 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor { explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector) : collector_(collector) {} - void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { mirror::Object* ref = obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset); ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); visitor(ref); } - void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { CHECK(klass->IsTypeOfReferenceClass()); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_); + visitor(root->AsMirrorPtr()); + } + private: - ConcurrentCopying* collector_; + ConcurrentCopying* const collector_; }; class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor { @@ -674,108 +797,325 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor { explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector) : collector_(collector) {} void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectCallback(obj, collector_); } static void ObjectCallback(mirror::Object* obj, void *arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(obj != nullptr); ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg); space::RegionSpace* region_space = collector->RegionSpace(); CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj); ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector); - obj->VisitReferences<true>(visitor, visitor); + obj->VisitReferences(visitor, visitor); } private: - ConcurrentCopying* collector_; + ConcurrentCopying* const collector_; +}; + +class RevokeThreadLocalMarkStackCheckpoint : public Closure { + public: + RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying, + bool disable_weak_ref_access) + : concurrent_copying_(concurrent_copying), + disable_weak_ref_access_(disable_weak_ref_access) { + } + + virtual void Run(Thread* thread) 
OVERRIDE NO_THREAD_SAFETY_ANALYSIS { + // Note: self is not necessarily equal to thread since thread may be suspended. + Thread* self = Thread::Current(); + CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) + << thread->GetState() << " thread " << thread << " self " << self; + // Revoke thread local mark stacks. + accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); + if (tl_mark_stack != nullptr) { + MutexLock mu(self, concurrent_copying_->mark_stack_lock_); + concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack); + thread->SetThreadLocalMarkStack(nullptr); + } + // Disable weak ref access. + if (disable_weak_ref_access_) { + thread->SetWeakRefAccessEnabled(false); + } + // If thread is a running mutator, then act on behalf of the garbage collector. + // See the code in ThreadList::RunCheckpoint. + if (thread->GetState() == kRunnable) { + concurrent_copying_->GetBarrier().Pass(self); + } + } + + private: + ConcurrentCopying* const concurrent_copying_; + const bool disable_weak_ref_access_; }; -bool ConcurrentCopying::ProcessMarkStack() { +void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) { + Thread* self = Thread::Current(); + RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access); + ThreadList* thread_list = Runtime::Current()->GetThreadList(); + gc_barrier_->Init(self, 0); + size_t barrier_count = thread_list->RunCheckpoint(&check_point); + // If there are no threads to wait for, which implies that all the checkpoint functions have finished, + then there is no need to release the mutator lock. + if (barrier_count == 0) { + return; + } + Locks::mutator_lock_->SharedUnlock(self); + { + ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); + gc_barrier_->Increment(self, barrier_count); + } + Locks::mutator_lock_->SharedLock(self); +} + +void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) { + Thread* self = Thread::Current(); + CHECK_EQ(self, thread); + accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack(); + if (tl_mark_stack != nullptr) { + CHECK(is_marking_); + MutexLock mu(self, mark_stack_lock_); + revoked_mark_stacks_.push_back(tl_mark_stack); + thread->SetThreadLocalMarkStack(nullptr); + } +} + +void ConcurrentCopying::ProcessMarkStack() { if (kVerboseMode) { LOG(INFO) << "ProcessMarkStack. "; } + bool empty_prev = false; + while (true) { + bool empty = ProcessMarkStackOnce(); + if (empty_prev && empty) { + // Saw empty mark stack for a second time, done. + break; + } + empty_prev = empty; + } +} + +bool ConcurrentCopying::ProcessMarkStackOnce() { + Thread* self = Thread::Current(); + CHECK(thread_running_gc_ != nullptr); + CHECK(self == thread_running_gc_); + CHECK(self->GetThreadLocalMarkStack() == nullptr); size_t count = 0; - mirror::Object* to_ref; - while ((to_ref = PopOffMarkStack()) != nullptr) { - ++count; - DCHECK(!region_space_->IsInFromSpace(to_ref)); - if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) - << " " << to_ref << " " << to_ref->GetReadBarrierPointer() - << " is_marked=" << IsMarked(to_ref); + MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + if (mark_stack_mode == kMarkStackModeThreadLocal) { + // Process the thread-local mark stacks and the GC mark stack.
+ count += ProcessThreadLocalMarkStacks(false); + while (!gc_mark_stack_->IsEmpty()) { + mirror::Object* to_ref = gc_mark_stack_->PopBack(); + ProcessMarkStackRef(to_ref); + ++count; } - // Scan ref fields. - Scan(to_ref); - // Mark the gray ref as white or black. - if (kUseBakerReadBarrier) { - DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) - << " " << to_ref << " " << to_ref->GetReadBarrierPointer() - << " is_marked=" << IsMarked(to_ref); - } - if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && - to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && - !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) { - // Leave References gray so that GetReferent() will trigger RB. - CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref; - } else { -#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER - if (kUseBakerReadBarrier) { - if (region_space_->IsInToSpace(to_ref)) { - // If to-space, change from gray to white. - bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), - ReadBarrier::WhitePtr()); - CHECK(success) << "Must succeed as we won the race."; - CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); - } else { - // If non-moving space/unevac from space, change from gray - // to black. We can't change gray to white because it's not - // safe to use CAS if two threads change values in opposite - // directions (A->B and B->A). So, we change it to black to - // indicate non-moving objects that have been marked - // through. Note we'd need to change from black to white - // later (concurrently). - bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), - ReadBarrier::BlackPtr()); - CHECK(success) << "Must succeed as we won the race."; - CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); + gc_mark_stack_->Reset(); + } else if (mark_stack_mode == kMarkStackModeShared) { + // Process the shared GC mark stack with a lock. + { + MutexLock mu(self, mark_stack_lock_); + CHECK(revoked_mark_stacks_.empty()); + } + while (true) { + std::vector<mirror::Object*> refs; + { + // Copy refs with lock. Note the number of refs should be small. + MutexLock mu(self, mark_stack_lock_); + if (gc_mark_stack_->IsEmpty()) { + break; + } + for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin(); + p != gc_mark_stack_->End(); ++p) { + refs.push_back(p->AsMirrorPtr()); } + gc_mark_stack_->Reset(); + } + for (mirror::Object* ref : refs) { + ProcessMarkStackRef(ref); + ++count; } -#else - DCHECK(!kUseBakerReadBarrier); -#endif } - if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) { - ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this); - visitor(to_ref); + } else { + CHECK_EQ(static_cast<uint32_t>(mark_stack_mode), + static_cast<uint32_t>(kMarkStackModeGcExclusive)); + { + MutexLock mu(self, mark_stack_lock_); + CHECK(revoked_mark_stacks_.empty()); + } + // Process the GC mark stack in the exclusive mode. No need to take the lock. + while (!gc_mark_stack_->IsEmpty()) { + mirror::Object* to_ref = gc_mark_stack_->PopBack(); + ProcessMarkStackRef(to_ref); + ++count; } + gc_mark_stack_->Reset(); } + // Return true if the stack was empty. 
return count == 0; } -void ConcurrentCopying::CheckEmptyMarkQueue() { - if (!mark_queue_.IsEmpty()) { - while (!mark_queue_.IsEmpty()) { - mirror::Object* obj = mark_queue_.Dequeue(); - if (kUseBakerReadBarrier) { - mirror::Object* rb_ptr = obj->GetReadBarrierPointer(); - LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr - << " is_marked=" << IsMarked(obj); +size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) { + // Run a checkpoint to collect all thread local mark stacks and iterate over them all. + RevokeThreadLocalMarkStacks(disable_weak_ref_access); + size_t count = 0; + std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks; + { + MutexLock mu(Thread::Current(), mark_stack_lock_); + // Make a copy of the mark stack vector. + mark_stacks = revoked_mark_stacks_; + revoked_mark_stacks_.clear(); + } + for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) { + for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) { + mirror::Object* to_ref = p->AsMirrorPtr(); + ProcessMarkStackRef(to_ref); + ++count; + } + { + MutexLock mu(Thread::Current(), mark_stack_lock_); + if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) { + // The pool has enough. Delete it. + delete mark_stack; + } else { + // Otherwise, put it into the pool for later reuse. + mark_stack->Reset(); + pooled_mark_stacks_.push_back(mark_stack); + } + } + } + return count; +} + +void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { + DCHECK(!region_space_->IsInFromSpace(to_ref)); + if (kUseBakerReadBarrier) { + DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) + << " " << to_ref << " " << to_ref->GetReadBarrierPointer() + << " is_marked=" << IsMarked(to_ref); + } + // Scan ref fields. + Scan(to_ref); + // Mark the gray ref as white or black. + if (kUseBakerReadBarrier) { + DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) + << " " << to_ref << " " << to_ref->GetReadBarrierPointer() + << " is_marked=" << IsMarked(to_ref); + } + if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() && + to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr && + !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) { + // Leave References gray so that GetReferent() will trigger RB. + CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref; + } else { +#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER + if (kUseBakerReadBarrier) { + if (region_space_->IsInToSpace(to_ref)) { + // If to-space, change from gray to white. + bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), + ReadBarrier::WhitePtr()); + CHECK(success) << "Must succeed as we won the race."; + CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr()); } else { - LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) - << " is_marked=" << IsMarked(obj); + // If non-moving space/unevac from space, change from gray + // to black. We can't change gray to white because it's not + // safe to use CAS if two threads change values in opposite + // directions (A->B and B->A). So, we change it to black to + // indicate non-moving objects that have been marked + // through. Note we'd need to change from black to white + // later (concurrently). 
+ bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(), + ReadBarrier::BlackPtr()); + CHECK(success) << "Must succeed as we won the race."; + CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr()); + } + } +#else + DCHECK(!kUseBakerReadBarrier); +#endif + } + if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) { + ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this); + visitor(to_ref); + } +} + +void ConcurrentCopying::SwitchToSharedMarkStackMode() { + Thread* self = Thread::Current(); + CHECK(thread_running_gc_ != nullptr); + CHECK_EQ(self, thread_running_gc_); + CHECK(self->GetThreadLocalMarkStack() == nullptr); + MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), + static_cast<uint32_t>(kMarkStackModeThreadLocal)); + mark_stack_mode_.StoreRelaxed(kMarkStackModeShared); + CHECK(weak_ref_access_enabled_.LoadRelaxed()); + weak_ref_access_enabled_.StoreRelaxed(false); + QuasiAtomic::ThreadFenceForConstructor(); + // Process the thread local mark stacks one last time after switching to the shared mark stack + mode and disabling weak ref accesses. + ProcessThreadLocalMarkStacks(true); + if (kVerboseMode) { + LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; + } +} + +void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() { + Thread* self = Thread::Current(); + CHECK(thread_running_gc_ != nullptr); + CHECK_EQ(self, thread_running_gc_); + CHECK(self->GetThreadLocalMarkStack() == nullptr); + MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), + static_cast<uint32_t>(kMarkStackModeShared)); + mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive); + QuasiAtomic::ThreadFenceForConstructor(); + if (kVerboseMode) { + LOG(INFO) << "Switched to GC exclusive mark stack mode"; + } +} + +void ConcurrentCopying::CheckEmptyMarkStack() { + Thread* self = Thread::Current(); + CHECK(thread_running_gc_ != nullptr); + CHECK_EQ(self, thread_running_gc_); + CHECK(self->GetThreadLocalMarkStack() == nullptr); + MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + if (mark_stack_mode == kMarkStackModeThreadLocal) { + // Thread-local mark stack mode. + RevokeThreadLocalMarkStacks(false); + MutexLock mu(Thread::Current(), mark_stack_lock_); + if (!revoked_mark_stacks_.empty()) { + for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) { + while (!mark_stack->IsEmpty()) { + mirror::Object* obj = mark_stack->PopBack(); + if (kUseBakerReadBarrier) { + mirror::Object* rb_ptr = obj->GetReadBarrierPointer(); + LOG(INFO) << "On mark stack : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr=" << rb_ptr + << " is_marked=" << IsMarked(obj); + } else { + LOG(INFO) << "On mark stack : " << obj << " " << PrettyTypeOf(obj) + << " is_marked=" << IsMarked(obj); + } + } + } + LOG(FATAL) << "mark stack is not empty"; } - LOG(FATAL) << "mark queue is not empty"; + } else { + // Shared, GC-exclusive, or off.
+ MutexLock mu(Thread::Current(), mark_stack_lock_); + CHECK(gc_mark_stack_->IsEmpty()); + CHECK(revoked_mark_stacks_.empty()); } } void ConcurrentCopying::SweepSystemWeaks(Thread* self) { TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings()); ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); - Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this); + Runtime::Current()->SweepSystemWeaks(this); } void ConcurrentCopying::Sweep(bool swap_bitmaps) { @@ -788,7 +1128,7 @@ void ConcurrentCopying::Sweep(bool swap_bitmaps) { heap_->MarkAllocStackAsLive(live_stack); live_stack->Reset(); } - CHECK(mark_queue_.IsEmpty()); + CheckEmptyMarkStack(); TimingLogger::ScopedTiming split("Sweep", GetTimings()); for (const auto& space : GetHeap()->GetContinuousSpaces()) { if (space->IsContinuousMemMapAllocSpace()) { @@ -816,8 +1156,8 @@ class ConcurrentCopyingClearBlackPtrsVisitor { #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { DCHECK(obj != nullptr); DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj; DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj; @@ -884,8 +1224,8 @@ void ConcurrentCopying::ReclaimPhase() { } IssueEmptyCheckpoint(); // Disable the check. - is_mark_queue_push_disallowed_.StoreSequentiallyConsistent(0); - CheckEmptyMarkQueue(); + is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0); + CheckEmptyMarkStack(); } { @@ -952,6 +1292,8 @@ void ConcurrentCopying::ReclaimPhase() { region_space_bitmap_ = nullptr; } + CheckEmptyMarkStack(); + if (kVerboseMode) { LOG(INFO) << "GC end of ReclaimPhase"; } @@ -961,8 +1303,8 @@ class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor { public: explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc) : collector_(cc) {} - void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { DCHECK(ref != nullptr); DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref; DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref; @@ -978,7 +1320,7 @@ class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor { } private: - ConcurrentCopying* collector_; + ConcurrentCopying* const collector_; }; // Compute how much live objects are left in regions. 
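Condensing the MarkingPhase() changes above, the new collector walks through the mark stack modes in a fixed order. The outline below is a paraphrase of the control flow introduced by this patch, not a verbatim excerpt:

// Paraphrased outline of ConcurrentCopying::MarkingPhase() (not verbatim).
ProcessMarkStack();                  // kMarkStackModeThreadLocal: mutators push onto
                                     // thread-local stacks, revoked via checkpoints.
SwitchToSharedMarkStackMode();       // One checkpoint: revoke thread-local stacks and
                                     // clear each thread's weak-ref-access flag together.
ProcessMarkStack();                  // kMarkStackModeShared: pushes go to gc_mark_stack_
                                     // under mark_stack_lock_; no checkpoint needed, so
                                     // no WaitHoldingLocks deadlock.
CheckEmptyMarkStack();
SwitchToGcExclusiveMarkStackMode();  // Only thread_running_gc_ touches the stack now.
ProcessReferences(self);             // Weak references; may push and drain more work.
SweepSystemWeaks(self);
ProcessMarkStack();                  // Strings kept alive by InternTable sweeping.
ReenableWeakRefAccess(self);
IssueEmptyCheckpoint();              // Flush from-space refs cached in read barriers.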
@@ -1019,7 +1361,7 @@ class RootPrinter { template <class MirrorType> ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!root->IsNull()) { VisitRoot(root); } @@ -1027,13 +1369,13 @@ class RootPrinter { template <class MirrorType> void VisitRoot(mirror::Object** root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root; } template <class MirrorType> void VisitRoot(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr(); } }; @@ -1173,17 +1515,29 @@ class ConcurrentCopyingRefFieldsVisitor { : collector_(collector) {} void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) - const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { collector_->Process(obj, offset); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { CHECK(klass->IsTypeOfReferenceClass()); collector_->DelayReferenceReferent(klass, ref); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + collector_->MarkRoot(root); + } + private: ConcurrentCopying* const collector_; }; @@ -1192,12 +1546,13 @@ class ConcurrentCopyingRefFieldsVisitor { void ConcurrentCopying::Scan(mirror::Object* to_ref) { DCHECK(!region_space_->IsInFromSpace(to_ref)); ConcurrentCopyingRefFieldsVisitor visitor(this); - to_ref->VisitReferences<true>(visitor, visitor); + to_ref->VisitReferences(visitor, visitor); } // Process a field. inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) { - mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); + mirror::Object* ref = obj->GetFieldObject< + mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); if (ref == nullptr || region_space_->IsInToSpace(ref)) { return; } @@ -1214,8 +1569,8 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) // It was updated by the mutator. break; } - } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>( - offset, expected_ref, new_ref)); + } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier< + false, false, kVerifyNone>(offset, expected_ref, new_ref)); } // Process some roots. 
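The Process() hunk above keeps the weak-CAS retry loop while reflowing it; the invariant is that a concurrent mutator store always wins, since whatever the mutator writes is at least as up to date as the forwarded value. A self-contained sketch of that idiom with std::atomic (the hypothetical ForwardField and plain void* types stand in for ART's Object/MemberOffset machinery):

#include <atomic>

// Replace *field with to_ref only while it still holds from_ref; if a mutator
// stores anything else in the meantime, keep the mutator's value.
void ForwardField(std::atomic<void*>* field, void* from_ref, void* to_ref) {
  do {
    if (field->load(std::memory_order_relaxed) != from_ref) {
      return;  // It was updated by the mutator; nothing to do.
    }
    // compare_exchange_weak may fail spuriously or because of a racing store;
    // the loop re-checks and retries, mirroring the do/while in Process().
    void* expected = from_ref;
    if (field->compare_exchange_weak(expected, to_ref,
                                     std::memory_order_seq_cst)) {
      return;
    }
  } while (true);
}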
@@ -1243,22 +1598,18 @@ void ConcurrentCopying::VisitRoots( } } -void ConcurrentCopying::VisitRoots( - mirror::CompressedReference<mirror::Object>** roots, size_t count, - const RootInfo& info ATTRIBUTE_UNUSED) { - for (size_t i = 0; i < count; ++i) { - mirror::CompressedReference<mirror::Object>* root = roots[i]; - mirror::Object* ref = root->AsMirrorPtr(); - if (ref == nullptr || region_space_->IsInToSpace(ref)) { - continue; - } - mirror::Object* to_ref = Mark(ref); - if (to_ref == ref) { - continue; - } +void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) { + DCHECK(!root->IsNull()); + mirror::Object* const ref = root->AsMirrorPtr(); + if (region_space_->IsInToSpace(ref)) { + return; + } + mirror::Object* to_ref = Mark(ref); + if (to_ref != ref) { auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root); auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref); auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref); + // If the cas fails, then it was updated by the mutator. do { if (ref != addr->LoadRelaxed().AsMirrorPtr()) { // It was updated by the mutator. @@ -1268,10 +1619,21 @@ void ConcurrentCopying::VisitRoots( } } +void ConcurrentCopying::VisitRoots( + mirror::CompressedReference<mirror::Object>** roots, size_t count, + const RootInfo& info ATTRIBUTE_UNUSED) { + for (size_t i = 0; i < count; ++i) { + mirror::CompressedReference<mirror::Object>* const root = roots[i]; + if (!root->IsNull()) { + MarkRoot(root); + } + } +} + // Fill the given memory block with a dummy object. Used to fill in a // copy of objects that was lost in race. void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) { - CHECK(IsAligned<kObjectAlignment>(byte_size)); + CHECK_ALIGNED(byte_size, kObjectAlignment); memset(dummy_obj, 0, byte_size); mirror::Class* int_array_class = mirror::IntArray::GetArrayClass(); CHECK(int_array_class != nullptr); @@ -1304,7 +1666,7 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by // Reuse the memory blocks that were copy of objects that were lost in race. mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { // Try to reuse the blocks that were unused due to CAS failures. - CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size)); + CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment); Thread* self = Thread::Current(); size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment); MutexLock mu(self, skipped_blocks_lock_); @@ -1323,7 +1685,7 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { // Not found. 
return nullptr; } - CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size)); + CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment); CHECK_GE(it->first - alloc_size, min_object_size) << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size; } @@ -1334,7 +1696,7 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { uint8_t* addr = it->second; CHECK_GE(byte_size, alloc_size); CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr))); - CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size)); + CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment); if (kVerboseMode) { LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size; } @@ -1342,7 +1704,7 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) { memset(addr, 0, byte_size); if (byte_size > alloc_size) { // Return the remainder to the map. - CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size)); + CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment); CHECK_GE(byte_size - alloc_size, min_object_size); FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size), byte_size - alloc_size); @@ -1475,7 +1837,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) { } DCHECK(GetFwdPtr(from_ref) == to_ref); CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress); - PushOntoMarkStack<true>(to_ref); + PushOntoMarkStack(to_ref); return to_ref; } else { // The CAS failed. It may have lost the race or may have failed @@ -1608,7 +1970,7 @@ mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { if (kUseBakerReadBarrier) { DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); } - PushOntoMarkStack<true>(to_ref); + PushOntoMarkStack(to_ref); } } else { // from_ref is in a non-moving space. @@ -1635,7 +1997,7 @@ mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { if (kUseBakerReadBarrier) { DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); } - PushOntoMarkStack<true>(to_ref); + PushOntoMarkStack(to_ref); } } else { // Use the mark bitmap. 
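AllocateInSkippedBlock() in the hunks above treats skipped_blocks_map_ as a size-keyed free list: take the smallest block of at least alloc_size bytes, carve off the allocation, and return any remainder large enough to hold a dummy object. A simplified sketch of that best-fit-with-split lookup, with plain types and a hypothetical kMinObjectSize standing in for the ART originals:

#include <cstddef>
#include <cstdint>
#include <map>

constexpr size_t kMinObjectSize = 16;  // Stand-in for RoundUp(sizeof(mirror::Object), kAlignment).

std::multimap<size_t, uint8_t*> skipped_blocks;  // byte size -> block start

uint8_t* AllocateInSkippedBlock(size_t alloc_size) {
  // Best fit: the first block with at least alloc_size bytes.
  auto it = skipped_blocks.lower_bound(alloc_size);
  if (it == skipped_blocks.end()) {
    return nullptr;  // Not found.
  }
  // If splitting would leave a sliver too small for a dummy object, retry
  // with a request large enough that the remainder stays usable.
  if (it->first > alloc_size && it->first - alloc_size < kMinObjectSize) {
    it = skipped_blocks.lower_bound(alloc_size + kMinObjectSize);
    if (it == skipped_blocks.end()) {
      return nullptr;
    }
  }
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  skipped_blocks.erase(it);
  if (byte_size > alloc_size) {
    // Return the tail to the map; ART fills it with a dummy int array so the
    // heap stays parseable.
    skipped_blocks.emplace(byte_size - alloc_size, addr + alloc_size);
  }
  return addr;
}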
@@ -1691,7 +2053,7 @@ mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { if (kUseBakerReadBarrier) { DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()); } - PushOntoMarkStack<true>(to_ref); + PushOntoMarkStack(to_ref); } } } @@ -1701,9 +2063,11 @@ mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) { } void ConcurrentCopying::FinishPhase() { + { + MutexLock mu(Thread::Current(), mark_stack_lock_); + CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize); + } region_space_ = nullptr; - CHECK(mark_queue_.IsEmpty()); - mark_queue_.Clear(); { MutexLock mu(Thread::Current(), skipped_blocks_lock_); skipped_blocks_map_.clear(); @@ -1712,14 +2076,9 @@ void ConcurrentCopying::FinishPhase() { heap_->ClearMarkedObjects(); } -mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) { - return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref); -} - -bool ConcurrentCopying::IsHeapReferenceMarkedCallback( - mirror::HeapReference<mirror::Object>* field, void* arg) { +bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) { mirror::Object* from_ref = field->AsMirrorPtr(); - mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref); + mirror::Object* to_ref = IsMarked(from_ref); if (to_ref == nullptr) { return false; } @@ -1731,25 +2090,20 @@ bool ConcurrentCopying::IsHeapReferenceMarkedCallback( return true; } -mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) { - return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref); -} - -void ConcurrentCopying::ProcessMarkStackCallback(void* arg) { - reinterpret_cast<ConcurrentCopying*>(arg)->ProcessMarkStack(); +mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) { + return Mark(from_ref); } void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { - heap_->GetReferenceProcessor()->DelayReferenceReferent( - klass, reference, &IsHeapReferenceMarkedCallback, this); + heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); } -void ConcurrentCopying::ProcessReferences(Thread* self, bool concurrent) { +void ConcurrentCopying::ProcessReferences(Thread* self) { TimingLogger::ScopedTiming split("ProcessReferences", GetTimings()); + // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps. WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); GetHeap()->GetReferenceProcessor()->ProcessReferences( - concurrent, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), - &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this); + true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); } void ConcurrentCopying::RevokeAllThreadLocalBuffers() { diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index b1897b82f4..f382448615 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -49,89 +49,6 @@ namespace space { namespace collector { -// Concurrent queue. Used as the mark stack. TODO: use a concurrent -// stack for locality. 
-class MarkQueue { - public: - explicit MarkQueue(size_t size) : size_(size) { - CHECK(IsPowerOfTwo(size_)); - buf_.reset(new Atomic<mirror::Object*>[size_]); - CHECK(buf_.get() != nullptr); - Clear(); - } - - ALWAYS_INLINE Atomic<mirror::Object*>* GetSlotAddr(size_t index) { - return &(buf_.get()[index & (size_ - 1)]); - } - - // Multiple-proceducer enqueue. - bool Enqueue(mirror::Object* to_ref) { - size_t t; - do { - t = tail_.LoadRelaxed(); - size_t h = head_.LoadSequentiallyConsistent(); - if (t + size_ == h) { - // It's full. - return false; - } - } while (!tail_.CompareExchangeWeakSequentiallyConsistent(t, t + 1)); - // We got a slot but its content has not been filled yet at this point. - GetSlotAddr(t)->StoreSequentiallyConsistent(to_ref); - return true; - } - - // Thread-unsafe. - bool EnqueueThreadUnsafe(mirror::Object* to_ref) { - size_t t = tail_.LoadRelaxed(); - size_t h = head_.LoadRelaxed(); - if (t + size_ == h) { - // It's full. - return false; - } - GetSlotAddr(t)->StoreRelaxed(to_ref); - tail_.StoreRelaxed(t + 1); - return true; - } - - // Single-consumer dequeue. - mirror::Object* Dequeue() { - size_t h = head_.LoadRelaxed(); - size_t t = tail_.LoadSequentiallyConsistent(); - if (h == t) { - // it's empty. - return nullptr; - } - Atomic<mirror::Object*>* slot = GetSlotAddr(h); - mirror::Object* ref = slot->LoadSequentiallyConsistent(); - while (ref == nullptr) { - // Wait until the slot content becomes visible. - ref = slot->LoadSequentiallyConsistent(); - } - slot->StoreRelaxed(nullptr); - head_.StoreSequentiallyConsistent(h + 1); - return ref; - } - - bool IsEmpty() { - size_t h = head_.LoadSequentiallyConsistent(); - size_t t = tail_.LoadSequentiallyConsistent(); - return h == t; - } - - void Clear() { - head_.StoreRelaxed(0); - tail_.StoreRelaxed(0); - memset(buf_.get(), 0, size_ * sizeof(Atomic<mirror::Object*>)); - } - - private: - Atomic<size_t> head_; - Atomic<size_t> tail_; - - size_t size_; - std::unique_ptr<Atomic<mirror::Object*>[]> buf_; -}; - class ConcurrentCopying : public GarbageCollector { public: // TODO: disable thse flags for production use. 
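For the deleted MarkQueue above: it was a fixed-capacity ring buffer whose head_ and tail_ counters grow without bound, relying on CHECK(IsPowerOfTwo(size_)) so that GetSlotAddr() can wrap with a bit mask instead of a modulo. A tiny standalone demonstration of that indexing identity:

#include <cassert>
#include <cstddef>

int main() {
  constexpr size_t kSize = 8;  // Any power of two, as the deleted CHECK enforced.
  static_assert((kSize & (kSize - 1)) == 0, "size must be a power of two");
  // The counters themselves never wrap in the queue; only the slot index does.
  for (size_t index = 0; index < 4 * kSize; ++index) {
    // For powers of two, masking the low bits equals the modulo, with no division.
    assert((index & (kSize - 1)) == index % kSize);
  }
  return 0;
}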
@@ -145,14 +62,15 @@ class ConcurrentCopying : public GarbageCollector { ConcurrentCopying(Heap* heap, const std::string& name_prefix = ""); ~ConcurrentCopying(); - virtual void RunPhases() OVERRIDE; - void InitializePhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FinishPhase(); + virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void FinishPhase() REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); - void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -168,14 +86,15 @@ class ConcurrentCopying : public GarbageCollector { return region_space_; } void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsInToSpace(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_); + bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(ref != nullptr); return IsMarked(ref) == ref; } - mirror::Object* Mark(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* Mark(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); bool IsMarking() const { return is_marking_; } @@ -185,67 +104,94 @@ class ConcurrentCopying : public GarbageCollector { Barrier& GetBarrier() { return *gc_barrier_; } + bool IsWeakRefAccessEnabled() { + return weak_ref_access_enabled_.LoadRelaxed(); + } + void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); private: - mirror::Object* PopOffMarkStack(); - template<bool kThreadSafe> - void PushOntoMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Object* Copy(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Scan(mirror::Object* to_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!skipped_blocks_lock_, !mark_stack_lock_); + void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); void Process(mirror::Object* obj, MemberOffset offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, 
!skipped_blocks_lock_); + void MarkRoot(mirror::CompressedReference<mirror::Object>* root) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VerifyNoFromSpaceReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_); accounting::ObjectStack* GetAllocationStack(); accounting::ObjectStack* GetLiveStack(); - bool ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ProcessReferences(Thread* self, bool concurrent) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool IsHeapReferenceMarkedCallback( - mirror::HeapReference<mirror::Object>* field, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void ProcessMarkStackCallback(void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) + SHARED_REQUIRES(Locks::mutator_lock_); + void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); + void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_); + virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_); + void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_); + virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_); + virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_); void SweepSystemWeaks(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_); void Sweep(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) 
REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); void SweepLargeObjects(bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); void ClearBlackPtrs() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* AllocateInSkippedBlock(size_t alloc_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void CheckEmptyMarkQueue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void IssueEmptyCheckpoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsOnAllocStack(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!skipped_blocks_lock_); + void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_); + bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* GetFwdPtr(mirror::Object* from_ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FlipThreadRoots() LOCKS_EXCLUDED(Locks::mutator_lock_); - void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_); + void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_); void RecordLiveStackFreezeSize(Thread* self); void ComputeUnevacFromSpaceLiveRatio(); void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); space::RegionSpace* region_space_; // The underlying region space. std::unique_ptr<Barrier> gc_barrier_; - MarkQueue mark_queue_; + std::unique_ptr<accounting::ObjectStack> gc_mark_stack_; + Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + std::vector<accounting::ObjectStack*> revoked_mark_stacks_ + GUARDED_BY(mark_stack_lock_); + static constexpr size_t kMarkStackSize = kPageSize; + static constexpr size_t kMarkStackPoolSize = 256; + std::vector<accounting::ObjectStack*> pooled_mark_stacks_ + GUARDED_BY(mark_stack_lock_); + Thread* thread_running_gc_; bool is_marking_; // True while marking is ongoing. bool is_active_; // True while the collection is ongoing. bool is_asserting_to_space_invariant_; // True while asserting the to-space invariant. @@ -258,7 +204,18 @@ class ConcurrentCopying : public GarbageCollector { size_t live_stack_freeze_size_; size_t from_space_num_objects_at_first_pause_; size_t from_space_num_bytes_at_first_pause_; - Atomic<int> is_mark_queue_push_disallowed_; + Atomic<int> is_mark_stack_push_disallowed_; + enum MarkStackMode { + kMarkStackModeOff = 0, // Mark stack is off. + kMarkStackModeThreadLocal, // All threads except for the GC-running thread push refs onto + // thread-local mark stacks. The GC-running thread pushes onto and + // pops off the GC mark stack without a lock. 
+ kMarkStackModeShared, // All threads share the GC mark stack with a lock. + kMarkStackModeGcExclusive // The GC-running thread pushes onto and pops from the GC mark stack + // without a lock. Other threads won't access the mark stack. + }; + Atomic<MarkStackMode> mark_stack_mode_; + Atomic<bool> weak_ref_access_enabled_; // How many objects and bytes we moved. Used for accounting. Atomic<size_t> bytes_moved_; @@ -284,6 +241,7 @@ class ConcurrentCopying : public GarbageCollector { friend class ThreadFlipVisitor; friend class FlipCallback; friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor; + friend class RevokeThreadLocalMarkStackCheckpoint; DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying); }; diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index 9b76d1aeef..954c80ec7b 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -17,6 +17,9 @@ #ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_ #define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_ +#include <stdint.h> +#include <vector> + #include "base/histogram.h" #include "base/mutex.h" #include "base/timing_logger.h" @@ -24,10 +27,16 @@ #include "gc/gc_cause.h" #include "gc_root.h" #include "gc_type.h" -#include <stdint.h> -#include <vector> +#include "object_callbacks.h" namespace art { + +namespace mirror { +class Class; +class Object; +class Reference; +} // namespace mirror + namespace gc { class Heap; @@ -113,7 +122,7 @@ class Iteration { DISALLOW_COPY_AND_ASSIGN(Iteration); }; -class GarbageCollector : public RootVisitor { +class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor { public: class SCOPED_LOCKABLE ScopedPause { public: @@ -133,7 +142,7 @@ class GarbageCollector : public RootVisitor { virtual GcType GetGcType() const = 0; virtual CollectorType GetCollectorType() const = 0; // Run the garbage collector. - void Run(GcCause gc_cause, bool clear_soft_references); + void Run(GcCause gc_cause, bool clear_soft_references) REQUIRES(!pause_histogram_lock_); Heap* GetHeap() const { return heap_; } @@ -141,11 +150,11 @@ class GarbageCollector : public RootVisitor { const CumulativeLogger& GetCumulativeTimings() const { return cumulative_timings_; } - void ResetCumulativeStatistics(); + void ResetCumulativeStatistics() REQUIRES(!pause_histogram_lock_); // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC, // this is the allocation space, for full GC then we swap the zygote bitmaps too. - void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_); + void SwapBitmaps() REQUIRES(Locks::heap_bitmap_lock_); + uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_); int64_t GetTotalFreedBytes() const { return total_freed_bytes_; } @@ -153,7 +162,7 @@ class GarbageCollector : public RootVisitor { return total_freed_objects_; } // Reset the cumulative timings and pause histogram. - void ResetMeasurements(); + void ResetMeasurements() REQUIRES(!pause_histogram_lock_); // Returns the estimated throughput in bytes / second. uint64_t GetEstimatedMeanThroughput() const; // Returns how many GC iterations have been run. @@ -170,7 +179,23 @@ class GarbageCollector : public RootVisitor { void RecordFree(const ObjectBytePair& freed); // Record a free of large objects. 
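Most of the one-line churn in these headers is a mechanical migration of the Clang thread-safety macros: EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, and LOCKS_EXCLUDED(x) becomes the negative-capability form REQUIRES(!x), which says the caller must not hold x because the function acquires it itself. A toy sketch of what the new spellings mean, written against the raw Clang attributes that macros like these conventionally wrap (ART's actual macro definitions live in its base headers and are not shown in this diff):

```cpp
// Compile with: clang++ -std=c++11 -c -Wthread-safety thread_safety_sketch.cc
#include <cstddef>

class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

class MarkStackSketch {
 public:
  // REQUIRES(mark_stack_lock_): the caller must already hold the lock.
  void PushLocked(void* obj) __attribute__((requires_capability(mark_stack_lock_)));

  // SHARED_REQUIRES(mark_stack_lock_): shared (reader) ownership suffices.
  size_t Size() const __attribute__((requires_shared_capability(mark_stack_lock_)));

  // REQUIRES(!mark_stack_lock_): the caller must NOT hold the lock; this is
  // the negated form that replaces the old LOCKS_EXCLUDED annotation.
  void Push(void* obj) __attribute__((requires_capability(!mark_stack_lock_)));

 private:
  Mutex mark_stack_lock_;
  // GUARDED_BY(mark_stack_lock_): all reads and writes require the lock.
  void* buffer_[64] __attribute__((guarded_by(mark_stack_lock_)));
};
```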
void RecordFreeLOS(const ObjectBytePair& freed); - void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_); + void DumpPerformanceInfo(std::ostream& os) REQUIRES(!pause_histogram_lock_); + + // Helper functions for querying if objects are marked. These are used for processing references, + // and will be used for reading system weaks while the GC is running. + virtual mirror::Object* IsMarked(mirror::Object* obj) + SHARED_REQUIRES(Locks::mutator_lock_) = 0; + virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) + SHARED_REQUIRES(Locks::mutator_lock_) = 0; + // Used by reference processor. + virtual void ProcessMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) = 0; + // Force mark an object. + virtual mirror::Object* MarkObject(mirror::Object* obj) + SHARED_REQUIRES(Locks::mutator_lock_) = 0; + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj) + SHARED_REQUIRES(Locks::mutator_lock_) = 0; + virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) + SHARED_REQUIRES(Locks::mutator_lock_) = 0; protected: // Run all of the GC phases. diff --git a/runtime/gc/collector/gc_type.h b/runtime/gc/collector/gc_type.h index f18e40fa74..401444a01d 100644 --- a/runtime/gc/collector/gc_type.h +++ b/runtime/gc/collector/gc_type.h @@ -17,7 +17,7 @@ #ifndef ART_RUNTIME_GC_COLLECTOR_GC_TYPE_H_ #define ART_RUNTIME_GC_COLLECTOR_GC_TYPE_H_ -#include <ostream> +#include <iosfwd> namespace art { namespace gc { diff --git a/runtime/gc/collector/immune_region.h b/runtime/gc/collector/immune_region.h index 30144f0b16..3ead501046 100644 --- a/runtime/gc/collector/immune_region.h +++ b/runtime/gc/collector/immune_region.h @@ -41,7 +41,7 @@ class ImmuneRegion { ImmuneRegion(); void Reset(); bool AddContinuousSpace(space::ContinuousSpace* space) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); bool ContainsSpace(const space::ContinuousSpace* space) const; // Returns true if an object is inside of the immune region (assumed to be marked). 
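The pure-virtual block added above is the core of this refactor: GarbageCollector now implements IsMarkedVisitor and MarkObjectVisitor, and every API that used to take a static callback plus a void* argument (see the MarkObjectCallback/IsMarkedCallback deletions throughout this diff) now takes the collector itself. A before-and-after sketch with illustrative names:

```cpp
#include <cstddef>

struct Object {};

// Old style (being deleted throughout): a free function plus an opaque arg,
// recovered with a reinterpret_cast inside every callback body.
typedef Object* (*MarkObjectCallback)(Object* obj, void* arg);

void ProcessWithCallback(Object** refs, size_t count, MarkObjectCallback cb, void* arg) {
  for (size_t i = 0; i < count; ++i) {
    refs[i] = cb(refs[i], arg);
  }
}

// New style: the collector implements the visitor interface directly, so the
// void* plumbing and the casts disappear.
class MarkObjectVisitor {
 public:
  virtual ~MarkObjectVisitor() {}
  virtual Object* MarkObject(Object* obj) = 0;
};

void ProcessWithVisitor(Object** refs, size_t count, MarkObjectVisitor* visitor) {
  for (size_t i = 0; i < count; ++i) {
    refs[i] = visitor->MarkObject(refs[i]);
  }
}
```

Each collector previously opened its callbacks with reinterpret_cast<MarkSweep*>(arg) or similar; with virtual dispatch that boilerplate goes away, as the deletions in mark_compact.cc and mark_sweep.cc below show.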
bool ContainsObject(const mirror::Object* obj) const ALWAYS_INLINE { diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index 3c247cd4b4..60f833b349 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -21,34 +21,19 @@ #include "base/timing_logger.h" #include "gc/accounting/heap_bitmap-inl.h" #include "gc/accounting/mod_union_table.h" -#include "gc/accounting/remembered_set.h" #include "gc/accounting/space_bitmap-inl.h" #include "gc/heap.h" #include "gc/reference_processor.h" -#include "gc/space/bump_pointer_space.h" #include "gc/space/bump_pointer_space-inl.h" -#include "gc/space/image_space.h" #include "gc/space/large_object_space.h" #include "gc/space/space-inl.h" -#include "indirect_reference_table.h" -#include "intern_table.h" -#include "jni_internal.h" -#include "mark_sweep-inl.h" -#include "monitor.h" #include "mirror/class-inl.h" -#include "mirror/class_loader.h" -#include "mirror/dex_cache.h" -#include "mirror/reference-inl.h" #include "mirror/object-inl.h" -#include "mirror/object_array.h" -#include "mirror/object_array-inl.h" #include "runtime.h" #include "stack.h" #include "thread-inl.h" #include "thread_list.h" -using ::art::mirror::Object; - namespace art { namespace gc { namespace collector { @@ -67,7 +52,7 @@ void MarkCompact::BindBitmaps() { MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix) : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"), - space_(nullptr), collector_name_(name_) { + space_(nullptr), collector_name_(name_), updating_references_(false) { } void MarkCompact::RunPhases() { @@ -104,10 +89,10 @@ class CalculateObjectForwardingAddressVisitor { public: explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector) : collector_(collector) {} - void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, + void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment); - DCHECK(collector_->IsMarked(obj)); + DCHECK(collector_->IsMarked(obj) != nullptr); collector_->ForwardObject(obj); } @@ -141,8 +126,7 @@ void MarkCompact::InitializePhase() { void MarkCompact::ProcessReferences(Thread* self) { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); heap_->GetReferenceProcessor()->ProcessReferences( - false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), - &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this); + false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); } class BitmapSetSlowPathVisitor { @@ -156,29 +140,29 @@ class BitmapSetSlowPathVisitor { } }; -inline void MarkCompact::MarkObject(mirror::Object* obj) { +inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) { if (obj == nullptr) { - return; + return nullptr; } if (kUseBakerOrBrooksReadBarrier) { // Verify all the objects have the correct forward pointer installed. obj->AssertReadBarrierPointer(); } - if (immune_region_.ContainsObject(obj)) { - return; - } - if (objects_before_forwarding_->HasAddress(obj)) { - if (!objects_before_forwarding_->Set(obj)) { - MarkStackPush(obj); // This object was not previously marked. - } - } else { - DCHECK(!space_->HasAddress(obj)); - BitmapSetSlowPathVisitor visitor; - if (!mark_bitmap_->Set(obj, visitor)) { - // This object was not previously marked. 
- MarkStackPush(obj); + if (!immune_region_.ContainsObject(obj)) { + if (objects_before_forwarding_->HasAddress(obj)) { + if (!objects_before_forwarding_->Set(obj)) { + MarkStackPush(obj); // This object was not previously marked. + } + } else { + DCHECK(!space_->HasAddress(obj)); + BitmapSetSlowPathVisitor visitor; + if (!mark_bitmap_->Set(obj, visitor)) { + // This object was not previously marked. + MarkStackPush(obj); + } } } + return obj; } void MarkCompact::MarkingPhase() { @@ -207,7 +191,7 @@ void MarkCompact::MarkingPhase() { heap_->RevokeAllThreadLocalAllocationStacks(self); } t.NewTiming("SwapStacks"); - heap_->SwapStacks(self); + heap_->SwapStacks(); { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); MarkRoots(); @@ -240,7 +224,7 @@ void MarkCompact::UpdateAndMarkModUnion() { TimingLogger::ScopedTiming t2( space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : "UpdateAndMarkImageModUnionTable", GetTimings()); - table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); + table->UpdateAndMarkReferences(this); } } } @@ -272,7 +256,7 @@ void MarkCompact::ReclaimPhase() { } void MarkCompact::ResizeMarkStack(size_t new_size) { - std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End()); + std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End()); CHECK_LE(mark_stack_->Size(), new_size); mark_stack_->Resize(new_size); for (auto& obj : temp) { @@ -280,7 +264,7 @@ void MarkCompact::ResizeMarkStack(size_t new_size) { } } -inline void MarkCompact::MarkStackPush(Object* obj) { +inline void MarkCompact::MarkStackPush(mirror::Object* obj) { if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { ResizeMarkStack(mark_stack_->Capacity() * 2); } @@ -288,23 +272,12 @@ inline void MarkCompact::MarkStackPush(Object* obj) { mark_stack_->PushBack(obj); } -void MarkCompact::ProcessMarkStackCallback(void* arg) { - reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack(); -} - -mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) { - reinterpret_cast<MarkCompact*>(arg)->MarkObject(root); - return root; -} - -void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, - void* arg) { - reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr()); -} - -void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref, - void* arg) { - reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref); +void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) { + if (updating_references_) { + UpdateHeapReference(obj_ptr); + } else { + MarkObject(obj_ptr->AsMirrorPtr()); + } } void MarkCompact::VisitRoots( @@ -328,8 +301,8 @@ class UpdateRootVisitor : public RootVisitor { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mirror::Object* obj = *roots[i]; mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj); @@ -342,8 +315,8 @@ class UpdateRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + 
OVERRIDE REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mirror::Object* obj = roots[i]->AsMirrorPtr(); mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj); @@ -362,8 +335,8 @@ class UpdateObjectReferencesVisitor { public: explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) { } - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { collector_->UpdateObjectReferences(obj); } @@ -373,6 +346,7 @@ class UpdateObjectReferencesVisitor { void MarkCompact::UpdateReferences() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); + updating_references_ = true; Runtime* runtime = Runtime::Current(); // Update roots. UpdateRootVisitor update_root_visitor(this); @@ -387,7 +361,7 @@ void MarkCompact::UpdateReferences() { space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" : "UpdateImageModUnionTableReferences", GetTimings()); - table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this); + table->UpdateAndMarkReferences(this); } else { // No mod union table, so we need to scan the space using bitmap visit. // Scan the space using bitmap visit. @@ -403,14 +377,15 @@ void MarkCompact::UpdateReferences() { CHECK(!kMovingClasses) << "Didn't update large object classes since they are assumed to not move."; // Update the system weaks, these should already have been swept. - runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this); + runtime->SweepSystemWeaks(this); // Update the objects in the bump pointer space last, these objects don't have a bitmap. UpdateObjectReferencesVisitor visitor(this); objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()), reinterpret_cast<uintptr_t>(space_->End()), visitor); // Update the reference processor cleared list. 
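UpdateReferences above rewrites every root, mod-union entry, system weak, and bump-pointer-space field once forwarding addresses have been assigned; the per-field helper (UpdateHeapReference, just below) loads the reference, maps it through the forwarding lookup, and writes it back only when it changed. A toy version of that step, with a side table standing in for the forwarding state a real mark-compact keeps in object lock words:

```cpp
#include <unordered_map>

struct Object {};

// Illustrative stand-in for the forwarding information computed during the
// address-calculation pass.
std::unordered_map<Object*, Object*> forwarding;

Object* GetForwardAddress(Object* obj) {
  auto it = forwarding.find(obj);
  return it != forwarding.end() ? it->second : obj;  // Non-moving: identity.
}

// The per-field step: load, translate, write back only on change, in the
// same shape as UpdateHeapReference below.
void UpdateHeapReference(Object** field) {
  Object* obj = *field;
  if (obj != nullptr) {
    Object* new_obj = GetForwardAddress(obj);
    if (obj != new_obj) {
      *field = new_obj;
    }
  }
}
```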
- heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this); + heap_->GetReferenceProcessor()->UpdateRoots(this); + updating_references_ = false; } void MarkCompact::Compact() { @@ -436,10 +411,6 @@ void MarkCompact::MarkRoots() { Runtime::Current()->VisitRoots(this); } -mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) { - return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj); -} - inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) { mirror::Object* obj = reference->AsMirrorPtr(); if (obj != nullptr) { @@ -451,37 +422,45 @@ inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Objec } } -void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference, - void* arg) { - reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference); -} - class UpdateReferenceVisitor { public: explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) { } - void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const - ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const + ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset)); } void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { collector_->UpdateHeapReference( ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset())); } + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + root->Assign(collector_->GetMarkedForwardAddress(root->AsMirrorPtr())); + } + private: MarkCompact* const collector_; }; void MarkCompact::UpdateObjectReferences(mirror::Object* obj) { UpdateReferenceVisitor visitor(this); - obj->VisitReferences<kMovingClasses>(visitor, visitor); + obj->VisitReferences(visitor, visitor); } -inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const { +inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) { DCHECK(obj != nullptr); if (objects_before_forwarding_->HasAddress(obj)) { DCHECK(objects_before_forwarding_->Test(obj)); @@ -491,33 +470,30 @@ inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) return ret; } DCHECK(!space_->HasAddress(obj)); - DCHECK(IsMarked(obj)); return obj; } -inline bool MarkCompact::IsMarked(const Object* object) const { +mirror::Object* MarkCompact::IsMarked(mirror::Object* object) { if (immune_region_.ContainsObject(object)) { - return true; + return object; + } + if (updating_references_) { + return GetMarkedForwardAddress(object); } if (objects_before_forwarding_->HasAddress(object)) { - return objects_before_forwarding_->Test(object); + return objects_before_forwarding_->Test(object) ? 
object : nullptr; } - return mark_bitmap_->Test(object); -} - -mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) { - return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr; + return mark_bitmap_->Test(object) ? object : nullptr; } -bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr, - void* arg) { +bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) { // Side effect free since we call this before ever moving objects. - return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr()); + return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr; } void MarkCompact::SweepSystemWeaks() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); - Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this); + Runtime::Current()->SweepSystemWeaks(this); } bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const { @@ -528,8 +504,8 @@ class MoveObjectVisitor { public: explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) { } - void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { collector_->MoveObject(obj, obj->SizeOf()); } @@ -592,8 +568,7 @@ void MarkCompact::SweepLargeObjects(bool swap_bitmaps) { // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been // marked, put it on the appropriate list in the heap for later processing. void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { - heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, - &HeapReferenceMarkedCallback, this); + heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); } class MarkCompactMarkObjectVisitor { @@ -601,33 +576,46 @@ class MarkCompactMarkObjectVisitor { explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) { } - void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Object was already verified when we scanned it. collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset)); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { collector_->DelayReferenceReferent(klass, ref); } + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + collector_->MarkObject(root->AsMirrorPtr()); + } + private: MarkCompact* const collector_; }; // Visit all of the references of an object and update. 
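With the visitor methods now virtual, MarkCompact makes one IsMarked entry point serve two phases, keyed off the new updating_references_ flag: during marking it is a plain mark-bit query, and during the reference-update pass it answers with the forwarding address, so sweeping system weaks through the same interface picks up moved objects. The dispatch reduced to a toy sketch (illustrative stubs, not ART's types):

```cpp
struct Object {
  bool mark_bit = false;
};

class MarkCompactSketch {
 public:
  // Returning a pointer (object or nullptr) instead of a bool is what lets
  // the same virtual method double as a forwarding-address lookup.
  Object* IsMarked(Object* obj) {
    if (updating_references_) {
      return GetForwardAddress(obj);  // Update pass: objects are moving.
    }
    return obj->mark_bit ? obj : nullptr;  // Marking pass: plain query.
  }

  void BeginUpdatingReferences() { updating_references_ = true; }
  void EndUpdatingReferences() { updating_references_ = false; }

 private:
  Object* GetForwardAddress(Object* obj) {
    return obj;  // Toy stand-in; the real lookup reads the forwarding state.
  }

  bool updating_references_ = false;
};
```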
-void MarkCompact::ScanObject(Object* obj) { +void MarkCompact::ScanObject(mirror::Object* obj) { MarkCompactMarkObjectVisitor visitor(this); - obj->VisitReferences<kMovingClasses>(visitor, visitor); + obj->VisitReferences(visitor, visitor); } // Scan anything that's on the mark stack. void MarkCompact::ProcessMarkStack() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); while (!mark_stack_->IsEmpty()) { - Object* obj = mark_stack_->PopBack(); + mirror::Object* obj = mark_stack_->PopBack(); DCHECK(obj != nullptr); ScanObject(obj); } diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h index f59a2cd93a..8d91939057 100644 --- a/runtime/gc/collector/mark_compact.h +++ b/runtime/gc/collector/mark_compact.h @@ -64,13 +64,13 @@ class MarkCompact : public GarbageCollector { virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS; void InitializePhase(); - void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkingPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + void ReclaimPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + void FinishPhase() REQUIRES(Locks::mutator_lock_); void MarkReachableObjects() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -88,130 +88,106 @@ class MarkCompact : public GarbageCollector { void FindDefaultMarkBitmap(); void ScanObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Marks the root set at the start of a garbage collection. void MarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie // the image. Mark that portion of the heap as immune. - void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); void UnBindBitmaps() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); - void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. - void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); // Sweeps unmarked objects to complete the garbage collection. 
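ProcessMarkStack above is the standard tricolor worklist loop: pop a marked (gray) object, scan its references, push any newly marked objects, and stop once the stack drains, at which point the reachable closure is black. A self-contained toy version over an explicit object graph:

```cpp
#include <vector>

// Toy object graph: each object knows its references and carries a mark bit.
struct Object {
  std::vector<Object*> fields;
  bool marked = false;
};

// Worklist loop: every object on the stack is already marked. Scanning may
// push newly marked objects; the loop ends when the transitive closure of
// reachable objects has been blackened.
void ProcessMarkStackSketch(std::vector<Object*>* mark_stack) {
  while (!mark_stack->empty()) {
    Object* obj = mark_stack->back();
    mark_stack->pop_back();
    for (Object* ref : obj->fields) {
      if (ref != nullptr && !ref->marked) {
        ref->marked = true;  // Mark before pushing, as MarkObject does.
        mark_stack->push_back(ref);
      }
    }
  }
}
```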
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); void SweepSystemWeaks() - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); - - static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - - static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - - static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr, - void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - - static void ProcessMarkStackCallback(void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); - - static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref, - void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Schedules an unmarked object for reference processing. void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); protected: // Returns null if the object is not marked, otherwise returns the forwarding address (same as // object for non movable things). - mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + mirror::Object* GetMarkedForwardAddress(mirror::Object* object) + REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Marks or unmarks a large object based on whether or not set is true. If set is true, then we // mark, otherwise we unmark. bool MarkLargeObject(const mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Expand mark stack to 2x its current size. - void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if we should sweep the space. bool ShouldSweepSpace(space::ContinuousSpace* space) const; // Push an object onto the mark stack. 
- void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void UpdateAndMarkModUnion() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Recursively blackens objects on the mark stack. void ProcessMarkStack() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // 3 pass mark compact approach. - void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + void Compact() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding // bitmap. void CalculateObjectForwardingAddresses() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Update the references of objects by using the forwarding addresses. - void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); - static void UpdateRootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void UpdateReferences() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Move objects and restore lock words. - void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void MoveObjects() REQUIRES(Locks::mutator_lock_); // Move a single object to its forward address. - void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void MoveObject(mirror::Object* obj, size_t len) REQUIRES(Locks::mutator_lock_); // Mark a single object. - void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, - Locks::mutator_lock_); - bool IsMarked(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, + virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); + virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); + void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Update a single heap reference. 
void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); - static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference, - void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); // Update all of the references of a single object. void UpdateObjectReferences(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(Locks::mutator_lock_); // Revoke all the thread-local buffers. void RevokeAllThreadLocalBuffers(); @@ -242,6 +218,9 @@ class MarkCompact : public GarbageCollector { // Which lock words we need to restore as we are moving objects. std::deque<LockWord> lock_words_to_restore_; + // State whether or not we are updating references. + bool updating_references_; + private: friend class BitmapSetSlowPathVisitor; friend class CalculateObjectForwardingAddressVisitor; diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h index 4e3845e5d6..a3cc83132f 100644 --- a/runtime/gc/collector/mark_sweep-inl.h +++ b/runtime/gc/collector/mark_sweep-inl.h @@ -32,7 +32,7 @@ template<typename MarkVisitor, typename ReferenceVisitor> inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor, const ReferenceVisitor& ref_visitor) { DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces(); - obj->VisitReferences<false>(visitor, ref_visitor); + obj->VisitReferences(visitor, ref_visitor); if (kCountScannedTypes) { mirror::Class* klass = obj->GetClass<kVerifyNone>(); if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) { diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 1c9c41204a..b0a8a5bf2b 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -37,7 +37,6 @@ #include "gc/accounting/space_bitmap-inl.h" #include "gc/heap.h" #include "gc/reference_processor.h" -#include "gc/space/image_space.h" #include "gc/space/large_object_space.h" #include "gc/space/space-inl.h" #include "mark_sweep-inl.h" @@ -47,8 +46,6 @@ #include "thread-inl.h" #include "thread_list.h" -using ::art::mirror::Object; - namespace art { namespace gc { namespace collector { @@ -175,8 +172,7 @@ void MarkSweep::RunPhases() { void MarkSweep::ProcessReferences(Thread* self) { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); GetHeap()->GetReferenceProcessor()->ProcessReferences( - true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), - &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this); + true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); } void MarkSweep::PausePhase() { @@ -194,7 +190,7 @@ void MarkSweep::PausePhase() { { TimingLogger::ScopedTiming t2("SwapStacks", GetTimings()); WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); - heap_->SwapStacks(self); + heap_->SwapStacks(); live_stack_freeze_size_ = heap_->GetLiveStack()->Size(); // Need to revoke all the thread local allocation stacks since we just swapped the allocation // stacks and don't want anybody to allocate into the live stack. 
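The recurring change from `obj->VisitReferences<...>(visitor, ref_visitor)` to `obj->VisitReferences(visitor, ref_visitor)` drops the template argument, and the visitors throughout this diff gain VisitRoot/VisitRootIfNonNull overloads so that class roots are reported as well. A sketch of the shape such a visitor now has, with CompressedReference simplified to a plain Object** (illustrative types only):

```cpp
#include <cstddef>

struct Object {};
struct Class {};
struct Reference {};
struct MemberOffset { size_t value; };

// The contract the marking visitors in this diff satisfy: one operator() per
// instance field, one for java.lang.ref.Reference referents, and root hooks.
class MarkVisitorSketch {
 public:
  void operator()(Object* /*obj*/, MemberOffset /*offset*/, bool /*is_static*/) const {
    // Mark the object stored in the field at `offset`.
  }

  void operator()(Class* /*klass*/, Reference* /*ref*/) const {
    // Defer the referent to the reference processor instead of marking it.
  }

  void VisitRootIfNonNull(Object** root) const {
    if (*root != nullptr) {
      VisitRoot(root);
    }
  }

  void VisitRoot(Object** /*root*/) const {
    // Mark the root in place. Several hunks above carry
    // NO_THREAD_SAFETY_ANALYSIS here until clang understands visitors.
  }
};
```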
@@ -273,7 +269,7 @@ void MarkSweep::UpdateAndMarkModUnion() { TimingLogger::ScopedTiming t(name, GetTimings()); accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space); CHECK(mod_union_table != nullptr); - mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); + mod_union_table->UpdateAndMarkReferences(this); } } } @@ -333,7 +329,7 @@ void MarkSweep::ResizeMarkStack(size_t new_size) { // Someone else acquired the lock and expanded the mark stack before us. return; } - std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End()); + std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End()); CHECK_LE(mark_stack_->Size(), new_size); mark_stack_->Resize(new_size); for (auto& obj : temp) { @@ -341,7 +337,12 @@ void MarkSweep::ResizeMarkStack(size_t new_size) { } } -inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) { +mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) { + MarkObject(obj, nullptr, MemberOffset(0)); + return obj; +} + +inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) { DCHECK(obj != nullptr); if (MarkObjectParallel(obj)) { MutexLock mu(Thread::Current(), mark_stack_lock_); @@ -353,28 +354,18 @@ inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) { } } -mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) { - MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg); - mark_sweep->MarkObject(obj); - return obj; -} - -void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { - reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr()); -} - -bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) { - return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr()); +bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) { + return IsMarked(ref->AsMirrorPtr()); } class MarkSweepMarkObjectSlowPath { public: - explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, Object* holder = nullptr, + explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr, MemberOffset offset = MemberOffset(0)) : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) { } - void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS { + void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS { if (kProfileLargeObjects) { // TODO: Differentiate between marking and testing somehow. ++mark_sweep_->large_object_test_; @@ -450,7 +441,8 @@ class MarkSweepMarkObjectSlowPath { MemberOffset offset_; }; -inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffset offset) { +inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder, + MemberOffset offset) { DCHECK(obj != nullptr); if (kUseBakerOrBrooksReadBarrier) { // Verify all the objects have the correct pointer installed. @@ -481,7 +473,7 @@ inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffs } } -inline void MarkSweep::PushOnMarkStack(Object* obj) { +inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) { if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) { // Lock is not needed but is here anyways to please annotalysis. 
MutexLock mu(Thread::Current(), mark_stack_lock_); @@ -491,14 +483,14 @@ inline void MarkSweep::PushOnMarkStack(Object* obj) { mark_stack_->PushBack(obj); } -inline bool MarkSweep::MarkObjectParallel(const Object* obj) { +inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) { DCHECK(obj != nullptr); if (kUseBakerOrBrooksReadBarrier) { // Verify all the objects have the correct pointer installed. obj->AssertReadBarrierPointer(); } if (immune_region_.ContainsObject(obj)) { - DCHECK(IsMarked(obj)); + DCHECK(IsMarked(obj) != nullptr); return false; } // Try to take advantage of locality of references within a space, failing this find the space @@ -511,8 +503,13 @@ inline bool MarkSweep::MarkObjectParallel(const Object* obj) { return !mark_bitmap_->AtomicTestAndSet(obj, visitor); } +void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) { + MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0)); +} + // Used to mark objects when processing the mark stack. If an object is null, it is not marked. -inline void MarkSweep::MarkObject(Object* obj, Object* holder, MemberOffset offset) { +inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder, + MemberOffset offset) { if (obj != nullptr) { MarkObjectNonNull(obj, holder, offset); } else if (kCountMarkedObjects) { @@ -525,8 +522,8 @@ class VerifyRootMarkedVisitor : public SingleRootVisitor { explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { } void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { - CHECK(collector_->IsMarked(root)) << info.ToString(); + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + CHECK(collector_->IsMarked(root) != nullptr) << info.ToString(); } private: @@ -550,7 +547,7 @@ void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, class VerifyRootVisitor : public SingleRootVisitor { public: void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // See if the root is on any space bitmap. 
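MarkObjectParallel is the lock-free heart of parallel marking: whichever thread wins the atomic test-and-set on the mark bitmap takes responsibility for pushing the object, so checkpoint roots and the MarkStackTask workers below can mark concurrently without a mutex. A word-granular sketch of that primitive (illustrative, not ART's SpaceBitmap):

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

class AtomicBitmapSketch {
 public:
  explicit AtomicBitmapSketch(size_t num_bits) : words_((num_bits + 63) / 64) {}

  // Returns true if the bit was already set (someone else marked the object
  // first); exactly one concurrent caller gets false and must push the
  // object onto its mark stack.
  bool AtomicTestAndSet(size_t index) {
    const uint64_t mask = uint64_t{1} << (index % 64);
    uint64_t old_word = words_[index / 64].fetch_or(mask, std::memory_order_relaxed);
    return (old_word & mask) != 0;
  }

 private:
  std::vector<std::atomic<uint64_t>> words_;  // Zero-initialized on construction.
};

// Usage in the shape of MarkObjectParallel:
//   if (!bitmap.AtomicTestAndSet(IndexOf(obj))) {
//     PushOnThreadLocalMarkStack(obj);  // We won the race; we scan it.
//   }
```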
auto* heap = Runtime::Current()->GetHeap(); if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) { @@ -599,8 +596,8 @@ class ScanObjectVisitor { explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {} - void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const ALWAYS_INLINE + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { if (kCheckLocks) { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); @@ -618,8 +615,8 @@ class DelayReferenceReferentVisitor { } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { collector_->DelayReferenceReferent(klass, ref); } @@ -631,7 +628,7 @@ template <bool kUseFinger = false> class MarkStackTask : public Task { public: MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size, - StackReference<Object>* mark_stack) + StackReference<mirror::Object>* mark_stack) : mark_sweep_(mark_sweep), thread_pool_(thread_pool), mark_stack_pos_(mark_stack_size) { @@ -651,13 +648,33 @@ class MarkStackTask : public Task { protected: class MarkObjectParallelVisitor { public: - explicit MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, - MarkSweep* mark_sweep) ALWAYS_INLINE - : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} + ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task, + MarkSweep* mark_sweep) + : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {} - void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); + void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { + Mark(obj->GetFieldObject<mirror::Object>(offset)); + } + + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (kCheckLocks) { + Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + Mark(root->AsMirrorPtr()); + } + + private: + void Mark(mirror::Object* ref) const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) { if (kUseFinger) { std::atomic_thread_fence(std::memory_order_seq_cst); @@ -670,7 +687,6 @@ class MarkStackTask : public Task { } } - private: MarkStackTask<kUseFinger>* const chunk_task_; MarkSweep* const mark_sweep_; }; @@ -681,8 +697,8 @@ class MarkStackTask : public Task { : chunk_task_(chunk_task) {} // No thread safety analysis since multiple threads will use this visitor. 
- void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { MarkSweep* const mark_sweep = chunk_task_->mark_sweep_; MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep); DelayReferenceReferentVisitor ref_visitor(mark_sweep); @@ -704,11 +720,12 @@ class MarkStackTask : public Task { MarkSweep* const mark_sweep_; ThreadPool* const thread_pool_; // Thread local mark stack for this task. - StackReference<Object> mark_stack_[kMaxSize]; + StackReference<mirror::Object> mark_stack_[kMaxSize]; // Mark stack position. size_t mark_stack_pos_; - ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE void MarkStackPush(mirror::Object* obj) + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(mark_stack_pos_ == kMaxSize)) { // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task. mark_stack_pos_ /= 2; @@ -726,18 +743,18 @@ class MarkStackTask : public Task { } // Scans all of the objects - virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + virtual void Run(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { UNUSED(self); ScanObjectParallelVisitor visitor(this); // TODO: Tune this. static const size_t kFifoSize = 4; - BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; + BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo; for (;;) { - Object* obj = nullptr; + mirror::Object* obj = nullptr; if (kUseMarkStackPrefetch) { while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { - Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); + mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr(); DCHECK(mark_stack_obj != nullptr); __builtin_prefetch(mark_stack_obj); prefetch_fifo.push_back(mark_stack_obj); @@ -764,7 +781,7 @@ class CardScanTask : public MarkStackTask<false> { CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, accounting::ContinuousSpaceBitmap* bitmap, uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size, - StackReference<Object>* mark_stack_obj, bool clear_card) + StackReference<mirror::Object>* mark_stack_obj, bool clear_card) : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj), bitmap_(bitmap), begin_(begin), @@ -815,8 +832,8 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__, GetTimings()); // Try to take some of the mark stack since we can pass this off to the worker tasks. - StackReference<Object>* mark_stack_begin = mark_stack_->Begin(); - StackReference<Object>* mark_stack_end = mark_stack_->End(); + StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin(); + StackReference<mirror::Object>* mark_stack_end = mark_stack_->End(); const size_t mark_stack_size = mark_stack_end - mark_stack_begin; // Estimated number of work tasks we will create. const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count; @@ -832,8 +849,8 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) { // Align up the end address. For example, the image space's end // may not be card-size-aligned. 
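The Run loop above (and MarkSweep::ProcessMarkStack later in this file) drains the mark stack through a small bounded FIFO so that __builtin_prefetch can warm the cache a few objects ahead of the actual scan. The pattern reduced to a sketch, with BoundedFifoPowerOfTwo replaced by a plain circular array (names illustrative):

```cpp
#include <cstddef>

struct Object {
  int payload = 0;
};

int scanned = 0;
void ScanObject(Object* obj) { scanned += obj->payload; }  // Stand-in for the real scan.

// Pop objects into a tiny FIFO, prefetching each one on entry; by the time an
// object leaves the FIFO and is scanned, its header is likely already cached.
void DrainWithPrefetch(Object** stack, size_t* pos) {
  static const size_t kFifoSize = 4;  // "TODO: Tune this", as the source says.
  Object* fifo[kFifoSize];
  size_t head = 0;
  size_t count = 0;
  for (;;) {
    while (*pos != 0 && count < kFifoSize) {
      Object* obj = stack[--(*pos)];
      __builtin_prefetch(obj);  // GCC/Clang builtin; a hint, never faults.
      fifo[(head + count) % kFifoSize] = obj;
      ++count;
    }
    if (count == 0) {
      break;  // Stack and FIFO both empty: done.
    }
    Object* obj = fifo[head];
    head = (head + 1) % kFifoSize;
    --count;
    ScanObject(obj);
  }
}
```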
card_end = AlignUp(card_end, accounting::CardTable::kCardSize); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_begin)); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(card_end)); + DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize); + DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize); // Calculate how many bytes of heap we will scan, const size_t address_range = card_end - card_begin; // Calculate how much address range each task gets. @@ -988,13 +1005,6 @@ void MarkSweep::RecursiveMark() { ProcessMarkStack(false); } -mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) { - if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) { - return object; - } - return nullptr; -} - void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) { ScanGrayObjects(paused, minimum_age); ProcessMarkStack(paused); @@ -1014,17 +1024,24 @@ void MarkSweep::ReMarkRoots() { void MarkSweep::SweepSystemWeaks(Thread* self) { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); - WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); - Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this); + ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); + Runtime::Current()->SweepSystemWeaks(this); } -mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) { - reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj); - // We don't actually want to sweep the object, so lets return "marked" - return obj; -} +class VerifySystemWeakVisitor : public IsMarkedVisitor { + public: + explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {} -void MarkSweep::VerifyIsLive(const Object* obj) { + virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + mark_sweep_->VerifyIsLive(obj); + return obj; + } + + MarkSweep* const mark_sweep_; +}; + +void MarkSweep::VerifyIsLive(const mirror::Object* obj) { if (!heap_->GetLiveBitmap()->Test(obj)) { // TODO: Consider live stack? Has this code bitrotted? CHECK(!heap_->allocation_stack_->Contains(obj)) @@ -1035,21 +1052,22 @@ void MarkSweep::VerifyIsLive(const Object* obj) { void MarkSweep::VerifySystemWeaks() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); // Verify system weaks, uses a special object visitor which returns the input object. 
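SweepSystemWeaks now hands the runtime an IsMarkedVisitor (the collector itself, or the VerifySystemWeakVisitor below, which checks liveness and then reports everything as marked), and each weak container decides per entry what to do with the returned pointer. A sketch of a weak container sweeping under that contract (illustrative container type):

```cpp
#include <vector>

struct Object {};

class IsMarkedVisitorSketch {
 public:
  virtual ~IsMarkedVisitorSketch() {}
  // Returns null if dead, otherwise the (possibly moved) live object.
  virtual Object* IsMarked(Object* obj) = 0;
};

// A weak table sweeps itself by asking the collector about each entry: dead
// entries are nulled out, live ones are rewritten in place, which also picks
// up forwarding addresses from moving collectors like mark-compact above.
void SweepWeakTable(std::vector<Object*>* table, IsMarkedVisitorSketch* visitor) {
  for (Object*& entry : *table) {
    if (entry != nullptr) {
      entry = visitor->IsMarked(entry);  // nullptr if the object died.
    }
  }
}
```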
- Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this); + VerifySystemWeakVisitor visitor(this); + Runtime::Current()->SweepSystemWeaks(&visitor); } class CheckpointMarkThreadRoots : public Closure, public RootVisitor { public: - explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep, - bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) + CheckpointMarkThreadRoots(MarkSweep* mark_sweep, + bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) : mark_sweep_(mark_sweep), revoke_ros_alloc_thread_local_buffers_at_checkpoint_( revoke_ros_alloc_thread_local_buffers_at_checkpoint) { } void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mark_sweep_->MarkObjectNonNullParallel(*roots[i]); } @@ -1057,8 +1075,8 @@ class CheckpointMarkThreadRoots : public Closure, public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { for (size_t i = 0; i < count; ++i) { mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr()); } @@ -1122,7 +1140,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma ObjectBytePair freed; ObjectBytePair freed_los; // How many objects are left in the array, modified after each space is swept. - StackReference<Object>* objects = allocations->Begin(); + StackReference<mirror::Object>* objects = allocations->Begin(); size_t count = allocations->Size(); // Change the order to ensure that the non-moving space last swept as an optimization. std::vector<space::ContinuousSpace*> sweep_spaces; @@ -1150,9 +1168,9 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma if (swap_bitmaps) { std::swap(live_bitmap, mark_bitmap); } - StackReference<Object>* out = objects; + StackReference<mirror::Object>* out = objects; for (size_t i = 0; i < count; ++i) { - Object* const obj = objects[i].AsMirrorPtr(); + mirror::Object* const obj = objects[i].AsMirrorPtr(); if (kUseThreadLocalAllocationStack && obj == nullptr) { continue; } @@ -1191,7 +1209,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma std::swap(large_live_objects, large_mark_objects); } for (size_t i = 0; i < count; ++i) { - Object* const obj = objects[i].AsMirrorPtr(); + mirror::Object* const obj = objects[i].AsMirrorPtr(); // Handle large objects. 
if (kUseThreadLocalAllocationStack && obj == nullptr) { continue; @@ -1250,18 +1268,17 @@ void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* if (kCountJavaLangRefs) { ++reference_count_; } - heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback, - this); + heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this); } -class MarkObjectVisitor { +class MarkVisitor { public: - explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { + explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) { } - void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const - ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) { if (kCheckLocks) { Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); @@ -1269,22 +1286,34 @@ class MarkObjectVisitor { mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset); } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { + if (kCheckLocks) { + Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + mark_sweep_->MarkObject(root->AsMirrorPtr()); + } + private: MarkSweep* const mark_sweep_; }; // Scans an object reference. Determines the type of the reference // and dispatches to a specialized scanning routine. -void MarkSweep::ScanObject(Object* obj) { - MarkObjectVisitor mark_visitor(this); +void MarkSweep::ScanObject(mirror::Object* obj) { + MarkVisitor mark_visitor(this); DelayReferenceReferentVisitor ref_visitor(this); ScanObjectVisit(obj, mark_visitor, ref_visitor); } -void MarkSweep::ProcessMarkStackCallback(void* arg) { - reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false); -} - void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { Thread* self = Thread::Current(); ThreadPool* thread_pool = GetHeap()->GetThreadPool(); @@ -1317,12 +1346,12 @@ void MarkSweep::ProcessMarkStack(bool paused) { } else { // TODO: Tune this. 
static const size_t kFifoSize = 4; - BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo; + BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo; for (;;) { - Object* obj = nullptr; + mirror::Object* obj = nullptr; if (kUseMarkStackPrefetch) { while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { - Object* mark_stack_obj = mark_stack_->PopBack(); + mirror::Object* mark_stack_obj = mark_stack_->PopBack(); DCHECK(mark_stack_obj != nullptr); __builtin_prefetch(mark_stack_obj); prefetch_fifo.push_back(mark_stack_obj); @@ -1344,14 +1373,14 @@ void MarkSweep::ProcessMarkStack(bool paused) { } } -inline bool MarkSweep::IsMarked(const Object* object) const { +inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) { if (immune_region_.ContainsObject(object)) { - return true; + return object; } if (current_space_bitmap_->HasAddress(object)) { - return current_space_bitmap_->Test(object); + return current_space_bitmap_->Test(object) ? object : nullptr; } - return mark_bitmap_->Test(object); + return mark_bitmap_->Test(object) ? object : nullptr; } void MarkSweep::FinishPhase() { diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h index d29d87af1e..8bd1dc7cd5 100644 --- a/runtime/gc/collector/mark_sweep.h +++ b/runtime/gc/collector/mark_sweep.h @@ -54,19 +54,18 @@ namespace collector { class MarkSweep : public GarbageCollector { public: - explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); + MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); ~MarkSweep() {} - virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS; + virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_); void InitializePhase(); - void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void PausePhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); - void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void PausePhase() REQUIRES(Locks::mutator_lock_, !mark_stack_lock_); + void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); + void FinishPhase(); virtual void MarkReachableObjects() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); bool IsConcurrent() const { return is_concurrent_; @@ -88,121 +87,96 @@ class MarkSweep : public GarbageCollector { // Marks all objects in the root set at the start of a garbage collection. 
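ProcessMarkStack above stages the next few objects in a small power-of-two FIFO and issues __builtin_prefetch before scanning them, hiding cache-miss latency behind useful work. A self-contained simplification of that loop (BoundedFifo and the vector mark stack are stand-ins for ART's BoundedFifoPowerOfTwo and ObjectStack):

    #include <cstddef>
    #include <vector>

    struct Object {};

    void ScanObject(Object*) { /* blacken: visit fields, push referents */ }

    // Tiny power-of-two ring buffer standing in for BoundedFifoPowerOfTwo.
    template <typename T, size_t kSize>
    class BoundedFifo {
     public:
      bool empty() const { return head_ == tail_; }
      size_t size() const { return tail_ - head_; }
      void push_back(T v) { data_[tail_++ & (kSize - 1)] = v; }
      T front() const { return data_[head_ & (kSize - 1)]; }
      void pop_front() { ++head_; }
     private:
      T data_[kSize];
      size_t head_ = 0, tail_ = 0;
    };

    void DrainMarkStack(std::vector<Object*>& mark_stack) {
      static const size_t kFifoSize = 4;
      BoundedFifo<Object*, kFifoSize> prefetch_fifo;
      for (;;) {
        // Top up the FIFO, prefetching each object as it is staged.
        while (!mark_stack.empty() && prefetch_fifo.size() < kFifoSize) {
          Object* stack_obj = mark_stack.back();
          mark_stack.pop_back();
          __builtin_prefetch(stack_obj);
          prefetch_fifo.push_back(stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;  // Stack and FIFO both empty: marking is done.
        }
        Object* obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
        ScanObject(obj);  // By now the prefetched lines should have arrived.
      }
    }

By the time an object reaches the front of the four-entry FIFO, its header has had a few iterations' worth of time to arrive in cache, which is why the "TODO: Tune this" size is worth revisiting per architecture.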
void MarkRoots(Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void MarkNonThreadRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void MarkConcurrentRoots(VisitRootFlags flags) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Builds a mark stack and recursively mark until it empties. void RecursiveMark() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie // the image. Mark that portion of the heap as immune. - virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_); // Builds a mark stack with objects on dirty cards and recursively mark until it empties. void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Remarks the root set after completing the concurrent mark. void ReMarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void ProcessReferences(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); // Update and mark references from immune spaces. void UpdateAndMarkModUnion() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); // Pre clean cards to reduce how much work is needed in the pause. void PreCleanCards() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps // all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap. - virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. - void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); // Sweep only pointers within an array. WARNING: Trashes objects. 
void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); // Blackens an object. void ScanObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // No thread safety analysis due to lambdas. template<typename MarkVisitor, typename ReferenceVisitor> void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor, const ReferenceVisitor& ref_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); void SweepSystemWeaks(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_); static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void VerifySystemWeaks() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Verify that an object is live, either in a live bitmap or in the allocation stack. void VerifyIsLive(const mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); - static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static void ProcessMarkStackCallback(void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); // Marks an object. 
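IsMarkedHeapReference replacing HeapReferenceMarkedCallback is what the reference processor now calls when it meets a java.lang.ref.Reference: an unmarked referent causes the Reference to be queued rather than processed eagerly. A schematic sketch of that decision, using plain pointers where ART uses mirror::HeapReference (the queue and types here are illustrative):

    #include <deque>

    struct Object { bool marked = false; };
    struct Reference { Object* referent; };

    class CollectorSketch {
     public:
      // True if the referent is live; a moving collector would also rewrite
      // the slot to the forwarded address from in here.
      bool IsMarkedHeapReference(Object** slot) {
        return *slot != nullptr && (*slot)->marked;
      }

      // Called while scanning a Reference: only unresolved referents push
      // the Reference onto the pending queue for later processing.
      void DelayReferenceReferent(Reference* ref) {
        if (!IsMarkedHeapReference(&ref->referent)) {
          pending_.push_back(ref);
        }
      }

      // After marking completes, clear references whose referents stayed dead.
      void ProcessPendingReferences() {
        for (Reference* ref : pending_) {
          if (!IsMarkedHeapReference(&ref->referent)) {
            ref->referent = nullptr;
          }
        }
        pending_.clear();
      }

     private:
      std::deque<Reference*> pending_;
    };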
- void MarkObject(mirror::Object* obj, mirror::Object* holder = nullptr, - MemberOffset offset = MemberOffset(0)) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); + void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); Barrier& GetBarrier() { return *gc_barrier_; @@ -210,30 +184,23 @@ class MarkSweep : public GarbageCollector { // Schedules an unmarked object for reference processing. void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); protected: - // Returns true if the object has its bit set in the mark bitmap. - bool IsMarked(const mirror::Object* object) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static void VerifyImageRootVisitor(mirror::Object* root, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + // Returns object if the object is marked in the heap bitmap, otherwise null. + virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE + SHARED_REQUIRES(Locks::heap_bitmap_lock_); void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr, MemberOffset offset = MemberOffset(0)) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_); // Marks an object atomically, safe to use from multiple threads. void MarkObjectNonNullParallel(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); // Returns true if we need to add obj to a mark stack. - bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; + bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // Verify the roots of the heap and print out information related to any invalid roots. // Called in MarkObject, so may we may not hold the mutator lock. @@ -241,31 +208,34 @@ class MarkSweep : public GarbageCollector { NO_THREAD_SAFETY_ANALYSIS; // Expand mark stack to 2x its current size. - void ExpandMarkStack() EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void ResizeMarkStack(size_t new_size) EXCLUSIVE_LOCKS_REQUIRED(mark_stack_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ExpandMarkStack() REQUIRES(mark_stack_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + void ResizeMarkStack(size_t new_size) REQUIRES(mark_stack_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Returns how many threads we should use for the current GC phase based on if we are paused, // whether or not we care about pauses. size_t GetThreadCount(bool paused) const; // Push a single reference on a mark stack. 
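MarkObjectNonNullParallel, used from the checkpoint root visitors above, has to tolerate several threads marking the same object at once. The standard trick is an atomic test-and-set in the mark bitmap so exactly one thread wins and enqueues the object. A compressed sketch, assuming one bitmap bit per object index (ART's SpaceBitmap works on addresses, not indices):

    #include <atomic>
    #include <cstdint>
    #include <vector>

    class AtomicBitmap {
     public:
      explicit AtomicBitmap(size_t bits) : words_((bits + 63) / 64) {}

      // Sets the bit and reports whether it was already set. fetch_or makes
      // test-and-set one atomic step, so two racing threads cannot both
      // believe they marked the object first.
      bool TestAndSet(size_t index) {
        const uint64_t mask = uint64_t{1} << (index % 64);
        const uint64_t old =
            words_[index / 64].fetch_or(mask, std::memory_order_relaxed);
        return (old & mask) != 0;
      }

     private:
      std::vector<std::atomic<uint64_t>> words_;
    };

    // Only the winning thread enqueues, so each object is scanned exactly once.
    void MarkObjectParallel(AtomicBitmap& bitmap, size_t obj_index,
                            std::vector<size_t>& mark_stack) {
      if (!bitmap.TestAndSet(obj_index)) {
        mark_stack.push_back(obj_index);
      }
    }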
- void PushOnMarkStack(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PushOnMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!mark_stack_lock_); // Blackens objects grayed during a garbage collection. void ScanGrayObjects(bool paused, uint8_t minimum_age) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + virtual void ProcessMarkStack() OVERRIDE REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { + ProcessMarkStack(false); + } // Recursively blackens objects on the mark stack. void ProcessMarkStack(bool paused) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void ProcessMarkStackParallel(size_t thread_count) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by // IsExclusiveHeld. diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h index 1a211cd3b6..e9b4f6fba1 100644 --- a/runtime/gc/collector/partial_mark_sweep.h +++ b/runtime/gc/collector/partial_mark_sweep.h @@ -30,14 +30,14 @@ class PartialMarkSweep : public MarkSweep { return kGcTypePartial; } - explicit PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); + PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); ~PartialMarkSweep() {} protected: // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial // collections, ie the Zygote space. Also mark this space is immune. Virtual as overridden by // StickyMarkSweep. - virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep); diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h index 7b19dc93a0..06d20f583a 100644 --- a/runtime/gc/collector/semi_space-inl.h +++ b/runtime/gc/collector/semi_space-inl.h @@ -34,7 +34,7 @@ class BitmapSetSlowPathVisitor { void operator()(const mirror::Object* obj) const { CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_"; // Marking a large object, make sure its aligned as a sanity check. 
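The new MarkObjectIfNotInToSpace in semi_space-inl.h exists because the same root can be visited more than once (the diff later notes an ArtMethod's declaring class seen repeatedly on the call stack), and re-forwarding an object that already sits in the to-space would corrupt it. A sketch of the guard, assuming a contiguous to-space address range:

    #include <cstdint>

    struct Object {};

    class SemiSpaceSketch {
     public:
      SemiSpaceSketch(uint8_t* to_begin, uint8_t* to_end)
          : to_begin_(to_begin), to_end_(to_end) {}

      bool InToSpace(const Object* obj) const {
        const uint8_t* addr = reinterpret_cast<const uint8_t*>(obj);
        return addr >= to_begin_ && addr < to_end_;
      }

      // Copy obj into to-space and install a forwarding pointer (elided).
      void MarkObject(Object** /* slot */) {}

      // Safe to call repeatedly on the same root: objects that were already
      // moved are left alone instead of being "forwarded" a second time.
      void MarkObjectIfNotInToSpace(Object** slot) {
        if (*slot != nullptr && !InToSpace(*slot)) {
          MarkObject(slot);
        }
      }

     private:
      uint8_t* const to_begin_;
      uint8_t* const to_end_;
    };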
- CHECK(IsAligned<kPageSize>(obj)); + CHECK_ALIGNED(obj, kPageSize); } private: @@ -83,6 +83,14 @@ inline void SemiSpace::MarkObject( } } +template<bool kPoisonReferences> +inline void SemiSpace::MarkObjectIfNotInToSpace( + mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) { + if (!to_space_->HasAddress(obj_ptr->AsMirrorPtr())) { + MarkObject(obj_ptr); + } +} + } // namespace collector } // namespace gc } // namespace art diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index 82d02e7fb2..a355d406d9 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -157,8 +157,7 @@ void SemiSpace::InitializePhase() { void SemiSpace::ProcessReferences(Thread* self) { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); GetHeap()->GetReferenceProcessor()->ProcessReferences( - false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), - &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this); + false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); } void SemiSpace::MarkingPhase() { @@ -227,7 +226,7 @@ void SemiSpace::MarkingPhase() { TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings()); heap_->RevokeAllThreadLocalAllocationStacks(self_); } - heap_->SwapStacks(self_); + heap_->SwapStacks(); { WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); MarkRoots(); @@ -274,8 +273,7 @@ void SemiSpace::MarkingPhase() { class SemiSpaceScanObjectVisitor { public: explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} - void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, - Locks::heap_bitmap_lock_) { + void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(obj != nullptr); semi_space_->ScanObject(obj); } @@ -290,31 +288,50 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor { from_space_(from_space) {} void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE { + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); if (from_space_->HasAddress(ref)) { Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj); LOG(FATAL) << ref << " found in from space"; } } + + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. 
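The verification visitor being extended here is a debug-build safety net: after the copying phase, every field of every reachable object is checked against the from-space range, since any surviving from-space pointer means a missed update. A simplified stand-alone version (ref_slots is an artificial stand-in for ART's real field iteration):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Object {
      std::vector<Object**> ref_slots;  // Stand-in for reference fields.
    };

    class FromSpaceChecker {
     public:
      FromSpaceChecker(const uint8_t* begin, const uint8_t* end)
          : begin_(begin), end_(end) {}

      bool InFromSpace(const Object* obj) const {
        const uint8_t* addr = reinterpret_cast<const uint8_t*>(obj);
        return addr >= begin_ && addr < end_;
      }

      // Any hit here is a GC bug: the slot should have been rewritten to the
      // forwarded address before from-space is reclaimed.
      void VerifyNoFromSpaceReferences(const Object* obj) const {
        assert(!InFromSpace(obj) && "scanning an object left in from-space");
        for (Object** slot : obj->ref_slots) {
          assert((*slot == nullptr || !InFromSpace(*slot)) &&
                 "field still points into from-space");
        }
      }

     private:
      const uint8_t* const begin_;
      const uint8_t* const end_;
    };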
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (kIsDebugBuild) { + Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + CHECK(!from_space_->HasAddress(root->AsMirrorPtr())); + } + private: - space::ContinuousMemMapAllocSpace* from_space_; + space::ContinuousMemMapAllocSpace* const from_space_; }; void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) { DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_); - obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor()); + obj->VisitReferences(visitor, VoidFunctor()); } class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor { public: explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {} void operator()(Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { DCHECK(obj != nullptr); semi_space_->VerifyNoFromSpaceReferences(obj); } + private: SemiSpace* const semi_space_; }; @@ -336,7 +353,7 @@ void SemiSpace::MarkReachableObjects() { space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : "UpdateAndMarkImageModUnionTable", GetTimings()); - table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); + table->UpdateAndMarkReferences(this); DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr); } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) { // If the space has no mod union table (the non-moving space and main spaces when the bump @@ -351,8 +368,7 @@ void SemiSpace::MarkReachableObjects() { CHECK_EQ(rem_set != nullptr, kUseRememberedSet); if (rem_set != nullptr) { TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings()); - rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback, - from_space_, this); + rem_set->UpdateAndMarkReferences(from_space_, this); if (kIsDebugBuild) { // Verify that there are no from-space references that // remain in the space, that is, the remembered set (and the @@ -583,24 +599,14 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { return forward_address; } -void SemiSpace::ProcessMarkStackCallback(void* arg) { - reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack(); -} - -mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) { +mirror::Object* SemiSpace::MarkObject(mirror::Object* root) { auto ref = StackReference<mirror::Object>::FromMirrorPtr(root); - reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref); + MarkObject(&ref); return ref.AsMirrorPtr(); } -void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, - void* arg) { - reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr); -} - -void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref, - void* arg) { - reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref); +void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) { + MarkObject(obj_ptr); } void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count, @@ -608,7 +614,9 @@ void 
SemiSpace::VisitRoots(mirror::Object*** roots, size_t count, for (size_t i = 0; i < count; ++i) { auto* root = roots[i]; auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root); - MarkObject(&ref); + // The root can be in the to-space since we may visit the declaring class of an ArtMethod + // multiple times if it is on the call stack. + MarkObjectIfNotInToSpace(&ref); if (*root != ref.AsMirrorPtr()) { *root = ref.AsMirrorPtr(); } @@ -618,7 +626,7 @@ void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count, void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) { for (size_t i = 0; i < count; ++i) { - MarkObject(roots[i]); + MarkObjectIfNotInToSpace(roots[i]); } } @@ -628,29 +636,9 @@ void SemiSpace::MarkRoots() { Runtime::Current()->VisitRoots(this); } -bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object, - void* arg) { - mirror::Object* obj = object->AsMirrorPtr(); - mirror::Object* new_obj = - reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj); - if (new_obj == nullptr) { - return false; - } - if (new_obj != obj) { - // Write barrier is not necessary since it still points to the same object, just at a different - // address. - object->Assign(new_obj); - } - return true; -} - -mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) { - return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object); -} - void SemiSpace::SweepSystemWeaks() { TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings()); - Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this); + Runtime::Current()->SweepSystemWeaks(this); } bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const { @@ -688,8 +676,7 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) { // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been // marked, put it on the appropriate list in the heap for later processing. void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) { - heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, - &HeapReferenceMarkedCallback, this); + heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this); } class SemiSpaceMarkObjectVisitor { @@ -698,17 +685,35 @@ class SemiSpaceMarkObjectVisitor { } void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Object was already verified when we scanned it. collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset)); } void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { collector_->DelayReferenceReferent(klass, ref); } + // TODO: Remove NO_THREAD_SAFETY_ANALYSIS when clang better understands visitors. 
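With the callbacks gone, SweepSystemWeaks above simply hands the runtime the collector, whose IsMarked doubles as a forwarding query in a moving collection: nullptr means dead, any other value is the object's current (possibly relocated) address. That lets a weak table clear and re-point entries in one pass; a sketch with an illustrative weak map (not ART's intern or monitor tables):

    #include <unordered_map>

    struct Object {};

    class IsMarkedVisitor {
     public:
      virtual ~IsMarkedVisitor() {}
      // nullptr = dead; otherwise the live, possibly relocated address.
      virtual Object* IsMarked(Object* obj) = 0;
    };

    void SweepWeakTable(std::unordered_map<int, Object*>& table,
                        IsMarkedVisitor* visitor) {
      for (auto it = table.begin(); it != table.end();) {
        Object* new_obj = visitor->IsMarked(it->second);
        if (new_obj == nullptr) {
          it = table.erase(it);  // Referent died: drop the weak entry.
        } else {
          it->second = new_obj;  // Referent may have moved: re-point it.
          ++it;
        }
      }
    }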
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + NO_THREAD_SAFETY_ANALYSIS { + if (kIsDebugBuild) { + Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current()); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current()); + } + // We may visit the same root multiple times, so avoid marking things in the to-space since + // this is not handled by the GC. + collector_->MarkObjectIfNotInToSpace(root); + } + private: SemiSpace* const collector_; }; @@ -717,7 +722,7 @@ class SemiSpaceMarkObjectVisitor { void SemiSpace::ScanObject(Object* obj) { DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; SemiSpaceMarkObjectVisitor visitor(this); - obj->VisitReferences<kMovingClasses>(visitor, visitor); + obj->VisitReferences(visitor, visitor); } // Scan anything that's on the mark stack. @@ -746,8 +751,7 @@ void SemiSpace::ProcessMarkStack() { } } -inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { +mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) { // All immune objects are assumed marked. if (from_space_->HasAddress(obj)) { // Returns either the forwarding address or null. @@ -759,6 +763,20 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const return mark_bitmap_->Test(obj) ? obj : nullptr; } +bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) { + mirror::Object* obj = object->AsMirrorPtr(); + mirror::Object* new_obj = IsMarked(obj); + if (new_obj == nullptr) { + return false; + } + if (new_obj != obj) { + // Write barrier is not necessary since it still points to the same object, just at a different + // address. + object->Assign(new_obj); + } + return true; +} + void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) { DCHECK(to_space != nullptr); to_space_ = to_space; diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h index 3c25f539f3..b9246ca2fc 100644 --- a/runtime/gc/collector/semi_space.h +++ b/runtime/gc/collector/semi_space.h @@ -66,13 +66,13 @@ class SemiSpace : public GarbageCollector { virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS; virtual void InitializePhase(); - virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + virtual void MarkingPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + virtual void ReclaimPhase() REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); + virtual void FinishPhase() REQUIRES(Locks::mutator_lock_); void MarkReachableObjects() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual GcType GetGcType() const OVERRIDE { return kGcTypePartial; } @@ -101,105 +101,98 @@ class SemiSpace : public GarbageCollector { // Updates obj_ptr if the object has moved. 
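SemiSpace::IsMarkedHeapReference in the hunk above is worth restating in isolation: it answers "is this live?" and, as a side effect, patches the holder's field when the object moved, with no write barrier needed because the slot still names the same logical object. The same logic with plain pointers (the stub IsMarked is illustrative):

    struct Object {};

    class CopyingCollectorSketch {
     public:
      // Forwarding query: nullptr if dead, else the current address.
      Object* IsMarked(Object* obj) {
        return obj;  // Stub: everything live, nothing moved.
      }

      bool IsMarkedHeapReference(Object** slot) {
        Object* obj = *slot;
        Object* new_obj = IsMarked(obj);
        if (new_obj == nullptr) {
          return false;
        }
        if (new_obj != obj) {
          *slot = new_obj;  // Same object, new address: no write barrier.
        }
        return true;
      }
    };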
template<bool kPoisonReferences> void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + + template<bool kPoisonReferences> + void MarkObjectIfNotInToSpace(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + + virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void ScanObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); void VerifyNoFromSpaceReferences(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Marks the root set at the start of a garbage collection. void MarkRoots() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie // the image. Mark that portion of the heap as immune. - virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_); void UnBindBitmaps() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::heap_bitmap_lock_); - void ProcessReferences(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void ProcessReferences(Thread* self) REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::mutator_lock_); // Sweeps unmarked objects to complete the garbage collection. - virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual void Sweep(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); // Sweeps unmarked objects to complete the garbage collection. 
- void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_); void SweepSystemWeaks() - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); - - static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - - static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); - - static void ProcessMarkStackCallback(void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); - - static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref, - void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Schedules an unmarked object for reference processing. void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); protected: // Returns null if the object is not marked, otherwise returns the forwarding address (same as // object for non movable things). - mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE + REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_); - static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE + REQUIRES(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Marks or unmarks a large object based on whether or not set is true. If set is true, then we // mark, otherwise we unmark. bool MarkLargeObject(const mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Expand mark stack to 2x its current size. 
- void ResizeMarkStack(size_t new_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if we should sweep the space. virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const; // Push an object onto the mark stack. - void MarkStackPush(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void UpdateAndMarkModUnion() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Recursively blackens objects on the mark stack. void ProcessMarkStack() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Revoke all the thread-local buffers. void RevokeAllThreadLocalBuffers(); diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h index b9ef137e89..e8f0672426 100644 --- a/runtime/gc/collector/sticky_mark_sweep.h +++ b/runtime/gc/collector/sticky_mark_sweep.h @@ -30,21 +30,21 @@ class StickyMarkSweep FINAL : public PartialMarkSweep { return kGcTypeSticky; } - explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); + StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = ""); ~StickyMarkSweep() {} protected: // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the // alloc space will be marked as immune. 
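GetForwardingAddressInFromSpace, kept in the header above, relies on the copying collector parking the forwarding pointer inside the object itself while the world is stopped (ART multiplexes it into the lock word; this sketch assumes a dedicated header word instead):

    #include <cstdint>

    struct Object {
      uintptr_t header = 0;  // Hypothetical spare word for this sketch.
    };

    const uintptr_t kForwardedTag = 1;  // Low bit: "header holds a forward".

    void SetForwardingAddress(Object* from, Object* to) {
      from->header = reinterpret_cast<uintptr_t>(to) | kForwardedTag;
    }

    // Returns nullptr while the object has not been copied yet.
    Object* GetForwardingAddress(Object* from) {
      if ((from->header & kForwardedTag) == 0) {
        return nullptr;
      }
      return reinterpret_cast<Object*>(from->header & ~kForwardedTag);
    }

This is why IsMarked for from-space objects "returns either the forwarding address or null" in the semi_space.cc hunk earlier: the query is just a tagged header read.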
- void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void MarkReachableObjects() OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); void Sweep(bool swap_bitmaps) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); private: DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep); diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h index 9275e6d932..95ba380a01 100644 --- a/runtime/gc/collector_type.h +++ b/runtime/gc/collector_type.h @@ -17,7 +17,7 @@ #ifndef ART_RUNTIME_GC_COLLECTOR_TYPE_H_ #define ART_RUNTIME_GC_COLLECTOR_TYPE_H_ -#include <ostream> +#include <iosfwd> namespace art { namespace gc { diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h index 1f2643a9ac..0536f32df9 100644 --- a/runtime/gc/gc_cause.h +++ b/runtime/gc/gc_cause.h @@ -17,7 +17,7 @@ #ifndef ART_RUNTIME_GC_GC_CAUSE_H_ #define ART_RUNTIME_GC_GC_CAUSE_H_ -#include <ostream> +#include <iosfwd> namespace art { namespace gc { diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index 2e661601fc..d1ab587aea 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -20,7 +20,6 @@ #include "heap.h" #include "base/time_utils.h" -#include "debugger.h" #include "gc/accounting/card_table-inl.h" #include "gc/allocation_record.h" #include "gc/collector/semi_space.h" @@ -39,8 +38,10 @@ namespace art { namespace gc { template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor> -inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass, - size_t byte_count, AllocatorType allocator, +inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, + mirror::Class* klass, + size_t byte_count, + AllocatorType allocator, const PreFenceVisitor& pre_fence_visitor) { if (kIsDebugBuild) { CheckPreconditionsForAllocObject(klass, byte_count); @@ -65,7 +66,6 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas // non moving space). This can happen if there is significant virtual address space // fragmentation. } - AllocationTimer alloc_timer(this, &obj); // bytes allocated for the (individual) object. size_t bytes_allocated; size_t usable_size; @@ -92,7 +92,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc && (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) && LIKELY(obj != nullptr)) { - DCHECK(!running_on_valgrind_); + DCHECK(!is_running_on_memory_tool_); obj->SetClass(klass); if (kUseBakerOrBrooksReadBarrier) { if (kUseBrooksReadBarrier) { @@ -209,7 +209,8 @@ inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) { } template <bool kInstrumented, typename PreFenceVisitor> -inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass, +inline mirror::Object* Heap::AllocLargeObject(Thread* self, + mirror::Class** klass, size_t byte_count, const PreFenceVisitor& pre_fence_visitor) { // Save and restore the class in case it moves. 
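The <ostream> to <iosfwd> swaps in collector_type.h and gc_cause.h are a compile-time trim: a header that only declares an operator<< needs just the forward declaration of std::ostream, and every translation unit including it stops paying for the full <ostream>. The shape of the split (the sketch enum and file names are illustrative):

    // --- sketch_collector_type.h: declaration only needs <iosfwd>. ---
    #include <iosfwd>

    enum SketchCollectorType { kSketchMarkSweep, kSketchSemiSpace };

    std::ostream& operator<<(std::ostream& os, SketchCollectorType type);

    // --- sketch_collector_type.cc: the definition includes <ostream>. ---
    #include <ostream>

    std::ostream& operator<<(std::ostream& os, SketchCollectorType type) {
      return os << (type == kSketchMarkSweep ? "MarkSweep" : "SemiSpace");
    }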
@@ -221,11 +222,14 @@ inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klas } template <const bool kInstrumented, const bool kGrow> -inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type, - size_t alloc_size, size_t* bytes_allocated, +inline mirror::Object* Heap::TryToAllocate(Thread* self, + AllocatorType allocator_type, + size_t alloc_size, + size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { - if (allocator_type != kAllocatorTypeTLAB && allocator_type != kAllocatorTypeRegionTLAB && + if (allocator_type != kAllocatorTypeTLAB && + allocator_type != kAllocatorTypeRegionTLAB && allocator_type != kAllocatorTypeRosAlloc && UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) { return nullptr; @@ -244,8 +248,8 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator break; } case kAllocatorTypeRosAlloc: { - if (kInstrumented && UNLIKELY(running_on_valgrind_)) { - // If running on valgrind, we should be using the instrumented path. + if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) { + // If running on valgrind or asan, we should be using the instrumented path. size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size); if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, max_bytes_tl_bulk_allocated))) { @@ -254,7 +258,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } else { - DCHECK(!running_on_valgrind_); + DCHECK(!is_running_on_memory_tool_); size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size); if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, @@ -270,12 +274,12 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator break; } case kAllocatorTypeDlMalloc: { - if (kInstrumented && UNLIKELY(running_on_valgrind_)) { + if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) { // If running on valgrind, we should be using the instrumented path. ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } else { - DCHECK(!running_on_valgrind_); + DCHECK(!is_running_on_memory_tool_); ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } @@ -380,21 +384,6 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator return ret; } -inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr) - : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr), - allocation_start_time_(kMeasureAllocationTime ? NanoTime() / kTimeAdjust : 0u) { } - -inline Heap::AllocationTimer::~AllocationTimer() { - if (kMeasureAllocationTime) { - mirror::Object* allocated_obj = *allocated_obj_ptr_; - // Only if the allocation succeeded, record the time. - if (allocated_obj != nullptr) { - uint64_t allocation_end_time = NanoTime() / kTimeAdjust; - heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(allocation_end_time - allocation_start_time_); - } - } -} - inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const { // We need to have a zygote space or else our newly allocated large object can end up in the // Zygote resulting in it being prematurely freed. 
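The valgrind checks in TryToAllocate above become a generic is_running_on_memory_tool_ flag covering ASan as well; the rule in both cases is that under a tool, only the instrumented (virtual) allocator entry points may run, while normal builds take the nonvirtual fast path. The gating reduced to its shape (AllocSpace here is a stand-in, with operator new playing both roles):

    #include <cstddef>
    #include <new>

    struct AllocSpace {
      // Instrumented path: virtual so a memory-tool wrapper space can
      // interpose red zones and poisoning around each allocation.
      virtual void* Alloc(size_t n) { return ::operator new(n); }
      // Uninstrumented fast path: must never run under a memory tool.
      void* AllocNonvirtual(size_t n) { return ::operator new(n); }
      virtual ~AllocSpace() {}
    };

    void* TryToAllocate(AllocSpace* space, size_t n, bool instrumented,
                        bool is_running_on_memory_tool) {
      if (instrumented && is_running_on_memory_tool) {
        return space->Alloc(n);  // Let the tool observe the allocation.
      }
      // ART asserts the tool is absent on this branch (the DCHECK above).
      return space->AllocNonvirtual(n);
    }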
@@ -423,7 +412,8 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t return false; } -inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, +inline void Heap::CheckConcurrentGC(Thread* self, + size_t new_num_bytes_allocated, mirror::Object** obj) { if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) { RequestConcurrentGCAndSaveObject(self, false, obj); diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 1b45ea1f5a..d7f918b4ff 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -37,13 +37,12 @@ #include "gc/accounting/atomic_stack.h" #include "gc/accounting/card_table-inl.h" #include "gc/accounting/heap_bitmap-inl.h" -#include "gc/accounting/mod_union_table.h" #include "gc/accounting/mod_union_table-inl.h" #include "gc/accounting/remembered_set.h" #include "gc/accounting/space_bitmap-inl.h" #include "gc/collector/concurrent_copying.h" #include "gc/collector/mark_compact.h" -#include "gc/collector/mark_sweep-inl.h" +#include "gc/collector/mark_sweep.h" #include "gc/collector/partial_mark_sweep.h" #include "gc/collector/semi_space.h" #include "gc/collector/sticky_mark_sweep.h" @@ -62,7 +61,6 @@ #include "image.h" #include "intern_table.h" #include "mirror/class-inl.h" -#include "mirror/object.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "mirror/reference-inl.h" @@ -115,18 +113,34 @@ static constexpr size_t kDefaultAllocationStackSize = 8 * MB / // timeout on how long we wait for finalizers to run. b/21544853 static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u); -Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free, - double target_utilization, double foreground_heap_growth_multiplier, - size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name, - const InstructionSet image_instruction_set, CollectorType foreground_collector_type, +Heap::Heap(size_t initial_size, + size_t growth_limit, + size_t min_free, + size_t max_free, + double target_utilization, + double foreground_heap_growth_multiplier, + size_t capacity, + size_t non_moving_space_capacity, + const std::string& image_file_name, + const InstructionSet image_instruction_set, + CollectorType foreground_collector_type, CollectorType background_collector_type, - space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold, - size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode, - size_t long_pause_log_threshold, size_t long_gc_log_threshold, - bool ignore_max_footprint, bool use_tlab, - bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap, - bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc, - bool verify_post_gc_rosalloc, bool gc_stress_mode, + space::LargeObjectSpaceType large_object_space_type, + size_t large_object_threshold, + size_t parallel_gc_threads, + size_t conc_gc_threads, + bool low_memory_mode, + size_t long_pause_log_threshold, + size_t long_gc_log_threshold, + bool ignore_max_footprint, + bool use_tlab, + bool verify_pre_gc_heap, + bool verify_pre_sweeping_heap, + bool verify_post_gc_heap, + bool verify_pre_gc_rosalloc, + bool verify_pre_sweeping_rosalloc, + bool verify_post_gc_rosalloc, + bool gc_stress_mode, bool use_homogeneous_space_compaction_for_oom, uint64_t min_interval_homogeneous_space_compaction_by_oom) : non_moving_space_(nullptr), @@ -191,10 +205,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t 
max target_utilization_(target_utilization), foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier), total_wait_time_(0), - total_allocation_time_(0), verify_object_mode_(kVerifyObjectModeDisabled), disable_moving_gc_count_(0), - running_on_valgrind_(Runtime::Current()->RunningOnValgrind()), + is_running_on_memory_tool_(Runtime::Current()->IsRunningOnMemoryTool()), use_tlab_(use_tlab), main_space_backup_(nullptr), min_interval_homogeneous_space_compaction_by_oom_( @@ -216,7 +229,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max alloc_tracking_enabled_(false), backtrace_lock_(nullptr), seen_backtrace_count_(0u), - unique_backtrace_count_(0u) { + unique_backtrace_count_(0u), + gc_disabled_for_shutdown_(false) { if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { LOG(INFO) << "Heap() entering"; } @@ -467,6 +481,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable", *gc_complete_lock_)); task_processor_.reset(new TaskProcessor()); + reference_processor_.reset(new ReferenceProcessor()); pending_task_lock_ = new Mutex("Pending task lock"); if (ignore_max_footprint_) { SetIdealFootprint(std::numeric_limits<size_t>::max()); @@ -519,7 +534,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max if (gc_stress_mode_) { backtrace_lock_ = new Mutex("GC complete lock"); } - if (running_on_valgrind_ || gc_stress_mode_) { + if (is_running_on_memory_tool_ || gc_stress_mode_) { instrumentation->InstrumentQuickAllocEntryPoints(); } if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { @@ -527,8 +542,10 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max } } -MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, - size_t capacity, std::string* out_error_str) { +MemMap* Heap::MapAnonymousPreferredAddress(const char* name, + uint8_t* request_begin, + size_t capacity, + std::string* out_error_str) { while (true) { MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity, PROT_READ | PROT_WRITE, true, false, out_error_str); @@ -544,9 +561,12 @@ bool Heap::MayUseCollector(CollectorType type) const { return foreground_collector_type_ == type || background_collector_type_ == type; } -space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size, - size_t growth_limit, size_t capacity, - const char* name, bool can_move_objects) { +space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, + size_t initial_size, + size_t growth_limit, + size_t capacity, + const char* name, + bool can_move_objects) { space::MallocSpace* malloc_space = nullptr; if (kUseRosAlloc) { // Create rosalloc space. 
@@ -961,8 +981,6 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { total_paused_time += collector->GetTotalPausedTimeNs(); collector->DumpPerformanceInfo(os); } - uint64_t allocation_time = - static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust; if (total_duration != 0) { const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0; os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n"; @@ -980,11 +998,6 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { os << "Free memory until OOME " << PrettySize(GetFreeMemoryUntilOOME()) << "\n"; os << "Total memory " << PrettySize(GetTotalMemory()) << "\n"; os << "Max memory " << PrettySize(GetMaxMemory()) << "\n"; - if (kMeasureAllocationTime) { - os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n"; - os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated) - << "\n"; - } if (HasZygoteSpace()) { os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n"; } @@ -1017,7 +1030,6 @@ void Heap::ResetGcPerformanceInfo() { for (auto& collector : garbage_collectors_) { collector->ResetMeasurements(); } - total_allocation_time_.StoreRelaxed(0); total_bytes_freed_ever_ = 0; total_objects_freed_ever_ = 0; total_wait_time_ = 0; @@ -1278,7 +1290,7 @@ void Heap::TrimSpaces(Thread* self) { FinishGC(self, collector::kGcTypeNone); size_t native_reclaimed = 0; -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // Only trim the native heap if we don't care about pauses. if (!CareAboutPauseTimes()) { #if defined(USE_DLMALLOC) @@ -1291,7 +1303,7 @@ void Heap::TrimSpaces(Thread* self) { UNIMPLEMENTED(WARNING) << "Add trimming support"; #endif } -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ uint64_t end_ns = NanoTime(); VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns) << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration=" @@ -1432,10 +1444,10 @@ void Heap::VerifyObjectBody(mirror::Object* obj) { if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) { return; } - CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj; + CHECK_ALIGNED(obj, kObjectAlignment) << "Object isn't aligned"; mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset()); CHECK(c != nullptr) << "Null class in object " << obj; - CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj; + CHECK_ALIGNED(c, kObjectAlignment) << "Class " << c << " not aligned in object " << obj; CHECK(VerifyClassClass(c)); if (verify_object_mode_ > kVerifyObjectModeFast) { @@ -1495,8 +1507,10 @@ space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) return nullptr; } -mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator, - size_t alloc_size, size_t* bytes_allocated, +mirror::Object* Heap::AllocateInternalWithGc(Thread* self, + AllocatorType allocator, + size_t alloc_size, + size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated, mirror::Class** klass) { @@ -1695,21 +1709,24 @@ uint64_t Heap::GetBytesAllocatedEver() const { class InstanceCounter { public: - InstanceCounter(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, uint64_t* counts) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) { - } + 
InstanceCounter(const std::vector<mirror::Class*>& classes, + bool use_is_assignable_from, + uint64_t* counts) + SHARED_REQUIRES(Locks::mutator_lock_) + : classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {} + static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg); mirror::Class* instance_class = obj->GetClass(); CHECK(instance_class != nullptr); for (size_t i = 0; i < instance_counter->classes_.size(); ++i) { + mirror::Class* klass = instance_counter->classes_[i]; if (instance_counter->use_is_assignable_from_) { - if (instance_counter->classes_[i]->IsAssignableFrom(instance_class)) { + if (klass != nullptr && klass->IsAssignableFrom(instance_class)) { ++instance_counter->counts_[i]; } - } else if (instance_class == instance_counter->classes_[i]) { + } else if (instance_class == klass) { ++instance_counter->counts_[i]; } } @@ -1731,11 +1748,11 @@ void Heap::CountInstances(const std::vector<mirror::Class*>& classes, bool use_i class InstanceCollector { public: InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : class_(c), max_count_(max_count), instances_(instances) { } static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(arg != nullptr); InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg); if (obj->GetClass() == instance_collector->class_) { @@ -1753,7 +1770,8 @@ class InstanceCollector { DISALLOW_COPY_AND_ASSIGN(InstanceCollector); }; -void Heap::GetInstances(mirror::Class* c, int32_t max_count, +void Heap::GetInstances(mirror::Class* c, + int32_t max_count, std::vector<mirror::Object*>& instances) { InstanceCollector collector(c, max_count, instances); VisitObjects(&InstanceCollector::Callback, &collector); @@ -1761,14 +1779,15 @@ void Heap::GetInstances(mirror::Class* c, int32_t max_count, class ReferringObjectsFinder { public: - ReferringObjectsFinder(mirror::Object* object, int32_t max_count, + ReferringObjectsFinder(mirror::Object* object, + int32_t max_count, std::vector<mirror::Object*>& referring_objects) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : object_(object), max_count_(max_count), referring_objects_(referring_objects) { } static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj); } @@ -1776,18 +1795,22 @@ class ReferringObjectsFinder { // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for // annotalysis on visitors. void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS { - o->VisitReferences<true>(*this, VoidFunctor()); + o->VisitReferences(*this, VoidFunctor()); } // For Object::VisitReferences. 
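The InstanceCounter hunk above carries a real bug fix, not just reformatting: hoisting classes_[i] into klass and adding the klass != nullptr guard prevents calling IsAssignableFrom on a null query class. The corrected loop in isolation (Class and its stub IsAssignableFrom are simplified stand-ins):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Class {
      // Stub: exact match only; the real check walks the class hierarchy.
      bool IsAssignableFrom(const Class* src) const { return src == this; }
    };

    // Tally one heap object against each query class. A null query entry now
    // simply never matches instead of dereferencing null.
    void CountInstance(const std::vector<Class*>& classes,
                       bool use_is_assignable_from, uint64_t* counts,
                       const Class* instance_class) {
      for (size_t i = 0; i < classes.size(); ++i) {
        const Class* klass = classes[i];
        if (use_is_assignable_from) {
          if (klass != nullptr && klass->IsAssignableFrom(instance_class)) {
            ++counts[i];
          }
        } else if (instance_class == klass) {
          ++counts[i];
        }
      }
    }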
- void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset); if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) { referring_objects_.push_back(obj); } } + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + private: const mirror::Object* const object_; const uint32_t max_count_; @@ -1865,7 +1888,7 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() { static_cast<double>(space_size_before_compaction); tl->ResumeAll(); // Finish GC. - reference_processor_.EnqueueClearedReferences(self); + reference_processor_->EnqueueClearedReferences(self); GrowForUtilization(semi_space_collector_); LogGC(kGcCauseHomogeneousSpaceCompact, collector); FinishGC(self, collector::kGcTypeFull); @@ -1998,7 +2021,7 @@ void Heap::TransitionCollector(CollectorType collector_type) { ChangeCollector(collector_type); tl->ResumeAll(); // Can't call into java code with all threads suspended. - reference_processor_.EnqueueClearedReferences(self); + reference_processor_->EnqueueClearedReferences(self); uint64_t duration = NanoTime() - start_time; GrowForUtilization(semi_space_collector_); DCHECK(collector != nullptr); @@ -2077,9 +2100,11 @@ void Heap::ChangeCollector(CollectorType collector_type) { // Special compacting collector which uses sub-optimal bin packing to reduce zygote space size. class ZygoteCompactingCollector FINAL : public collector::SemiSpace { public: - explicit ZygoteCompactingCollector(gc::Heap* heap) : SemiSpace(heap, false, "zygote collector"), - bin_live_bitmap_(nullptr), bin_mark_bitmap_(nullptr) { - } + ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool) + : SemiSpace(heap, false, "zygote collector"), + bin_live_bitmap_(nullptr), + bin_mark_bitmap_(nullptr), + is_running_on_memory_tool_(is_running_on_memory_tool) {} void BuildBins(space::ContinuousSpace* space) { bin_live_bitmap_ = space->GetLiveBitmap(); @@ -2105,9 +2130,10 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace { accounting::ContinuousSpaceBitmap* bin_live_bitmap_; // Mark bitmap of the space which contains the bins. accounting::ContinuousSpaceBitmap* bin_mark_bitmap_; + const bool is_running_on_memory_tool_; static void Callback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(arg != nullptr); BinContext* context = reinterpret_cast<BinContext*>(arg); ZygoteCompactingCollector* collector = context->collector_; @@ -2119,20 +2145,22 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace { } void AddBin(size_t size, uintptr_t position) { + if (is_running_on_memory_tool_) { + MEMORY_TOOL_MAKE_DEFINED(reinterpret_cast<void*>(position), size); + } if (size != 0) { bins_.insert(std::make_pair(size, position)); } } - virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const { + virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const { // Don't sweep any spaces since we probably blasted the internal accounting of the free list // allocator. 
- UNUSED(space); return false; } virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) { size_t obj_size = obj->SizeOf(); size_t alloc_size = RoundUp(obj_size, kObjectAlignment); mirror::Object* forward_address; @@ -2212,7 +2240,7 @@ void Heap::PreZygoteFork() { // Temporarily disable rosalloc verification because the zygote // compaction will mess up the rosalloc internal metadata. ScopedDisableRosAllocVerification disable_rosalloc_verif(this); - ZygoteCompactingCollector zygote_collector(this); + ZygoteCompactingCollector zygote_collector(this, is_running_on_memory_tool_); zygote_collector.BuildBins(non_moving_space_); // Create a new bump pointer space which we will compact into. space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(), @@ -2369,7 +2397,8 @@ collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* ta } } -collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause, +collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, + GcCause gc_cause, bool clear_soft_references) { Thread* self = Thread::Current(); Runtime* runtime = Runtime::Current(); @@ -2406,6 +2435,9 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_; return collector::kGcTypeNone; } + if (gc_disabled_for_shutdown_) { + return collector::kGcTypeNone; + } collector_type_running_ = collector_type_; } if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) { @@ -2472,7 +2504,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes(); RequestTrim(self); // Enqueue cleared references. - reference_processor_.EnqueueClearedReferences(self); + reference_processor_->EnqueueClearedReferences(self); // Grow the heap so that we know when to perform the next GC. GrowForUtilization(collector, bytes_allocated_before_gc); LogGC(gc_cause, collector); @@ -2576,7 +2608,7 @@ class RootMatchesObjectVisitor : public SingleRootVisitor { explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { } void VisitRoot(mirror::Object* root, const RootInfo& info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (root == obj_) { LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString(); } @@ -2597,24 +2629,23 @@ class ScanVisitor { // Verify a reference from an object. 
class VerifyReferenceVisitor : public SingleRootVisitor { public: - explicit VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) + VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {} size_t GetFailureCount() const { return fail_count_->LoadSequentiallyConsistent(); } - void operator()(mirror::Class* klass, mirror::Reference* ref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - UNUSED(klass); + void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const + SHARED_REQUIRES(Locks::mutator_lock_) { if (verify_referent_) { VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset()); } } - void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const + SHARED_REQUIRES(Locks::mutator_lock_) { VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset); } @@ -2622,8 +2653,20 @@ class VerifyReferenceVisitor : public SingleRootVisitor { return heap_->IsLiveObjectLocked(obj, true, false, true); } - void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + const_cast<VerifyReferenceVisitor*>(this)->VisitRoot( + root->AsMirrorPtr(), RootInfo(kRootVMInternal)); + } + + virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) { if (root == nullptr) { LOG(ERROR) << "Root is null with info " << root_info.GetType(); } else if (!VerifyReference(nullptr, root, MemberOffset(0))) { @@ -2736,27 +2779,25 @@ class VerifyReferenceVisitor : public SingleRootVisitor { // Verify all references within an object, for use with HeapBitmap::Visit. class VerifyObjectVisitor { public: - explicit VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent) - : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) { - } + VerifyObjectVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent) + : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {} - void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + void operator()(mirror::Object* obj) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { // Note: we are verifying the references in obj but not obj itself, this is because obj must // be live or else how did we find it in the live bitmap? VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_); // The class doesn't count as a reference but we should verify it anyways. 
- obj->VisitReferences<true>(visitor, visitor); + obj->VisitReferences(visitor, visitor); } static void VisitCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg); visitor->operator()(obj); } - void VerifyRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) { + void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) { ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_); Runtime::Current()->VisitRoots(&visitor); @@ -2848,11 +2889,16 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) { class VerifyReferenceCardVisitor { public: VerifyReferenceCardVisitor(Heap* heap, bool* failed) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) : heap_(heap), failed_(failed) { } + // There are no card marks for native roots on a class. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for // annotalysis on visitors. void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const @@ -2886,14 +2932,10 @@ class VerifyReferenceCardVisitor { if (!obj->IsObjectArray()) { mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass(); CHECK(klass != nullptr); - auto* fields = is_static ? klass->GetSFields() : klass->GetIFields(); - auto num_fields = is_static ? klass->NumStaticFields() : klass->NumInstanceFields(); - CHECK_EQ(fields == nullptr, num_fields == 0u); - for (size_t i = 0; i < num_fields; ++i) { - ArtField* cur = &fields[i]; - if (cur->GetOffset().Int32Value() == offset.Int32Value()) { + for (ArtField& field : (is_static ? klass->GetSFields() : klass->GetIFields())) { + if (field.GetOffset().Int32Value() == offset.Int32Value()) { LOG(ERROR) << (is_static ? "Static " : "") << "field in the live stack is " - << PrettyField(cur); + << PrettyField(&field); break; } } @@ -2925,9 +2967,9 @@ class VerifyLiveStackReferences { failed_(false) {} void operator()(mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_)); - obj->VisitReferences<true>(visitor, VoidFunctor()); + obj->VisitReferences(visitor, VoidFunctor()); } bool Failed() const { @@ -2958,8 +3000,7 @@ bool Heap::VerifyMissingCardMarks() { return !visitor.Failed(); } -void Heap::SwapStacks(Thread* self) { - UNUSED(self); +void Heap::SwapStacks() { if (kUseThreadLocalAllocationStack) { live_stack_->AssertAllZero(); } @@ -3012,7 +3053,9 @@ accounting::RememberedSet* Heap::FindRememberedSetFromSpace(space::Space* space) return it->second; } -void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_alloc_space_cards, +void Heap::ProcessCards(TimingLogger* timings, + bool use_rem_sets, + bool process_alloc_space_cards, bool clear_alloc_space_cards) { TimingLogger::ScopedTiming t(__FUNCTION__, timings); // Clear cards and keep track of cards cleared in the mod-union table.
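A note on the annotation churn that dominates these hunks: the change migrates ART from its legacy lock macros to Clang's capability-based thread-safety attributes. SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES, EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, and LOCKS_EXCLUDED(mu) is re-expressed as the negative capability REQUIRES(!mu), which Clang checks when -Wthread-safety-negative is enabled. A minimal sketch of what such macros typically expand to, assuming definitions along these lines (illustrative only; the real definitions live in ART's base headers and are not part of this diff):

    // Hypothetical expansions, shown for orientation; not the diff's own code.
    #define REQUIRES(...) \
        __attribute__((requires_capability(__VA_ARGS__)))
    #define SHARED_REQUIRES(...) \
        __attribute__((requires_shared_capability(__VA_ARGS__)))
    // LOCKS_EXCLUDED(mu) has no direct successor spelling; callers now write
    // REQUIRES(!mu) instead. The leading '*' in forms such as
    // REQUIRES(!*gc_complete_lock_) dereferences a Mutex* member so the
    // attribute names the lock object itself rather than the pointer to it.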
@@ -3048,8 +3091,13 @@ void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_a } } -static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) { -} +struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor { + virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE { + return obj; + } + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE { + } +}; void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { Thread* const self = Thread::Current(); @@ -3067,18 +3115,19 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { if (verify_missing_card_marks_) { TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings); ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); - SwapStacks(self); + SwapStacks(); // Sort the live stack so that we can quickly binary search it later. CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName() << " missing card mark verification failed\n" << DumpSpaces(); - SwapStacks(self); + SwapStacks(); } if (verify_mod_union_table_) { TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings); ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_); for (const auto& table_pair : mod_union_tables_) { accounting::ModUnionTable* mod_union_table = table_pair.second; - mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr); + IdentityMarkHeapReferenceVisitor visitor; + mod_union_table->UpdateAndMarkReferences(&visitor); mod_union_table->Verify(); } } @@ -3091,8 +3140,7 @@ void Heap::PreGcVerification(collector::GarbageCollector* gc) { } } -void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) { - UNUSED(gc); +void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) { // TODO: Add a new runtime option for this? if (verify_pre_gc_rosalloc_) { RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification"); @@ -3400,8 +3448,8 @@ void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirro class Heap::ConcurrentGCTask : public HeapTask { public: - explicit ConcurrentGCTask(uint64_t target_time, bool force_full) - : HeapTask(target_time), force_full_(force_full) { } + ConcurrentGCTask(uint64_t target_time, bool force_full) + : HeapTask(target_time), force_full_(force_full) { } virtual void Run(Thread* self) OVERRIDE { gc::Heap* heap = Runtime::Current()->GetHeap(); heap->ConcurrentGC(self, force_full_); @@ -3412,7 +3460,7 @@ class Heap::ConcurrentGCTask : public HeapTask { const bool force_full_; // If true, force full (or partial) collection. 
}; -static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) { +static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_) { Runtime* runtime = Runtime::Current(); return runtime != nullptr && runtime->IsFinishedStarting() && !runtime->IsShuttingDown(self) && !self->IsHandlingStackOverflow(); @@ -3458,7 +3506,8 @@ void Heap::ConcurrentGC(Thread* self, bool force_full) { class Heap::CollectorTransitionTask : public HeapTask { public: - explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) { } + explicit CollectorTransitionTask(uint64_t target_time) : HeapTask(target_time) {} + virtual void Run(Thread* self) OVERRIDE { gc::Heap* heap = Runtime::Current()->GetHeap(); heap->DoPendingCollectorTransition(); @@ -3707,11 +3756,11 @@ void Heap::VisitAllocationRecords(RootVisitor* visitor) const { } } -void Heap::SweepAllocationRecords(IsMarkedCallback* visitor, void* arg) const { +void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const { if (IsAllocTrackingEnabled()) { MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); if (IsAllocTrackingEnabled()) { - GetAllocationRecords()->SweepAllocationRecords(visitor, arg); + GetAllocationRecords()->SweepAllocationRecords(visitor); } } } @@ -3734,17 +3783,6 @@ void Heap::DisallowNewAllocationRecords() const { } } -void Heap::EnsureNewAllocationRecordsDisallowed() const { - if (IsAllocTrackingEnabled()) { - // Lock and unlock once to ensure that no threads are still in the - // middle of adding new allocation records. - MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); - if (IsAllocTrackingEnabled()) { - GetAllocationRecords()->EnsureNewAllocationRecordsDisallowed(); - } - } -} - // Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp. class StackCrawlState { public: @@ -3813,5 +3851,12 @@ void Heap::CheckGcStressMode(Thread* self, mirror::Object** obj) { } } +void Heap::DisableGCForShutdown() { + Thread* const self = Thread::Current(); + CHECK(Runtime::Current()->IsShuttingDown(self)); + MutexLock mu(self, *gc_complete_lock_); + gc_disabled_for_shutdown_ = true; +} + } // namespace gc } // namespace art diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 1c75bd0089..d94f1091e0 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -26,22 +26,17 @@ #include "arch/instruction_set.h" #include "atomic.h" #include "base/time_utils.h" -#include "base/timing_logger.h" #include "gc/accounting/atomic_stack.h" #include "gc/accounting/card_table.h" #include "gc/accounting/read_barrier_table.h" #include "gc/gc_cause.h" -#include "gc/collector/garbage_collector.h" #include "gc/collector/gc_type.h" #include "gc/collector_type.h" #include "gc/space/large_object_space.h" #include "globals.h" -#include "jni.h" #include "object_callbacks.h" #include "offsets.h" -#include "reference_processor.h" #include "safe_map.h" -#include "thread_pool.h" #include "verify_object.h" namespace art { @@ -50,6 +45,7 @@ class ConditionVariable; class Mutex; class StackVisitor; class Thread; +class ThreadPool; class TimingLogger; namespace mirror { @@ -100,11 +96,7 @@ namespace space { class AgeCardVisitor { public: uint8_t operator()(uint8_t card) const { - if (card == accounting::CardTable::kCardDirty) { - return card - 1; - } else { - return 0; - } + return (card == accounting::CardTable::kCardDirty) ? 
card - 1 : 0; } }; @@ -134,7 +126,6 @@ std::ostream& operator<<(std::ostream& os, const ProcessState& process_state); class Heap { public: // If true, measure the total allocation time. - static constexpr bool kMeasureAllocationTime = false; static constexpr size_t kDefaultStartingSize = kPageSize; static constexpr size_t kDefaultInitialSize = 2 * MB; static constexpr size_t kDefaultMaximumSize = 256 * MB; @@ -169,49 +160,73 @@ class Heap { // Create a heap with the requested sizes. The possible empty // image_file_names names specify Spaces to load based on // ImageWriter output. - explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free, - size_t max_free, double target_utilization, - double foreground_heap_growth_multiplier, size_t capacity, - size_t non_moving_space_capacity, - const std::string& original_image_file_name, - InstructionSet image_instruction_set, - CollectorType foreground_collector_type, CollectorType background_collector_type, - space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold, - size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode, - size_t long_pause_threshold, size_t long_gc_threshold, - bool ignore_max_footprint, bool use_tlab, - bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap, - bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc, - bool verify_post_gc_rosalloc, bool gc_stress_mode, - bool use_homogeneous_space_compaction, - uint64_t min_interval_homogeneous_space_compaction_by_oom); + Heap(size_t initial_size, + size_t growth_limit, + size_t min_free, + size_t max_free, + double target_utilization, + double foreground_heap_growth_multiplier, + size_t capacity, + size_t non_moving_space_capacity, + const std::string& original_image_file_name, + InstructionSet image_instruction_set, + CollectorType foreground_collector_type, + CollectorType background_collector_type, + space::LargeObjectSpaceType large_object_space_type, + size_t large_object_threshold, + size_t parallel_gc_threads, + size_t conc_gc_threads, + bool low_memory_mode, + size_t long_pause_threshold, + size_t long_gc_threshold, + bool ignore_max_footprint, + bool use_tlab, + bool verify_pre_gc_heap, + bool verify_pre_sweeping_heap, + bool verify_post_gc_heap, + bool verify_pre_gc_rosalloc, + bool verify_pre_sweeping_rosalloc, + bool verify_post_gc_rosalloc, + bool gc_stress_mode, + bool use_homogeneous_space_compaction, + uint64_t min_interval_homogeneous_space_compaction_by_oom); ~Heap(); // Allocates and initializes storage for an object instance. 
template <bool kInstrumented, typename PreFenceVisitor> - mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes, + mirror::Object* AllocObject(Thread* self, + mirror::Class* klass, + size_t num_bytes, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes, - GetCurrentAllocator(), - pre_fence_visitor); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_, + !Roles::uninterruptible_) { + return AllocObjectWithAllocator<kInstrumented, true>( + self, klass, num_bytes, GetCurrentAllocator(), pre_fence_visitor); } template <bool kInstrumented, typename PreFenceVisitor> - mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes, + mirror::Object* AllocNonMovableObject(Thread* self, + mirror::Class* klass, + size_t num_bytes, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes, - GetCurrentNonMovingAllocator(), - pre_fence_visitor); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_, + !Roles::uninterruptible_) { + return AllocObjectWithAllocator<kInstrumented, true>( + self, klass, num_bytes, GetCurrentNonMovingAllocator(), pre_fence_visitor); } template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor> - ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator( - Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator, - const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self, + mirror::Class* klass, + size_t byte_count, + AllocatorType allocator, + const PreFenceVisitor& pre_fence_visitor) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_, + !Roles::uninterruptible_); AllocatorType GetCurrentAllocator() const { return current_allocator_; @@ -223,29 +238,29 @@ class Heap { // Visit all of the live objects in the heap. void VisitObjects(ObjectCallback callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_); void VisitObjectsPaused(ObjectCallback callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_); void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void RegisterNativeAllocation(JNIEnv* env, size_t bytes); - void RegisterNativeFree(JNIEnv* env, size_t bytes); + void RegisterNativeAllocation(JNIEnv* env, size_t bytes) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); + void RegisterNativeFree(JNIEnv* env, size_t bytes) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); // Change the allocator, updates entrypoints. 
void ChangeAllocator(AllocatorType allocator) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_); // Transition the garbage collector during runtime, may copy objects from one space to another. - void TransitionCollector(CollectorType collector_type); + void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_); // Change the collector to be one of the possible options (MS, CMS, SS). void ChangeCollector(CollectorType collector_type) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); // The given reference is believed to be to an object in the Java heap, check the soundness of it. // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a @@ -253,61 +268,67 @@ class Heap { void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS; // Check sanity of all live references. - void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_); // Returns how many failures occurred. size_t VerifyHeapReferences(bool verify_referents = true) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_); bool VerifyMissingCardMarks() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock, // and doesn't abort on error, allowing the caller to report more // meaningful diagnostics. - bool IsValidObjectAddress(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsValidObjectAddress(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_); // Faster alternative to IsHeapAddress since finding if an object is in the large object space is // very slow. bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses). // Requires the heap lock to be held. - bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true, - bool search_live_stack = true, bool sorted = false) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_); + bool IsLiveObjectLocked(mirror::Object* obj, + bool search_allocation_stack = true, + bool search_live_stack = true, + bool sorted = false) + SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_); // Returns true if there is any chance that the object (obj) will move. - bool IsMovableObject(const mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_); // Enables us to disable compacting GC until objects are released. - void IncrementDisableMovingGC(Thread* self); - void DecrementDisableMovingGC(Thread* self); + void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_); + void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_); // Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
- void ClearMarkedObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void ClearMarkedObjects() REQUIRES(Locks::heap_bitmap_lock_); // Initiates an explicit garbage collection. - void CollectGarbage(bool clear_soft_references); + void CollectGarbage(bool clear_soft_references) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); // Does a concurrent GC, should only be called by the GC daemon thread // through runtime. - void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); + void ConcurrentGC(Thread* self, bool force_full) + REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_); // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount. // The boolean decides whether to use IsAssignableFrom or == when comparing classes. - void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from, + void CountInstances(const std::vector<mirror::Class*>& classes, + bool use_is_assignable_from, uint64_t* counts) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Implements JDWP RT_Instances. void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Implements JDWP OR_ReferringObjects. - void GetReferringObjects(mirror::Object* o, int32_t max_count, std::vector<mirror::Object*>& referring_objects) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void GetReferringObjects(mirror::Object* o, + int32_t max_count, + std::vector<mirror::Object*>& referring_objects) + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to // implement dalvik.system.VMRuntime.clearGrowthLimit. @@ -315,7 +336,7 @@ class Heap { // Make the current growth limit the new maximum capacity, unmaps pages at the end of spaces // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit. - void ClampGrowthLimit() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_); // Target ideal heap utilization ratio, implements // dalvik.system.VMRuntime.getTargetHeapUtilization. @@ -330,9 +351,9 @@ class Heap { // Set the heap's private space pointers to be the same as the space based on its type. Public // due to usage by tests. void SetSpaceAsDefault(space::ContinuousSpace* continuous_space) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); - void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(!Locks::heap_bitmap_lock_); + void AddSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_); + void RemoveSpace(space::Space* space) REQUIRES(!Locks::heap_bitmap_lock_); // Set target ideal heap utilization ratio, implements // dalvik.system.VMRuntime.setTargetHeapUtilization. @@ -344,11 +365,11 @@ class Heap { // Blocks the caller until the garbage collector becomes idle and returns the type of GC we // waited for.
- collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) - LOCKS_EXCLUDED(gc_complete_lock_); + collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_); // Update the heap's process state to a new value, may cause compaction to occur. - void UpdateProcessState(ProcessState process_state); + void UpdateProcessState(ProcessState process_state) + REQUIRES(!*pending_task_lock_, !*gc_complete_lock_); const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const { return continuous_spaces_; @@ -401,14 +422,17 @@ class Heap { // Must be called if a field of an Object in the heap changes, and before any GC safe-point. // The call is not needed if null is stored in the field. - ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/, - const mirror::Object* /*new_value*/) { + ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, + MemberOffset offset ATTRIBUTE_UNUSED, + const mirror::Object* new_value ATTRIBUTE_UNUSED) { card_table_->MarkCard(dst); } // Write barrier for array operations that update many field positions - ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/, - size_t /*length TODO: element_count or byte_count?*/) { + ALWAYS_INLINE void WriteBarrierArray(const mirror::Object* dst, + int start_offset ATTRIBUTE_UNUSED, + // TODO: element_count or byte_count? + size_t length ATTRIBUTE_UNUSED) { card_table_->MarkCard(dst); } @@ -432,7 +456,8 @@ class Heap { } // Returns the number of objects currently allocated. - size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + size_t GetObjectsAllocated() const + REQUIRES(!Locks::heap_bitmap_lock_); // Returns the total number of objects allocated since the heap was created. uint64_t GetObjectsAllocatedEver() const; @@ -491,13 +516,13 @@ class Heap { bool fail_ok) const; space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const; - void DumpForSigQuit(std::ostream& os); + void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_); // Do a pending collector transition. - void DoPendingCollectorTransition(); + void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_); // Deflate monitors, ... and trim the spaces. - void Trim(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_); + void Trim(Thread* self) REQUIRES(!*gc_complete_lock_); void RevokeThreadLocalBuffers(Thread* thread); void RevokeRosAllocThreadLocalBuffers(Thread* thread); @@ -505,17 +530,17 @@ class Heap { void AssertThreadLocalBuffersAreRevoked(Thread* thread); void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked(); void RosAllocVerification(TimingLogger* timings, const char* name) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); - accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) { return live_bitmap_.get(); } - accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) { return mark_bitmap_.get(); } - accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { + accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) { return live_stack_.get(); } @@ -523,13 +548,12 @@ class Heap { // Mark and empty stack. 
void FlushAllocStack() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Revoke all the thread-local allocation stacks. void RevokeAllThreadLocalAllocationStacks(Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_); // Mark all the objects in the allocation stack in the specified bitmap. // TODO: Refactor? @@ -537,23 +561,23 @@ class Heap { accounting::SpaceBitmap<kObjectAlignment>* bitmap2, accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects, accounting::ObjectStack* stack) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Mark the specified allocation stack as live. void MarkAllocStackAsLive(accounting::ObjectStack* stack) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_); // Unbind any bound bitmaps. - void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_); // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added. // Assumes there is only one image space. space::ImageSpace* GetImageSpace() const; // Permanently disable moving garbage collection. - void DisableMovingGc(); + void DisableMovingGc() REQUIRES(!*gc_complete_lock_); space::DlMallocSpace* GetDlMallocSpace() const { return dlmalloc_space_; @@ -599,8 +623,8 @@ class Heap { std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // GC performance measuring - void DumpGcPerformanceInfo(std::ostream& os); - void ResetGcPerformanceInfo(); + void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_); + void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_); // Returns true if we currently care about pause times. bool CareAboutPauseTimes() const { @@ -631,7 +655,7 @@ class Heap { bool HasImageSpace() const; ReferenceProcessor* GetReferenceProcessor() { - return &reference_processor_; + return reference_processor_.get(); } TaskProcessor* GetTaskProcessor() { return task_processor_.get(); @@ -660,16 +684,16 @@ class Heap { return false; } - bool IsMovingGCDisabled(Thread* self) { + bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) { MutexLock mu(self, *gc_complete_lock_); return disable_moving_gc_count_ > 0; } // Request an asynchronous trim. - void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_); + void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_); // Request asynchronous GC. - void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_); + void RequestConcurrentGC(Thread* self, bool force_full) REQUIRES(!*pending_task_lock_); // Whether or not we may use a garbage collector, used so that we only create collectors we need.
bool MayUseCollector(CollectorType type) const; @@ -684,8 +708,8 @@ class Heap { uint64_t GetGcTime() const; uint64_t GetBlockingGcCount() const; uint64_t GetBlockingGcTime() const; - void DumpGcCountRateHistogram(std::ostream& os) const; - void DumpBlockingGcCountRateHistogram(std::ostream& os) const; + void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_); + void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_); // Allocation tracking support // Callers to this function use double-checked locking to ensure safety on allocation_records_ @@ -693,37 +717,35 @@ class Heap { return alloc_tracking_enabled_.LoadRelaxed(); } - void SetAllocTrackingEnabled(bool enabled) EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) { alloc_tracking_enabled_.StoreRelaxed(enabled); } AllocRecordObjectMap* GetAllocationRecords() const - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) { + REQUIRES(Locks::alloc_tracker_lock_) { return allocation_records_.get(); } void SetAllocationRecords(AllocRecordObjectMap* records) - EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_); + REQUIRES(Locks::alloc_tracker_lock_); void VisitAllocationRecords(RootVisitor* visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); - void SweepAllocationRecords(IsMarkedCallback* visitor, void* arg) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + void SweepAllocationRecords(IsMarkedVisitor* visitor) const + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); void DisallowNewAllocationRecords() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); void AllowNewAllocationRecords() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::alloc_tracker_lock_); - void EnsureNewAllocationRecordsDisallowed() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::alloc_tracker_lock_); + void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_); private: class ConcurrentGCTask; @@ -734,10 +756,10 @@ class Heap { collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space, space::ContinuousMemMapAllocSpace* source_space, GcCause gc_cause) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); void LogGC(GcCause gc_cause, collector::GarbageCollector* collector); - void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_); + void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_); // Create a mem map with a preferred base address. 
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, @@ -761,15 +783,20 @@ class Heap { allocator_type != kAllocatorTypeTLAB; } static bool IsMovingGc(CollectorType collector_type) { - return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS || - collector_type == kCollectorTypeCC || collector_type == kCollectorTypeMC || + return + collector_type == kCollectorTypeSS || + collector_type == kCollectorTypeGSS || + collector_type == kCollectorTypeCC || + collector_type == kCollectorTypeMC || collector_type == kCollectorTypeHomogeneousSpaceCompact; } bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, + SHARED_REQUIRES(Locks::mutator_lock_); + ALWAYS_INLINE void CheckConcurrentGC(Thread* self, + size_t new_num_bytes_allocated, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*pending_task_lock_, !*gc_complete_lock_); accounting::ObjectStack* GetMarkStack() { return mark_stack_.get(); @@ -777,46 +804,56 @@ class Heap { // We don't force this to be inlined since it is a slow path. template <bool kInstrumented, typename PreFenceVisitor> - mirror::Object* AllocLargeObject(Thread* self, mirror::Class** klass, size_t byte_count, + mirror::Object* AllocLargeObject(Thread* self, + mirror::Class** klass, + size_t byte_count, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_); // Handles Allocate()'s slow allocation path with GC involved after // an initial allocation attempt failed. - mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes, - size_t* bytes_allocated, size_t* usable_size, + mirror::Object* AllocateInternalWithGc(Thread* self, + AllocatorType allocator, + size_t num_bytes, + size_t* bytes_allocated, + size_t* usable_size, size_t* bytes_tl_bulk_allocated, mirror::Class** klass) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Allocate into a specific space. - mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c, + mirror::Object* AllocateInto(Thread* self, + space::AllocSpace* space, + mirror::Class* c, size_t bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the // wrong space. - void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_); // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so // that the switch statement is constant optimized in the entrypoints. 
template <const bool kInstrumented, const bool kGrow> - ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type, - size_t alloc_size, size_t* bytes_allocated, + ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, + AllocatorType allocator_type, + size_t alloc_size, + size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool kGrow> ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size); // Returns true if the address passed in is within the address range of a continuous space. bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version. void RunFinalization(JNIEnv* env, uint64_t timeout); @@ -824,36 +861,36 @@ class Heap { // Blocks the caller until the garbage collector becomes idle and returns the type of GC we // waited for. collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_); + REQUIRES(gc_complete_lock_); void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time) - LOCKS_EXCLUDED(pending_task_lock_); + REQUIRES(!*pending_task_lock_); void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*pending_task_lock_); bool IsGCRequestPending() const; // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns // which type of Gc was actually run.
- collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause, + collector::GcType CollectGarbageInternal(collector::GcType gc_plan, + GcCause gc_cause, bool clear_soft_references) - LOCKS_EXCLUDED(gc_complete_lock_, - Locks::heap_bitmap_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_, + !*pending_task_lock_); void PreGcVerification(collector::GarbageCollector* gc) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_); void PreGcVerificationPaused(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_); void PrePauseRosAllocVerification(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_); void PreSweepingGcVerification(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_); void PostGcVerification(collector::GarbageCollector* gc) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_); void PostGcVerificationPaused(collector::GarbageCollector* gc) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_); // Update the watermark for the native allocated bytes based on the current number of native // bytes allocated and the target utilization ratio. @@ -863,16 +900,21 @@ class Heap { collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type); // Create a new alloc space and compact default alloc space to it. - HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact(); + HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_); // Create the main free list malloc space, either a RosAlloc space or DlMalloc space. - void CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit, + void CreateMainMallocSpace(MemMap* mem_map, + size_t initial_size, + size_t growth_limit, size_t capacity); // Create a malloc space based on a mem map. Does not set the space as default. - space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size, - size_t growth_limit, size_t capacity, - const char* name, bool can_move_objects); + space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap* mem_map, + size_t initial_size, + size_t growth_limit, + size_t capacity, + const char* name, + bool can_move_objects); // Given the current contents of the alloc space, increase the allowed heap footprint to match // the target utilization ratio. This should only be called immediately after a full garbage @@ -884,28 +926,33 @@ class Heap { size_t GetPercentFree(); static void VerificationCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::heap_bitmap_lock_); // Swap the allocation stack with the live stack. - void SwapStacks(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_); // Clear cards and update the mod union table. When process_alloc_space_cards is true, // if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do // not process the alloc space if process_alloc_space_cards is false. 
- void ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_alloc_space_cards, + void ProcessCards(TimingLogger* timings, + bool use_rem_sets, + bool process_alloc_space_cards, bool clear_alloc_space_cards); // Push an object onto the allocation stack. void PushOnAllocationStack(Thread* self, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); void ClearConcurrentGCRequest(); - void ClearPendingTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_); - void ClearPendingCollectorTransition(Thread* self) LOCKS_EXCLUDED(pending_task_lock_); + void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_); + void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_); // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark // sweep GC, false for other GC types. @@ -914,23 +961,23 @@ class Heap { } // Trim the managed and native spaces by releasing unused memory back to the OS. - void TrimSpaces(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_); + void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_); // Trim 0 pages at the end of reference tables. void TrimIndirectReferenceTables(Thread* self); void VisitObjectsInternal(ObjectCallback callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_); void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_); - void UpdateGcCountRateHistograms() EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_); + void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_); // GC stress mode attempts to do one GC per unique backtrace. void CheckGcStressMode(Thread* self, mirror::Object** obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_); // All-known continuous spaces, where objects lie within fixed bounds. std::vector<space::ContinuousSpace*> continuous_spaces_; @@ -1019,7 +1066,7 @@ class Heap { std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_); // Reference processor; - ReferenceProcessor reference_processor_; + std::unique_ptr<ReferenceProcessor> reference_processor_; // Task processor, proxies heap trim requests to the daemon threads. std::unique_ptr<TaskProcessor> task_processor_; @@ -1167,9 +1214,6 @@ class Heap { // Total time which mutators are paused or waiting for GC to complete. uint64_t total_wait_time_; - // Total number of objects allocated in microseconds. - AtomicInteger total_allocation_time_; - // The current state of heap verification, may be enabled or disabled. 
VerifyObjectMode verify_object_mode_; @@ -1181,7 +1225,7 @@ class Heap { collector::MarkCompact* mark_compact_collector_; collector::ConcurrentCopying* concurrent_copying_collector_; - const bool running_on_valgrind_; + const bool is_running_on_memory_tool_; const bool use_tlab_; // Pointer to the space which becomes the new main space when we do homogeneous space compaction. @@ -1251,6 +1295,10 @@ class Heap { // Stack trace hashes that we already saw, std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_); + // We disable GC when we are shutting down the runtime in case there are daemon threads still + // allocating. + bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_); + friend class CollectorTransitionTask; friend class collector::GarbageCollector; friend class collector::MarkCompact; @@ -1261,21 +1309,8 @@ class Heap { friend class VerifyReferenceCardVisitor; friend class VerifyReferenceVisitor; friend class VerifyObjectVisitor; - friend class ScopedHeapFill; friend class space::SpaceTest; - class AllocationTimer { - public: - ALWAYS_INLINE AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr); - ALWAYS_INLINE ~AllocationTimer(); - private: - Heap* const heap_; - mirror::Object** allocated_obj_ptr_; - const uint64_t allocation_start_time_; - - DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationTimer); - }; - DISALLOW_IMPLICIT_CONSTRUCTORS(Heap); }; diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc index 4d51d387c8..39ba7432a1 100644 --- a/runtime/gc/reference_processor.cc +++ b/runtime/gc/reference_processor.cc @@ -17,6 +17,7 @@ #include "reference_processor.h" #include "base/time_utils.h" +#include "collector/garbage_collector.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/reference-inl.h" @@ -34,7 +35,7 @@ namespace gc { static constexpr bool kAsyncReferenceQueueAdd = false; ReferenceProcessor::ReferenceProcessor() - : process_references_args_(nullptr, nullptr, nullptr), + : collector_(nullptr), preserving_references_(false), condition_("reference processor condition", *Locks::reference_processor_lock_) , soft_reference_queue_(Locks::reference_queue_soft_references_lock_), @@ -53,15 +54,27 @@ void ReferenceProcessor::DisableSlowPath(Thread* self) { condition_.Broadcast(self); } +void ReferenceProcessor::BroadcastForSlowPath(Thread* self) { + CHECK(kUseReadBarrier); + MutexLock mu(self, *Locks::reference_processor_lock_); + condition_.Broadcast(self); +} + mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) { - mirror::Object* const referent = reference->GetReferent(); - // If the referent is null then it is already cleared, we can just return null since there is no - // scenario where it becomes non-null during the reference processing phase. - if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) { - return referent; + if (!kUseReadBarrier || self->GetWeakRefAccessEnabled()) { + // Under read barrier / concurrent copying collector, it's not safe to call GetReferent() when + // weak ref access is disabled as the call includes a read barrier which may push a ref onto the + // mark stack and interfere with termination of marking. + mirror::Object* const referent = reference->GetReferent(); + // If the referent is null then it is already cleared, we can just return null since there is no + // scenario where it becomes non-null during the reference processing phase. 
+ if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) { + return referent; + } } MutexLock mu(self, *Locks::reference_processor_lock_); - while (SlowPathEnabled()) { + while ((!kUseReadBarrier && SlowPathEnabled()) || + (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) { mirror::HeapReference<mirror::Object>* const referent_addr = reference->GetReferentReferenceAddr(); // If the referent became cleared, return it. Don't need barrier since thread roots can't get @@ -71,16 +84,14 @@ mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* } // Try to see if the referent is already marked by using the is_marked_callback. We can return // it to the mutator as long as the GC is not preserving references. - IsHeapReferenceMarkedCallback* const is_marked_callback = - process_references_args_.is_marked_callback_; - if (LIKELY(is_marked_callback != nullptr)) { + if (LIKELY(collector_ != nullptr)) { // If it's null it means not marked, but it could become marked if the referent is reachable // by finalizer referents. So we can not return in this case and must block. Otherwise, we // can return it to the mutator as long as the GC is not preserving references, in which // case only black nodes can be safely returned. If the GC is preserving references, the // mutator could take a white field from a grey or white node and move it somewhere else // in the heap causing corruption since this field would get swept. - if (is_marked_callback(referent_addr, process_references_args_.arg_)) { + if (collector_->IsMarkedHeapReference(referent_addr)) { if (!preserving_references_ || (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) { return referent_addr->AsMirrorPtr(); @@ -92,16 +103,6 @@ mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* return reference->GetReferent(); } -bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj, - void* arg) { - auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg); - // TODO: Add smarter logic for preserving soft references. - mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_); - DCHECK(new_obj != nullptr); - obj->Assign(new_obj); - return true; -} - void ReferenceProcessor::StartPreservingReferences(Thread* self) { MutexLock mu(self, *Locks::reference_processor_lock_); preserving_references_ = true; @@ -117,18 +118,18 @@ void ReferenceProcessor::StopPreservingReferences(Thread* self) { // Process reference class instances and schedule finalizations. void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references, - IsHeapReferenceMarkedCallback* is_marked_callback, - MarkObjectCallback* mark_object_callback, - ProcessMarkStackCallback* process_mark_stack_callback, - void* arg) { + collector::GarbageCollector* collector) { TimingLogger::ScopedTiming t(concurrent ? 
__FUNCTION__ : "(Paused)ProcessReferences", timings); Thread* self = Thread::Current(); { MutexLock mu(self, *Locks::reference_processor_lock_); - process_references_args_.is_marked_callback_ = is_marked_callback; - process_references_args_.mark_callback_ = mark_object_callback; - process_references_args_.arg_ = arg; - CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent"; + collector_ = collector; + if (!kUseReadBarrier) { + CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent"; + } else { + // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent == false). + CHECK_EQ(!self->GetWeakRefAccessEnabled(), concurrent); + } } // Unless required to clear soft references with white references, preserve some white referents. if (!clear_soft_references) { @@ -137,16 +138,17 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing if (concurrent) { StartPreservingReferences(self); } - soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback, - &process_references_args_); - process_mark_stack_callback(arg); + // TODO: Add smarter logic for preserving soft references. The behavior should be a conditional + // mark if the SoftReference is supposed to be preserved. + soft_reference_queue_.ForwardSoftReferences(collector); + collector->ProcessMarkStack(); if (concurrent) { StopPreservingReferences(self); } } // Clear all remaining soft and weak references with white referents. - soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); - weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); + soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector); + weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector); { TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" : "(Paused)EnqueueFinalizerReferences", timings); @@ -154,18 +156,17 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing StartPreservingReferences(self); } // Preserve all white objects with finalize methods and schedule them for finalization. - finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback, - mark_object_callback, arg); - process_mark_stack_callback(arg); + finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector); + collector->ProcessMarkStack(); if (concurrent) { StopPreservingReferences(self); } } // Clear all finalizer referent reachable soft and weak references with white referents. - soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); - weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); + soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector); + weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector); // Clear all phantom references with white referents. - phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); + phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector); // At this point all reference queues other than the cleared references should be empty. 
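For orientation: the ProcessReferences body above fixes a particular phase order around the collector object. Below is a compile-ready sketch of that order using invented stand-in types (Collector and Queue are hypothetical simplifications, not the ART classes; the real code uses collector::GarbageCollector and ReferenceQueue):

// Sketch only: stand-ins for collector::GarbageCollector and ReferenceQueue.
struct Collector {
  virtual ~Collector() {}
  virtual bool IsMarkedHeapReference(void* referent_addr) = 0;  // white/black query
  virtual void* MarkObject(void* obj) = 0;                      // blacken an object
  virtual void ProcessMarkStack() = 0;                          // drain pending marks
};

struct Queue {
  // Bodies elided to no-ops so the sketch compiles standalone.
  void ForwardSoftReferences(Collector*) {}                       // mark live soft referents
  void ClearWhiteReferences(Queue* /*out*/, Collector*) {}        // clear unmarked referents
  void EnqueueFinalizerReferences(Queue* /*out*/, Collector*) {}  // blacken + schedule finalize
};

void ProcessReferencesSketch(bool clear_soft_references, Collector* c,
                             Queue& soft, Queue& weak, Queue& finalizer,
                             Queue& phantom, Queue& cleared) {
  if (!clear_soft_references) {
    soft.ForwardSoftReferences(c);  // preserve soft referents...
    c->ProcessMarkStack();          // ...and everything they transitively reach
  }
  soft.ClearWhiteReferences(&cleared, c);
  weak.ClearWhiteReferences(&cleared, c);
  finalizer.EnqueueFinalizerReferences(&cleared, c);  // finalizable referents get revived
  c->ProcessMarkStack();
  soft.ClearWhiteReferences(&cleared, c);  // re-clear: finalizers may have revived some
  weak.ClearWhiteReferences(&cleared, c);
  phantom.ClearWhiteReferences(&cleared, c);
}

The mutator-side counterpart of this change is visible in GetReferent and MakeCircularListIfUnenqueued: without a read barrier, mutators wait while the slow path is enabled; with the concurrent copying read barrier, they wait while their per-thread weak-ref access is disabled, and BroadcastForSlowPath wakes them.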
DCHECK(soft_reference_queue_.IsEmpty()); DCHECK(weak_reference_queue_.IsEmpty()); @@ -177,8 +178,8 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing // could result in a stale is_marked_callback_ being called before the reference processing // starts since there is a small window of time where slow_path_enabled_ is enabled but the // callback isn't yet set. - process_references_args_.is_marked_callback_ = nullptr; - if (concurrent) { + collector_ = nullptr; + if (!kUseReadBarrier && concurrent) { // Done processing, disable the slow path and broadcast to the waiters. DisableSlowPath(self); } @@ -188,13 +189,12 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been // marked, put it on the appropriate list in the heap for later processing. void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref, - IsHeapReferenceMarkedCallback* is_marked_callback, - void* arg) { + collector::GarbageCollector* collector) { // klass can be the class of the old object if the visitor already updated the class of ref. DCHECK(klass != nullptr); DCHECK(klass->IsTypeOfReferenceClass()); mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr(); - if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) { + if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) { Thread* self = Thread::Current(); // TODO: Remove these locks, and use atomic stacks for storing references? // We need to check that the references haven't already been enqueued since we can end up @@ -214,8 +214,8 @@ void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Re } } -void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) { - cleared_references_.UpdateRoots(callback, arg); +void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) { + cleared_references_.UpdateRoots(visitor); } class ClearedReferenceTask : public HeapTask { @@ -264,7 +264,8 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference Thread* self = Thread::Current(); MutexLock mu(self, *Locks::reference_processor_lock_); // Wait until we are done processing references. - while (SlowPathEnabled()) { + while ((!kUseReadBarrier && SlowPathEnabled()) || + (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) { condition_.WaitHoldingLocks(self); } // At this point, since the sentinel of the reference is live, it is guaranteed to not be diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h index a44319ba5a..d9dfedb464 100644 --- a/runtime/gc/reference_processor.h +++ b/runtime/gc/reference_processor.h @@ -28,6 +28,7 @@ namespace art { class TimingLogger; namespace mirror { +class Class; class FinalizerReference; class Object; class Reference; @@ -35,67 +36,54 @@ class Reference; namespace gc { +namespace collector { +class GarbageCollector; +} // namespace collector + class Heap; // Used to process java.lang.References concurrently or paused.
class ReferenceProcessor { public: explicit ReferenceProcessor(); - static bool PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references, - IsHeapReferenceMarkedCallback* is_marked_callback, - MarkObjectCallback* mark_object_callback, - ProcessMarkStackCallback* process_mark_stack_callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) - LOCKS_EXCLUDED(Locks::reference_processor_lock_); + gc::collector::GarbageCollector* collector) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::heap_bitmap_lock_) + REQUIRES(!Locks::reference_processor_lock_); // The slow path bool is contained in the reference class object, can only be set once // Only allow setting this with mutators suspended so that we can avoid using a lock in the // GetReferent fast path as an optimization. - void EnableSlowPath() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnableSlowPath() SHARED_REQUIRES(Locks::mutator_lock_); + void BroadcastForSlowPath(Thread* self); // Decode the referent, may block if references are being processed. mirror::Object* GetReferent(Thread* self, mirror::Reference* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::reference_processor_lock_); - void EnqueueClearedReferences(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_); + void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_); void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref, - IsHeapReferenceMarkedCallback* is_marked_callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UpdateRoots(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_); + collector::GarbageCollector* collector) + SHARED_REQUIRES(Locks::mutator_lock_); + void UpdateRoots(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_); // Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock. bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::reference_processor_lock_, - Locks::reference_queue_finalizer_references_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::reference_processor_lock_, + !Locks::reference_queue_finalizer_references_lock_); private: - class ProcessReferencesArgs { - public: - ProcessReferencesArgs(IsHeapReferenceMarkedCallback* is_marked_callback, - MarkObjectCallback* mark_callback, void* arg) - : is_marked_callback_(is_marked_callback), mark_callback_(mark_callback), arg_(arg) { - } - - // The is marked callback is null when the args aren't set up. - IsHeapReferenceMarkedCallback* is_marked_callback_; - MarkObjectCallback* mark_callback_; - void* arg_; - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(ProcessReferencesArgs); - }; - bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool SlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_); // Called by ProcessReferences. 
- void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::reference_processor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisableSlowPath(Thread* self) REQUIRES(Locks::reference_processor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // If we are preserving references it means that some dead objects may become live, we use start // and stop preserving to block mutators using GetReferent from getting access to these // referents. - void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_); - void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(Locks::reference_processor_lock_); - // Process args, used by the GetReferent to return referents which are already marked. - ProcessReferencesArgs process_references_args_ GUARDED_BY(Locks::reference_processor_lock_); + void StartPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_); + void StopPreservingReferences(Thread* self) REQUIRES(!Locks::reference_processor_lock_); + // Collector which is clearing references, used by the GetReferent to return referents which are + // already marked. + collector::GarbageCollector* collector_ GUARDED_BY(Locks::reference_processor_lock_); // Boolean for whether or not we are preserving references (either soft references or finalizers). // If this is true, then we cannot return a referent (see comment in GetReferent). bool preserving_references_ GUARDED_BY(Locks::reference_processor_lock_); diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc index 4ba3983d58..f5054289e3 100644 --- a/runtime/gc/reference_queue.cc +++ b/runtime/gc/reference_queue.cc @@ -137,12 +137,12 @@ size_t ReferenceQueue::GetLength() const { } void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references, - IsHeapReferenceMarkedCallback* preserve_callback, - void* arg) { + collector::GarbageCollector* collector) { while (!IsEmpty()) { mirror::Reference* ref = DequeuePendingReference(); mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr(); - if (referent_addr->AsMirrorPtr() != nullptr && !preserve_callback(referent_addr, arg)) { + if (referent_addr->AsMirrorPtr() != nullptr && + !collector->IsMarkedHeapReference(referent_addr)) { // Referent is white, clear it. if (Runtime::Current()->IsActiveTransaction()) { ref->ClearReferent<true>(); @@ -157,14 +157,13 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references, } void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references, - IsHeapReferenceMarkedCallback* is_marked_callback, - MarkObjectCallback* mark_object_callback, - void* arg) { + collector::GarbageCollector* collector) { while (!IsEmpty()) { mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference(); mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr(); - if (referent_addr->AsMirrorPtr() != nullptr && !is_marked_callback(referent_addr, arg)) { - mirror::Object* forward_address = mark_object_callback(referent_addr->AsMirrorPtr(), arg); + if (referent_addr->AsMirrorPtr() != nullptr && + !collector->IsMarkedHeapReference(referent_addr)) { + mirror::Object* forward_address = collector->MarkObject(referent_addr->AsMirrorPtr()); // If the referent is non-null the reference must be queuable. DCHECK(ref->IsEnqueuable()); // Move the updated referent to the zombie field.
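The hunks above and below repeat one mechanical transformation: the (IsHeapReferenceMarkedCallback*, MarkObjectCallback*, void* arg) triple becomes a single collector::GarbageCollector*, turning C-style callbacks with an opaque context into virtual calls on the collector. A minimal before/after sketch of the pattern, with invented stand-in types:

struct HeapRef {};  // stand-in for mirror::HeapReference<mirror::Object>

// Before: callbacks plus a void* context threaded through every helper.
typedef bool IsMarkedFn(HeapRef* ref, void* arg);
void ClearWhiteBefore(HeapRef* ref, IsMarkedFn* is_marked, void* arg) {
  if (!is_marked(ref, arg)) {
    // referent is white: clear it
  }
}

// After: the collector itself is the context; the query is a virtual call.
class CollectorIface {
 public:
  virtual ~CollectorIface() {}
  virtual bool IsMarkedHeapReference(HeapRef* ref) = 0;
};
void ClearWhiteAfter(HeapRef* ref, CollectorIface* collector) {
  if (!collector->IsMarkedHeapReference(ref)) {
    // referent is white: clear it
  }
}

Besides removing the void* plumbing, this gives thread-safety annotations and read-barrier-specific behavior a single collector type to hang off, instead of re-threading three function pointers through every queue helper.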
@@ -180,8 +179,7 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc } } -void ReferenceQueue::ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, - void* arg) { +void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) { if (UNLIKELY(IsEmpty())) { return; } @@ -190,15 +188,15 @@ void ReferenceQueue::ForwardSoftReferences(IsHeapReferenceMarkedCallback* preser do { mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr(); if (referent_addr->AsMirrorPtr() != nullptr) { - UNUSED(preserve_callback(referent_addr, arg)); + visitor->MarkHeapReference(referent_addr); } ref = ref->GetPendingNext(); } while (LIKELY(ref != head)); } -void ReferenceQueue::UpdateRoots(IsMarkedCallback* callback, void* arg) { +void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) { if (list_ != nullptr) { - list_ = down_cast<mirror::Reference*>(callback(list_, arg)); + list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_)); } } diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h index c45be8591f..aabac97742 100644 --- a/runtime/gc/reference_queue.h +++ b/runtime/gc/reference_queue.h @@ -22,6 +22,7 @@ #include <vector> #include "atomic.h" +#include "base/mutex.h" #include "base/timing_logger.h" #include "globals.h" #include "jni.h" @@ -36,6 +37,10 @@ class Reference; namespace gc { +namespace collector { +class GarbageCollector; +} // namespace collector + class Heap; // Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the @@ -49,40 +54,39 @@ class ReferenceQueue { // since it uses a lock to avoid a race between checking for the references presence and adding // it. void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_); // Enqueue a reference, unlike EnqueuePendingReference, enqueue reference checks that the // reference IsEnqueueable. Not thread safe, used when mutators are paused to minimize lock // overhead. - void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_); // Enqueue a reference without checking that it is enqueable. - void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void EnqueuePendingReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_); // Dequeue the first reference (returns list_). - mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_); // Enqueues finalizer references with white referents. White referents are blackened, moved to // the zombie field, and the referent field is cleared. void EnqueueFinalizerReferences(ReferenceQueue* cleared_references, - IsHeapReferenceMarkedCallback* is_marked_callback, - MarkObjectCallback* mark_object_callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + collector::GarbageCollector* collector) + SHARED_REQUIRES(Locks::mutator_lock_); // Walks the reference list marking any references subject to the reference clearing policy. // References with a black referent are removed from the list. References with white referents // biased toward saving are blackened and also removed from the list. 
- void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ForwardSoftReferences(MarkObjectVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_); // Unlink the reference list clearing references objects with white referents. Cleared references // registered to a reference queue are scheduled for appending by the heap worker thread. void ClearWhiteReferences(ReferenceQueue* cleared_references, - IsHeapReferenceMarkedCallback* is_marked_callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + collector::GarbageCollector* collector) + SHARED_REQUIRES(Locks::mutator_lock_); - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - size_t GetLength() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_); + size_t GetLength() const SHARED_REQUIRES(Locks::mutator_lock_); bool IsEmpty() const { return list_ == nullptr; @@ -90,13 +94,13 @@ class ReferenceQueue { void Clear() { list_ = nullptr; } - mirror::Reference* GetList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Reference* GetList() SHARED_REQUIRES(Locks::mutator_lock_) { return list_; } // Visits list_, currently only used for the mark compact GC. - void UpdateRoots(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void UpdateRoots(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_); private: // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h index d9ad9a38ca..2263797d4a 100644 --- a/runtime/gc/space/bump_pointer_space-inl.h +++ b/runtime/gc/space/bump_pointer_space-inl.h @@ -63,7 +63,7 @@ inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t } inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t num_bytes) { - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); uint8_t* old_end; uint8_t* new_end; do { @@ -87,7 +87,7 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) { } inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t num_bytes = obj->SizeOf(); if (usable_size != nullptr) { *usable_size = RoundUp(num_bytes, kAlignment); diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index df43606485..0e27d8467b 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -51,14 +51,14 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector. mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_); mirror::Object* AllocNonvirtual(size_t num_bytes); mirror::Object* AllocNonvirtualWithoutAccounting(size_t num_bytes); // Return the storage space required by obj. 
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return AllocationSizeNonvirtual(obj, usable_size); } @@ -72,7 +72,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { } size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Removes the fork time growth limit on capacity, allowing the application to allocate up to the // maximum reserved size of the heap. @@ -99,19 +99,21 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { } // Reset the space to empty. - void Clear() OVERRIDE LOCKS_EXCLUDED(block_lock_); + void Clear() OVERRIDE REQUIRES(!block_lock_); void Dump(std::ostream& os) const; - size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_); - size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); - void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_); - void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); - - uint64_t GetBytesAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - uint64_t GetObjectsAllocated() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!block_lock_); + size_t RevokeAllThreadLocalBuffers() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_); + void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_); + void AssertAllThreadLocalBuffersAreRevoked() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_); + + uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_); + uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_); bool IsEmpty() const { return Begin() == End(); } @@ -130,10 +132,10 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Return the object which comes after obj, while ensuring alignment. static mirror::Object* GetNextObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Allocate a new TLAB, returns false if the allocation failed. - bool AllocNewTlab(Thread* self, size_t bytes); + bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_); BumpPointerSpace* AsBumpPointerSpace() OVERRIDE { return this; @@ -141,7 +143,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Go through all of the blocks and visit the continuous objects. void Walk(ObjectCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_); accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE; @@ -152,7 +154,7 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Object alignment within the space. 
static constexpr size_t kAlignment = 8; @@ -161,13 +163,13 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { BumpPointerSpace(const std::string& name, MemMap* mem_map); // Allocate a raw block of bytes. - uint8_t* AllocBlock(size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(block_lock_); - void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(block_lock_); + uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_); + void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_); // The main block is an unbounded block where objects go when there are no other blocks. This // enables us to maintain tightly packed objects when you are not using thread local buffers for // allocation. The main block starts at the space Begin(). - void UpdateMainBlock() EXCLUSIVE_LOCKS_REQUIRED(block_lock_); + void UpdateMainBlock() REQUIRES(block_lock_); uint8_t* growth_end_; AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions. diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index 5237c7b08e..e1c5b6484e 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -20,13 +20,13 @@ #include "gc/accounting/card_table.h" #include "gc/accounting/space_bitmap-inl.h" #include "gc/heap.h" +#include "memory_tool_malloc_space-inl.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "runtime.h" #include "thread.h" #include "thread_list.h" #include "utils.h" -#include "valgrind_malloc_space-inl.h" namespace art { namespace gc { @@ -62,8 +62,8 @@ DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin // Everything is set so record in immutable structure and leave uint8_t* begin = mem_map->Begin(); - if (Runtime::Current()->RunningOnValgrind()) { - return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>( + if (Runtime::Current()->IsRunningOnMemoryTool()) { + return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>( mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit, can_move_objects, starting_size); } else { @@ -152,8 +152,8 @@ MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& n void* allocator, uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit, bool can_move_objects) { - if (Runtime::Current()->RunningOnValgrind()) { - return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>( + if (Runtime::Current()->IsRunningOnMemoryTool()) { + return new MemoryToolMallocSpace<DlMallocSpace, kDefaultMemoryToolRedZoneBytes, true, false>( mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit, can_move_objects, starting_size_); } else { diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h index 1f80f1fd6b..eab757a13e 100644 --- a/runtime/gc/space/dlmalloc_space.h +++ b/runtime/gc/space/dlmalloc_space.h @@ -30,7 +30,7 @@ namespace collector { namespace space { // An alloc space is a space where objects may be allocated and garbage collected. Not final as may -// be overridden by a ValgrindMallocSpace. +// be overridden by a MemoryToolMallocSpace. class DlMallocSpace : public MallocSpace { public: // Create a DlMallocSpace from an existing mem_map. 
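Stepping back to the bump-pointer hunks a little earlier: AllocNonvirtualWithoutAccounting claims memory by atomically bumping the space's end pointer and retrying on contention, which is why the DCHECK_ALIGNED on num_bytes matters (every successful bump must preserve kAlignment). A self-contained sketch of that loop, with simplified names and none of the real space's accounting:

#include <atomic>
#include <cstddef>
#include <cstdint>

class BumpAlloc {
 public:
  BumpAlloc(uint8_t* begin, uint8_t* limit) : end_(begin), growth_end_(limit) {}

  // num_bytes must already be rounded up to the space's alignment (8 here).
  void* Alloc(size_t num_bytes) {
    uint8_t* old_end;
    uint8_t* new_end;
    do {
      old_end = end_.load(std::memory_order_relaxed);
      new_end = old_end + num_bytes;
      if (new_end > growth_end_) {
        return nullptr;  // out of space; the real code falls back to a slower path
      }
      // Retry if another thread bumped end_ between our load and the CAS.
    } while (!end_.compare_exchange_weak(old_end, new_end,
                                         std::memory_order_relaxed));
    return old_end;  // the claimed [old_end, new_end) range
  }

 private:
  std::atomic<uint8_t*> end_;  // first free byte
  uint8_t* const growth_end_;  // current capacity limit
};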
@@ -46,30 +46,30 @@ class DlMallocSpace : public MallocSpace { static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit, size_t capacity, uint8_t* requested_begin, bool can_move_objects); - // Virtual to allow ValgrindMallocSpace to intercept. + // Virtual to allow MemoryToolMallocSpace to intercept. virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE LOCKS_EXCLUDED(lock_); - // Virtual to allow ValgrindMallocSpace to intercept. + OVERRIDE REQUIRES(!lock_); + // Virtual to allow MemoryToolMallocSpace to intercept. virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE LOCKS_EXCLUDED(lock_) { + OVERRIDE REQUIRES(!lock_) { return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } - // Virtual to allow ValgrindMallocSpace to intercept. + // Virtual to allow MemoryToolMallocSpace to intercept. virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE { return AllocationSizeNonvirtual(obj, usable_size); } - // Virtual to allow ValgrindMallocSpace to intercept. + // Virtual to allow MemoryToolMallocSpace to intercept. virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE - LOCKS_EXCLUDED(lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Virtual to allow ValgrindMallocSpace to intercept. + REQUIRES(!lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + // Virtual to allow MemoryToolMallocSpace to intercept. virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE - LOCKS_EXCLUDED(lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!lock_) + SHARED_REQUIRES(Locks::mutator_lock_); size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE { return num_bytes; @@ -86,7 +86,7 @@ class DlMallocSpace : public MallocSpace { // Faster non-virtual allocation path. mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Faster non-virtual allocation size path. size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size); @@ -104,7 +104,7 @@ class DlMallocSpace : public MallocSpace { // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be // in use, indicated by num_bytes equaling zero. - void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_); // Returns the number of bytes that the space has currently obtained from the system. This is // greater or equal to the amount of live data in the space. 
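The annotation churn running through every header in this patch is a migration from Clang's legacy lock annotations (EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED) to capability-style ones (REQUIRES, SHARED_REQUIRES), where REQUIRES(!lock_) states "the caller must not already hold lock_" as a negative capability the analysis can propagate across calls. A minimal sketch against an invented Space class, assuming Clang with -Wthread-safety (plus -Wthread-safety-negative for the negated form); the macro spellings follow the Clang thread safety analysis documentation:

#include <cstddef>

#define CAPABILITY(x)  __attribute__((capability(x)))
#define GUARDED_BY(x)  __attribute__((guarded_by(x)))
#define REQUIRES(...)  __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...)   __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)   __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}    // body elided; annotation is what the analysis trusts
  void Unlock() RELEASE() {}
};

class Space {
 public:
  // Legacy form: LOCKS_EXCLUDED(lock_). In the capability form, a caller that
  // already holds lock_ gets a -Wthread-safety warning at the call site.
  void Trim() REQUIRES(!lock_) {
    lock_.Lock();
    size_ = 0;
    lock_.Unlock();
  }
  // Legacy form: EXCLUSIVE_LOCKS_REQUIRED(lock_).
  void TrimLocked() REQUIRES(lock_) { size_ = 0; }

 private:
  Mutex lock_;
  size_t size_ GUARDED_BY(lock_) = 0;
};

The !*gc_complete_lock_ spellings in heap.h are the same pattern applied through a pointer member: the capability is the pointed-to Mutex, hence the dereference.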
@@ -136,7 +136,7 @@ class DlMallocSpace : public MallocSpace { } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace, @@ -147,7 +147,7 @@ class DlMallocSpace : public MallocSpace { mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE { diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index 93ff8aaff7..215c18b8d9 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -44,7 +44,7 @@ class ImageSpace : public MemMapSpace { // used to transfer ownership of the OatFile to the ClassLinker when // it is initialized. static ImageSpace* Create(const char* image, InstructionSet image_isa, std::string* error_msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Reads the image header from the specified image location for the // instruction set image_isa or dies trying. @@ -64,10 +64,10 @@ class ImageSpace : public MemMapSpace { // Releases the OatFile from the ImageSpace so it can be transfer to // the caller, presumably the ClassLinker. OatFile* ReleaseOatFile() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VerifyImageAllocations() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ImageHeader& GetImageHeader() const { return *reinterpret_cast<ImageHeader*>(Begin()); @@ -130,13 +130,13 @@ class ImageSpace : public MemMapSpace { // the OatFile in /data/dalvik-cache if necessary. 
static ImageSpace* Init(const char* image_filename, const char* image_location, bool validate_oat_file, std::string* error_msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); OatFile* OpenOatFile(const char* image, std::string* error_msg) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool ValidateOatFile(std::string* error_msg) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); friend class Space; diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index 52192e212c..2798b21f94 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -16,7 +16,9 @@ #include "large_object_space.h" +#include <valgrind.h> #include <memory> +#include <memcheck/memcheck.h> #include "gc/accounting/heap_bitmap-inl.h" #include "gc/accounting/space_bitmap-inl.h" @@ -32,12 +34,12 @@ namespace art { namespace gc { namespace space { -class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace { +class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace { public: - explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) { + explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) { } - ~ValgrindLargeObjectMapSpace() OVERRIDE { + ~MemoryToolLargeObjectMapSpace() OVERRIDE { // Keep valgrind happy if there are any large objects such as dex cache arrays which aren't // freed since they are held live by the class linker. MutexLock mu(Thread::Current(), lock_); @@ -50,13 +52,14 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace { size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE { mirror::Object* obj = - LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated, + LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated, usable_size, bytes_tl_bulk_allocated); mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>( - reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes); - VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes); - VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes, - kValgrindRedZoneBytes); + reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes); + MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes); + MEMORY_TOOL_MAKE_NOACCESS( + reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes, + kMemoryToolRedZoneBytes); if (usable_size != nullptr) { *usable_size = num_bytes; // Since we have redzones, shrink the usable size.
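A standalone sketch of the red-zone arithmetic used by the Alloc above and the Free in the next hunk. The MakeNoAccess/MakeUndefined/MakeDefined helpers are hypothetical no-op stand-ins for what base/memory_tool.h maps to Valgrind or ASan poisoning; the 8-byte red zone matches the malloc-space default, whereas this large-object space uses a whole page (kPageSize):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Stand-ins: real builds route these to VALGRIND_MAKE_MEM_* / ASan poisoning.
static void MakeNoAccess(void*, size_t) {}
static void MakeUndefined(void*, size_t) {}
static void MakeDefined(void*, size_t) {}

static const size_t kRedZoneBytes = 8;  // kDefaultMemoryToolRedZoneBytes

// Allocate num_bytes plus a red zone on each side; return the interior
// pointer and poison both zones so the tool flags under/overflows.
void* AllocWithRedZones(size_t num_bytes) {
  uint8_t* base = static_cast<uint8_t*>(malloc(num_bytes + 2 * kRedZoneBytes));
  if (base == nullptr) return nullptr;
  uint8_t* payload = base + kRedZoneBytes;
  MakeNoAccess(base, kRedZoneBytes);                 // left red zone
  MakeDefined(payload, num_bytes);                   // usable payload
  MakeNoAccess(payload + num_bytes, kRedZoneBytes);  // right red zone
  return payload;
}

// Free must undo the offset and make the whole block accessible again so the
// underlying allocator can touch its own metadata without tool errors.
void FreeWithRedZones(void* payload, size_t num_bytes) {
  uint8_t* base = static_cast<uint8_t*>(payload) - kRedZoneBytes;
  MakeUndefined(base, num_bytes + 2 * kRedZoneBytes);
  free(base);
}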
} @@ -73,7 +76,7 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace { size_t Free(Thread* self, mirror::Object* obj) OVERRIDE { mirror::Object* object_with_rdz = ObjectWithRedzone(obj); - VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr)); + MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr)); return LargeObjectMapSpace::Free(self, object_with_rdz); } @@ -84,15 +87,15 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace { private: static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) { return reinterpret_cast<const mirror::Object*>( - reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes); + reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes); } static mirror::Object* ObjectWithRedzone(mirror::Object* obj) { return reinterpret_cast<mirror::Object*>( - reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes); + reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes); } - static constexpr size_t kValgrindRedZoneBytes = kPageSize; + static constexpr size_t kMemoryToolRedZoneBytes = kPageSize; }; void LargeObjectSpace::SwapBitmaps() { @@ -119,8 +122,8 @@ LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name) lock_("large object map space lock", kAllocSpaceLock) {} LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) { - if (Runtime::Current()->RunningOnValgrind()) { - return new ValgrindLargeObjectMapSpace(name); + if (Runtime::Current()->IsRunningOnMemoryTool()) { + return new MemoryToolLargeObjectMapSpace(name); } else { return new LargeObjectMapSpace(name); } @@ -437,7 +440,7 @@ size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) { AllocationInfo* next_next_info = next_info->GetNextInfo(); // Next next info can't be free since we always coalesce. DCHECK(!next_next_info->IsFree()); - DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize())); + DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment); new_free_info = next_next_info; new_free_size += next_next_info->GetPrevFreeBytes(); RemoveFreePrev(next_next_info); diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h index 45ed0cd75f..c726998ea2 100644 --- a/runtime/gc/space/large_object_space.h +++ b/runtime/gc/space/large_object_space.h @@ -96,7 +96,7 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { return Begin() <= byte_obj && byte_obj < End(); } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return true if the large object is a zygote large object. Potentially slow. virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0; @@ -130,11 +130,12 @@ class LargeObjectMapSpace : public LargeObjectSpace { // of malloc. static LargeObjectMapSpace* Create(const std::string& name); // Return the storage space required by obj. 
- size_t AllocationSize(mirror::Object* obj, size_t* usable_size); + size_t AllocationSize(mirror::Object* obj, size_t* usable_size) REQUIRES(!lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, - size_t* usable_size, size_t* bytes_tl_bulk_allocated); - size_t Free(Thread* self, mirror::Object* ptr); - void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + size_t* usable_size, size_t* bytes_tl_bulk_allocated) + REQUIRES(!lock_); + size_t Free(Thread* self, mirror::Object* ptr) REQUIRES(!lock_); + void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE REQUIRES(!lock_); // TODO: disabling thread safety analysis as this may be called when we already hold lock_. bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS; @@ -146,8 +147,8 @@ class LargeObjectMapSpace : public LargeObjectSpace { explicit LargeObjectMapSpace(const std::string& name); virtual ~LargeObjectMapSpace() {} - bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE LOCKS_EXCLUDED(lock_); - void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE LOCKS_EXCLUDED(lock_); + bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE REQUIRES(!lock_); + void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_); // Used to ensure mutual exclusion when the allocation spaces data structures are being modified. mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; @@ -163,12 +164,13 @@ class FreeListSpace FINAL : public LargeObjectSpace { virtual ~FreeListSpace(); static FreeListSpace* Create(const std::string& name, uint8_t* requested_begin, size_t capacity); size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE - EXCLUSIVE_LOCKS_REQUIRED(lock_); + REQUIRES(lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, - size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE; - size_t Free(Thread* self, mirror::Object* obj) OVERRIDE; - void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); - void Dump(std::ostream& os) const; + size_t* usable_size, size_t* bytes_tl_bulk_allocated) + OVERRIDE REQUIRES(!lock_); + size_t Free(Thread* self, mirror::Object* obj) OVERRIDE REQUIRES(!lock_); + void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_); + void Dump(std::ostream& os) const REQUIRES(!lock_); protected: FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end); @@ -186,9 +188,9 @@ class FreeListSpace FINAL : public LargeObjectSpace { return GetAllocationAddressForSlot(GetSlotIndexForAllocationInfo(info)); } // Removes header from the free blocks set by finding the corresponding iterator and erasing it. 
- void RemoveFreePrev(AllocationInfo* info) EXCLUSIVE_LOCKS_REQUIRED(lock_); + void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_); bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE; - void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE; + void SetAllLargeObjectsAsZygoteObjects(Thread* self) OVERRIDE REQUIRES(!lock_); class SortByPrevFree { public: diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc index b014217fe2..3a0d814a20 100644 --- a/runtime/gc/space/malloc_space.cc +++ b/runtime/gc/space/malloc_space.cc @@ -46,8 +46,8 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map, if (create_bitmaps) { size_t bitmap_index = bitmap_index_++; static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize); - CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin()))); - CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End()))); + CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->Begin()), kGcCardSize); + CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map->End()), kGcCardSize); live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create( StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)), Begin(), NonGrowthLimitCapacity())); @@ -164,10 +164,10 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l // alloc spaces. RevokeAllThreadLocalBuffers(); SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize))); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_)); - DCHECK(IsAligned<accounting::CardTable::kCardSize>(End())); - DCHECK(IsAligned<kPageSize>(begin_)); - DCHECK(IsAligned<kPageSize>(End())); + DCHECK_ALIGNED(begin_, accounting::CardTable::kCardSize); + DCHECK_ALIGNED(End(), accounting::CardTable::kCardSize); + DCHECK_ALIGNED(begin_, kPageSize); + DCHECK_ALIGNED(End(), kPageSize); size_t size = RoundUp(Size(), kPageSize); // Trimming the heap should be done by the caller since we may have invalidated the accounting // stored in between objects. diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h index 5f3a1db3f7..4e56c4a429 100644 --- a/runtime/gc/space/malloc_space.h +++ b/runtime/gc/space/malloc_space.h @@ -20,8 +20,7 @@ #include "space.h" #include <ostream> -#include <valgrind.h> -#include <memcheck/memcheck.h> +#include "base/memory_tool.h" namespace art { namespace gc { @@ -64,9 +63,9 @@ class MallocSpace : public ContinuousMemMapAllocSpace { // amount of the storage space that may be used by obj. 
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0; virtual size_t Free(Thread* self, mirror::Object* ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Returns the maximum bytes that could be allocated for the given // size in bulk, that is the maximum value for the @@ -161,8 +160,8 @@ class MallocSpace : public ContinuousMemMapAllocSpace { size_t maximum_size, bool low_memory_mode) = 0; virtual void RegisterRecentFree(mirror::Object* ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(lock_); virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() { return &SweepCallback; @@ -197,7 +196,7 @@ class MallocSpace : public ContinuousMemMapAllocSpace { private: static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(MallocSpace); }; diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h index bc329e129c..ea8b8aae5f 100644 --- a/runtime/gc/space/valgrind_malloc_space-inl.h +++ b/runtime/gc/space/memory_tool_malloc_space-inl.h @@ -14,22 +14,20 @@ * limitations under the License. */ -#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_ -#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_ +#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_ +#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_ -#include "valgrind_malloc_space.h" - -#include <memcheck/memcheck.h> - -#include "valgrind_settings.h" +#include "base/memory_tool.h" +#include "memory_tool_malloc_space.h" +#include "memory_tool_settings.h" namespace art { namespace gc { namespace space { -namespace valgrind_details { +namespace memory_tool_details { -template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable> +template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable> inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes, size_t bytes_allocated, size_t usable_size, size_t bytes_tl_bulk_allocated, @@ -48,26 +46,26 @@ inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes, if (kUseObjSizeForUsable) { *usable_size_out = num_bytes; } else { - *usable_size_out = usable_size - 2 * kValgrindRedZoneBytes; + *usable_size_out = usable_size - 2 * kMemoryToolRedZoneBytes; } } // Left redzone. - VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes); + MEMORY_TOOL_MAKE_NOACCESS(obj_with_rdz, kMemoryToolRedZoneBytes); // Make requested memory readable. // (If the allocator assumes memory is zeroed out, we might get UNDEFINED warnings, so make // everything DEFINED initially.) mirror::Object* result = reinterpret_cast<mirror::Object*>( - reinterpret_cast<uint8_t*>(obj_with_rdz) + kValgrindRedZoneBytes); - VALGRIND_MAKE_MEM_DEFINED(result, num_bytes); + reinterpret_cast<uint8_t*>(obj_with_rdz) + kMemoryToolRedZoneBytes); + MEMORY_TOOL_MAKE_DEFINED(result, num_bytes); // Right redzone. Assumes that if bytes_allocated > usable_size, then the difference is // management data at the upper end, and for simplicity we will not protect that. 
// At the moment, this fits RosAlloc (no management data in a slot, usable_size == alloc_size) // and DlMalloc (allocation_size = (usable_size == num_bytes) + 4, 4 is management) - VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, - usable_size - (num_bytes + kValgrindRedZoneBytes)); + MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<uint8_t*>(result) + num_bytes, + usable_size - (num_bytes + kMemoryToolRedZoneBytes)); return result; } @@ -76,15 +74,15 @@ inline size_t GetObjSizeNoThreadSafety(mirror::Object* obj) NO_THREAD_SAFETY_ANA return obj->SizeOf<kVerifyNone>(); } -} // namespace valgrind_details +} // namespace memory_tool_details template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> mirror::Object* -ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::AllocWithGrowth( Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out, @@ -92,14 +90,14 @@ ValgrindMallocSpace<S, size_t bytes_allocated; size_t usable_size; size_t bytes_tl_bulk_allocated; - void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes, + void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated); if (obj_with_rdz == nullptr) { return nullptr; } - return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>( + return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>( obj_with_rdz, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated, @@ -109,11 +107,11 @@ ValgrindMallocSpace<S, } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -mirror::Object* ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +mirror::Object* MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::Alloc( Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out, @@ -121,13 +119,13 @@ mirror::Object* ValgrindMallocSpace<S, size_t bytes_allocated; size_t usable_size; size_t bytes_tl_bulk_allocated; - void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes, + void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated); if (obj_with_rdz == nullptr) { return nullptr; } - return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, + return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(obj_with_rdz, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated, @@ -137,11 +135,11 @@ mirror::Object* ValgrindMallocSpace<S, } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -mirror::Object* ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +mirror::Object* MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::AllocThreadUnsafe( Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out, @@ -149,14 +147,14 @@ mirror::Object* ValgrindMallocSpace<S, size_t bytes_allocated; size_t usable_size; size_t bytes_tl_bulk_allocated; - void* obj_with_rdz = 
S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes, + void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes, &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated); if (obj_with_rdz == nullptr) { return nullptr; } - return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>( + return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>( obj_with_rdz, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated, @@ -166,38 +164,39 @@ mirror::Object* ValgrindMallocSpace<S, } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -size_t ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +size_t MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::AllocationSize( mirror::Object* obj, size_t* usable_size) { size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>( - reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kValgrindRedZoneBytes : 0)), + reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)), usable_size); if (usable_size != nullptr) { if (kUseObjSizeForUsable) { - *usable_size = valgrind_details::GetObjSizeNoThreadSafety(obj); + *usable_size = memory_tool_details::GetObjSizeNoThreadSafety(obj); } else { - *usable_size = *usable_size - 2 * kValgrindRedZoneBytes; + *usable_size = *usable_size - 2 * kMemoryToolRedZoneBytes; } } return result; } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -size_t ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +size_t MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::Free( Thread* self, mirror::Object* ptr) { void* obj_after_rdz = reinterpret_cast<void*>(ptr); - uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kValgrindRedZoneBytes; + uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes; + // Make redzones undefined. size_t usable_size; size_t allocation_size = AllocationSize(ptr, &usable_size); @@ -206,20 +205,20 @@ size_t ValgrindMallocSpace<S, // Use the obj-size-for-usable flag to determine whether usable_size is the more important one, // e.g., whether there's data in the allocation_size (and usable_size can't be trusted). 
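For clarity on the two boolean template parameters used throughout MemoryToolMallocSpace: kAdjustForRedzoneInAllocSize says whether the wrapped space expects the red-zone base pointer or the interior pointer, and kUseObjSizeForUsable decides both what usable_size means and how many bytes Free marks undefined (when allocation_size carries allocator metadata, usable_size can't be trusted, so the whole allocation_size is poisoned). The arithmetic in isolation, with invented helper names and the 8-byte default red zone:

#include <cstddef>
#include <cstdint>

static const size_t kRedZone = 8;  // kDefaultMemoryToolRedZoneBytes

// usable_size reported to callers, per kUseObjSizeForUsable.
size_t UsableSize(size_t gross_usable, size_t obj_size, bool use_obj_size) {
  // gross_usable is what the wrapped space reports, including both red zones.
  return use_obj_size ? obj_size : gross_usable - 2 * kRedZone;
}

// Pointer handed down to the wrapped space, per kAdjustForRedzoneInAllocSize.
uint8_t* BasePointer(uint8_t* interior, bool adjust_for_redzone) {
  return adjust_for_redzone ? interior - kRedZone : interior;
}

// Bytes Free marks undefined before delegating to the wrapped space.
size_t FreeUndefineBytes(size_t allocation_size, size_t usable_size,
                         bool use_obj_size) {
  return use_obj_size ? allocation_size : usable_size + 2 * kRedZone;
}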
if (kUseObjSizeForUsable) { - VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size); + MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, allocation_size); } else { - VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, usable_size + 2 * kValgrindRedZoneBytes); + MEMORY_TOOL_MAKE_UNDEFINED(obj_with_rdz, usable_size + 2 * kMemoryToolRedZoneBytes); } return S::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz)); } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -size_t ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +size_t MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::FreeList( Thread* self, size_t num_ptrs, mirror::Object** ptrs) { @@ -232,32 +231,33 @@ size_t ValgrindMallocSpace<S, } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> template <typename... Params> -ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, - kUseObjSizeForUsable>::ValgrindMallocSpace( + kUseObjSizeForUsable>::MemoryToolMallocSpace( MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) { - VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, - mem_map->Size() - initial_size); + MEMORY_TOOL_MAKE_DEFINED(mem_map->Begin(), initial_size); + MEMORY_TOOL_MAKE_UNDEFINED(mem_map->Begin() + initial_size, + mem_map->Size() - initial_size); } template <typename S, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -size_t ValgrindMallocSpace<S, - kValgrindRedZoneBytes, +size_t MemoryToolMallocSpace<S, + kMemoryToolRedZoneBytes, kAdjustForRedzoneInAllocSize, kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) { - return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kValgrindRedZoneBytes); + return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes); } } // namespace space } // namespace gc } // namespace art -#endif // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_INL_H_ +#endif // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_INL_H_ diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h index a6b010a2a1..a5dbad9af6 100644 --- a/runtime/gc/space/valgrind_malloc_space.h +++ b/runtime/gc/space/memory_tool_malloc_space.h @@ -14,24 +14,22 @@ * limitations under the License. */ -#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_ -#define ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_ +#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_ +#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_ #include "malloc_space.h" -#include <valgrind.h> - namespace art { namespace gc { namespace space { -// A specialization of DlMallocSpace/RosAllocSpace that places valgrind red zones around -// allocations. +// A specialization of DlMallocSpace/RosAllocSpace that places memory tool red +// zones around allocations. 
template <typename BaseMallocSpaceType, - size_t kValgrindRedZoneBytes, + size_t kMemoryToolRedZoneBytes, bool kAdjustForRedzoneInAllocSize, bool kUseObjSizeForUsable> -class ValgrindMallocSpace FINAL : public BaseMallocSpaceType { +class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType { public: mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) @@ -40,15 +38,15 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType { size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE; mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_); size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE; size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RegisterRecentFree(mirror::Object* ptr) OVERRIDE { UNUSED(ptr); @@ -57,15 +55,15 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType { size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE; template <typename... Params> - explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params); - virtual ~ValgrindMallocSpace() {} + MemoryToolMallocSpace(MemMap* mem_map, size_t initial_size, Params... params); + virtual ~MemoryToolMallocSpace() {} private: - DISALLOW_COPY_AND_ASSIGN(ValgrindMallocSpace); + DISALLOW_COPY_AND_ASSIGN(MemoryToolMallocSpace); }; } // namespace space } // namespace gc } // namespace art -#endif // ART_RUNTIME_GC_SPACE_VALGRIND_MALLOC_SPACE_H_ +#endif // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_MALLOC_SPACE_H_ diff --git a/runtime/gc/space/valgrind_settings.h b/runtime/gc/space/memory_tool_settings.h index 73da0fddc2..e9333c8c97 100644 --- a/runtime/gc/space/valgrind_settings.h +++ b/runtime/gc/space/memory_tool_settings.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_ -#define ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_ +#ifndef ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_ +#define ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_ namespace art { namespace gc { @@ -23,10 +23,10 @@ namespace space { // Default number of bytes to use as a red zone (rdz). A red zone of this size will be placed before // and after each allocation. 8 bytes provides long/double alignment. 
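With the 8-byte default above, every allocation is padded by 16 bytes in total and the caller's pointer is advanced past the leading redzone, so the payload stays 8-byte (long/double) aligned whenever the base allocator hands back 8-byte-aligned memory. A sketch of just the size and pointer arithmetic, under hypothetical names:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

static constexpr size_t kRedZoneBytes = 8;  // mirrors the default above

// Allocation side of the redzone scheme: pad the request with one redzone per
// end, hand out a pointer just past the leading redzone, and report the
// payload size rather than the padded size, matching the subtraction of
// 2 * kMemoryToolRedZoneBytes that AllocationSize performs above.
void* RedZoneAlloc(size_t num_bytes, size_t* usable_size) {
  uint8_t* base = static_cast<uint8_t*>(std::malloc(num_bytes + 2 * kRedZoneBytes));
  if (base == nullptr) {
    return nullptr;
  }
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  return base + kRedZoneBytes;
}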
-static constexpr size_t kDefaultValgrindRedZoneBytes = 8; +static constexpr size_t kDefaultMemoryToolRedZoneBytes = 8; } // namespace space } // namespace gc } // namespace art -#endif // ART_RUNTIME_GC_SPACE_VALGRIND_SETTINGS_H_ +#endif // ART_RUNTIME_GC_SPACE_MEMORY_TOOL_SETTINGS_H_ diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index 1cdf69dbe5..66fd62cee1 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -43,7 +43,7 @@ template<bool kForEvac> inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); mirror::Object* obj; if (LIKELY(num_bytes <= kRegionSize)) { // Non-large object. @@ -115,7 +115,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte size_t* usable_size, size_t* bytes_tl_bulk_allocated) { DCHECK(IsAllocated() && IsInToSpace()); - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_); uint8_t* old_top; uint8_t* new_top; @@ -138,8 +138,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte return reinterpret_cast<mirror::Object*>(old_top); } -inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) { size_t num_bytes = obj->SizeOf(); if (usable_size != nullptr) { if (LIKELY(num_bytes <= kRegionSize)) { @@ -266,7 +265,7 @@ template<bool kForEvac> mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { - DCHECK(IsAligned<kAlignment>(num_bytes)); + DCHECK_ALIGNED(num_bytes, kAlignment); DCHECK_GT(num_bytes, kRegionSize); size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize; DCHECK_GT(num_regs, 0U); diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 814ab6ce92..9a2d0c6d37 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -287,7 +287,7 @@ void RegionSpace::Dump(std::ostream& os) const { void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) { DCHECK(Contains(large_obj)); - DCHECK(IsAligned<kRegionSize>(large_obj)); + DCHECK_ALIGNED(large_obj, kRegionSize); MutexLock mu(Thread::Current(), region_lock_); uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj); uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize); @@ -366,7 +366,7 @@ void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) { uint8_t* tlab_start = thread->GetTlabStart(); DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr); if (tlab_start != nullptr) { - DCHECK(IsAligned<kRegionSize>(tlab_start)); + DCHECK_ALIGNED(tlab_start, kRegionSize); Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start)); DCHECK(r->IsAllocated()); DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize); diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index 19109f0d59..14e800595c 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h @@ -42,29 +42,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { // 
Allocate num_bytes, returns null if the space is full. mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, - size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE; + size_t* usable_size, size_t* bytes_tl_bulk_allocated) + OVERRIDE REQUIRES(!region_lock_); // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector. mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_); // The main allocation routine. template<bool kForEvac> ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, - size_t* bytes_tl_bulk_allocated); + size_t* bytes_tl_bulk_allocated) + REQUIRES(!region_lock_); // Allocate/free large objects (objects that are larger than the region size.) template<bool kForEvac> mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, - size_t* bytes_tl_bulk_allocated); - void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated); + size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_); + void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_); // Return the storage space required by obj. size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) { return AllocationSizeNonvirtual(obj, usable_size); } size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_); size_t Free(Thread*, mirror::Object*) OVERRIDE { UNIMPLEMENTED(FATAL); @@ -83,19 +85,19 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { return nullptr; } - void Clear() OVERRIDE LOCKS_EXCLUDED(region_lock_); + void Clear() OVERRIDE REQUIRES(!region_lock_); void Dump(std::ostream& os) const; - void DumpRegions(std::ostream& os); - void DumpNonFreeRegions(std::ostream& os); + void DumpRegions(std::ostream& os) REQUIRES(!region_lock_); + void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_); - size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_); - void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_); - size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); - void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_); - void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, - Locks::thread_list_lock_); + size_t RevokeThreadLocalBuffers(Thread* thread) REQUIRES(!region_lock_); + void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_); + size_t RevokeAllThreadLocalBuffers() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_); + void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_); + void AssertAllThreadLocalBuffersAreRevoked() + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_); enum class RegionType : uint8_t { kRegionTypeAll, // All types. 
@@ -112,24 +114,24 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { kRegionStateLargeTail, // Large tail (non-first regions of a large allocation). }; - template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal(); - template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal(); - uint64_t GetBytesAllocated() { + template<RegionType kRegionType> uint64_t GetBytesAllocatedInternal() REQUIRES(!region_lock_); + template<RegionType kRegionType> uint64_t GetObjectsAllocatedInternal() REQUIRES(!region_lock_); + uint64_t GetBytesAllocated() REQUIRES(!region_lock_) { return GetBytesAllocatedInternal<RegionType::kRegionTypeAll>(); } - uint64_t GetObjectsAllocated() { + uint64_t GetObjectsAllocated() REQUIRES(!region_lock_) { return GetObjectsAllocatedInternal<RegionType::kRegionTypeAll>(); } - uint64_t GetBytesAllocatedInFromSpace() { + uint64_t GetBytesAllocatedInFromSpace() REQUIRES(!region_lock_) { return GetBytesAllocatedInternal<RegionType::kRegionTypeFromSpace>(); } - uint64_t GetObjectsAllocatedInFromSpace() { + uint64_t GetObjectsAllocatedInFromSpace() REQUIRES(!region_lock_) { return GetObjectsAllocatedInternal<RegionType::kRegionTypeFromSpace>(); } - uint64_t GetBytesAllocatedInUnevacFromSpace() { + uint64_t GetBytesAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) { return GetBytesAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>(); } - uint64_t GetObjectsAllocatedInUnevacFromSpace() { + uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) { return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>(); } @@ -148,12 +150,12 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { // Go through all of the blocks and visit the continuous objects. void Walk(ObjectCallback* callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { WalkInternal<false>(callback, arg); } void WalkToSpace(ObjectCallback* callback, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { WalkInternal<true>(callback, arg); } @@ -161,7 +163,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { return nullptr; } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_); // Object alignment within the space. 
static constexpr size_t kAlignment = kObjectAlignment; @@ -201,22 +203,22 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { } void SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) - LOCKS_EXCLUDED(region_lock_); + REQUIRES(!region_lock_); - size_t FromSpaceSize(); - size_t UnevacFromSpaceSize(); - size_t ToSpaceSize(); - void ClearFromSpace(); + size_t FromSpaceSize() REQUIRES(!region_lock_); + size_t UnevacFromSpaceSize() REQUIRES(!region_lock_); + size_t ToSpaceSize() REQUIRES(!region_lock_); + void ClearFromSpace() REQUIRES(!region_lock_); void AddLiveBytes(mirror::Object* ref, size_t alloc_size) { Region* reg = RefToRegionUnlocked(ref); reg->AddLiveBytes(alloc_size); } - void AssertAllRegionLiveBytesZeroOrCleared(); + void AssertAllRegionLiveBytesZeroOrCleared() REQUIRES(!region_lock_); - void RecordAlloc(mirror::Object* ref); - bool AllocNewTlab(Thread* self); + void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_); + bool AllocNewTlab(Thread* self) REQUIRES(!region_lock_); uint32_t Time() { return time_; @@ -476,7 +478,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { friend class RegionSpace; }; - Region* RefToRegion(mirror::Object* ref) LOCKS_EXCLUDED(region_lock_) { + Region* RefToRegion(mirror::Object* ref) REQUIRES(!region_lock_) { MutexLock mu(Thread::Current(), region_lock_); return RefToRegionLocked(ref); } @@ -492,7 +494,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { return RefToRegionLocked(ref); } - Region* RefToRegionLocked(mirror::Object* ref) EXCLUSIVE_LOCKS_REQUIRED(region_lock_) { + Region* RefToRegionLocked(mirror::Object* ref) REQUIRES(region_lock_) { DCHECK(HasAddress(ref)); uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin()); size_t reg_idx = offset / kRegionSize; @@ -504,7 +506,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { } mirror::Object* GetNextObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h index 25d44452e2..8bff2b4c0f 100644 --- a/runtime/gc/space/rosalloc_space-inl.h +++ b/runtime/gc/space/rosalloc_space-inl.h @@ -17,8 +17,9 @@ #ifndef ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_ #define ART_RUNTIME_GC_SPACE_ROSALLOC_SPACE_INL_H_ +#include "base/memory_tool.h" #include "gc/allocator/rosalloc-inl.h" -#include "gc/space/valgrind_settings.h" +#include "gc/space/memory_tool_settings.h" #include "rosalloc_space.h" #include "thread.h" @@ -26,26 +27,26 @@ namespace art { namespace gc { namespace space { -template<bool kMaybeRunningOnValgrind> +template<bool kMaybeIsRunningOnMemoryTool> inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) { // obj is a valid object. Use its class in the header to get the size. // Don't use verification since the object may be dead if we are sweeping. size_t size = obj->SizeOf<kVerifyNone>(); - bool running_on_valgrind = false; - if (kMaybeRunningOnValgrind) { - running_on_valgrind = RUNNING_ON_VALGRIND != 0; - if (running_on_valgrind) { - size += 2 * kDefaultValgrindRedZoneBytes; + bool add_redzones = false; + if (kMaybeIsRunningOnMemoryTool) { + add_redzones = RUNNING_ON_MEMORY_TOOL ? 
kMemoryToolAddsRedzones : 0; + if (add_redzones) { + size += 2 * kDefaultMemoryToolRedZoneBytes; + } } else { - DCHECK_EQ(RUNNING_ON_VALGRIND, 0U); + DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U); } size_t size_by_size = rosalloc_->UsableSize(size); if (kIsDebugBuild) { - // On valgrind, the red zone has an impact... + // On memory tool, the red zone has an impact... const uint8_t* obj_ptr = reinterpret_cast<const uint8_t*>(obj); size_t size_by_ptr = rosalloc_->UsableSize( - obj_ptr - (running_on_valgrind ? kDefaultValgrindRedZoneBytes : 0)); + obj_ptr - (add_redzones ? kDefaultMemoryToolRedZoneBytes : 0)); if (size_by_size != size_by_ptr) { LOG(INFO) << "Found a bad sized obj of size " << size << " at " << std::hex << reinterpret_cast<intptr_t>(obj_ptr) << std::dec diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc index bc4414daab..1a193c391d 100644 --- a/runtime/gc/space/rosalloc_space.cc +++ b/runtime/gc/space/rosalloc_space.cc @@ -30,7 +30,7 @@ #include "thread.h" #include "thread_list.h" #include "utils.h" -#include "valgrind_malloc_space-inl.h" +#include "memory_tool_malloc_space-inl.h" namespace art { namespace gc { @@ -43,7 +43,7 @@ static constexpr size_t kPrefetchLookAhead = 8; static constexpr bool kVerifyFreedBytes = false; // TODO: Fix -// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>; +// template class MemoryToolMallocSpace<RosAllocSpace, allocator::RosAlloc*>; RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end, @@ -61,10 +61,10 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin bool low_memory_mode, bool can_move_objects) { DCHECK(mem_map != nullptr); - bool running_on_valgrind = Runtime::Current()->RunningOnValgrind(); + bool running_on_memory_tool = Runtime::Current()->IsRunningOnMemoryTool(); allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size, - capacity, low_memory_mode, running_on_valgrind); + capacity, low_memory_mode, running_on_memory_tool); if (rosalloc == nullptr) { LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")"; return nullptr; @@ -78,10 +78,10 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin // Everything is set so record in immutable structure and leave uint8_t* begin = mem_map->Begin(); - // TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with + // TODO: Fix RosAllocSpace to support Valgrind/ASan. There are currently some issues with // AllocationSize caused by redzones.
b/12944686 - if (running_on_valgrind) { - return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>( + if (running_on_memory_tool) { + return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>( mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit, can_move_objects, starting_size, low_memory_mode); } else { @@ -134,7 +134,7 @@ RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_siz allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size, size_t maximum_size, bool low_memory_mode, - bool running_on_valgrind) { + bool running_on_memory_tool) { // clear errno to allow PLOG on error errno = 0; // create rosalloc using our backing storage starting at begin and @@ -145,7 +145,7 @@ allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_ low_memory_mode ? art::gc::allocator::RosAlloc::kPageReleaseModeAll : art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd, - running_on_valgrind); + running_on_memory_tool); if (rosalloc != nullptr) { rosalloc->SetFootprintLimit(initial_size); } else { @@ -180,8 +180,8 @@ MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& n void* allocator, uint8_t* begin, uint8_t* end, uint8_t* limit, size_t growth_limit, bool can_move_objects) { - if (Runtime::Current()->RunningOnValgrind()) { - return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>( + if (Runtime::Current()->IsRunningOnMemoryTool()) { + return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>( mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_); } else { @@ -370,7 +370,7 @@ void RosAllocSpace::Clear() { delete rosalloc_; rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_, NonGrowthLimitCapacity(), low_memory_mode_, - Runtime::Current()->RunningOnValgrind()); + Runtime::Current()->IsRunningOnMemoryTool()); SetFootprintLimit(footprint_limit); } diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h index 36268f76f8..bc1473850c 100644 --- a/runtime/gc/space/rosalloc_space.h +++ b/runtime/gc/space/rosalloc_space.h @@ -31,7 +31,7 @@ namespace collector { namespace space { // An alloc space implemented using a runs-of-slots memory allocator. Not final as may be -// overridden by a ValgrindMallocSpace. +// overridden by a MemoryToolMallocSpace. class RosAllocSpace : public MallocSpace { public: // Create a RosAllocSpace with the requested sizes. 
The requested @@ -48,7 +48,7 @@ class RosAllocSpace : public MallocSpace { mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE LOCKS_EXCLUDED(lock_); + OVERRIDE REQUIRES(!lock_); mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE { return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size, @@ -56,7 +56,7 @@ class RosAllocSpace : public MallocSpace { } mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE REQUIRES(Locks::mutator_lock_) { return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } @@ -64,9 +64,9 @@ class RosAllocSpace : public MallocSpace { return AllocationSizeNonvirtual<true>(obj, usable_size); } size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) { @@ -95,7 +95,7 @@ class RosAllocSpace : public MallocSpace { ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes); // TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held. - template<bool kMaybeRunningOnValgrind> + template<bool kMaybeIsRunningOnMemoryTool> size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) NO_THREAD_SAFETY_ANALYSIS; @@ -104,7 +104,7 @@ class RosAllocSpace : public MallocSpace { } size_t Trim() OVERRIDE; - void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_); + void Walk(WalkCallback callback, void* arg) OVERRIDE REQUIRES(!lock_); size_t GetFootprint() OVERRIDE; size_t GetFootprintLimit() OVERRIDE; void SetFootprintLimit(size_t limit) OVERRIDE; @@ -134,7 +134,7 @@ class RosAllocSpace : public MallocSpace { return this; } - void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Verify() REQUIRES(Locks::mutator_lock_) { rosalloc_->Verify(); } @@ -158,19 +158,19 @@ class RosAllocSpace : public MallocSpace { void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size, size_t maximum_size, bool low_memory_mode) OVERRIDE { return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode, - RUNNING_ON_VALGRIND != 0); + RUNNING_ON_MEMORY_TOOL != 0); } static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size, size_t maximum_size, bool low_memory_mode, - bool running_on_valgrind); + bool running_on_memory_tool); void InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg), void* arg, bool do_null_callback_at_end) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); + REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_); void InspectAllRosAllocWithSuspendAll( void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg), void* arg, bool do_null_callback_at_end) - LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); + 
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_); // Underlying rosalloc. allocator::RosAlloc* rosalloc_; diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h index 871ebac8a7..fc558cf8e4 100644 --- a/runtime/gc/space/space.h +++ b/runtime/gc/space/space.h @@ -219,7 +219,7 @@ class AllocSpace { virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated); } @@ -420,10 +420,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { return this; } - bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void BindLiveToMarkBitmap() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_); + void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_); + void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_); // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping. void SwapBitmaps(); diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h index 6e0e0d24c7..4d2db11ac2 100644 --- a/runtime/gc/space/space_test.h +++ b/runtime/gc/space/space_test.h @@ -49,7 +49,7 @@ class SpaceTest : public CommonRuntimeTest { heap->SetSpaceAsDefault(space); } - mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr)); if (byte_array_class_ == nullptr) { @@ -65,7 +65,7 @@ class SpaceTest : public CommonRuntimeTest { mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self))); mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size, @@ -79,7 +79,7 @@ class SpaceTest : public CommonRuntimeTest { mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes, size_t* bytes_allocated, size_t* usable_size, size_t* bytes_tl_bulk_allocated) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self))); mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size, @@ -91,7 +91,7 @@ class SpaceTest : public CommonRuntimeTest { } void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Note the minimum size, which is the size of a zero-length byte array. 
EXPECT_GE(size, SizeOfZeroLengthByteArray()); EXPECT_TRUE(byte_array_class != nullptr); diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h index 934a234345..f2889e2301 100644 --- a/runtime/gc/space/zygote_space.h +++ b/runtime/gc/space/zygote_space.h @@ -33,7 +33,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace { static ZygoteSpace* Create(const std::string& name, MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap, accounting::ContinuousSpaceBitmap* mark_bitmap) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void Dump(std::ostream& os) const; @@ -77,7 +77,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace { } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() { diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h index 5f486192f0..e40fa06319 100644 --- a/runtime/gc/task_processor.h +++ b/runtime/gc/task_processor.h @@ -54,17 +54,17 @@ class TaskProcessor { public: TaskProcessor(); virtual ~TaskProcessor(); - void AddTask(Thread* self, HeapTask* task) LOCKS_EXCLUDED(lock_); - HeapTask* GetTask(Thread* self) LOCKS_EXCLUDED(lock_); - void Start(Thread* self) LOCKS_EXCLUDED(lock_); + void AddTask(Thread* self, HeapTask* task) REQUIRES(!*lock_); + HeapTask* GetTask(Thread* self) REQUIRES(!*lock_); + void Start(Thread* self) REQUIRES(!*lock_); // Stop tells the RunAllTasks to finish up the remaining tasks as soon as // possible then return. - void Stop(Thread* self) LOCKS_EXCLUDED(lock_); - void RunAllTasks(Thread* self) LOCKS_EXCLUDED(lock_); - bool IsRunning() const LOCKS_EXCLUDED(lock_); + void Stop(Thread* self) REQUIRES(!*lock_); + void RunAllTasks(Thread* self) REQUIRES(!*lock_); + bool IsRunning() const REQUIRES(!*lock_); void UpdateTargetRunTime(Thread* self, HeapTask* target_time, uint64_t new_target_time) - LOCKS_EXCLUDED(lock_); - Thread* GetRunningThread() const LOCKS_EXCLUDED(lock_); + REQUIRES(!*lock_); + Thread* GetRunningThread() const REQUIRES(!*lock_); private: class CompareByTargetRunTime { diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc index f06f68d3cf..2c44da231e 100644 --- a/runtime/gc/task_processor_test.cc +++ b/runtime/gc/task_processor_test.cc @@ -102,7 +102,7 @@ TEST_F(TaskProcessorTest, Interrupt) { class TestOrderTask : public HeapTask { public: - explicit TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter) + TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter) : HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) { } virtual void Run(Thread* thread) OVERRIDE { diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h new file mode 100644 index 0000000000..e3cefc443b --- /dev/null +++ b/runtime/gc/weak_root_state.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_GC_WEAK_ROOT_STATE_H_ +#define ART_RUNTIME_GC_WEAK_ROOT_STATE_H_ + +#include <iosfwd> + +namespace art { +namespace gc { + +enum WeakRootState { + // Can read or add weak roots. + kWeakRootStateNormal, + // Need to wait until we can read weak roots. + kWeakRootStateNoReadsOrWrites, + // Need to mark new weak roots to make sure they don't get swept. + // kWeakRootStateMarkNewRoots is currently unused but I was planning on using it to allow adding new + // weak roots during the CMS reference processing phase. + kWeakRootStateMarkNewRoots, +}; + +std::ostream& operator<<(std::ostream& os, const WeakRootState&); + +} // namespace gc +} // namespace art + +#endif // ART_RUNTIME_GC_WEAK_ROOT_STATE_H_ diff --git a/runtime/gc_root.h b/runtime/gc_root.h index bb604f04c5..83471e6b96 100644 --- a/runtime/gc_root.h +++ b/runtime/gc_root.h @@ -91,24 +91,24 @@ class RootVisitor { // Single root version, not overridable. ALWAYS_INLINE void VisitRoot(mirror::Object** roots, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { VisitRoots(&roots, 1, info); } // Single root version, not overridable. ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** roots, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (*roots != nullptr) { VisitRoot(roots, info); } } virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; }; // Only visits roots one at a time, doesn't handle updating roots.
Used when performance isn't @@ -116,7 +116,7 @@ class RootVisitor { class SingleRootVisitor : public RootVisitor { private: void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { VisitRoot(*roots[i], info); } @@ -124,7 +124,7 @@ class SingleRootVisitor : public RootVisitor { void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count, const RootInfo& info) OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { VisitRoot(roots[i]->AsMirrorPtr(), info); } @@ -169,10 +169,10 @@ class GcRoot { public: template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoot(RootVisitor* visitor, const RootInfo& info) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsNull()); mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ }; visitor->VisitRoots(roots, 1u, info); @@ -180,7 +180,7 @@ class GcRoot { } void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsNull()) { VisitRoot(visitor, info); } @@ -195,7 +195,7 @@ class GcRoot { return root_.IsNull(); } - ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE GcRoot(MirrorType* ref = nullptr) SHARED_REQUIRES(Locks::mutator_lock_); private: // Root visitors take pointers to root_ and place the min CompressedReference** arrays. 
We use a @@ -222,7 +222,7 @@ class BufferedRootVisitor { template <class MirrorType> ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!root.IsNull()) { VisitRoot(root); } @@ -230,27 +230,27 @@ class BufferedRootVisitor { template <class MirrorType> ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!root->IsNull()) { VisitRoot(root); } } template <class MirrorType> - void VisitRoot(GcRoot<MirrorType>& root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitRoot(GcRoot<MirrorType>& root) SHARED_REQUIRES(Locks::mutator_lock_) { VisitRoot(root.AddressWithoutBarrier()); } template <class MirrorType> void VisitRoot(mirror::CompressedReference<MirrorType>* root) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(buffer_pos_ >= kBufferSize)) { Flush(); } roots_[buffer_pos_++] = root; } - void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Flush() SHARED_REQUIRES(Locks::mutator_lock_) { visitor_->VisitRoots(roots_, buffer_pos_, root_info_); buffer_pos_ = 0; } diff --git a/runtime/handle.h b/runtime/handle.h index d94d87552a..f939ec5018 100644 --- a/runtime/handle.h +++ b/runtime/handle.h @@ -50,19 +50,19 @@ class Handle : public ValueObject { ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) { } - ALWAYS_INLINE T& operator*() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T& operator*() const SHARED_REQUIRES(Locks::mutator_lock_) { return *Get(); } - ALWAYS_INLINE T* operator->() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T* operator->() const SHARED_REQUIRES(Locks::mutator_lock_) { return Get(); } - ALWAYS_INLINE T* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T* Get() const SHARED_REQUIRES(Locks::mutator_lock_) { return down_cast<T*>(reference_->AsMirrorPtr()); } - ALWAYS_INLINE jobject ToJObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) { // Special case so that we work with NullHandles. 
return nullptr; @@ -71,12 +71,12 @@ class Handle : public ValueObject { } ALWAYS_INLINE StackReference<mirror::Object>* GetReference() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reference_; } ALWAYS_INLINE const StackReference<mirror::Object>* GetReference() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reference_; } @@ -108,22 +108,22 @@ class MutableHandle : public Handle<T> { } ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(handle.reference_) { } ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Handle<T>::operator=(handle); return *this; } ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(reference) { } - ALWAYS_INLINE T* Assign(T* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE T* Assign(T* reference) SHARED_REQUIRES(Locks::mutator_lock_) { StackReference<mirror::Object>* ref = Handle<T>::GetReference(); T* old = down_cast<T*>(ref->AsMirrorPtr()); ref->Assign(reference); @@ -131,12 +131,12 @@ class MutableHandle : public Handle<T> { } template<typename S> - explicit MutableHandle(const MutableHandle<S>& handle) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit MutableHandle(const MutableHandle<S>& handle) SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(handle) { } template<typename S> - explicit MutableHandle(StackReference<S>* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit MutableHandle(StackReference<S>* reference) SHARED_REQUIRES(Locks::mutator_lock_) : Handle<T>(reference) { } diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h index 9a0e52efd3..e617348ce8 100644 --- a/runtime/handle_scope.h +++ b/runtime/handle_scope.h @@ -60,16 +60,16 @@ class PACKED(4) HandleScope { } ALWAYS_INLINE mirror::Object* GetReference(size_t i) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const; @@ -106,7 +106,7 @@ class PACKED(4) HandleScope { } // Semi-hidden constructor. Construction expected by generated code and StackHandleScope. 
- explicit HandleScope(HandleScope* link, uint32_t num_references) : + HandleScope(HandleScope* link, uint32_t num_references) : link_(link), number_of_references_(num_references) { } @@ -150,14 +150,14 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope { ALWAYS_INLINE ~StackHandleScope(); template<class T> - ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_); template<class T> ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); Thread* Self() const { return self_; @@ -165,7 +165,7 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope { private: template<class T> - ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_LT(i, kNumReferences); return MutableHandle<T>(&GetReferences()[i]); } @@ -209,7 +209,7 @@ class StackHandleScopeCollection { } template<class T> - MutableHandle<T> NewHandle(T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) { if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) { StackHandleScope<kNumReferencesPerScope>* scope = new StackHandleScope<kNumReferencesPerScope>(self_); diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index 71a69aa5dd..a9a236fa69 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -240,7 +240,7 @@ class EndianOutput { } void AddIdList(mirror::ObjectArray<mirror::Object>* values) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const int32_t length = values->GetLength(); for (int32_t i = 0; i < length; ++i) { AddObjectId(values->GetWithoutChecks(i)); @@ -429,8 +429,7 @@ class Hprof : public SingleRootVisitor { } void Dump() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::heap_bitmap_lock_, Locks::alloc_tracker_lock_) { + REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !Locks::alloc_tracker_lock_) { { MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); if (Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) { @@ -471,26 +470,26 @@ class Hprof : public SingleRootVisitor { private: static void VisitObjectCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(obj != nullptr); DCHECK(arg != nullptr); reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj); } void DumpHeapObject(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpHeapClass(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpHeapArray(mirror::Array* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ProcessHeap(bool header_first) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + 
REQUIRES(Locks::mutator_lock_) { // Reset current heap and object count. current_heap_ = HPROF_HEAP_DEFAULT; objects_in_segment_ = 0; @@ -504,7 +503,7 @@ class Hprof : public SingleRootVisitor { } } - void ProcessBody() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ProcessBody() REQUIRES(Locks::mutator_lock_) { Runtime* const runtime = Runtime::Current(); // Walk the roots and the heap. output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime); @@ -517,7 +516,7 @@ class Hprof : public SingleRootVisitor { output_->EndRecord(); } - void ProcessHeader(bool string_first) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ProcessHeader(bool string_first) REQUIRES(Locks::mutator_lock_) { // Write the header. WriteFixedHeader(); // Write the string and class tables, and any stack traces, to the header. @@ -536,7 +535,7 @@ class Hprof : public SingleRootVisitor { output_->EndRecord(); } - void WriteClassTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void WriteClassTable() SHARED_REQUIRES(Locks::mutator_lock_) { for (const auto& p : classes_) { mirror::Class* c = p.first; HprofClassSerialNumber sn = p.second; @@ -585,11 +584,11 @@ class Hprof : public SingleRootVisitor { } void VisitRoot(mirror::Object* obj, const RootInfo& root_info) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag, uint32_t thread_serial); - HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) { if (c != nullptr) { auto it = classes_.find(c); if (it == classes_.end()) { @@ -604,7 +603,7 @@ class Hprof : public SingleRootVisitor { } HprofStackTraceSerialNumber LookupStackTraceSerialNumber(const mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto r = allocation_records_.find(obj); if (r == allocation_records_.end()) { return kHprofNullStackTrace; @@ -616,7 +615,7 @@ class Hprof : public SingleRootVisitor { } } - HprofStringId LookupStringId(mirror::String* string) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HprofStringId LookupStringId(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_) { return LookupStringId(string->ToModifiedUtf8()); } @@ -634,7 +633,7 @@ class Hprof : public SingleRootVisitor { return id; } - HprofStringId LookupClassNameId(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HprofStringId LookupClassNameId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) { return LookupStringId(PrettyDescriptor(c)); } @@ -662,7 +661,7 @@ class Hprof : public SingleRootVisitor { __ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF)); } - void WriteStackTraces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void WriteStackTraces() SHARED_REQUIRES(Locks::mutator_lock_) { // Write a dummy stack trace record so the analysis tools don't freak out. output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime); __ AddStackTraceSerialNumber(kHprofNullStackTrace); @@ -725,7 +724,7 @@ class Hprof : public SingleRootVisitor { } bool DumpToDdmsBuffered(size_t overall_size ATTRIBUTE_UNUSED, size_t max_length ATTRIBUTE_UNUSED) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { LOG(FATAL) << "Unimplemented"; UNREACHABLE(); // // Send the data off to DDMS. 
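The bulk of these hunks is the mechanical move to Clang's capability-based thread-safety analysis: EXCLUSIVE_LOCKS_REQUIRED(x) becomes REQUIRES(x), SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), and LOCKS_EXCLUDED(x) becomes the negative requirement REQUIRES(!x), which, unlike the old form, is checked at every call site once -Wthread-safety-negative is enabled. A self-contained sketch of the attributes such macros expand to (the macro spellings below are illustrative, not ART's actual definitions):

// Build with: clang++ -c -Wthread-safety -Wthread-safety-negative sketch.cc
#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE() {}    // bodies elided; the attributes drive the analysis
  void Unlock() RELEASE() {}
};

Mutex gLock;
int gCounter = 0;

// Old style EXCLUSIVE_LOCKS_REQUIRED(gLock): the caller must already hold it.
void BumpLocked() REQUIRES(gLock) { ++gCounter; }

// Old style LOCKS_EXCLUDED(gLock): the negative capability asserts the caller
// does not hold the lock, so acquiring it here cannot self-deadlock.
void Bump() REQUIRES(!gLock) {
  gLock.Lock();
  BumpLocked();  // fine: gLock is held at this point
  gLock.Unlock();
}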
@@ -738,7 +737,7 @@ } bool DumpToFile(size_t overall_size, size_t max_length) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { // Where exactly are we writing to? int out_fd; if (fd_ >= 0) { @@ -787,7 +786,7 @@ } bool DumpToDdmsDirect(size_t overall_size, size_t max_length, uint32_t chunk_type) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::mutator_lock_) { CHECK(direct_to_ddms_); JDWP::JdwpState* state = Dbg::GetJdwpState(); CHECK(state != nullptr); @@ -818,7 +817,7 @@ } void PopulateAllocationTrackingTraces() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::alloc_tracker_lock_) { + REQUIRES(Locks::mutator_lock_, Locks::alloc_tracker_lock_) { gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords(); CHECK(records != nullptr); HprofStackTraceSerialNumber next_trace_sn = kHprofNullStackTrace + 1; @@ -884,6 +883,7 @@ gc::EqAllocRecordTypesPtr<gc::AllocRecordStackTraceElement>> frames_; std::unordered_map<const mirror::Object*, const gc::AllocRecordStackTrace*> allocation_records_; + friend class GcRootVisitor; DISALLOW_COPY_AND_ASSIGN(Hprof); }; @@ -1024,12 +1024,47 @@ void Hprof::MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeap ++objects_in_segment_; } +// Used for visiting the GcRoots held live by ArtFields, ArtMethods, and ClassLoaders. +class GcRootVisitor { + public: + explicit GcRootVisitor(Hprof* hprof) : hprof_(hprof) {} + + void operator()(mirror::Object* obj ATTRIBUTE_UNUSED, + MemberOffset offset ATTRIBUTE_UNUSED, + bool is_static ATTRIBUTE_UNUSED) const {} + + // Note that these don't have read barriers. It's OK, however, since the GC is guaranteed not to be + // running during the hprof dumping process. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + mirror::Object* obj = root->AsMirrorPtr(); + // The two cases are either classes or dex cache arrays. If it is a dex cache array, then use + // VM internal. Otherwise the object is a declaring class of an ArtField or ArtMethod or a + // class from a ClassLoader. + hprof_->VisitRoot(obj, RootInfo(obj->IsClass() ? kRootStickyClass : kRootVMInternal)); + } + + + private: + Hprof* const hprof_; +}; + void Hprof::DumpHeapObject(mirror::Object* obj) { // Ignore classes that are retired.
if (obj->IsClass() && obj->AsClass()->IsRetired()) { return; } + GcRootVisitor visitor(this); + obj->VisitReferences(visitor, VoidFunctor()); + gc::Heap* const heap = Runtime::Current()->GetHeap(); const gc::space::ContinuousSpace* const space = heap->FindContinuousSpaceFromObject(obj, true); HprofHeapId heap_type = HPROF_HEAP_APP; diff --git a/runtime/image.cc b/runtime/image.cc index 44193da4ee..2586959e55 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -24,7 +24,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '7', '\0' }; +const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '9', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, @@ -147,4 +147,28 @@ std::ostream& operator<<(std::ostream& os, const ImageSection& section) { return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End(); } +void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const { + for (size_t pos = 0; pos < Size(); ) { + auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos); + for (size_t i = 0; i < array->Length(); ++i) { + visitor->Visit(&array->At(i, sizeof(ArtField))); + } + pos += array->ComputeSize(array->Length()); + } +} + +void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor, + uint8_t* base, + size_t pointer_size) const { + const size_t method_alignment = ArtMethod::Alignment(pointer_size); + const size_t method_size = ArtMethod::Size(pointer_size); + for (size_t pos = 0; pos < Size(); ) { + auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos); + for (size_t i = 0; i < array->Length(); ++i) { + visitor->Visit(&array->At(i, method_size, method_alignment)); + } + pos += array->ComputeSize(array->Length(), method_size, method_alignment); + } +} + } // namespace art diff --git a/runtime/image.h b/runtime/image.h index d856f218af..1a0d8fd92f 100644 --- a/runtime/image.h +++ b/runtime/image.h @@ -24,6 +24,23 @@ namespace art { +class ArtField; +class ArtMethod; + +class ArtMethodVisitor { + public: + virtual ~ArtMethodVisitor() {} + + virtual void Visit(ArtMethod* method) = 0; +}; + +class ArtFieldVisitor { + public: + virtual ~ArtFieldVisitor() {} + + virtual void Visit(ArtField* field) = 0; +}; + class PACKED(4) ImageSection { public: ImageSection() : offset_(0), size_(0) { } @@ -47,6 +64,12 @@ class PACKED(4) ImageSection { return offset - offset_ < size_; } + // Visit ArtMethods in the section starting at base. + void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const; + + // Visit ArtFields in the section starting at base.
+ void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const; + private: uint32_t offset_; uint32_t size_; @@ -156,9 +179,9 @@ class PACKED(4) ImageHeader { } mirror::Object* GetImageRoot(ImageRoot image_root) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::ObjectArray<mirror::Object>* GetImageRoots() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RelocateImage(off_t delta); diff --git a/runtime/indenter.h b/runtime/indenter.h index 38b398de4a..78b18f63ab 100644 --- a/runtime/indenter.h +++ b/runtime/indenter.h @@ -19,10 +19,13 @@ #include "base/logging.h" #include "base/macros.h" +#include <ostream> #include <streambuf> -const char kIndentChar =' '; -const size_t kIndentBy1Count = 2; +namespace art { + +constexpr char kIndentChar =' '; +constexpr size_t kIndentBy1Count = 2; class Indenter : public std::streambuf { public: @@ -99,9 +102,60 @@ class Indenter : public std::streambuf { const char text_[8]; // Number of times text is output. - const size_t count_; + size_t count_; + + friend class VariableIndentationOutputStream; DISALLOW_COPY_AND_ASSIGN(Indenter); }; +class VariableIndentationOutputStream { + public: + explicit VariableIndentationOutputStream(std::ostream* os, char text = kIndentChar) + : indenter_(os->rdbuf(), text, 0u), + indented_os_(&indenter_) { + } + + std::ostream& Stream() { + return indented_os_; + } + + void IncreaseIndentation(size_t adjustment) { + indenter_.count_ += adjustment; + } + + void DecreaseIndentation(size_t adjustment) { + DCHECK_GE(indenter_.count_, adjustment); + indenter_.count_ -= adjustment; + } + + private: + Indenter indenter_; + std::ostream indented_os_; + + DISALLOW_COPY_AND_ASSIGN(VariableIndentationOutputStream); +}; + +class ScopedIndentation { + public: + explicit ScopedIndentation(VariableIndentationOutputStream* vios, + size_t adjustment = kIndentBy1Count) + : vios_(vios), + adjustment_(adjustment) { + vios_->IncreaseIndentation(adjustment_); + } + + ~ScopedIndentation() { + vios_->DecreaseIndentation(adjustment_); + } + + private: + VariableIndentationOutputStream* const vios_; + const size_t adjustment_; + + DISALLOW_COPY_AND_ASSIGN(ScopedIndentation); +}; + +} // namespace art + #endif // ART_RUNTIME_INDENTER_H_ diff --git a/runtime/indenter_test.cc b/runtime/indenter_test.cc index 1919e3d18e..1a26d7b68e 100644 --- a/runtime/indenter_test.cc +++ b/runtime/indenter_test.cc @@ -17,6 +17,8 @@ #include "gtest/gtest.h" #include "indenter.h" +namespace art { + TEST(IndenterTest, MultiLineTest) { std::ostringstream output; Indenter indent_filter(output.rdbuf(), '\t', 2); @@ -33,3 +35,5 @@ TEST(IndenterTest, MultiLineTest) { input << "\n"; EXPECT_EQ(output.str(), "\t\thello\n\t\thello again\n"); } + +} // namespace art diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index 20e42221bb..c9ba6cfada 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -28,14 +28,16 @@ namespace art { +static constexpr bool kDumpStackOnNonLocalReference = false; + template<typename T> class MutatorLockedDumpable { public: explicit MutatorLockedDumpable(T& value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : value_(value) { + SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) { } - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) { value_.Dump(os); } @@ -47,7 
+49,7 @@ class MutatorLockedDumpable { template<typename T> std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs) -// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) however annotalysis +// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis // currently fails for this. NO_THREAD_SAFETY_ANALYSIS { rhs.Dump(os); @@ -183,7 +185,9 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) { if (env->check_jni) { ScopedObjectAccess soa(self); LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread"; - self->Dump(LOG(WARNING)); + if (kDumpStackOnNonLocalReference) { + self->Dump(LOG(WARNING)); + } } return true; } diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h index dea5dfdf90..c398555ca4 100644 --- a/runtime/indirect_reference_table.h +++ b/runtime/indirect_reference_table.h @@ -199,7 +199,7 @@ union IRTSegmentState { static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3; class IrtEntry { public: - void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { ++serial_; if (serial_ == kIRTPrevCount) { serial_ = 0; @@ -227,12 +227,11 @@ static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t), class IrtIterator { public: - explicit IrtIterator(IrtEntry* table, size_t i, size_t capacity) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + IrtIterator(IrtEntry* table, size_t i, size_t capacity) SHARED_REQUIRES(Locks::mutator_lock_) : table_(table), i_(i), capacity_(capacity) { } - IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + IrtIterator& operator++() SHARED_REQUIRES(Locks::mutator_lock_) { ++i_; return *this; } @@ -278,7 +277,7 @@ class IndirectReferenceTable { * failed during expansion). */ IndirectRef Add(uint32_t cookie, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Given an IndirectRef in the table, return the Object it refers to. @@ -286,14 +285,14 @@ class IndirectReferenceTable { * Returns kInvalidIndirectRefObject if iref is invalid. */ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + mirror::Object* Get(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE; // Synchronized get which reads a reference, acquiring a lock if necessary. template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/, IndirectRef iref) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Get<kReadBarrierOption>(iref); } @@ -302,7 +301,7 @@ class IndirectReferenceTable { * * Updates an existing indirect reference to point to a new object. */ - void Update(IndirectRef iref, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Update(IndirectRef iref, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); /* * Remove an existing entry. @@ -317,7 +316,7 @@ class IndirectReferenceTable { void AssertEmpty(); - void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_); /* * Return the #of entries in the entire table. 
This includes holes, and @@ -337,7 +336,7 @@ class IndirectReferenceTable { } void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetSegmentState() const { return segment_state_.all; @@ -352,7 +351,7 @@ class IndirectReferenceTable { } // Release pages past the end of the table that may have previously held references. - void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Trim() SHARED_REQUIRES(Locks::mutator_lock_); private: // Extract the table index from an indirect reference. diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc index c20002bdf9..f376ec0c6d 100644 --- a/runtime/indirect_reference_table_test.cc +++ b/runtime/indirect_reference_table_test.cc @@ -26,7 +26,7 @@ namespace art { class IndirectReferenceTableTest : public CommonRuntimeTest {}; static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostringstream oss; irt->Dump(oss); if (num_objects == 0) { diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index d37ddcb88b..e28d578121 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -49,12 +49,20 @@ constexpr bool kVerboseInstrumentation = false; static constexpr StackVisitor::StackWalkKind kInstrumentationStackWalk = StackVisitor::StackWalkKind::kSkipInlinedFrames; -static bool InstallStubsClassVisitor(mirror::Class* klass, void* arg) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { - Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg); - instrumentation->InstallStubsForClass(klass); - return true; // we visit all classes. -} +class InstallStubsClassVisitor : public ClassVisitor { + public: + explicit InstallStubsClassVisitor(Instrumentation* instrumentation) + : instrumentation_(instrumentation) {} + + bool Visit(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) { + instrumentation_->InstallStubsForClass(klass); + return true; // we visit all classes. 
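The hunk above trades the old function-pointer-plus-void* class visitation callback for a small virtual-dispatch visitor object; the call sites later in this file become VisitClasses(&visitor). As a minimal, self-contained sketch of the pattern with stand-in types (ART's real mirror::Class and ClassLinker declarations are not reproduced here):

    #include <cstddef>

    class Class;  // stand-in for mirror::Class

    // Visitor interface: return true from Visit() to continue, false to stop.
    class ClassVisitor {
     public:
      virtual ~ClassVisitor() {}
      virtual bool Visit(Class* klass) = 0;
    };

    // State lives in typed members instead of being smuggled through a void*
    // argument, as the removed static InstallStubsClassVisitor function did.
    class CountingClassVisitor : public ClassVisitor {
     public:
      bool Visit(Class* /*klass*/) override {
        ++count_;
        return true;  // keep visiting, like InstallStubsClassVisitor
      }
      std::size_t count_ = 0;
    };

With the typed member, the compiler now checks what used to be a reinterpret_cast convention on the void* argument.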
+ } + + private: + Instrumentation* const instrumentation_; +}; + Instrumentation::Instrumentation() : instrumentation_stubs_installed_(false), entry_exit_stubs_installed_(false), @@ -87,7 +95,7 @@ void Instrumentation::InstallStubsForClass(mirror::Class* klass) { } static void UpdateEntrypoints(ArtMethod* method, const void* quick_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* const runtime = Runtime::Current(); jit::Jit* jit = runtime->GetJit(); if (jit != nullptr) { @@ -99,19 +107,6 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code) } } method->SetEntryPointFromQuickCompiledCode(quick_code); - if (!method->IsResolutionMethod()) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - if (class_linker->IsQuickToInterpreterBridge(quick_code) || - (class_linker->IsQuickResolutionStub(quick_code) && - Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly() && - !method->IsNative() && !method->IsProxyMethod())) { - DCHECK(!method->IsNative()) << PrettyMethod(method); - DCHECK(!method->IsProxyMethod()) << PrettyMethod(method); - method->SetEntryPointFromInterpreter(art::artInterpreterToInterpreterBridge); - } else { - method->SetEntryPointFromInterpreter(art::artInterpreterToCompiledCodeBridge); - } - } } void Instrumentation::InstallStubsForMethod(ArtMethod* method) { @@ -164,7 +159,7 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) { // Since we may already have done this previously, we need to push new instrumentation frame before // existing instrumentation frames. static void InstrumentationInstallStack(Thread* thread, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { struct InstallStackVisitor FINAL : public StackVisitor { InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc) : StackVisitor(thread_in, context, kInstrumentationStackWalk), @@ -174,7 +169,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) last_return_pc_(0) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m == nullptr) { if (kVerboseInstrumentation) { @@ -304,7 +299,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) // Removes the instrumentation exit pc as the return PC for every quick frame. static void InstrumentationRestoreStack(Thread* thread, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { struct RestoreStackVisitor FINAL : public StackVisitor { RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc, Instrumentation* instrumentation) @@ -315,7 +310,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg) instrumentation_stack_(thread_in->GetInstrumentationStack()), frames_removed_(0) {} - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (instrumentation_stack_->size() == 0) { return false; // Stop. 
} @@ -576,14 +571,16 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir entry_exit_stubs_installed_ = true; interpreter_stubs_installed_ = false; } - runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this); + InstallStubsClassVisitor visitor(this); + runtime->GetClassLinker()->VisitClasses(&visitor); instrumentation_stubs_installed_ = true; MutexLock mu(self, *Locks::thread_list_lock_); runtime->GetThreadList()->ForEach(InstrumentationInstallStack, this); } else { interpreter_stubs_installed_ = false; entry_exit_stubs_installed_ = false; - runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this); + InstallStubsClassVisitor visitor(this); + runtime->GetClassLinker()->VisitClasses(&visitor); // Restore stack only if there is no method currently deoptimized. bool empty; { @@ -944,7 +941,7 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread, static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame, int delta) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta; if (frame_id != instrumentation_frame.frame_id_) { LOG(ERROR) << "Expected frame_id=" << frame_id << " but found " diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h index db8e9c2508..93ff567dc3 100644 --- a/runtime/instrumentation.h +++ b/runtime/instrumentation.h @@ -63,24 +63,24 @@ struct InstrumentationListener { // Call-back for when a method is entered. virtual void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, - uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when a method is exited. virtual void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when a method is popped due to an exception throw. A method will either cause a // MethodExited call-back or a MethodUnwind call-back when its activation is removed. virtual void MethodUnwind(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when the dex pc moves in a method. virtual void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when we read from a field. virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, @@ -92,11 +92,11 @@ struct InstrumentationListener { // Call-back when an exception is caught. virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Call-back for when we get a backward branch. virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; }; // Instrumentation is a catch-all for when extra information is required from the runtime. 
The @@ -129,90 +129,83 @@ class Instrumentation { // for saying you should have suspended all threads (installing stubs while threads are running // will break). void AddListener(InstrumentationListener* listener, uint32_t events) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_); // Removes a listener possibly removing instrumentation stubs. void RemoveListener(InstrumentationListener* listener, uint32_t events) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_); // Deoptimization. void EnableDeoptimization() - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(deoptimized_methods_lock_); + REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_); void DisableDeoptimization(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(deoptimized_methods_lock_); + REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_); bool AreAllMethodsDeoptimized() const { return interpreter_stubs_installed_; } - bool ShouldNotifyMethodEnterExitEvents() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ShouldNotifyMethodEnterExitEvents() const SHARED_REQUIRES(Locks::mutator_lock_); // Executes everything with interpreter. void DeoptimizeEverything(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); // Executes everything with compiled code (or interpreter if there is no code). void UndeoptimizeEverything(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static // method (except a class initializer) set to the resolution trampoline will be deoptimized only // once its declaring class is initialized. void Deoptimize(ArtMethod* method) - LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_); // Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method // (except a class initializer) set to the resolution trampoline will be updated only once its // declaring class is initialized. void Undeoptimize(ArtMethod* method) - LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !deoptimized_methods_lock_); // Indicates whether the method has been deoptimized so it is executed with the interpreter. bool IsDeoptimized(ArtMethod* method) - LOCKS_EXCLUDED(deoptimized_methods_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!deoptimized_methods_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
void EnableMethodTracing(const char* key, bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter. void DisableMethodTracing(const char* key) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_); + REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_, + !deoptimized_methods_lock_); InterpreterHandlerTable GetInterpreterHandlerTable() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return interpreter_handler_table_; } - void InstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_); - void UninstrumentQuickAllocEntryPoints() LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_); + void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_); + void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_); void InstrumentQuickAllocEntryPointsLocked() - EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_); + REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_, + !Locks::runtime_shutdown_lock_); void UninstrumentQuickAllocEntryPointsLocked() - EXCLUSIVE_LOCKS_REQUIRED(Locks::instrument_entrypoints_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::runtime_shutdown_lock_); - void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); + REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_, + !Locks::runtime_shutdown_lock_); + void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_); // Update the code of a method respecting any installed stubs. void UpdateMethodsCode(ArtMethod* method, const void* quick_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); // Get the quick code for the given method. More efficient than asking the class linker as it // will short-cut to GetCode if instrumentation and static method resolution stubs aren't // installed. 
const void* GetQuickCodeFor(ArtMethod* method, size_t pointer_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ForceInterpretOnly() { interpret_only_ = true; @@ -232,39 +225,39 @@ class Instrumentation { return instrumentation_stubs_installed_; } - bool HasMethodEntryListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasMethodEntryListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_method_entry_listeners_; } - bool HasMethodExitListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasMethodExitListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_method_exit_listeners_; } - bool HasMethodUnwindListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasMethodUnwindListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_method_unwind_listeners_; } - bool HasDexPcListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasDexPcListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_dex_pc_listeners_; } - bool HasFieldReadListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasFieldReadListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_field_read_listeners_; } - bool HasFieldWriteListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasFieldWriteListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_field_write_listeners_; } - bool HasExceptionCaughtListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasExceptionCaughtListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_exception_caught_listeners_; } - bool HasBackwardBranchListeners() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasBackwardBranchListeners() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_backward_branch_listeners_; } - bool IsActive() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) { return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ || have_field_read_listeners_ || have_field_write_listeners_ || have_exception_caught_listeners_ || have_method_unwind_listeners_; @@ -274,7 +267,7 @@ class Instrumentation { // listeners into executing code and get method enter events for methods already on the stack. void MethodEnterEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasMethodEntryListeners())) { MethodEnterEventImpl(thread, this_object, method, dex_pc); } @@ -284,7 +277,7 @@ class Instrumentation { void MethodExitEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasMethodExitListeners())) { MethodExitEventImpl(thread, this_object, method, dex_pc, return_value); } @@ -293,12 +286,12 @@ class Instrumentation { // Inform listeners that a method has been exited due to an exception. void MethodUnwindEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Inform listeners that the dex pc has moved (only supported by the interpreter). 
void DexPcMovedEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasDexPcListeners())) { DexPcMovedEventImpl(thread, this_object, method, dex_pc); } @@ -306,7 +299,7 @@ class Instrumentation { // Inform listeners that a backward branch has been taken (only supported by the interpreter). void BackwardBranch(Thread* thread, ArtMethod* method, int32_t offset) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasBackwardBranchListeners())) { BackwardBranchImpl(thread, method, offset); } @@ -316,7 +309,7 @@ class Instrumentation { void FieldReadEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasFieldReadListeners())) { FieldReadEventImpl(thread, this_object, method, dex_pc, field); } @@ -326,7 +319,7 @@ class Instrumentation { void FieldWriteEvent(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (UNLIKELY(HasFieldWriteListeners())) { FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value); } @@ -334,30 +327,31 @@ class Instrumentation { // Inform listeners that an exception was caught. void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Called when an instrumented method is entered. The intended link register (lr) is saved so // that returning causes a branch to the method exit stub. Generates method enter events. void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object, ArtMethod* method, uintptr_t lr, bool interpreter_entry) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Called when an instrumented method is exited. Removes the pushed instrumentation frame // returning the intended link register. Generates method exit events. TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc, uint64_t gpr_result, uint64_t fpr_result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); // Pops an instrumentation frame from the current thread and generate an unwind event. void PopMethodForUnwind(Thread* self, bool is_deoptimization) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Call back for configure stubs. - void InstallStubsForClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void InstallStubsForClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!deoptimized_methods_lock_); void InstallStubsForMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); private: InstrumentationLevel GetCurrentInstrumentationLevel() const; @@ -368,11 +362,10 @@ class Instrumentation { // instrumentation level it needs. Therefore the current instrumentation level // becomes the highest instrumentation level required by a client. 
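ConfigureStubs, declared next, is the method that comment describes. A hedged sketch of the "highest requested level wins" bookkeeping, with hypothetical names and without the actual stub installation the real method performs:

    #include <algorithm>
    #include <map>
    #include <string>

    enum class Level { kNone = 0, kStubs = 1, kInterpreter = 2 };

    class LevelTracker {
     public:
      // Record what `key` needs; return the level the runtime must now provide.
      Level Configure(const std::string& key, Level desired) {
        requested_[key] = desired;
        Level max_level = Level::kNone;
        for (const auto& entry : requested_) {
          max_level = std::max(max_level, entry.second);  // highest client wins
        }
        return max_level;
      }

     private:
      std::map<std::string, Level> requested_;  // one entry per client key
    };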
void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::classlinker_classes_lock_, - deoptimized_methods_lock_); + REQUIRES(Locks::mutator_lock_, !deoptimized_methods_lock_, !Locks::thread_list_lock_, + !Locks::classlinker_classes_lock_); - void UpdateInterpreterHandlerTable() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) { + void UpdateInterpreterHandlerTable() REQUIRES(Locks::mutator_lock_) { interpreter_handler_table_ = IsActive() ? kAlternativeHandlerTable : kMainHandlerTable; } @@ -382,38 +375,36 @@ class Instrumentation { void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void MethodExitEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void BackwardBranchImpl(Thread* thread, ArtMethod* method, int32_t offset) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FieldReadEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Read barrier-aware utility functions for accessing deoptimized_methods_ bool AddDeoptimizedMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_); bool IsDeoptimizedMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); bool RemoveDeoptimizedMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_); ArtMethod* BeginDeoptimizedMethod() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); bool IsDeoptimizedMethodsEmpty() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, deoptimized_methods_lock_); + SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code? 
bool instrumentation_stubs_installed_; @@ -508,7 +499,7 @@ struct InstrumentationStackFrame { interpreter_entry_(interpreter_entry) { } - std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* this_object_; ArtMethod* method_; diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc index 85bb8c4197..b49f7e1bfa 100644 --- a/runtime/instrumentation_test.cc +++ b/runtime/instrumentation_test.cc @@ -44,7 +44,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_method_enter_event = true; } @@ -53,7 +53,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, const JValue& return_value ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_method_exit_event = true; } @@ -61,7 +61,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_method_unwind_event = true; } @@ -69,7 +69,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio mirror::Object* this_object ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, uint32_t new_dex_pc ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_dex_pc_moved_event = true; } @@ -78,7 +78,7 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio ArtMethod* method ATTRIBUTE_UNUSED, uint32_t dex_pc ATTRIBUTE_UNUSED, ArtField* field ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_field_read_event = true; } @@ -88,20 +88,20 @@ class TestInstrumentationListener FINAL : public instrumentation::Instrumentatio uint32_t dex_pc ATTRIBUTE_UNUSED, ArtField* field ATTRIBUTE_UNUSED, const JValue& field_value ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_field_written_event = true; } void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_exception_caught_event = true; } void BackwardBranch(Thread* thread ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, int32_t dex_pc_offset ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { received_backward_branch_event = true; } @@ -198,7 +198,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = 
runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -213,7 +213,7 @@ class InstrumentationTest : public CommonRuntimeTest { void UndeoptimizeMethod(Thread* self, ArtMethod* method, const char* key, bool disable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -227,7 +227,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -241,7 +241,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -255,7 +255,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -266,7 +266,7 @@ class InstrumentationTest : public CommonRuntimeTest { } void DisableMethodTracing(Thread* self, const char* key) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime* runtime = Runtime::Current(); instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation(); self->TransitionFromRunnableToSuspended(kSuspended); @@ -278,7 +278,7 @@ class InstrumentationTest : public CommonRuntimeTest { private: static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (event_type) { case instrumentation::Instrumentation::kMethodEntered: return instr->HasMethodEntryListeners(); @@ -305,7 +305,7 @@ class InstrumentationTest : public CommonRuntimeTest { static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type, Thread* self, ArtMethod* method, mirror::Object* obj, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { switch (event_type) { case instrumentation::Instrumentation::kMethodEntered: instr->MethodEnterEvent(self, obj, method, dex_pc); diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc index 2a962784ca..2be570ac85 100644 --- a/runtime/intern_table.cc +++ b/runtime/intern_table.cc @@ -19,7 +19,9 @@ #include <memory> #include "gc_root-inl.h" +#include "gc/collector/garbage_collector.h" #include "gc/space/image_space.h" +#include "gc/weak_root_state.h" #include "mirror/dex_cache.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" @@ -31,8 +33,8 @@ namespace art { InternTable::InternTable() : 
image_added_to_intern_table_(false), log_new_roots_(false), - allow_new_interns_(true), - new_intern_condition_("New intern condition", *Locks::intern_table_lock_) { + weak_intern_condition_("New intern condition", *Locks::intern_table_lock_), + weak_root_state_(gc::kWeakRootStateNormal) { } size_t InternTable::Size() const { @@ -182,8 +184,7 @@ void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) { } } -mirror::String* InternTable::LookupStringFromImage(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +mirror::String* InternTable::LookupStringFromImage(mirror::String* s) { if (image_added_to_intern_table_) { return nullptr; } @@ -211,39 +212,71 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s) return nullptr; } -void InternTable::AllowNewInterns() { - Thread* self = Thread::Current(); - MutexLock mu(self, *Locks::intern_table_lock_); - allow_new_interns_ = true; - new_intern_condition_.Broadcast(self); +void InternTable::EnsureNewWeakInternsDisallowed() { + // Lock and unlock once to ensure that no threads are still in the + // middle of adding new interns. + MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); + CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites); } -void InternTable::DisallowNewInterns() { +void InternTable::BroadcastForNewInterns() { + CHECK(kUseReadBarrier); Thread* self = Thread::Current(); MutexLock mu(self, *Locks::intern_table_lock_); - allow_new_interns_ = false; + weak_intern_condition_.Broadcast(self); } -void InternTable::EnsureNewInternsDisallowed() { - // Lock and unlock once to ensure that no threads are still in the - // middle of adding new interns. - MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); - CHECK(!allow_new_interns_); +void InternTable::WaitUntilAccessible(Thread* self) { + Locks::intern_table_lock_->ExclusiveUnlock(self); + self->TransitionFromRunnableToSuspended(kWaitingWeakGcRootRead); + Locks::intern_table_lock_->ExclusiveLock(self); + while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) { + weak_intern_condition_.Wait(self); + } + Locks::intern_table_lock_->ExclusiveUnlock(self); + self->TransitionFromSuspendedToRunnable(); + Locks::intern_table_lock_->ExclusiveLock(self); } -mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) { +mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) { if (s == nullptr) { return nullptr; } - Thread* self = Thread::Current(); + Thread* const self = Thread::Current(); MutexLock mu(self, *Locks::intern_table_lock_); - while (UNLIKELY(!allow_new_interns_)) { - new_intern_condition_.WaitHoldingLocks(self); + if (kDebugLocking && !holding_locks) { + Locks::mutator_lock_->AssertSharedHeld(self); + CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock"; + } + while (true) { + if (holding_locks) { + if (!kUseReadBarrier) { + CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal); + } else { + CHECK(self->GetWeakRefAccessEnabled()); + } + } + // Check the strong table for a match. + mirror::String* strong = LookupStrong(s); + if (strong != nullptr) { + return strong; + } + if ((!kUseReadBarrier && weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) || + (kUseReadBarrier && self->GetWeakRefAccessEnabled())) { + break; + } + // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only + // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is + // cleared. 
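The retry loop above, together with WaitUntilAccessible and ChangeWeakRootStateLocked further down, forms a small gate: the GC closes weak-root access during the pause and reopens it (with a broadcast) after SweepSystemWeaks. A hedged sketch of the same gate using standard primitives instead of ART's Mutex/ConditionVariable (names here are illustrative; the real code also shuttles the thread out of the runnable state around the wait):

    #include <condition_variable>
    #include <mutex>

    enum class WeakRootState { kNormal, kNoReadsOrWrites };

    struct WeakRootGate {
      std::mutex lock;
      std::condition_variable cond;
      WeakRootState state = WeakRootState::kNormal;

      // What an interning thread does: block until weak roots are readable.
      void WaitUntilAccessible() {
        std::unique_lock<std::mutex> mu(lock);
        cond.wait(mu, [this] { return state != WeakRootState::kNoReadsOrWrites; });
      }

      // What the GC does: close the gate for the pause, reopen it afterwards.
      void SetState(WeakRootState new_state) {
        {
          std::lock_guard<std::mutex> mu(lock);
          state = new_state;
        }
        if (new_state != WeakRootState::kNoReadsOrWrites) {
          cond.notify_all();  // mirrors weak_intern_condition_.Broadcast()
        }
      }
    };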
+ CHECK(!holding_locks); + StackHandleScope<1> hs(self); + auto h = hs.NewHandleWrapper(&s); + WaitUntilAccessible(self); } - // Check the strong table for a match. - mirror::String* strong = LookupStrong(s); - if (strong != nullptr) { - return strong; + if (!kUseReadBarrier) { + CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal); + } else { + CHECK(self->GetWeakRefAccessEnabled()); } // There is no match in the strong table, check the weak table. mirror::String* weak = LookupWeak(s); @@ -275,12 +308,17 @@ mirror::String* InternTable::InternStrong(const char* utf8_data) { return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data)); } +mirror::String* InternTable::InternStrongImageString(mirror::String* s) { + // May be holding the heap bitmap lock. + return Insert(s, true, true); +} + mirror::String* InternTable::InternStrong(mirror::String* s) { - return Insert(s, true); + return Insert(s, true, false); } mirror::String* InternTable::InternWeak(mirror::String* s) { - return Insert(s, false); + return Insert(s, false, false); } bool InternTable::ContainsWeak(mirror::String* s) { @@ -288,9 +326,9 @@ bool InternTable::ContainsWeak(mirror::String* s) { return LookupWeak(s) == s; } -void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) { +void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) { MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); - weak_interns_.SweepWeaks(callback, arg); + weak_interns_.SweepWeaks(visitor); } void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) { @@ -393,16 +431,16 @@ void InternTable::Table::VisitRoots(RootVisitor* visitor) { } } -void InternTable::Table::SweepWeaks(IsMarkedCallback* callback, void* arg) { - SweepWeaks(&pre_zygote_table_, callback, arg); - SweepWeaks(&post_zygote_table_, callback, arg); +void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) { + SweepWeaks(&pre_zygote_table_, visitor); + SweepWeaks(&post_zygote_table_, visitor); } -void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) { +void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) { for (auto it = set->begin(), end = set->end(); it != end;) { // This does not need a read barrier because this is called by GC. 
mirror::Object* object = it->Read<kWithoutReadBarrier>(); - mirror::Object* new_object = callback(object, arg); + mirror::Object* new_object = visitor->IsMarked(object); if (new_object == nullptr) { it = set->Erase(it); } else { @@ -416,4 +454,16 @@ size_t InternTable::Table::Size() const { return pre_zygote_table_.Size() + post_zygote_table_.Size(); } +void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) { + MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); + ChangeWeakRootStateLocked(new_state); +} + +void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) { + weak_root_state_ = new_state; + if (new_state != gc::kWeakRootStateNoReadsOrWrites) { + weak_intern_condition_.Broadcast(Thread::Current()); + } +} + } // namespace art diff --git a/runtime/intern_table.h b/runtime/intern_table.h index 97ce73c52e..ae9f7a7acd 100644 --- a/runtime/intern_table.h +++ b/runtime/intern_table.h @@ -19,10 +19,12 @@ #include <unordered_set> +#include "atomic.h" #include "base/allocator.h" #include "base/hash_set.h" #include "base/mutex.h" #include "gc_root.h" +#include "gc/weak_root_state.h" #include "object_callbacks.h" namespace art { @@ -54,62 +56,78 @@ class InternTable { public: InternTable(); - // Interns a potentially new string in the 'strong' table. (See above.) + // Interns a potentially new string in the 'strong' table. May cause thread suspension. mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - // Interns a potentially new string in the 'strong' table. (See above.) - mirror::String* InternStrong(const char* utf8_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Only used by image writer. Special version that may not cause thread suspension since the GC + // cannot be running while we are doing image writing. May be called while holding a + // lock since there will not be thread suspension. + mirror::String* InternStrongImageString(mirror::String* s) + SHARED_REQUIRES(Locks::mutator_lock_); - // Interns a potentially new string in the 'strong' table. (See above.) - mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Interns a potentially new string in the 'strong' table. May cause thread suspension. + mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - // Interns a potentially new string in the 'weak' table. (See above.) - mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Interns a potentially new string in the 'strong' table. May cause thread suspension. + mirror::String* InternStrong(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Interns a potentially new string in the 'weak' table. May cause thread suspension.
+ mirror::String* InternWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SweepInternTableWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::intern_table_lock_); + + bool ContainsWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::intern_table_lock_); // Total number of interned strings. - size_t Size() const LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t Size() const REQUIRES(!Locks::intern_table_lock_); // Total number of strongly live interned strings. - size_t StrongSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t StrongSize() const REQUIRES(!Locks::intern_table_lock_); // Total number of weakly live interned strings. - size_t WeakSize() const LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_); void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); - void DumpForSigQuit(std::ostream& os) const; + void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_); - void DisallowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); + void AllowNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); + void EnsureNewInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_); + void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); + void EnsureNewWeakInternsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_); // Adds all of the resolved image strings from the image space into the intern table. The // advantage of doing this is preventing expensive DexFile::FindStringId calls. void AddImageStringsToTable(gc::space::ImageSpace* image_space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); + // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages. void SwapPostZygoteWithPreZygote() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); // Add an intern table which was serialized to the image. void AddImageInternTable(gc::space::ImageSpace* image_space) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); // Read the intern table from memory. The elements aren't copied, the intern hash set data will // point to somewhere within ptr. Only reads the strong interns. - size_t ReadFromMemory(const uint8_t* ptr) LOCKS_EXCLUDED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Write the post zygote intern table to a pointer. Only writes the strong interns since it is // expected that there are no weak interns since this is called from the image writer.
- size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::intern_table_lock_); + size_t WriteToMemory(uint8_t* ptr) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::intern_table_lock_); + + // Change the weak root state. May broadcast to waiters. + void ChangeWeakRootState(gc::WeakRootState new_state) + REQUIRES(!Locks::intern_table_lock_); private: class StringHashEquals { @@ -132,39 +150,33 @@ class InternTable { // weak interns and strong interns. class Table { public: - mirror::String* Find(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void Insert(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::intern_table_lock_); + void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::intern_table_lock_); void Remove(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void SweepWeaks(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); + void SweepWeaks(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); + void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_); + size_t Size() const REQUIRES(Locks::intern_table_lock_); // Read pre zygote table is called from ReadFromMemory which happens during runtime creation // when we load the image intern table. Returns how many bytes were read. size_t ReadIntoPreZygoteTable(const uint8_t* ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // The image writer calls WritePostZygoteTable through WriteToMemory, it writes the interns in // the post zygote table. Returns how many bytes were written. size_t WriteFromPostZygoteTable(uint8_t* ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); private: typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals, TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet; - void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages // caused by modifying the zygote intern table hash table. 
The pre zygote table is the @@ -174,56 +186,51 @@ class InternTable { UnorderedSet post_zygote_table_; }; - // Insert if non null, otherwise return null. - mirror::String* Insert(mirror::String* s, bool is_strong) - LOCKS_EXCLUDED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // Insert if non-null, otherwise return null. Must be called holding the mutator lock. + // If holding_locks is true, then we may also hold other locks, and we require that the GC is + // not running, since it is not safe to wait while holding locks. + mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks) + REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); mirror::String* LookupStrong(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* LookupWeak(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertStrong(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertWeak(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveStrong(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveWeak(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); // Transaction rollback access.
mirror::String* LookupStringFromImage(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertStrongFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); mirror::String* InsertWeakFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveStrongFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void RemoveWeakFromTransaction(mirror::String* s) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - friend class Transaction; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); size_t ReadFromMemoryLocked(const uint8_t* ptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + + // Change the weak root state. May broadcast to waiters. + void ChangeWeakRootStateLocked(gc::WeakRootState new_state) + REQUIRES(Locks::intern_table_lock_); + + // Wait until we can read weak roots. + void WaitUntilAccessible(Thread* self) + REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_); bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_); - bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_); - ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_); + ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_); // Since this contains (strong) roots, they need a read barrier to // enable concurrent intern table (strong) root scan. Do not // directly access the strings in it. Use functions that contain @@ -235,6 +242,11 @@ class InternTable { // not directly access the strings in it. Use functions that contain // read barriers. Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_); + // Weak root state, used for concurrent system weak processing and more. 
+ gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_); + + friend class Transaction; + DISALLOW_COPY_AND_ASSIGN(InternTable); }; } // namespace art diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc index 194d0af6fe..b60b32d06b 100644 --- a/runtime/intern_table_test.cc +++ b/runtime/intern_table_test.cc @@ -60,9 +60,9 @@ TEST_F(InternTableTest, Size) { EXPECT_EQ(2U, t.Size()); } -class TestPredicate { +class TestPredicate : public IsMarkedVisitor { public: - bool IsMarked(const mirror::Object* s) const { + mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { bool erased = false; for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) { if (*it == s) { @@ -72,7 +72,7 @@ class TestPredicate { } } EXPECT_TRUE(erased); - return false; + return nullptr; } void Expect(const mirror::String* s) { @@ -87,13 +87,6 @@ class TestPredicate { mutable std::vector<const mirror::String*> expected_; }; -mirror::Object* IsMarkedSweepingCallback(mirror::Object* object, void* arg) { - if (reinterpret_cast<TestPredicate*>(arg)->IsMarked(object)) { - return object; - } - return nullptr; -} - TEST_F(InternTableTest, SweepInternTableWeaks) { ScopedObjectAccess soa(Thread::Current()); InternTable t; @@ -115,7 +108,7 @@ TEST_F(InternTableTest, SweepInternTableWeaks) { p.Expect(s1.Get()); { ReaderMutexLock mu(soa.Self(), *Locks::heap_bitmap_lock_); - t.SweepInternTableWeaks(IsMarkedSweepingCallback, &p); + t.SweepInternTableWeaks(&p); } EXPECT_EQ(2U, t.Size()); diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 26860e7100..6c6232c437 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -28,7 +28,7 @@ namespace interpreter { static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty, Object* receiver, uint32_t* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler, // it should be removed and JNI compiled stubs used instead. ScopedObjectAccessUnchecked soa(self); @@ -240,23 +240,23 @@ JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) UNREACHABLE(); } // Explicit definitions of ExecuteGotoImpl. 
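The four template<> declarations that follow exist so that each specialization of the goto interpreter can carry its lock annotation in this file, while the definitions are compiled in a separate translation unit. A minimal sketch of the explicit-instantiation idiom behind this, with illustrative names:

    // impl.cc: the template definition plus the exact instantiations we ship.
    template <bool kA, bool kB>
    int Combine(int x) {
      return (kA ? x : -x) + (kB ? 1 : 0);
    }
    template int Combine<true, false>(int);   // explicit instantiation definitions:
    template int Combine<false, false>(int);  // exactly these four symbols are
    template int Combine<true, true>(int);    // emitted for other translation
    template int Combine<false, true>(int);   // units to link against.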
-template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<> SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); #endif static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) { @@ -395,7 +395,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive } void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JValue value; // Set value to last known result in case the shadow frame chain is empty. value.SetJ(ret_val->GetJ()); diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h index 446c5bb4a5..61140a24cf 100644 --- a/runtime/interpreter/interpreter.h +++ b/runtime/interpreter/interpreter.h @@ -35,26 +35,26 @@ namespace interpreter { // Called by ArtMethod::Invoke; shadow frame arguments are taken from the args array.
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JValue* ret_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace interpreter extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace art diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index fa103b15d1..f923b848fa 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -19,6 +19,7 @@ #include <cmath> #include "debugger.h" +#include "entrypoints/runtime_asm_entrypoints.h" #include "mirror/array-inl.h" #include "unstarted_runtime.h" #include "verifier/method_verifier.h" @@ -191,7 +192,7 @@ EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot); // iget-object-q template<Primitive::Type field_type> static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JValue field_value; switch (field_type) { case Primitive::kPrimBoolean: @@ -449,7 +450,7 @@ void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame) // Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame. static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame, size_t dest_reg, size_t src_reg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Uint required, so that sign extension does not make this wrong on 64b systems uint32_t src_value = shadow_frame.GetVReg(src_reg); mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg); @@ -481,7 +482,7 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) { } // Separate declaration is required solely for the attributes. 
-template<bool is_range, bool do_assignability_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template<bool is_range, bool do_assignability_check> SHARED_REQUIRES(Locks::mutator_lock_) static inline bool DoCallCommon(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame, @@ -490,6 +491,23 @@ static inline bool DoCallCommon(ArtMethod* called_method, uint32_t arg[Instruction::kMaxVarArgRegs], uint32_t vregC) ALWAYS_INLINE; +SHARED_REQUIRES(Locks::mutator_lock_) +static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) ALWAYS_INLINE; + +static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) { + ArtMethod* target = new_shadow_frame->GetMethod(); + if (UNLIKELY(target->IsNative() || target->IsProxyMethod())) { + return false; + } + Runtime* runtime = Runtime::Current(); + ClassLinker* class_linker = runtime->GetClassLinker(); + return runtime->GetInstrumentation()->IsForcedInterpretOnly() || + // Doing this check avoids unnecessary compiled/interpreter transitions. + class_linker->IsQuickToInterpreterBridge(target->GetEntryPointFromQuickCompiledCode()) || + // Force the use of interpreter when it is required by the debugger. + Dbg::IsForcedInterpreterNeededForCalling(self, target); +} + template<bool is_range, bool do_assignability_check> static inline bool DoCallCommon(ArtMethod* called_method, Thread* self, @@ -660,28 +678,11 @@ static inline bool DoCallCommon(ArtMethod* called_method, // Do the call now. if (LIKELY(Runtime::Current()->IsStarted())) { - if (kIsDebugBuild && new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter() == nullptr) { - LOG(FATAL) << "Attempt to invoke non-executable method: " - << PrettyMethod(new_shadow_frame->GetMethod()); - UNREACHABLE(); - } - if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly() && - !new_shadow_frame->GetMethod()->IsNative() && - !new_shadow_frame->GetMethod()->IsProxyMethod() && - new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter() - == artInterpreterToCompiledCodeBridge) { - LOG(FATAL) << "Attempt to call compiled code when -Xint: " - << PrettyMethod(new_shadow_frame->GetMethod()); - UNREACHABLE(); - } - // Force the use of interpreter when it is required by the debugger. - EntryPointFromInterpreter* entry; - if (UNLIKELY(Dbg::IsForcedInterpreterNeededForCalling(self, new_shadow_frame->GetMethod()))) { - entry = &art::artInterpreterToInterpreterBridge; + if (NeedsInterpreter(self, new_shadow_frame)) { + artInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result); } else { - entry = new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter(); + artInterpreterToCompiledCodeBridge(self, code_item, new_shadow_frame, result); } - entry(self, code_item, new_shadow_frame, result); } else { UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg); } @@ -833,7 +834,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame, return true; } -// TODO fix thread analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). +// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
template<typename T> static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count) NO_THREAD_SAFETY_ANALYSIS { @@ -844,7 +845,7 @@ static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* arra } void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(Runtime::Current()->IsActiveTransaction()); DCHECK(array != nullptr); DCHECK_LE(count, array->GetLength()); @@ -883,7 +884,7 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) // Explicit DoCall template function declarations. #define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \ ShadowFrame& shadow_frame, \ const Instruction* inst, uint16_t inst_data, \ @@ -896,7 +897,7 @@ EXPLICIT_DO_CALL_TEMPLATE_DECL(true, true); // Explicit DoLambdaCall template function declarations. #define EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoLambdaCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \ ShadowFrame& shadow_frame, \ const Instruction* inst, \ @@ -910,7 +911,7 @@ EXPLICIT_DO_LAMBDA_CALL_TEMPLATE_DECL(true, true); // Explicit DoFilledNewArray template function declarations. #define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \ const ShadowFrame& shadow_frame, \ Thread* self, JValue* result) diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index a12a58d48f..6468659d9f 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -34,6 +34,7 @@ #include "dex_instruction-inl.h" #include "entrypoints/entrypoint_utils-inl.h" #include "handle_scope-inl.h" +#include "lambda/box_table.h" #include "mirror/class-inl.h" #include "mirror/method.h" #include "mirror/object-inl.h" @@ -71,7 +72,7 @@ extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); void ThrowNullPointerExceptionFromInterpreter() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS { ref->MonitorEnter(self); @@ -83,13 +84,13 @@ static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANA void AbortTransactionF(Thread* self, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AbortTransactionV(Thread* self, const char* fmt, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Invokes the given method. 
This is part of the invocation support and is used by DoInvoke and // DoInvokeVirtualQuick functions. @@ -113,7 +114,7 @@ bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_fr // // If the validation fails, return false and raise an exception. static inline bool IsValidLambdaTargetOrThrow(ArtMethod* called_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { bool success = false; if (UNLIKELY(called_method == nullptr)) { @@ -168,7 +169,7 @@ static inline bool DoCreateLambda(Thread* self, ShadowFrame& shadow_frame, mirror::Object* receiver = nullptr; // Always static. (see 'kStatic') ArtMethod* sf_method = shadow_frame.GetMethod(); ArtMethod* const called_method = FindMethodFromCode<kStatic, do_access_check>( - method_idx, &receiver, &sf_method, self); + method_idx, &receiver, sf_method, self); uint32_t vregA = inst->VRegA_21c(); @@ -190,7 +191,7 @@ static inline bool DoCreateLambda(Thread* self, ShadowFrame& shadow_frame, // (Exceptions are thrown by creating a new exception and then being put in the thread TLS) static inline ArtMethod* ReadLambdaClosureFromVRegsOrThrow(ShadowFrame& shadow_frame, uint32_t vreg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO(iam): Introduce a closure abstraction that will contain the captured variables // instead of just an ArtMethod. // This is temporarily using 2 vregs because a native ArtMethod can be up to 64-bit, @@ -253,7 +254,7 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr Object* receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC); ArtMethod* sf_method = shadow_frame.GetMethod(); ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>( - method_idx, &receiver, &sf_method, self); + method_idx, &receiver, sf_method, self); // The shadow frame should already be pushed, so we don't need to update it. if (UNLIKELY(called_method == nullptr)) { CHECK(self->IsExceptionPending()); @@ -305,32 +306,32 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, // Returns true on success, otherwise throws an exception and returns false. template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check> bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, - uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_); // Handles iget-quick, iget-wide-quick and iget-object-quick instructions. // Returns true on success, otherwise throws an exception and returns false. template<Primitive::Type field_type> bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Handles iput-XXX and sput-XXX instructions. // Returns true on success, otherwise throws an exception and returns false. template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check, bool transaction_active> bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst, - uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_); // Handles iput-quick, iput-wide-quick and iput-object-quick instructions. // Returns true on success, otherwise throws an exception and returns false. 
template<Primitive::Type field_type, bool transaction_active> bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the // java.lang.String class is initialized. static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Class* java_lang_string_class = String::GetJavaLangString(); if (UNLIKELY(!java_lang_string_class->IsInitialized())) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -357,7 +358,7 @@ static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uin // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false. static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -375,7 +376,7 @@ static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg, // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false. static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg, int32_t dividend, int32_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -393,7 +394,7 @@ static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg, // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false. static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const int64_t kMinLong = std::numeric_limits<int64_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -411,7 +412,7 @@ static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg, // Returns true on success, otherwise throws a java.lang.ArithmeticException and returns false. static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg, int64_t dividend, int64_t divisor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const int64_t kMinLong = std::numeric_limits<int64_t>::min(); if (UNLIKELY(divisor == 0)) { ThrowArithmeticExceptionDivideByZero(); @@ -435,7 +436,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame, // Returns the branch offset to the next instruction to execute.
static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH); const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t(); int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data)); @@ -446,10 +447,10 @@ static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& return 3; } const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]); - DCHECK(IsAligned<4>(keys)); + DCHECK_ALIGNED(keys, 4); int32_t first_key = keys[0]; const int32_t* targets = reinterpret_cast<const int32_t*>(&switch_data[4]); - DCHECK(IsAligned<4>(targets)); + DCHECK_ALIGNED(targets, 4); int32_t index = test_val - first_key; if (index >= 0 && index < size) { return targets[index]; @@ -463,7 +464,7 @@ static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& // Returns the branch offset to the next instruction to execute. static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame, uint16_t inst_data) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH); const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t(); int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data)); @@ -474,9 +475,9 @@ static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& return 3; } const int32_t* keys = reinterpret_cast<const int32_t*>(&switch_data[2]); - DCHECK(IsAligned<4>(keys)); + DCHECK_ALIGNED(keys, 4); const int32_t* entries = keys + size; - DCHECK(IsAligned<4>(entries)); + DCHECK_ALIGNED(entries, 4); int lo = 0; int hi = size - 1; while (lo <= hi) { @@ -496,7 +497,7 @@ static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& template <bool _do_check> static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, - uint16_t inst_data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_) { /* * box-lambda vA, vB /// opcode 0xf8, format 22x * - vA is the target register where the Object representation of the closure will be stored into @@ -506,8 +507,8 @@ static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const In uint32_t vreg_target_object = inst->VRegA_22x(inst_data); uint32_t vreg_source_closure = inst->VRegB_22x(); - ArtMethod* const closure_method = ReadLambdaClosureFromVRegsOrThrow(shadow_frame, - vreg_source_closure); + ArtMethod* closure_method = ReadLambdaClosureFromVRegsOrThrow(shadow_frame, + vreg_source_closure); // Failed lambda target runtime check, an exception was raised. if (UNLIKELY(closure_method == nullptr)) { @@ -515,28 +516,21 @@ static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const In return false; } - // Convert the ArtMethod into a java.lang.reflect.Method which will serve - // as the temporary 'boxed' version of the lambda. This is good enough - // to check all the basic object identities that a boxed lambda must retain. + mirror::Object* closure_as_object = + Runtime::Current()->GetLambdaBoxTable()->BoxLambda(closure_method); - // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class - // TODO: Boxing a learned lambda (i.e. 
made with unbox-lambda) should return the original object - // TODO: Repeated boxing should return the same object reference - mirror::Method* method_as_object = - mirror::Method::CreateFromArtMethod(self, closure_method); - - if (UNLIKELY(method_as_object == nullptr)) { - // Most likely an OOM has occurred. + // Failed to box the lambda, an exception was raised. + if (UNLIKELY(closure_as_object == nullptr)) { CHECK(self->IsExceptionPending()); return false; } - shadow_frame.SetVRegReference(vreg_target_object, method_as_object); + shadow_frame.SetVRegReference(vreg_target_object, closure_as_object); return true; } -template <bool _do_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) -static inline bool DoUnboxLambda(Thread* self ATTRIBUTE_UNUSED, +template <bool _do_check> SHARED_REQUIRES(Locks::mutator_lock_) +static inline bool DoUnboxLambda(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) { @@ -556,38 +550,30 @@ static inline bool DoUnboxLambda(Thread* self ATTRIBUTE_UNUSED, return false; } - // Raise ClassCastException if object is not instanceof java.lang.reflect.Method - if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) { - ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass()); + ArtMethod* unboxed_closure = nullptr; + // Raise an exception if unboxing fails. + if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed_closure_object, + &unboxed_closure)) { + CHECK(self->IsExceptionPending()); return false; } - // TODO(iam): We must check that the closure object extends/implements the type - // specified in [type id]. This is not currently implemented since it's always a Method. - - // If we got this far, the inputs are valid. - // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target. - mirror::AbstractMethod* boxed_closure_as_method = - down_cast<mirror::AbstractMethod*>(boxed_closure_object); - - ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod(); DCHECK(unboxed_closure != nullptr); - WriteLambdaClosureIntoVRegs(shadow_frame, *unboxed_closure, vreg_target_closure); return true; } uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame, uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame) __attribute__((cold)) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst, const uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { constexpr bool kTracing = false; if (kTracing) { #define TRACE_LOG std::cerr @@ -619,7 +605,7 @@ static inline bool IsBackwardBranch(int32_t branch_offset) { // Explicitly instantiate all DoInvoke functions. #define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \ const Instruction* inst, uint16_t inst_data, \ JValue* result) @@ -640,7 +626,7 @@ EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface) // invoke-interface/range. // Explicitly instantiate all DoInvokeVirtualQuick functions. 
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \ - template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ + template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \ const Instruction* inst, uint16_t inst_data, \ JValue* result) @@ -651,7 +637,7 @@ EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick- // Explicitly instantiate all DoCreateLambda functions. #define EXPLICIT_DO_CREATE_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoCreateLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, \ const Instruction* inst) @@ -661,7 +647,7 @@ EXPLICIT_DO_CREATE_LAMBDA_DECL(true); // create-lambda // Explicitly instantiate all DoInvokeLambda functions. #define EXPLICIT_DO_INVOKE_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoInvokeLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \ uint16_t inst_data, JValue* result); @@ -671,7 +657,7 @@ EXPLICIT_DO_INVOKE_LAMBDA_DECL(true); // invoke-lambda // Explicitly instantiate all DoBoxLambda functions. #define EXPLICIT_DO_BOX_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoBoxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \ uint16_t inst_data); @@ -681,7 +667,7 @@ EXPLICIT_DO_BOX_LAMBDA_DECL(true); // box-lambda // Explicitly instantiate all DoUnBoxLambda functions. #define EXPLICIT_DO_UNBOX_LAMBDA_DECL(_do_check) \ -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \ +template SHARED_REQUIRES(Locks::mutator_lock_) \ bool DoUnboxLambda<_do_check>(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \ uint16_t inst_data); diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index ec923b6eb2..7027cbfc52 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -2536,16 +2536,16 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF } // NOLINT(readability/fn_size) // Explicit definitions of ExecuteGotoImpl. 
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteGotoImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteGotoImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteGotoImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 78090bbe0c..544f7886e9 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -2283,16 +2283,16 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item, } // NOLINT(readability/fn_size) // Explicit definitions of ExecuteSwitchImpl. -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteSwitchImpl<true, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) HOT_ATTR +template SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR JValue ExecuteSwitchImpl<false, false>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteSwitchImpl<true, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); -template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) +template SHARED_REQUIRES(Locks::mutator_lock_) JValue ExecuteSwitchImpl<false, true>(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register); diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index 43e24faed3..c559389dba 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -46,7 +46,7 @@ namespace interpreter { static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) 
{ va_list args; @@ -69,7 +69,7 @@ static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> class Handle<mirror::ClassLoader> class_loader, JValue* result, const std::string& method_name, bool initialize_class, bool abort_if_not_found) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(className.Get() != nullptr); std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str())); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); @@ -99,7 +99,7 @@ static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> class // actually the transaction abort exception. This must not be wrapped, as it signals an // initialization abort. static void CheckExceptionGenerateClassNotFound(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (self->IsExceptionPending()) { // If it is not the transaction abort exception, wrap it. std::string type(PrettyTypeOf(self->GetException())); @@ -111,7 +111,7 @@ static void CheckExceptionGenerateClassNotFound(Thread* self) } static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Object* param = shadow_frame->GetVRegReference(arg_offset); if (param == nullptr) { AbortTransactionOrFail(self, "Null-pointer in Class.forName."); @@ -229,20 +229,16 @@ void UnstartedRuntime::UnstartedClassGetDeclaredField( mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass(); mirror::String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString(); ArtField* found = nullptr; - ArtField* fields = klass->GetIFields(); - for (int32_t i = 0, count = klass->NumInstanceFields(); i < count; ++i) { - ArtField* f = &fields[i]; - if (name2->Equals(f->GetName())) { - found = f; + for (ArtField& field : klass->GetIFields()) { + if (name2->Equals(field.GetName())) { + found = &field; break; } } if (found == nullptr) { - fields = klass->GetSFields(); - for (int32_t i = 0, count = klass->NumStaticFields(); i < count; ++i) { - ArtField* f = &fields[i]; - if (name2->Equals(f->GetName())) { - found = f; + for (ArtField& field : klass->GetSFields()) { + if (name2->Equals(field.GetName())) { + found = &field; break; } } @@ -294,7 +290,7 @@ static void PrimitiveArrayCopy(Thread* self, mirror::Array* src_array, int32_t src_pos, mirror::Array* dst_array, int32_t dst_pos, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) { AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.", PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(), @@ -490,7 +486,7 @@ void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits( } static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile* dex_file = dex_cache->GetDexFile(); if (dex_file == nullptr) { return nullptr; @@ -601,7 +597,7 @@ void UnstartedRuntime::UnstartedMemoryPeekLong( static void UnstartedMemoryPeekArray( Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { int64_t address_long = shadow_frame->GetVRegLong(arg_offset); 
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2); if (obj == nullptr) { @@ -840,7 +836,7 @@ void UnstartedRuntime::UnstartedStringFastSubstring( // This allows getting the char array for new style of String objects during compilation. void UnstartedRuntime::UnstartedStringToCharArray( Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString(); if (string == nullptr) { AbortTransactionOrFail(self, "String.charAt with null object"); diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h index a357d5fa18..03d7026ef7 100644 --- a/runtime/interpreter/unstarted_runtime.h +++ b/runtime/interpreter/unstarted_runtime.h @@ -52,14 +52,14 @@ class UnstartedRuntime { ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void Jni(Thread* self, ArtMethod* method, mirror::Object* receiver, uint32_t* args, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: // Methods that intercept available libcore implementations. @@ -68,7 +68,7 @@ class UnstartedRuntime { ShadowFrame* shadow_frame, \ JValue* result, \ size_t arg_offset) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); #include "unstarted_runtime_list.h" UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT) #undef UNSTARTED_RUNTIME_DIRECT_LIST @@ -82,7 +82,7 @@ class UnstartedRuntime { mirror::Object* receiver, \ uint32_t* args, \ JValue* result) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); #include "unstarted_runtime_list.h" UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI) #undef UNSTARTED_RUNTIME_DIRECT_LIST diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc index 4b672e06f4..a1ae2aab9c 100644 --- a/runtime/interpreter/unstarted_runtime_test.cc +++ b/runtime/interpreter/unstarted_runtime_test.cc @@ -42,7 +42,7 @@ class UnstartedRuntimeTest : public CommonRuntimeTest { ShadowFrame* shadow_frame, \ JValue* result, \ size_t arg_offset) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \ } #include "unstarted_runtime_list.h" @@ -58,7 +58,7 @@ class UnstartedRuntimeTest : public CommonRuntimeTest { mirror::Object* receiver, \ uint32_t* args, \ JValue* result) \ - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + SHARED_REQUIRES(Locks::mutator_lock_) { \ interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \ } #include "unstarted_runtime_list.h" diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index f1deacf39a..9d41018c1b 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -87,7 +87,7 @@ class SharedLibrary { * If the call has not yet finished in another thread, wait for it. 
*/ bool CheckOnLoadResult() - LOCKS_EXCLUDED(jni_on_load_lock_) { + REQUIRES(!jni_on_load_lock_) { Thread* self = Thread::Current(); bool okay; { @@ -112,7 +112,7 @@ class SharedLibrary { return okay; } - void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) { + void SetResult(bool result) REQUIRES(!jni_on_load_lock_) { Thread* self = Thread::Current(); MutexLock mu(self, jni_on_load_lock_); @@ -210,8 +210,8 @@ class Libraries { // See section 11.3 "Linking Native Methods" of the JNI spec. void* FindNativeMethod(ArtMethod* m, std::string& detail) - EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + REQUIRES(Locks::jni_libraries_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) { std::string jni_short_name(JniShortName(m)); std::string jni_long_name(JniLongName(m)); const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader(); @@ -473,7 +473,8 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, mirror::Object* obj) { return nullptr; } MutexLock mu(self, weak_globals_lock_); - while (UNLIKELY(!allow_new_weak_globals_)) { + while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) || + (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { weak_globals_add_condition_.WaitHoldingLocks(self); } IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj); @@ -559,6 +560,13 @@ void JavaVMExt::EnsureNewWeakGlobalsDisallowed() { CHECK(!allow_new_weak_globals_); } +void JavaVMExt::BroadcastForNewWeakGlobals() { + CHECK(kUseReadBarrier); + Thread* self = Thread::Current(); + MutexLock mu(self, weak_globals_lock_); + weak_globals_add_condition_.Broadcast(self); +} + mirror::Object* JavaVMExt::DecodeGlobal(Thread* self, IndirectRef ref) { return globals_.SynchronizedGet(self, &globals_lock_, ref); } @@ -570,7 +578,8 @@ void JavaVMExt::UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* resu mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) { MutexLock mu(self, weak_globals_lock_); - while (UNLIKELY(!allow_new_weak_globals_)) { + while (UNLIKELY((!kUseReadBarrier && !allow_new_weak_globals_) || + (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { weak_globals_add_condition_.WaitHoldingLocks(self); } return weak_globals_.Get(ref); @@ -757,7 +766,7 @@ void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) { return native_method; } -void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) { +void JavaVMExt::SweepJniWeakGlobals(IsMarkedVisitor* visitor) { MutexLock mu(Thread::Current(), weak_globals_lock_); Runtime* const runtime = Runtime::Current(); for (auto* entry : weak_globals_) { @@ -765,7 +774,7 @@ void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) { if (!entry->IsNull()) { // Since this is called by the GC, we don't need a read barrier. mirror::Object* obj = entry->Read<kWithoutReadBarrier>(); - mirror::Object* new_obj = callback(obj, arg); + mirror::Object* new_obj = visitor->IsMarked(obj); if (new_obj == nullptr) { new_obj = runtime->GetClearedJniWeakGlobal(); } diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h index 4fdf45a03e..d70fc47c61 100644 --- a/runtime/java_vm_ext.h +++ b/runtime/java_vm_ext.h @@ -77,7 +77,7 @@ class JavaVMExt : public JavaVM { // such as NewByteArray. // If -verbose:third-party-jni is on, we want to log any JNI function calls // made by a third-party native method. 
- bool ShouldTrace(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ShouldTrace(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); /** * Loads the given shared library. 'path' is an absolute pathname. @@ -93,55 +93,57 @@ class JavaVMExt : public JavaVM { * using dlsym(3) on every native library that's been loaded so far. */ void* FindCodeForNativeMethod(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_); + REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_); bool SetCheckJniEnabled(bool enabled); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!globals_lock_); - void DisallowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewWeakGlobalsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); + void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); + void EnsureNewWeakGlobalsDisallowed() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!weak_globals_lock_); + void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!weak_globals_lock_); jobject AddGlobalRef(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_); jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); - void DeleteGlobalRef(Thread* self, jobject obj); + void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_); - void DeleteWeakGlobalRef(Thread* self, jweak obj); + void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_); - void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SweepJniWeakGlobals(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(globals_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_); mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(weak_globals_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_); const JNIInvokeInterface* GetUncheckedFunctions() const { return unchecked_functions_; } - void TrimGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - 
LOCKS_EXCLUDED(globals_lock_); + void TrimGlobals() SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!globals_lock_); private: Runtime* const runtime_; diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h index 7c48985dfe..ae02fe6a0b 100644 --- a/runtime/jdwp/jdwp.h +++ b/runtime/jdwp/jdwp.h @@ -88,7 +88,7 @@ struct JdwpLocation { uint64_t dex_pc; }; std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs); bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs); @@ -128,9 +128,12 @@ struct JdwpState { * the debugger. * * Returns a newly-allocated JdwpState struct on success, or nullptr on failure. + * + * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have + * state->thread_start_lock_ held. */ static JdwpState* Create(const JdwpOptions* options) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; ~JdwpState(); @@ -155,15 +158,15 @@ struct JdwpState { // thread (command handler) so no event thread posts an event while // it processes a command. This must be called only from the debugger // thread. - void AcquireJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_); - void ReleaseJdwpTokenForCommand() LOCKS_EXCLUDED(jdwp_token_lock_); + void AcquireJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_); + void ReleaseJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_); // Acquires/releases the JDWP synchronization token for the event thread // so no other thread (debugger thread or event thread) interleaves with // it when posting an event. This must NOT be called from the debugger // thread, only event thread. - void AcquireJdwpTokenForEvent(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_); - void ReleaseJdwpTokenForEvent() LOCKS_EXCLUDED(jdwp_token_lock_); + void AcquireJdwpTokenForEvent(ObjectId threadId) REQUIRES(!jdwp_token_lock_); + void ReleaseJdwpTokenForEvent() REQUIRES(!jdwp_token_lock_); /* * These notify the debug code that something interesting has happened. This @@ -183,7 +186,7 @@ struct JdwpState { * The VM has finished initializing. Only called when the debugger is * connected at the time initialization completes. */ - void PostVMStart() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PostVMStart() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_); /* * A location of interest has been reached. This is used for breakpoints, @@ -199,8 +202,7 @@ struct JdwpState { */ void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags, const JValue* returnValue) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * A field of interest has been accessed or modified. This is used for field access and field @@ -211,8 +213,7 @@ struct JdwpState { */ void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr, const JValue* fieldValue, bool is_modification) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * An exception has been thrown. 
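[Editor's aside -- not part of the patch] The annotation migration running through these files (SHARED_LOCKS_REQUIRED(x) -> SHARED_REQUIRES(x), EXCLUSIVE_LOCKS_REQUIRED(x) -> REQUIRES(x), LOCKS_EXCLUDED(x) -> REQUIRES(!x)) tracks Clang's capability-based -Wthread-safety vocabulary. The substantive case is REQUIRES(!x): LOCKS_EXCLUDED is not checked transitively by the analysis, whereas a negative capability is a requirement that propagates to callers, so the compiler can prove a lock is not already held on entry. A minimal sketch of the pattern follows; the Mutex class and macro spellings are illustrative stand-ins, not ART's real declarations.

// Compile with: clang++ -fsyntax-only -Wthread-safety -Wthread-safety-negative sketch.cc
// All declarations below are hypothetical, for illustration only.
#define CAPABILITY(x) __attribute__((capability(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))

class CAPABILITY("mutex") Mutex {
 public:
  void Lock() ACQUIRE();    // Acquires the capability represented by *this.
  void Unlock() RELEASE();  // Releases it again.
};

Mutex weak_globals_lock;
int weak_globals GUARDED_BY(weak_globals_lock);  // Only touched with the lock held.

// REQUIRES(!weak_globals_lock) is a negative capability: the caller must
// provably not hold the lock. With -Wthread-safety-negative, a caller that
// might still hold it gets a compile-time warning, which is stronger than
// the old, non-transitive LOCKS_EXCLUDED annotation.
void AddWeakGlobal(int value) REQUIRES(!weak_globals_lock) {
  weak_globals_lock.Lock();
  weak_globals = value;
  weak_globals_lock.Unlock();
}

[End aside]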
@@ -221,22 +222,19 @@ struct JdwpState { */ void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object, const EventLocation* pCatchLoc, mirror::Object* thisPtr) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * A thread has started or stopped. */ void PostThreadChange(Thread* thread, bool start) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * Class has been prepared. */ void PostClassPrepare(mirror::Class* klass) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_); /* * The VM is about to stop. @@ -244,7 +242,7 @@ struct JdwpState { bool PostVMDeath(); // Called if/when we realize we're talking to DDMS. - void NotifyDdmsActive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void NotifyDdmsActive() SHARED_REQUIRES(Locks::mutator_lock_); void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header); @@ -253,23 +251,23 @@ struct JdwpState { * Send up a chunk of DDM data. */ void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool HandlePacket(); + bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_); void SendRequest(ExpandBuf* pReq); void ResetState() - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); /* atomic ops to get next serial number */ uint32_t NextRequestSerial(); uint32_t NextEventSerial(); void Run() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_, + !attach_lock_, !event_list_lock_); /* * Register an event by adding it to the event list. @@ -278,48 +276,45 @@ struct JdwpState { * may discard its pointer after calling this. */ JdwpError RegisterEvent(JdwpEvent* pEvent) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); /* * Unregister an event, given the requestId. */ void UnregisterEventById(uint32_t requestId) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); /* * Unregister all events. 
*/ void UnregisterAll() - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); private: explicit JdwpState(const JdwpOptions* options); - size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply); + size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply) + REQUIRES(!jdwp_token_lock_); bool InvokeInProgress(); bool IsConnected(); void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy, ObjectId threadId) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_); void CleanupMatchList(const std::vector<JdwpEvent*>& match_list) - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void EventFinish(ExpandBuf* pReq); bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket, std::vector<JdwpEvent*>* match_list) - LOCKS_EXCLUDED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket, std::vector<JdwpEvent*>* match_list) - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void UnregisterEvent(JdwpEvent* pEvent) - EXCLUSIVE_LOCKS_REQUIRED(event_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov); /* @@ -351,8 +346,8 @@ struct JdwpState { * events at the same time, so we grab a mutex in the SetWaitForJdwpToken * call, and release it in the ClearWaitForJdwpToken call. 
*/ - void SetWaitForJdwpToken(ObjectId threadId) LOCKS_EXCLUDED(jdwp_token_lock_); - void ClearWaitForJdwpToken() LOCKS_EXCLUDED(jdwp_token_lock_); + void SetWaitForJdwpToken(ObjectId threadId) REQUIRES(!jdwp_token_lock_); + void ClearWaitForJdwpToken() REQUIRES(!jdwp_token_lock_); public: // TODO: fix privacy const JdwpOptions* options_; @@ -415,9 +410,9 @@ struct JdwpState { bool processing_request_ GUARDED_BY(shutdown_lock_); }; -std::string DescribeField(const FieldId& field_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -std::string DescribeMethod(const MethodId& method_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); +std::string DescribeField(const FieldId& field_id) SHARED_REQUIRES(Locks::mutator_lock_); +std::string DescribeMethod(const MethodId& method_id) SHARED_REQUIRES(Locks::mutator_lock_); +std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_REQUIRES(Locks::mutator_lock_); class Request { public: @@ -433,9 +428,9 @@ class Request { uint32_t ReadUnsigned32(const char* what); - FieldId ReadFieldId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + FieldId ReadFieldId() SHARED_REQUIRES(Locks::mutator_lock_); - MethodId ReadMethodId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MethodId ReadMethodId() SHARED_REQUIRES(Locks::mutator_lock_); ObjectId ReadObjectId(const char* specific_kind); @@ -447,7 +442,7 @@ class Request { ObjectId ReadThreadGroupId(); - RefTypeId ReadRefTypeId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + RefTypeId ReadRefTypeId() SHARED_REQUIRES(Locks::mutator_lock_); FrameId ReadFrameId(); @@ -461,7 +456,7 @@ class Request { JdwpTypeTag ReadTypeTag(); - JdwpLocation ReadLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + JdwpLocation ReadLocation() SHARED_REQUIRES(Locks::mutator_lock_); JdwpModKind ReadModKind(); diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc index adc2912e58..51952c4923 100644 --- a/runtime/jdwp/jdwp_adb.cc +++ b/runtime/jdwp/jdwp_adb.cc @@ -24,7 +24,7 @@ #include "base/stringprintf.h" #include "jdwp/jdwp_priv.h" -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "cutils/sockets.h" #endif @@ -224,7 +224,7 @@ bool JdwpAdbState::Accept() { */ int ret = connect(control_sock_, &control_addr_.controlAddrPlain, control_addr_len_); if (!ret) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ if (!socket_peer_is_trusted(control_sock_)) { if (shutdown(control_sock_, SHUT_RDWR)) { PLOG(ERROR) << "trouble shutting down socket"; diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc index 14f097f72a..5d21f1716e 100644 --- a/runtime/jdwp/jdwp_event.cc +++ b/runtime/jdwp/jdwp_event.cc @@ -447,7 +447,7 @@ static bool PatternMatch(const char* pattern, const std::string& target) { * need to do this even if later mods cause us to ignore the event. 
*/ static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JdwpEventMod* pMod = pEvent->mods; for (int i = pEvent->modCount; i > 0; i--, pMod++) { @@ -784,7 +784,7 @@ void JdwpState::PostVMStart() { static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list, ObjectId thread_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0, e = match_list.size(); i < e; ++i) { JdwpEvent* pEvent = match_list[i]; VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind @@ -800,7 +800,7 @@ static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list, static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location, JDWP::JdwpLocation* jdwp_location) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(event_location != nullptr); DCHECK(jdwp_location != nullptr); Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc); diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index d4e2656b7e..7776f8fad3 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -53,7 +53,7 @@ std::string DescribeRefTypeId(const RefTypeId& ref_type_id) { } static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint8_t tag; JdwpError rc = Dbg::GetObjectTag(object_id, &tag); if (rc == ERR_NONE) { @@ -64,7 +64,7 @@ static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id) } static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAdd4BE(reply, objects.size()); for (size_t i = 0; i < objects.size(); ++i) { JdwpError rc = WriteTaggedObject(reply, objects[i]); @@ -84,7 +84,7 @@ static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<Objec static JdwpError RequestInvoke(JdwpState*, Request* request, ObjectId thread_id, ObjectId object_id, RefTypeId class_id, MethodId method_id, bool is_constructor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(!is_constructor || object_id != 0); int32_t arg_count = request->ReadSigned32("argument count"); @@ -123,7 +123,7 @@ static JdwpError RequestInvoke(JdwpState*, Request* request, } static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Text information on runtime version. std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion())); expandBufAddUtf8String(pReply, version); @@ -147,7 +147,7 @@ static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply) * been loaded by multiple class loaders. */ static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string classDescriptor(request->ReadUtf8String()); std::vector<RefTypeId> ids; @@ -179,7 +179,7 @@ static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* * to be suspended, and that violates some JDWP expectations. 
*/ static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::vector<ObjectId> thread_ids; Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids); @@ -195,7 +195,7 @@ static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply) * List all thread groups that do not have a parent. */ static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { /* * TODO: maintain a list of parentless thread groups in the VM. * @@ -214,7 +214,7 @@ static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply * Respond with the sizes of the basic debugger types. */ static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAdd4BE(pReply, sizeof(FieldId)); expandBufAdd4BE(pReply, sizeof(MethodId)); expandBufAdd4BE(pReply, sizeof(ObjectId)); @@ -224,7 +224,7 @@ static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply) } static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::Dispose(); return ERR_NONE; } @@ -236,7 +236,7 @@ static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*) * This needs to increment the "suspend count" on all threads. */ static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Thread* self = Thread::Current(); self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); Dbg::SuspendVM(); @@ -248,13 +248,13 @@ static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*) * Resume execution. Decrements the "suspend count" of all threads. */ static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Dbg::ResumeVM(); return ERR_NONE; } static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t exit_status = request->ReadUnsigned32("exit_status"); state->ExitAfterReplying(exit_status); return ERR_NONE; @@ -267,7 +267,7 @@ static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*) * string "java.util.Arrays".) 
*/ static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string str(request->ReadUtf8String()); ObjectId string_id; JdwpError status = Dbg::CreateString(str, &string_id); @@ -279,7 +279,7 @@ static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply } static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAddUtf8String(pReply, "/"); std::vector<std::string> class_path; @@ -300,7 +300,7 @@ static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply) } static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { size_t object_count = request->ReadUnsigned32("object_count"); for (size_t i = 0; i < object_count; ++i) { ObjectId object_id = request->ReadObjectId(); @@ -311,7 +311,7 @@ static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*) } static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { expandBufAdd1(reply, true); // canWatchFieldModification expandBufAdd1(reply, true); // canWatchFieldAccess expandBufAdd1(reply, true); // canGetBytecodes @@ -323,7 +323,7 @@ static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply) } static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // The first few capabilities are the same as those reported by the older call. VM_Capabilities(nullptr, request, reply); @@ -350,7 +350,7 @@ static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* rep } static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::vector<JDWP::RefTypeId> classes; Dbg::GetClassList(&classes); @@ -381,17 +381,17 @@ static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status } static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, false); } static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return VM_AllClassesImpl(pReply, true, true); } static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { int32_t class_count = request->ReadSigned32("class count"); if (class_count < 0) { return ERR_ILLEGAL_ARGUMENT; @@ -415,7 +415,7 @@ static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pRep } static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::GetModifiers(refTypeId, pReply); } @@ -424,7 +424,7 @@ static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply) * Get values from static fields in a reference type. 
*/ static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); int32_t field_count = request->ReadSigned32("field count"); expandBufAdd4BE(pReply, field_count); @@ -442,7 +442,7 @@ static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Get the name of the source file in which a reference type was declared. */ static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); std::string source_file; JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file); @@ -457,7 +457,7 @@ static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply) * Return the current status of the reference type. */ static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); JDWP::JdwpTypeTag type_tag; uint32_t class_status; @@ -473,7 +473,7 @@ static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply) * Return interfaces implemented directly by this class. */ static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredInterfaces(refTypeId, pReply); } @@ -482,7 +482,7 @@ static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply) * Return the class object corresponding to this type. */ static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); ObjectId class_object_id; JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id); @@ -500,13 +500,13 @@ static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply) * JDB seems interested, but DEX files don't currently support this. */ static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { /* referenceTypeId in, string out */ return ERR_ABSENT_INFORMATION; } static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); std::string signature; @@ -522,12 +522,12 @@ static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, b } static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return RT_Signature(state, request, pReply, false); } static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return RT_Signature(state, request, pReply, true); } @@ -536,7 +536,7 @@ static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, Exp * reference type, or null if it was loaded by the system loader. 
*/ static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::GetClassLoader(refTypeId, pReply); } @@ -546,14 +546,14 @@ static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply) * fields declared by a class. */ static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredFields(refTypeId, true, pReply); } // Obsolete equivalent of FieldsWithGeneric, without the generic type information. static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredFields(refTypeId, false, pReply); } @@ -563,20 +563,20 @@ static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply) * methods declared by a class. */ static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredMethods(refTypeId, true, pReply); } // Obsolete equivalent of MethodsWithGeneric, without the generic type information. static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); return Dbg::OutputDeclaredMethods(refTypeId, false, pReply); } static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); int32_t max_count = request->ReadSigned32("max count"); if (max_count < 0) { @@ -596,7 +596,7 @@ static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply) * Return the immediate superclass of a class. */ static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); RefTypeId superClassId; JdwpError status = Dbg::GetSuperclass(class_id, &superClassId); @@ -611,7 +611,7 @@ static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply) * Set static class values. 
*/ static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); int32_t values_count = request->ReadSigned32("values count"); @@ -641,7 +641,7 @@ static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*) */ static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); ObjectId thread_id = request->ReadThreadId(); MethodId method_id = request->ReadMethodId(); @@ -658,7 +658,7 @@ static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, */ static JdwpError CT_NewInstance(JdwpState* state, Request* request, ExpandBuf* pReply ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); ObjectId thread_id = request->ReadThreadId(); MethodId method_id = request->ReadMethodId(); @@ -675,7 +675,7 @@ static JdwpError CT_NewInstance(JdwpState* state, Request* request, * Create a new array object of the requested type and length. */ static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId arrayTypeId = request->ReadRefTypeId(); int32_t length = request->ReadSigned32("length"); @@ -693,7 +693,7 @@ static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply) * Return line number information for the method, if present. */ static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId refTypeId = request->ReadRefTypeId(); MethodId method_id = request->ReadMethodId(); @@ -704,7 +704,7 @@ static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply) static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply, bool generic) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); MethodId method_id = request->ReadMethodId(); @@ -717,17 +717,17 @@ static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply } static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return M_VariableTable(state, request, pReply, false); } static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return M_VariableTable(state, request, pReply, true); } static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_id = request->ReadRefTypeId(); MethodId method_id = request->ReadMethodId(); @@ -753,7 +753,7 @@ static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply) * passed in here. 
*/ static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::GetReferenceType(object_id, pReply); } @@ -762,7 +762,7 @@ static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pRepl * Get values from the fields of an object. */ static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); int32_t field_count = request->ReadSigned32("field count"); @@ -782,7 +782,7 @@ static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Set values in the fields of an object. */ static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); int32_t field_count = request->ReadSigned32("field count"); @@ -804,7 +804,7 @@ static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*) } static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::GetMonitorInfo(object_id, reply); } @@ -822,7 +822,7 @@ static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply) */ static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, ExpandBuf* pReply ATTRIBUTE_UNUSED) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); ObjectId thread_id = request->ReadThreadId(); RefTypeId class_id = request->ReadRefTypeId(); @@ -832,19 +832,19 @@ static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, } static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::DisableCollection(object_id); } static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); return Dbg::EnableCollection(object_id); } static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); bool is_collected; JdwpError rc = Dbg::IsCollected(object_id, &is_collected); @@ -853,7 +853,7 @@ static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply) } static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId object_id = request->ReadObjectId(); int32_t max_count = request->ReadSigned32("max count"); if (max_count < 0) { @@ -873,7 +873,7 @@ static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* re * Return the string value in a string object. 
*/ static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId stringObject = request->ReadObjectId(); std::string str; JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str); @@ -892,7 +892,7 @@ static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply) * Return a thread's name. */ static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); std::string name; @@ -913,7 +913,7 @@ static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply) * resume it; only the JDI is allowed to resume it. */ static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); if (thread_id == Dbg::GetThreadSelfId()) { @@ -932,7 +932,7 @@ static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*) * Resume the specified thread. */ static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); if (thread_id == Dbg::GetThreadSelfId()) { @@ -948,7 +948,7 @@ static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*) * Return status of specified thread. */ static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); JDWP::JdwpThreadStatus threadStatus; @@ -970,7 +970,7 @@ static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply) * Return the thread group that the specified thread is a member of. */ static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); return Dbg::GetThreadGroup(thread_id, pReply); } @@ -982,7 +982,7 @@ static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply) * be THREAD_NOT_SUSPENDED. */ static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); uint32_t start_frame = request->ReadUnsigned32("start frame"); uint32_t length = request->ReadUnsigned32("length"); @@ -1014,7 +1014,7 @@ static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply) * Returns the #of frames on the specified thread, which must be suspended. 
*/ static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); size_t frame_count; @@ -1028,7 +1028,7 @@ static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply) } static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); std::vector<ObjectId> monitors; @@ -1052,17 +1052,17 @@ static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_ } static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return TR_OwnedMonitors(request, reply, false); } static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return TR_OwnedMonitors(request, reply, true); } static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); ObjectId contended_monitor; @@ -1074,7 +1074,7 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, Expand } static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(reply); ObjectId thread_id = request->ReadThreadId(); return Dbg::Interrupt(thread_id); @@ -1087,7 +1087,7 @@ static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply) * its suspend count recently.) */ static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); return Dbg::GetThreadDebugSuspendCount(thread_id, pReply); } @@ -1098,7 +1098,7 @@ static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* p * The Eclipse debugger recognizes "main" and "system" as special. */ static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_group_id = request->ReadThreadGroupId(); return Dbg::GetThreadGroupName(thread_group_id, pReply); } @@ -1108,7 +1108,7 @@ static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply) * thread group. */ static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_group_id = request->ReadThreadGroupId(); return Dbg::GetThreadGroupParent(thread_group_id, pReply); } @@ -1118,7 +1118,7 @@ static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply) * specified thread group. 
*/ static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_group_id = request->ReadThreadGroupId(); return Dbg::GetThreadGroupChildren(thread_group_id, pReply); } @@ -1127,7 +1127,7 @@ static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply) * Return the #of components in the array. */ static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId array_id = request->ReadArrayId(); int32_t length; @@ -1146,7 +1146,7 @@ static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply) * Return the values from an array. */ static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId array_id = request->ReadArrayId(); uint32_t offset = request->ReadUnsigned32("offset"); uint32_t length = request->ReadUnsigned32("length"); @@ -1157,7 +1157,7 @@ static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Set values in an array. */ static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId array_id = request->ReadArrayId(); uint32_t offset = request->ReadUnsigned32("offset"); uint32_t count = request->ReadUnsigned32("count"); @@ -1165,7 +1165,7 @@ static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*) } static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { request->ReadObjectId(); // classLoaderObject // TODO: we should only return classes which have the given class loader as a defining or // initiating loader. The former would be easy; the latter is hard, because we don't have @@ -1179,7 +1179,7 @@ static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pRe * Reply with a requestID. */ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind"); JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy"); int32_t modifier_count = request->ReadSigned32("modifier count"); @@ -1322,7 +1322,7 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply) } static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { request->ReadEnum1<JdwpEventKind>("event kind"); uint32_t requestId = request->ReadUnsigned32("request id"); @@ -1336,7 +1336,7 @@ static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*) * Return the values of arguments and local variables. */ static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Dbg::GetLocalValues(request, pReply); } @@ -1344,12 +1344,12 @@ static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply) * Set the values of arguments and local variables. 
*/ static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return Dbg::SetLocalValues(request); } static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ObjectId thread_id = request->ReadThreadId(); FrameId frame_id = request->ReadFrameId(); @@ -1370,7 +1370,7 @@ static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply) * that, or I have no idea what this is for.) */ static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { RefTypeId class_object_id = request->ReadRefTypeId(); return Dbg::GetReflectedType(class_object_id, pReply); } @@ -1379,7 +1379,7 @@ static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pRep * Handle a DDM packet with a single chunk in it. */ static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { state->NotifyDdmsActive(); uint8_t* replyBuf = nullptr; int replyLen = -1; @@ -1391,7 +1391,7 @@ static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply // heap requirements is probably more valuable than the efficiency. CHECK_GT(replyLen, 0); memcpy(expandBufAddSpace(pReply, replyLen), replyBuf, replyLen); - free(replyBuf); + delete[] replyBuf; } return ERR_NONE; } diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc index 6bc5e27f85..5a9a0f5006 100644 --- a/runtime/jdwp/jdwp_main.cc +++ b/runtime/jdwp/jdwp_main.cc @@ -248,7 +248,7 @@ JdwpState* JdwpState::Create(const JdwpOptions* options) { case kJdwpTransportSocket: InitSocketTransport(state.get(), options); break; -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ case kJdwpTransportAndroidAdb: InitAdbTransport(state.get(), options); break; @@ -256,12 +256,12 @@ JdwpState* JdwpState::Create(const JdwpOptions* options) { default: LOG(FATAL) << "Unknown transport: " << options->transport; } - { /* * Grab a mutex before starting the thread. This ensures they * won't signal the cond var before we're waiting. */ + state->thread_start_lock_.AssertNotHeld(self); MutexLock thread_start_locker(self, state->thread_start_lock_); /* diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h index d58467d108..29314f6274 100644 --- a/runtime/jdwp/jdwp_priv.h +++ b/runtime/jdwp/jdwp_priv.h @@ -86,8 +86,8 @@ class JdwpNetStateBase { void Close(); - ssize_t WritePacket(ExpandBuf* pReply, size_t length) LOCKS_EXCLUDED(socket_lock_); - ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) LOCKS_EXCLUDED(socket_lock_); + ssize_t WritePacket(ExpandBuf* pReply, size_t length) REQUIRES(!socket_lock_); + ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) REQUIRES(!socket_lock_); Mutex* GetSocketLock() { return &socket_lock_; } diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc index 2b28f7df5a..3fbad36e43 100644 --- a/runtime/jdwp/object_registry.cc +++ b/runtime/jdwp/object_registry.cc @@ -63,13 +63,13 @@ JDWP::ObjectId ObjectRegistry::Add(Handle<T> obj_h) { // Explicit template instantiation. 
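Nearly every hunk in this change is the same mechanical migration: the old lock annotations SHARED_LOCKS_REQUIRED, EXCLUSIVE_LOCKS_REQUIRED, and LOCKS_EXCLUDED become the Clang capability-style SHARED_REQUIRES(...), REQUIRES(...), and REQUIRES(!...). The sketch below shows why the negative form is a real upgrade rather than a plain rename; it assumes Clang's -Wthread-safety attributes, and the macro definitions are illustrative stand-ins, not ART's actual base/macros.h.

// Illustrative stand-ins for the capability macros (Clang only; build with
// -Wthread-safety -Wthread-safety-negative to check negative capabilities).
#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))

struct CAPABILITY("mutex") Mutex {
  void Lock() ACQUIRE();    // an empty argument list means "this mutex"
  void Unlock() RELEASE();
};

Mutex lock_;
int entries_ GUARDED_BY(lock_) = 0;

// LOCKS_EXCLUDED(lock_) merely documented "callers should not hold lock_".
// REQUIRES(!lock_) is a negative capability the analysis can actually verify:
// a caller that might already hold lock_ is rejected at compile time.
void AddEntry() REQUIRES(!lock_) {
  lock_.Lock();
  ++entries_;
  lock_.Unlock();
}

// SHARED_REQUIRES(lock_) replaces SHARED_LOCKS_REQUIRED(lock_): the caller
// must already hold lock_, at least in shared (reader) mode.
int CountEntries() SHARED_REQUIRES(lock_) { return entries_; }

The rename strengthens static analysis only; no runtime behavior changes in these hunks.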
template -SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) -LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) +SHARED_REQUIRES(Locks::mutator_lock_) +REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h); template -SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) -LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) +SHARED_REQUIRES(Locks::mutator_lock_) +REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h); template<class T> diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h index 4c149cdac7..17490f4db5 100644 --- a/runtime/jdwp/object_registry.h +++ b/runtime/jdwp/object_registry.h @@ -63,28 +63,24 @@ class ObjectRegistry { ObjectRegistry(); JDWP::ObjectId Add(mirror::Object* o) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); JDWP::RefTypeId AddRefType(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); template<class T> JDWP::ObjectId Add(Handle<T> obj_h) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_); template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) { if (id == 0) { *error = JDWP::ERR_NONE; return nullptr; @@ -92,47 +88,42 @@ class ObjectRegistry { return down_cast<T>(InternalGet(id, error)); } - void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Clear() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void DisableCollection(JDWP::ObjectId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void EnableCollection(JDWP::ObjectId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); bool IsCollected(JDWP::ObjectId id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void DisposeObject(JDWP::ObjectId id, uint32_t reference_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); // This is needed to get the jobject instead of the Object*. // Avoid using this and use standard Get when possible. 
- jobject GetJObject(JDWP::ObjectId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject GetJObject(JDWP::ObjectId id) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); private: template<class T> JDWP::ObjectId InternalAdd(Handle<T> obj_h) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void Demote(ObjectRegistryEntry& entry) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_); void Promote(ObjectRegistryEntry& entry) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_); bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code, ObjectRegistryEntry** out_entry) - EXCLUSIVE_LOCKS_REQUIRED(lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_); Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_); diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index bc9545b5a8..26a4fe49f1 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -19,6 +19,7 @@ #include <dlfcn.h> #include "art_method-inl.h" +#include "debugger.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "interpreter/interpreter.h" #include "jit_code_cache.h" @@ -132,11 +133,7 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self) { VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to breakpoint"; return false; } - const bool result = jit_compile_method_(jit_compiler_handle_, method, self); - if (result) { - method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge); - } - return result; + return jit_compile_method_(jit_compiler_handle_, method, self); } void Jit::CreateThreadPool() { diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index dbd8977d91..ca6e7ea1f8 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -48,7 +48,7 @@ class Jit { virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); bool CompileMethod(ArtMethod* method, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CreateInstrumentationCache(size_t compile_threshold); void CreateThreadPool(); CompilerCallbacks* GetCompilerCallbacks() { diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index c1ea921834..9707f6f29d 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -78,27 +78,27 @@ class JitCodeCache { // Return true if the code cache contains the code pointer which is the entrypoint of the method. bool ContainsMethod(ArtMethod* method) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return true if the code cache contains a code ptr. bool ContainsCodePtr(const void* ptr) const; // Reserve a region of code of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveCode(Thread* self, size_t size) LOCKS_EXCLUDED(lock_); + uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_); // Add a data array of size (end - begin) with the associated contents, returns null if there // is no more room. uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) - LOCKS_EXCLUDED(lock_); + REQUIRES(!lock_); // Get code for a method, returns null if it is not in the jit cache. const void* GetCodeFor(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the // entrypoint isn't within the cache. void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); private: // Takes ownership of code_mem_map. diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc index cd123b97d9..a6cbb710af 100644 --- a/runtime/jit/jit_code_cache_test.cc +++ b/runtime/jit/jit_code_cache_test.cc @@ -50,7 +50,7 @@ TEST_F(JitCodeCacheTest, TestCoverage) { ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code)); ASSERT_EQ(code_cache->NumMethods(), 1u); ClassLinker* const cl = Runtime::Current()->GetClassLinker(); - auto* method = cl->AllocArtMethodArray(soa.Self(), 1); + ArtMethod* method = &cl->AllocArtMethodArray(soa.Self(), 1)->At(0); ASSERT_FALSE(code_cache->ContainsMethod(method)); method->SetEntryPointFromQuickCompiledCode(reserved_code); ASSERT_TRUE(code_cache->ContainsMethod(method)); diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc index 1e56cdca30..258c29dd20 100644 --- a/runtime/jit/jit_instrumentation.cc +++ b/runtime/jit/jit_instrumentation.cc @@ -26,7 +26,7 @@ namespace jit { class JitCompileTask : public Task { public: - explicit JitCompileTask(ArtMethod* method, JitInstrumentationCache* cache) + JitCompileTask(ArtMethod* method, JitInstrumentationCache* cache) : method_(method), cache_(cache) { } diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h index 27894eb6c2..0deaf8ad02 100644 --- a/runtime/jit/jit_instrumentation.h +++ b/runtime/jit/jit_instrumentation.h @@ -47,9 +47,9 @@ class JitInstrumentationCache { public: explicit JitInstrumentationCache(size_t hot_method_threshold); void AddSamples(Thread* self, ArtMethod* method, size_t samples) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void SignalCompiled(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); void CreateThreadPool(); void DeleteThreadPool(); @@ -68,7 +68,7 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen virtual void MethodEntered(Thread* thread, mirror::Object* /*this_object*/, ArtMethod* method, uint32_t /*dex_pc*/) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { instrumentation_cache_->AddSamples(thread, method, 1); } virtual void MethodExited(Thread* /*thread*/, mirror::Object* /*this_object*/, @@ -92,7 +92,7 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen // We only care about how many dex instructions were executed in the Jit. 
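For context on the instrumentation hunks here: MethodEntered and BackwardBranch both funnel into JitInstrumentationCache::AddSamples, and a method that crosses the hot_method_threshold is handed off to the compile thread pool as a JitCompileTask. A simplified sketch of that hotness-counting scheme, with hypothetical names rather than ART's actual members:

#include <cstddef>
#include <mutex>
#include <unordered_map>

// Hypothetical hotness counter mirroring the AddSamples flow described above.
class HotnessCache {
 public:
  explicit HotnessCache(size_t threshold) : threshold_(threshold) {}

  // Called on every method entry and backward branch; a method is queued for
  // compilation exactly once, when its count first crosses the threshold.
  void AddSamples(const void* method, size_t samples) {
    std::lock_guard<std::mutex> guard(lock_);
    size_t& count = samples_[method];
    if (count >= threshold_) {
      return;  // already hot; a compile task was queued earlier
    }
    count += samples;
    if (count >= threshold_) {
      EnqueueCompileTask(method);  // hand off to a background thread pool
    }
  }

 private:
  void EnqueueCompileTask(const void* /*method*/) { /* thread-pool hand-off */ }

  std::mutex lock_;
  std::unordered_map<const void*, size_t> samples_;
  const size_t threshold_;
};

The REQUIRES(!lock_) added to AddSamples and SignalCompiled in this diff matches that shape: the cache takes its own lock internally, so callers must not already hold it.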
virtual void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { CHECK_LE(dex_pc_offset, 0); instrumentation_cache_->AddSamples(thread, method, 1); } diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc index 84fc404b46..b18b430403 100644 --- a/runtime/jni_env_ext.cc +++ b/runtime/jni_env_ext.cc @@ -63,14 +63,14 @@ JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in) JNIEnvExt::~JNIEnvExt() { } -jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { if (obj == nullptr) { return nullptr; } return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj)); } -void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) { if (obj != nullptr) { locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj)); } @@ -86,14 +86,14 @@ void JNIEnvExt::DumpReferenceTables(std::ostream& os) { monitors.Dump(os); } -void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void JNIEnvExt::PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(capacity); // cpplint gets confused with (int) and thinks it's a cast. // TODO: take 'capacity' into account. stacked_local_ref_cookies.push_back(local_ref_cookie); local_ref_cookie = locals.GetSegmentState(); } -void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void JNIEnvExt::PopFrame() SHARED_REQUIRES(Locks::mutator_lock_) { locals.SetSegmentState(local_ref_cookie); local_ref_cookie = stacked_local_ref_cookies.back(); stacked_local_ref_cookies.pop_back(); diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h index 29d912cb01..9b55536e98 100644 --- a/runtime/jni_env_ext.h +++ b/runtime/jni_env_ext.h @@ -39,7 +39,7 @@ struct JNIEnvExt : public JNIEnv { ~JNIEnvExt(); void DumpReferenceTables(std::ostream& os) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetCheckJniEnabled(bool enabled); @@ -48,7 +48,7 @@ struct JNIEnvExt : public JNIEnv { template<typename T> T AddLocalReference(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static Offset SegmentStateOffset(); @@ -60,8 +60,8 @@ struct JNIEnvExt : public JNIEnv { return Offset(OFFSETOF_MEMBER(JNIEnvExt, self)); } - jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); + void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_); Thread* const self; JavaVMExt* const vm; diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index cc176b7c71..6a716b5e0d 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -89,7 +89,7 @@ static std::string NormalizeJniClassDescriptor(const char* name) { static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c, const char* name, const char* sig, const char* kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string temp; soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;", "no %s method \"%s.%s%s\"", @@ -98,7 +98,7
@@ static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c, static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c, const char* kind, jint idx, bool return_errors) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in " << PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8() << ": " << kind << " is null at index " << idx; @@ -107,7 +107,7 @@ static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror:: } static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (LIKELY(klass->IsInitialized())) { return klass; } @@ -121,7 +121,7 @@ static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass) static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class)); if (c == nullptr) { return nullptr; @@ -148,7 +148,7 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, } static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr); // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set. if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) { @@ -179,7 +179,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa) static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name, const char* sig, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<2> hs(soa.Self()); Handle<mirror::Class> c( hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class)))); @@ -227,7 +227,7 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start, jsize length, const char* identifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string type(PrettyTypeOf(array)); soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;", "%s offset=%d length=%d %s.length=%d", @@ -236,14 +236,14 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize sta static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length, jsize array_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;", "offset=%d length=%d string.length()=%d", start, length, array_length); } int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause) - LOCKS_EXCLUDED(Locks::mutator_lock_) { + REQUIRES(!Locks::mutator_lock_) { // Turn the const char* into a java.lang.String. 
ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg)); if (msg != nullptr && s.get() == nullptr) { @@ -314,7 +314,7 @@ static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) { template <bool kNative> static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); for (auto& method : c->GetDirectMethods(pointer_size)) { if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) { @@ -2321,7 +2321,7 @@ class JNI { private: static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity, const char* caller) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO: we should try to expand the table if necessary. if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) { LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity; @@ -2350,7 +2350,7 @@ class JNI { template <typename JArrayT, typename ElementT, typename ArtArrayT> static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array, const char* fn_name, const char* operation) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array); if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) { soa.Vm()->JniAbortF(fn_name, @@ -2407,7 +2407,7 @@ class JNI { static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array, size_t component_size, void* elements, jint mode) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { void* array_data = array->GetRawData(component_size, 0); gc::Heap* heap = Runtime::Current()->GetHeap(); bool is_copy = array_data != elements; diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc new file mode 100644 index 0000000000..64a6076aea --- /dev/null +++ b/runtime/lambda/box_table.cc @@ -0,0 +1,220 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "lambda/box_table.h" + +#include "base/mutex.h" +#include "common_throws.h" +#include "gc_root-inl.h" +#include "mirror/method.h" +#include "mirror/object-inl.h" +#include "thread.h" + +#include <vector> + +namespace art { +namespace lambda { + +BoxTable::BoxTable() + : allow_new_weaks_(true), + new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {} + +mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) { + Thread* self = Thread::Current(); + + { + // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes + /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_); + BlockUntilWeaksAllowed(); + + // Attempt to look up this object, it's possible it was already boxed previously. 
+ // If this is the case we *must* return the same object as before to maintain + // referential equality. + // + // In managed code: + // Functional f = () -> 5; // vF = create-lambda + // Object a = f; // vA = box-lambda vF + // Object b = f; // vB = box-lambda vF + // assert(a == b) + ValueType value = FindBoxedLambda(closure); + if (!value.IsNull()) { + return value.Read(); + } + + // Otherwise we need to box it ourselves and insert it into the hash map + } + + // Release the lambda table lock here, so that thread suspension is allowed. + + // Convert the ArtMethod into a java.lang.reflect.Method which will serve + // as the temporary 'boxed' version of the lambda. This is good enough + // to check all the basic object identities that a boxed lambda must retain. + + // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class + // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object + mirror::Method* method_as_object = + mirror::Method::CreateFromArtMethod(self, closure); + // There are no thread suspension points after this, so we don't need to put it into a handle. + + if (UNLIKELY(method_as_object == nullptr)) { + // Most likely an OOM has occurred. + CHECK(self->IsExceptionPending()); + return nullptr; + } + + // The method has been successfully boxed into an object; now insert it into the hash map. + { + MutexLock mu(self, *Locks::lambda_table_lock_); + BlockUntilWeaksAllowed(); + + // Look up the object again; it's possible another thread boxed it while + // we were allocating the object before. + ValueType value = FindBoxedLambda(closure); + if (UNLIKELY(!value.IsNull())) { + // Let the GC clean up method_as_object at a later time. + return value.Read(); + } + + // Otherwise we should insert it into the hash map in this thread. + map_.Insert(std::make_pair(closure, ValueType(method_as_object))); + } + + return method_as_object; +} + +bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) { + DCHECK(object != nullptr); + *out_closure = nullptr; + + // Note that we do not need to access lambda_table_lock_ here + // since we don't need to look at the map. + + mirror::Object* boxed_closure_object = object; + + // Raise ClassCastException if object is not instanceof java.lang.reflect.Method + if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) { + ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass()); + return false; + } + + // TODO(iam): We must check that the closure object extends/implements the type + // specified in [type id]. This is not currently implemented since it's always a Method. + + // If we got this far, the inputs are valid. + // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target. + mirror::AbstractMethod* boxed_closure_as_method = + down_cast<mirror::AbstractMethod*>(boxed_closure_object); + + ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod(); + DCHECK(unboxed_closure != nullptr); + + *out_closure = unboxed_closure; + return true; +} + +BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const { + auto map_iterator = map_.Find(closure); + if (map_iterator != map_.end()) { + const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator; + const ValueType& value = key_value_pair.second; + + DCHECK(!value.IsNull()); // Never store null boxes.
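The BoxLambda body above is a check/allocate/re-check pattern: look up under the table lock, drop the lock before allocating (allocation can trigger GC and thread suspension, which must not happen while the table lock is held), then re-check before inserting so that racing threads still agree on one canonical box. A generic sketch of the same idiom, using hypothetical names rather than ART types:

#include <map>
#include <memory>
#include <mutex>

struct Box { int id; };

class Table {
 public:
  std::shared_ptr<Box> GetOrCreate(int key) {
    {
      std::lock_guard<std::mutex> lock(mu_);  // phase 1: fast-path lookup
      auto it = map_.find(key);
      if (it != map_.end()) {
        return it->second;
      }
    }
    // Allocate with the lock dropped; in ART this is where a GC-induced
    // suspension may occur, so the table lock must not be held.
    auto fresh = std::make_shared<Box>(Box{key});

    std::lock_guard<std::mutex> lock(mu_);    // phase 2: re-check, then insert
    auto it = map_.find(key);
    if (it != map_.end()) {
      return it->second;  // another thread won the race; drop our allocation
    }
    map_.emplace(key, fresh);
    return fresh;
  }

 private:
  std::mutex mu_;
  std::map<int, std::shared_ptr<Box>> map_;
};

Returning the winner's entry on the re-check is what preserves referential equality; the loser's freshly allocated box simply becomes garbage (in BoxLambda, the GC reclaims the losing method_as_object).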
+ return value; + } + + return ValueType(nullptr); +} + +void BoxTable::BlockUntilWeaksAllowed() { + Thread* self = Thread::Current(); + while (UNLIKELY(allow_new_weaks_ == false)) { + new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock + } +} + +void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) { + DCHECK(visitor != nullptr); + + Thread* self = Thread::Current(); + MutexLock mu(self, *Locks::lambda_table_lock_); + + /* + * Visit every weak root in our lambda box table. + * Remove unmarked objects, update marked objects to new address. + */ + std::vector<ClosureType> remove_list; + for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) { + std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator; + + const ValueType& old_value = key_value_pair.second; + + // This does not need a read barrier because this is called by GC. + mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>(); + mirror::Object* new_value = visitor->IsMarked(old_value_raw); + + if (new_value == nullptr) { + const ClosureType& closure = key_value_pair.first; + // The object has been swept away. + // Delete the entry from the map. + map_iterator = map_.Erase(map_.Find(closure)); + } else { + // The object has been moved. + // Update the map. + key_value_pair.second = ValueType(new_value); + ++map_iterator; + } + } + + // Occasionally shrink the map to avoid growing very large. + if (map_.CalculateLoadFactor() < kMinimumLoadFactor) { + map_.ShrinkToMaximumLoad(); + } +} + +void BoxTable::DisallowNewWeakBoxedLambdas() { + Thread* self = Thread::Current(); + MutexLock mu(self, *Locks::lambda_table_lock_); + + allow_new_weaks_ = false; +} + +void BoxTable::AllowNewWeakBoxedLambdas() { + Thread* self = Thread::Current(); + MutexLock mu(self, *Locks::lambda_table_lock_); + + allow_new_weaks_ = true; + new_weaks_condition_.Broadcast(self); +} + +void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() { + Thread* self = Thread::Current(); + MutexLock mu(self, *Locks::lambda_table_lock_); + CHECK_EQ(allow_new_weaks_, false); +} + +bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const { + // Nothing needs this right now, but leave this assertion for later when + // we need to look at the references inside of the closure. + if (kIsDebugBuild) { + Locks::mutator_lock_->AssertSharedHeld(Thread::Current()); + } + + // TODO: Need rework to use read barriers once closures have references inside of them that can + // move. Until then, it's safe to just compare the data inside of it directly. + return lhs == rhs; +} + +} // namespace lambda +} // namespace art diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h new file mode 100644 index 0000000000..312d811b9b --- /dev/null +++ b/runtime/lambda/box_table.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_ +#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_ + +#include "base/allocator.h" +#include "base/hash_map.h" +#include "gc_root.h" +#include "base/macros.h" +#include "base/mutex.h" +#include "object_callbacks.h" + +#include <stdint.h> + +namespace art { + +class ArtMethod; // forward declaration + +namespace mirror { +class Object; // forward declaration +} // namespace mirror + +namespace lambda { + +/* + * Store a table of boxed lambdas. This is required to maintain object referential equality + * when a lambda is re-boxed. + * + * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>. + * When too many objects get GCd, we shrink the underlying table to use less space. + */ +class BoxTable FINAL { + public: + using ClosureType = art::ArtMethod*; + + // Boxes a closure into an object. Returns null and throws an exception on failure. + mirror::Object* BoxLambda(const ClosureType& closure) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_); + + // Unboxes an object back into the lambda. Returns false and throws an exception on failure. + bool UnboxLambda(mirror::Object* object, ClosureType* out_closure) + SHARED_REQUIRES(Locks::mutator_lock_); + + // Sweep weak references to lambda boxes. Update the addresses if the objects have been + // moved, and delete them from the table if the objects have been cleaned up. + void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_); + + // GC callback: Temporarily block anyone from touching the map. + void DisallowNewWeakBoxedLambdas() + REQUIRES(!Locks::lambda_table_lock_); + + // GC callback: Unblock any readers who have been queued waiting to touch the map. + void AllowNewWeakBoxedLambdas() + REQUIRES(!Locks::lambda_table_lock_); + + // GC callback: Verify that the state is now blocking anyone from touching the map. + void EnsureNewWeakBoxedLambdasDisallowed() + REQUIRES(!Locks::lambda_table_lock_); + + BoxTable(); + ~BoxTable() = default; + + private: + // FIXME: This needs to be a GcRoot. + // Explanation: + // - After all threads are suspended (exclusive mutator lock), + // the concurrent-copying GC can move objects from the "from" space to the "to" space. + // If an object is moved at that time and *before* SweepSystemWeaks is called then + // we don't know if the move has happened yet. + // Successive reads will then (incorrectly) look at the objects in the "from" space, + // which is a problem since the objects have been already forwarded and mutations + // would not be visible in the right space. + // Instead, use a GcRoot here which will be automatically updated by the GC. + // + // Also, any reads should be protected by a read barrier to always give us the "to" space address. + using ValueType = GcRoot<mirror::Object>; + + // Attempt to look up the lambda in the map, or return null if it's not there yet. + ValueType FindBoxedLambda(const ClosureType& closure) const + SHARED_REQUIRES(Locks::lambda_table_lock_); + + // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
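The allow_new_weaks_ flag and new_weaks_condition_ declared in this header form a gate that the GC closes around its weak-sweeping phase; BoxLambda blocks on the condition variable until the gate reopens. A portable sketch of that protocol, assuming only std::mutex and std::condition_variable (the real code also keeps the mutator lock held while waiting):

#include <condition_variable>
#include <mutex>

class WeakTableGate {
 public:
  void Disallow() {  // GC: close the gate before sweeping weaks.
    std::lock_guard<std::mutex> lock(mu_);
    allowed_ = false;
  }
  void Allow() {     // GC: reopen the gate and wake queued readers.
    {
      std::lock_guard<std::mutex> lock(mu_);
      allowed_ = true;
    }
    cv_.notify_all();  // Mirrors new_weaks_condition_.Broadcast(self).
  }
  template <typename Fn>
  auto ReadWhenAllowed(Fn fn) {  // Mutator: block, then touch the table.
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return allowed_; });  // BlockUntilWeaksAllowed().
    return fn();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool allowed_ = true;
};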
+ void BlockUntilWeaksAllowed() + SHARED_REQUIRES(Locks::lambda_table_lock_); + + // EmptyFn implementation for art::HashMap + struct EmptyFn { + void MakeEmpty(std::pair<ClosureType, ValueType>& item) const { + item.first = nullptr; + } + bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const { + return item.first == nullptr; + } + }; + + // HashFn implementation for art::HashMap + struct HashFn { + size_t operator()(const ClosureType& key) const { + // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod* + return static_cast<size_t>(reinterpret_cast<uintptr_t>(key)); + } + }; + + // EqualsFn implementation for art::HashMap + struct EqualsFn { + bool operator()(const ClosureType& lhs, const ClosureType& rhs) const; + }; + + using UnorderedMap = art::HashMap<ClosureType, + ValueType, + EmptyFn, + HashFn, + EqualsFn, + TrackingAllocator<std::pair<ClosureType, ValueType>, + kAllocatorTagLambdaBoxTable>>; + + UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_); + bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_); + ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_); + + // Shrink the map when we get below this load factor. + // (This is an arbitrary value that should be large enough to prevent aggressive map erases + // from shrinking the table too often.) + static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2; + + DISALLOW_COPY_AND_ASSIGN(BoxTable); +}; + +} // namespace lambda +} // namespace art + +#endif // ART_RUNTIME_LAMBDA_BOX_TABLE_H_ diff --git a/runtime/leb128.h b/runtime/leb128.h index 14683d4063..976936d639 100644 --- a/runtime/leb128.h +++ b/runtime/leb128.h @@ -101,7 +101,7 @@ static inline int32_t DecodeSignedLeb128(const uint8_t** data) { static inline uint32_t UnsignedLeb128Size(uint32_t data) { // bits_to_encode = (data != 0) ? 32 - CLZ(x) : 1 // 32 - CLZ(data | 1) // bytes = ceil(bits_to_encode / 7.0); // (6 + bits_to_encode) / 7 - uint32_t x = 6 + 32 - CLZ(data | 1); + uint32_t x = 6 + 32 - CLZ(data | 1U); // Division by 7 is done by (x * 37) >> 8 where 37 = ceil(256 / 7). // This works for 0 <= x < 256 / (7 * 37 - 256), i.e. 0 <= x <= 85. return (x * 37) >> 8; @@ -111,7 +111,7 @@ static inline uint32_t UnsignedLeb128Size(uint32_t data) { static inline uint32_t SignedLeb128Size(int32_t data) { // Like UnsignedLeb128Size(), but we need one bit beyond the highest bit that differs from sign. data = data ^ (data >> 31); - uint32_t x = 1 /* we need to encode the sign bit */ + 6 + 32 - CLZ(data | 1); + uint32_t x = 1 /* we need to encode the sign bit */ + 6 + 32 - CLZ(data | 1U); return (x * 37) >> 8; } diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h new file mode 100644 index 0000000000..d9bc656673 --- /dev/null +++ b/runtime/length_prefixed_array.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
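Separately, the leb128.h hunk above changes CLZ(data | 1) to CLZ(data | 1U) to keep the operand unsigned; the surrounding math is worth unpacking. Each LEB128 byte carries 7 payload bits, so the encoded size is ceil(bits/7), and the division by 7 is done with the multiply-shift (x * 37) >> 8, which the comment notes is exact for 0 <= x <= 85. A small self-check against a naive byte counter, assuming a GCC/Clang-style __builtin_clz:

#include <cassert>
#include <cstdint>

static uint32_t FastLeb128Size(uint32_t data) {
  // bits_to_encode = 32 - CLZ(data | 1); bytes = (6 + bits_to_encode) / 7.
  uint32_t x = 6 + 32 - static_cast<uint32_t>(__builtin_clz(data | 1U));
  return (x * 37) >> 8;  // x / 7, exact for 0 <= x <= 85.
}

static uint32_t NaiveLeb128Size(uint32_t data) {
  uint32_t bytes = 0;
  do {
    data >>= 7;  // Each LEB128 byte carries 7 payload bits.
    ++bytes;
  } while (data != 0);
  return bytes;
}

int main() {
  for (uint64_t v = 0; v <= 0xFFFFFFFFull; v += 0x10001ull) {  // sparse sweep
    uint32_t u = static_cast<uint32_t>(v);
    assert(FastLeb128Size(u) == NaiveLeb128Size(u));
  }
  assert(FastLeb128Size(0) == 1 && FastLeb128Size(0x7F) == 1);
  assert(FastLeb128Size(0x80) == 2 && FastLeb128Size(0xFFFFFFFFu) == 5);
  return 0;
}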
+ */ + +#ifndef ART_RUNTIME_LENGTH_PREFIXED_ARRAY_H_ +#define ART_RUNTIME_LENGTH_PREFIXED_ARRAY_H_ + +#include <stddef.h> // for offsetof() + +#include "linear_alloc.h" +#include "stride_iterator.h" +#include "base/bit_utils.h" +#include "base/casts.h" +#include "base/iteration_range.h" + +namespace art { + +template<typename T> +class LengthPrefixedArray { + public: + explicit LengthPrefixedArray(size_t length) + : length_(dchecked_integral_cast<uint32_t>(length)) {} + + T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) { + DCHECK_LT(index, length_); + return AtUnchecked(index, element_size, alignment); + } + + StrideIterator<T> Begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) { + return StrideIterator<T>(&AtUnchecked(0, element_size, alignment), element_size); + } + + StrideIterator<T> End(size_t element_size = sizeof(T), size_t alignment = alignof(T)) { + return StrideIterator<T>(&AtUnchecked(length_, element_size, alignment), element_size); + } + + static size_t OffsetOfElement(size_t index, + size_t element_size = sizeof(T), + size_t alignment = alignof(T)) { + DCHECK_ALIGNED_PARAM(element_size, alignment); + return RoundUp(offsetof(LengthPrefixedArray<T>, data), alignment) + index * element_size; + } + + static size_t ComputeSize(size_t num_elements, + size_t element_size = sizeof(T), + size_t alignment = alignof(T)) { + size_t result = OffsetOfElement(num_elements, element_size, alignment); + DCHECK_ALIGNED_PARAM(result, alignment); + return result; + } + + uint64_t Length() const { + return length_; + } + + // Updates the length but does not reallocate storage. + void SetLength(size_t length) { + length_ = dchecked_integral_cast<uint32_t>(length); + } + + private: + T& AtUnchecked(size_t index, size_t element_size, size_t alignment) { + return *reinterpret_cast<T*>( + reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment)); + } + + uint32_t length_; + uint8_t data[0]; +}; + +// Returns empty iteration range if the array is null. +template<typename T> +IterationRange<StrideIterator<T>> MakeIterationRangeFromLengthPrefixedArray( + LengthPrefixedArray<T>* arr, size_t element_size = sizeof(T), size_t alignment = alignof(T)) { + return arr != nullptr ? + MakeIterationRange(arr->Begin(element_size, alignment), arr->End(element_size, alignment)) : + MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0)); +} + +} // namespace art + +#endif // ART_RUNTIME_LENGTH_PREFIXED_ARRAY_H_ diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h index c10ddfda9f..1b21527317 100644 --- a/runtime/linear_alloc.h +++ b/runtime/linear_alloc.h @@ -28,24 +28,24 @@ class LinearAlloc { public: explicit LinearAlloc(ArenaPool* pool); - void* Alloc(Thread* self, size_t size) LOCKS_EXCLUDED(lock_); + void* Alloc(Thread* self, size_t size) REQUIRES(!lock_); // Realloc never frees the input pointer, it is the caller's job to do this if necessary. - void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) LOCKS_EXCLUDED(lock_); + void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) REQUIRES(!lock_); - // Allocate and construct an array of structs of type T. + // Allocate an array of structs of type T. template<class T> - T* AllocArray(Thread* self, size_t elements) { + T* AllocArray(Thread* self, size_t elements) REQUIRES(!lock_) { return reinterpret_cast<T*>(Alloc(self, elements * sizeof(T))); } // Return the number of bytes used in the allocator.
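LengthPrefixedArray, added above, is a 32-bit length followed by elements laid out at a caller-supplied stride and alignment; ComputeSize() and OffsetOfElement() do the layout math. A hypothetical usage sketch, assuming the header above (and its base/ dependencies) is on the include path, with malloc standing in for ART's LinearAlloc:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <new>

int main() {
  constexpr size_t n = 4;
  // Header size rounded up to alignof(uint32_t), plus n elements.
  const size_t bytes = art::LengthPrefixedArray<uint32_t>::ComputeSize(n);
  void* storage = std::malloc(bytes);
  auto* arr = new (storage) art::LengthPrefixedArray<uint32_t>(n);
  for (size_t i = 0; i < n; ++i) {
    arr->At(i) = static_cast<uint32_t>(i) * 10;  // At() skips the length header
  }
  // Stride-based iteration, as the runtime does for ArtField/ArtMethod arrays:
  for (uint32_t& v : art::MakeIterationRangeFromLengthPrefixedArray(arr)) {
    std::printf("%u\n", v);
  }
  std::free(storage);
  return 0;
}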
- size_t GetUsedMemory() const LOCKS_EXCLUDED(lock_); + size_t GetUsedMemory() const REQUIRES(!lock_); - ArenaPool* GetArenaPool() LOCKS_EXCLUDED(lock_); + ArenaPool* GetArenaPool() REQUIRES(!lock_); // Return true if the linear alloc contains an address. - bool Contains(void* ptr) const; + bool Contains(void* ptr) const REQUIRES(!lock_); private: mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/lock_word.h b/runtime/lock_word.h index a290575bac..5d0d204636 100644 --- a/runtime/lock_word.h +++ b/runtime/lock_word.h @@ -118,7 +118,7 @@ class LockWord { } static LockWord FromForwardingAddress(size_t target) { - DCHECK(IsAligned < 1 << kStateSize>(target)); + DCHECK_ALIGNED(target, (1 << kStateSize)); return LockWord((target >> kStateSize) | (kStateForwardingAddress << kStateShift)); } @@ -197,7 +197,7 @@ class LockWord { size_t ForwardingAddress() const; // Construct a lock word for inflation to use a Monitor. - explicit LockWord(Monitor* mon, uint32_t rb_state); + LockWord(Monitor* mon, uint32_t rb_state); // Return the hash code stored in the lock word, must be kHashCode state. int32_t GetHashCode() const; diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 7e640c65f1..d9ad7dc0c2 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -16,6 +16,7 @@ #include "mem_map.h" +#include "base/memory_tool.h" #include <backtrace/BacktraceMap.h> #include <inttypes.h> @@ -279,7 +280,7 @@ MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byt ScopedFd fd(-1); #ifdef USE_ASHMEM -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ const bool use_ashmem = true; #else // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't @@ -481,6 +482,12 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int p uint8_t* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset); + size_t redzone_size = 0; + if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) { + redzone_size = kPageSize; + page_aligned_byte_count += redzone_size; + } + uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(page_aligned_expected, page_aligned_byte_count, prot, @@ -503,15 +510,35 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr, size_t byte_count, int p if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) { return nullptr; } + if (redzone_size != 0) { + const uint8_t *real_start = actual + page_offset; + const uint8_t *real_end = actual + page_offset + byte_count; + const uint8_t *mapping_end = actual + page_aligned_byte_count; + + MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual); + MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end); + page_aligned_byte_count -= redzone_size; + } + return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count, - prot, reuse); + prot, reuse, redzone_size); } MemMap::~MemMap() { if (base_begin_ == nullptr && base_size_ == 0) { return; } + + // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned + // before it is returned to the system.
+ if (redzone_size_ != 0) { + MEMORY_TOOL_MAKE_UNDEFINED( + reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_, + redzone_size_); + } + if (!reuse_) { + MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_); int result = munmap(base_begin_, base_size_); if (result == -1) { PLOG(FATAL) << "munmap failed"; @@ -534,9 +561,9 @@ MemMap::~MemMap() { } MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, - size_t base_size, int prot, bool reuse) + size_t base_size, int prot, bool reuse, size_t redzone_size) : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size), - prot_(prot), reuse_(reuse) { + prot_(prot), reuse_(reuse), redzone_size_(redzone_size) { if (size_ == 0) { CHECK(begin_ == nullptr); CHECK(base_begin_ == nullptr); @@ -558,10 +585,10 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro DCHECK_GE(new_end, Begin()); DCHECK_LE(new_end, End()); DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_); - DCHECK(IsAligned<kPageSize>(begin_)); - DCHECK(IsAligned<kPageSize>(base_begin_)); - DCHECK(IsAligned<kPageSize>(reinterpret_cast<uint8_t*>(base_begin_) + base_size_)); - DCHECK(IsAligned<kPageSize>(new_end)); + DCHECK_ALIGNED(begin_, kPageSize); + DCHECK_ALIGNED(base_begin_, kPageSize); + DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize); + DCHECK_ALIGNED(new_end, kPageSize); uint8_t* old_end = begin_ + size_; uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_; uint8_t* new_base_end = new_end; @@ -576,7 +603,7 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro uint8_t* tail_base_begin = new_base_end; size_t tail_base_size = old_base_end - new_base_end; DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end); - DCHECK(IsAligned<kPageSize>(tail_base_size)); + DCHECK_ALIGNED(tail_base_size, kPageSize); #ifdef USE_ASHMEM // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are @@ -595,6 +622,8 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro int flags = MAP_PRIVATE | MAP_ANONYMOUS; #endif + + MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size); // Unmap/map the tail region. 
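The MEMORY_TOOL_MAKE_NOACCESS / MEMORY_TOOL_MAKE_UNDEFINED calls above wrap AddressSanitizer's manual poisoning interface, and the destructor comment explains the asymmetry with Valgrind: poisoned bytes must be unpoisoned before the pages go back to the kernel. A standalone sketch of the same redzone discipline, using the public ASan interface directly (the macros fall back to no-ops in a non-ASan build):

#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if __has_feature(address_sanitizer)
#include <sanitizer/asan_interface.h>
#else
#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)0)
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)0)
#endif

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t payload = 4 * page;      // bytes the caller may touch
  const size_t total = payload + page;  // plus one trailing redzone page
  void* base = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    return 1;
  }
  char* redzone = static_cast<char*>(base) + payload;
  ASAN_POISON_MEMORY_REGION(redzone, page);    // any access now trips ASan
  // ... hand out [base, base + payload) to callers ...
  ASAN_UNPOISON_MEMORY_REGION(redzone, page);  // must precede munmap under ASan
  return munmap(base, total);
}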
int result = munmap(tail_base_begin, tail_base_size); if (result == -1) { @@ -697,7 +726,7 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) { size_t num_gaps = 0; size_t num = 1u; size_t size = map->BaseSize(); - CHECK(IsAligned<kPageSize>(size)); + CHECK_ALIGNED(size, kPageSize); void* end = map->BaseEnd(); while (it != maps_end && it->second->GetProtect() == map->GetProtect() && @@ -711,12 +740,12 @@ void MemMap::DumpMapsLocked(std::ostream& os, bool terse) { } size_t gap = reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end); - CHECK(IsAligned<kPageSize>(gap)); + CHECK_ALIGNED(gap, kPageSize); os << "~0x" << std::hex << (gap / kPageSize) << "P"; num = 0u; size = 0u; } - CHECK(IsAligned<kPageSize>(it->second->BaseSize())); + CHECK_ALIGNED(it->second->BaseSize(), kPageSize); ++num; size += it->second->BaseSize(); end = it->second->BaseEnd(); @@ -778,6 +807,10 @@ void MemMap::SetSize(size_t new_size) { CHECK_ALIGNED(new_size, kPageSize); CHECK_EQ(base_size_, size_) << "Unsupported"; CHECK_LE(new_size, base_size_); + MEMORY_TOOL_MAKE_UNDEFINED( + reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + + new_size), + base_size_ - new_size); CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size), base_size_ - new_size), 0) << new_size << " " << base_size_; base_size_ = new_size; diff --git a/runtime/mem_map.h b/runtime/mem_map.h index 14387eed28..196a7f6292 100644 --- a/runtime/mem_map.h +++ b/runtime/mem_map.h @@ -92,7 +92,7 @@ class MemMap { std::string* error_msg); // Releases the memory mapping. - ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_); + ~MemMap() REQUIRES(!Locks::mem_maps_lock_); const std::string& GetName() const { return name_; @@ -142,25 +142,25 @@ class MemMap { std::string* error_msg); static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map) - LOCKS_EXCLUDED(Locks::mem_maps_lock_); + REQUIRES(!Locks::mem_maps_lock_); static void DumpMaps(std::ostream& os, bool terse = false) - LOCKS_EXCLUDED(Locks::mem_maps_lock_); + REQUIRES(!Locks::mem_maps_lock_); typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps; - static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_); - static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_); + static void Init() REQUIRES(!Locks::mem_maps_lock_); + static void Shutdown() REQUIRES(!Locks::mem_maps_lock_); private: MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size, - int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_); + int prot, bool reuse, size_t redzone_size = 0) REQUIRES(!Locks::mem_maps_lock_); static void DumpMapsLocked(std::ostream& os, bool terse) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_); + REQUIRES(Locks::mem_maps_lock_); static bool HasMemMap(MemMap* map) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_); + REQUIRES(Locks::mem_maps_lock_); static MemMap* GetLargestMemMapAt(void* address) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_); + REQUIRES(Locks::mem_maps_lock_); const std::string name_; uint8_t* const begin_; // Start of data. @@ -175,6 +175,8 @@ class MemMap { // unmapping. const bool reuse_; + const size_t redzone_size_; + #if USE_ART_LOW_4G_ALLOCATOR static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent. 
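The mem_map.h hunk above is part of a tree-wide migration from the old LOCKS_EXCLUDED / EXCLUSIVE_LOCKS_REQUIRED spellings to Clang's newer -Wthread-safety attributes: REQUIRES(!mu) means the caller must not hold mu (the method acquires it itself), while REQUIRES(mu) means the caller must already hold it. A minimal, self-contained illustration with the attributes written out inline (ART's base/mutex.h defines similar macros; the ones below are local to this sketch):

#include <cstddef>
#include <mutex>

#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mu {
 public:
  void Lock() ACQUIRE() { m_.lock(); }
  void Unlock() RELEASE() { m_.unlock(); }

 private:
  std::mutex m_;
};

class Table {
 public:
  size_t Size() REQUIRES(!lock_) {  // takes the lock itself
    lock_.Lock();
    size_t n = size_;
    lock_.Unlock();
    return n;
  }
  void GrowLocked() REQUIRES(lock_) { ++size_; }  // caller already holds lock_

 private:
  Mu lock_;
  size_t size_ GUARDED_BY(lock_) = 0;
};

Compiling with clang -Wthread-safety (plus -Wthread-safety-negative for the ! form) then flags callers that violate either contract.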
#endif diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc index f635b5d62f..13bf5b7698 100644 --- a/runtime/mem_map_test.cc +++ b/runtime/mem_map_test.cc @@ -18,7 +18,7 @@ #include <memory> -#include <valgrind.h> +#include "base/memory_tool.h" #include "gtest/gtest.h" @@ -216,7 +216,7 @@ TEST_F(MemMapTest, RemapAtEnd32bit) { TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) { CommonInit(); // This test may not work under valgrind. - if (RUNNING_ON_VALGRIND == 0) { + if (RUNNING_ON_MEMORY_TOOL == 0) { uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000; std::string error_msg; std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr", diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h index 99d697a6d3..dc084be06e 100644 --- a/runtime/mirror/abstract_method.h +++ b/runtime/mirror/abstract_method.h @@ -34,12 +34,13 @@ namespace mirror { class MANAGED AbstractMethod : public AccessibleObject { public: // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod. - bool CreateFromArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); - ArtMethod* GetArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_); // Only used by the image writer. - void SetArtMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); + mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); private: static MemberOffset ArtMethodOffset() { @@ -60,9 +61,8 @@ class MANAGED AbstractMethod : public AccessibleObject { HeapReference<mirror::Class> declaring_class_; HeapReference<mirror::Class> declaring_class_of_overridden_method_; - uint32_t padding_; - uint64_t art_method_; uint32_t access_flags_; + uint64_t art_method_; uint32_t dex_method_index_; friend struct art::AbstractMethodOffsets; // for verifying offset information diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h index 6d4c0f6fb3..dcf5118d11 100644 --- a/runtime/mirror/accessible_object.h +++ b/runtime/mirror/accessible_object.h @@ -36,12 +36,12 @@ class MANAGED AccessibleObject : public Object { } template<bool kTransactionActive> - void SetAccessible(bool value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(padding_); return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u); } - bool IsAccessible() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldBoolean(FlagOffset()); } diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index 88d75abd3c..3d540297e5 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -101,7 +101,7 @@ class SetLengthVisitor { } void operator()(Object* obj, size_t usable_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(usable_size); // Avoid AsArray as object is not yet in live bitmap or allocation stack. 
Array* array = down_cast<Array*>(obj); @@ -126,7 +126,7 @@ class SetLengthToUsableSizeVisitor { } void operator()(Object* obj, size_t usable_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsArray as object is not yet in live bitmap or allocation stack. Array* array = down_cast<Array*>(obj); // DCHECK(array->IsArrayInstance()); diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc index d72c03ff86..4128689bb7 100644 --- a/runtime/mirror/array.cc +++ b/runtime/mirror/array.cc @@ -43,7 +43,7 @@ namespace mirror { static Array* RecursiveCreateMultiArray(Thread* self, Handle<Class> array_class, int current_dimension, Handle<mirror::IntArray> dimensions) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { int32_t array_length = dimensions->Get(current_dimension); StackHandleScope<1> hs(self); Handle<Array> new_array( diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h index e65611d3c5..b27a8849ed 100644 --- a/runtime/mirror/array.h +++ b/runtime/mirror/array.h @@ -39,21 +39,21 @@ class MANAGED Array : public Object { template <bool kIsInstrumented, bool kFillUsable = false> ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count, size_t component_size_shift, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static Array* CreateMultiArray(Thread* self, Handle<Class> element_class, Handle<IntArray> dimensions) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_)); } - void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_GE(length, 0); // We use non transactional version since we can't undo this write. We also disable checking // since it would fail during a transaction. @@ -67,7 +67,7 @@ class MANAGED Array : public Object { static MemberOffset DataOffset(size_t component_size); void* GetRawData(size_t component_size, int32_t index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() + + (index * component_size); return reinterpret_cast<void*>(data); @@ -82,16 +82,18 @@ class MANAGED Array : public Object { // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and // returns false. 
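RecursiveCreateMultiArray above allocates one dimension per call: an array of dimensions[current] elements whose entries are the recursively created remaining dimensions. A toy version on std::vector, runnable outside the runtime (Node is a hypothetical stand-in for mirror::Array):

#include <cstdio>
#include <vector>

struct Node {                  // stands in for mirror::Array
  std::vector<Node> elements;  // sub-arrays; empty at the leaf dimension
};

Node CreateMultiArray(const std::vector<int>& dimensions, size_t current) {
  Node array;
  array.elements.resize(static_cast<size_t>(dimensions[current]));
  if (current + 1 < dimensions.size()) {
    for (Node& element : array.elements) {
      element = CreateMultiArray(dimensions, current + 1);  // next dimension
    }
  }
  return array;
}

int main() {
  Node a = CreateMultiArray({2, 3, 4}, 0);  // like new int[2][3][4]
  std::printf("%zu x %zu x %zu\n",
              a.elements.size(),
              a.elements[0].elements.size(),
              a.elements[0].elements[0].elements.size());
  return 0;
}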
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_); - Array* CopyOf(Thread* self, int32_t new_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); protected: - void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); private: void ThrowArrayIndexOutOfBoundsException(int32_t index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // The number of array elements. int32_t length_; @@ -107,32 +109,32 @@ class MANAGED PrimitiveArray : public Array { typedef T ElementType; static PrimitiveArray<T>* Alloc(Thread* self, size_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - const T* GetData() const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const T* GetData() const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0)); } - T* GetData() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<T*>(GetRawData(sizeof(T), 0)); } - T Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(CheckIsValidIndex(i)); return GetData()[i]; } - void Set(int32_t i, T value) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true> void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS; // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> @@ -144,7 +146,7 @@ class MANAGED PrimitiveArray : public Array { * and the arrays non-null. */ void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Works like memcpy(), except we guarantee not to allow tearing of array values (ie using @@ -152,7 +154,7 @@ class MANAGED PrimitiveArray : public Array { * and the arrays non-null. 
*/ void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void SetArrayClass(Class* array_class) { CHECK(array_class_.IsNull()); @@ -160,7 +162,7 @@ class MANAGED PrimitiveArray : public Array { array_class_ = GcRoot<Class>(array_class); } - static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!array_class_.IsNull()); return array_class_.Read(); } @@ -170,7 +172,7 @@ class MANAGED PrimitiveArray : public Array { array_class_ = GcRoot<Class>(nullptr); } - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: static GcRoot<Class> array_class_; @@ -183,11 +185,11 @@ class PointerArray : public Array { public: template<typename T> T GetElementPtrSize(uint32_t idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive = false, bool kUnchecked = false, typename T> void SetElementPtrSize(uint32_t idx, T element, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); }; } // namespace mirror diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 7f89b1d4df..cd678f670b 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -28,6 +28,7 @@ #include "dex_file.h" #include "gc/heap-inl.h" #include "iftable.h" +#include "length_prefixed_array.h" #include "object_array-inl.h" #include "read_barrier-inl.h" #include "reference-inl.h" @@ -61,25 +62,28 @@ inline DexCache* Class::GetDexCache() { return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_)); } -inline ArtMethod* Class::GetDirectMethodsPtr() { +inline LengthPrefixedArray<ArtMethod>* Class::GetDirectMethodsPtr() { DCHECK(IsLoaded() || IsErroneous()); return GetDirectMethodsPtrUnchecked(); } -inline ArtMethod* Class::GetDirectMethodsPtrUnchecked() { - return reinterpret_cast<ArtMethod*>(GetField64(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_))); +inline LengthPrefixedArray<ArtMethod>* Class::GetDirectMethodsPtrUnchecked() { + return reinterpret_cast<LengthPrefixedArray<ArtMethod>*>( + GetField64(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_))); } -inline ArtMethod* Class::GetVirtualMethodsPtrUnchecked() { - return reinterpret_cast<ArtMethod*>(GetField64(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_))); +inline LengthPrefixedArray<ArtMethod>* Class::GetVirtualMethodsPtrUnchecked() { + return reinterpret_cast<LengthPrefixedArray<ArtMethod>*>( + GetField64(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_))); } -inline void Class::SetDirectMethodsPtr(ArtMethod* new_direct_methods) { +inline void Class::SetDirectMethodsPtr(LengthPrefixedArray<ArtMethod>* new_direct_methods) { DCHECK(GetDirectMethodsPtrUnchecked() == nullptr); SetDirectMethodsPtrUnchecked(new_direct_methods); } -inline void Class::SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods) { +inline void Class::SetDirectMethodsPtrUnchecked( + LengthPrefixedArray<ArtMethod>* new_direct_methods) { SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), reinterpret_cast<uint64_t>(new_direct_methods)); } @@ -88,25 +92,27 @@ inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size) 
CheckPointerSize(pointer_size); auto* methods = GetDirectMethodsPtrUnchecked(); DCHECK(methods != nullptr); - return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) + - ArtMethod::ObjectSize(pointer_size) * i); + return &methods->At(i, + ArtMethod::Size(pointer_size), + ArtMethod::Alignment(pointer_size)); } inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) { CheckPointerSize(pointer_size); auto* methods = GetDirectMethodsPtr(); DCHECK(methods != nullptr); - return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) + - ArtMethod::ObjectSize(pointer_size) * i); + return &methods->At(i, + ArtMethod::Size(pointer_size), + ArtMethod::Alignment(pointer_size)); } template<VerifyObjectFlags kVerifyFlags> -inline ArtMethod* Class::GetVirtualMethodsPtr() { +inline LengthPrefixedArray<ArtMethod>* Class::GetVirtualMethodsPtr() { DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kVerifyFlags>()); return GetVirtualMethodsPtrUnchecked(); } -inline void Class::SetVirtualMethodsPtr(ArtMethod* new_virtual_methods) { +inline void Class::SetVirtualMethodsPtr(LengthPrefixedArray<ArtMethod>* new_virtual_methods) { // TODO: we reassign virtual methods to grow the table for miranda // methods.. they should really just be assigned once. SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), @@ -129,10 +135,11 @@ inline ArtMethod* Class::GetVirtualMethodDuringLinking(size_t i, size_t pointer_ inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size) { CheckPointerSize(pointer_size); - auto* methods = GetVirtualMethodsPtrUnchecked(); + LengthPrefixedArray<ArtMethod>* methods = GetVirtualMethodsPtrUnchecked(); DCHECK(methods != nullptr); - return reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t>(methods) + - ArtMethod::ObjectSize(pointer_size) * i); + return &methods->At(i, + ArtMethod::Size(pointer_size), + ArtMethod::Alignment(pointer_size)); } inline PointerArray* Class::GetVTable() { @@ -423,9 +430,9 @@ inline void Class::SetIfTable(IfTable* new_iftable) { SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable); } -inline ArtField* Class::GetIFields() { +inline LengthPrefixedArray<ArtField>* Class::GetIFieldsPtr() { DCHECK(IsLoaded() || IsErroneous()); - return GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)); + return GetFieldPtr<LengthPrefixedArray<ArtField>*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)); } inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() { @@ -458,46 +465,44 @@ inline MemberOffset Class::GetFirstReferenceStaticFieldOffsetDuringLinking(size_ return MemberOffset(base); } -inline void Class::SetIFields(ArtField* new_ifields) { - DCHECK(GetIFieldsUnchecked() == nullptr); +inline void Class::SetIFieldsPtr(LengthPrefixedArray<ArtField>* new_ifields) { + DCHECK(GetIFieldsPtrUnchecked() == nullptr); return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields); } -inline void Class::SetIFieldsUnchecked(ArtField* new_ifields) { +inline void Class::SetIFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_ifields) { SetFieldPtr<false, true, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields); } -inline ArtField* Class::GetSFieldsUnchecked() { - return GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_)); +inline LengthPrefixedArray<ArtField>* Class::GetSFieldsPtrUnchecked() { + return GetFieldPtr<LengthPrefixedArray<ArtField>*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_)); } -inline ArtField* 
Class::GetIFieldsUnchecked() { - return GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)); +inline LengthPrefixedArray<ArtField>* Class::GetIFieldsPtrUnchecked() { + return GetFieldPtr<LengthPrefixedArray<ArtField>*>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)); } -inline ArtField* Class::GetSFields() { +inline LengthPrefixedArray<ArtField>* Class::GetSFieldsPtr() { DCHECK(IsLoaded() || IsErroneous()) << GetStatus(); - return GetSFieldsUnchecked(); + return GetSFieldsPtrUnchecked(); } -inline void Class::SetSFields(ArtField* new_sfields) { +inline void Class::SetSFieldsPtr(LengthPrefixedArray<ArtField>* new_sfields) { DCHECK((IsRetired() && new_sfields == nullptr) || GetFieldPtr<ArtField*>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_)) == nullptr); SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields); } -inline void Class::SetSFieldsUnchecked(ArtField* new_sfields) { +inline void Class::SetSFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields) { SetFieldPtr<false, true, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields); } inline ArtField* Class::GetStaticField(uint32_t i) { - DCHECK_LT(i, NumStaticFields()); - return &GetSFields()[i]; + return &GetSFieldsPtr()->At(i); } inline ArtField* Class::GetInstanceField(uint32_t i) { - DCHECK_LT(i, NumInstanceFields()); - return &GetIFields()[i]; + return &GetIFieldsPtr()->At(i); } template<VerifyObjectFlags kVerifyFlags> @@ -664,9 +669,9 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables, return size; } -template <bool kVisitClass, typename Visitor> +template <typename Visitor> inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) { - VisitInstanceFieldsReferences<kVisitClass>(klass, visitor); + VisitInstanceFieldsReferences(klass, visitor); // Right after a class is allocated, but not yet loaded // (kStatusNotReady, see ClassLinker::LoadClass()), GC may find it // and scan it. IsTemp() may call Class::GetAccessFlags() but may @@ -678,8 +683,10 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor) // Temp classes don't ever populate imt/vtable or static fields and they are not even // allocated with the right size for those. Also, unresolved classes don't have fields // linked yet. - VisitStaticFieldsReferences<kVisitClass>(this, visitor); + VisitStaticFieldsReferences(this, visitor); } + // Since this class is reachable, we must also visit the associated roots when we scan it. + VisitNativeRoots(visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); } template<ReadBarrierOption kReadBarrierOption> @@ -811,72 +818,56 @@ inline ObjectArray<String>* Class::GetDexCacheStrings() { template<class Visitor> void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) { - ArtField* const sfields = GetSFieldsUnchecked(); - // Since we visit class roots while we may be writing these fields, check against null. - if (sfields != nullptr) { - for (size_t i = 0, count = NumStaticFields(); i < count; ++i) { - auto* f = &sfields[i]; - if (kIsDebugBuild && IsResolved()) { - CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus(); - } - f->VisitRoots(visitor); + for (ArtField& field : GetSFieldsUnchecked()) { + // Visit roots first in case the declaring class gets moved. 
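The range-for loops in VisitNativeRoots above walk ArtField/ArtMethod storage whose element stride is only known at runtime (ArtMethod::Size() and ::Alignment() depend on the image pointer size), which is exactly what StrideIterator exists for. A stripped-down sketch of stride-based iteration (StrideIter is a local stand-in, not ART's StrideIterator):

#include <cstddef>
#include <cstdint>
#include <cstdio>

template <typename T>
class StrideIter {
 public:
  StrideIter(T* ptr, size_t stride)
      : ptr_(reinterpret_cast<uintptr_t>(ptr)), stride_(stride) {}
  bool operator!=(const StrideIter& other) const { return ptr_ != other.ptr_; }
  StrideIter& operator++() {
    ptr_ += stride_;  // advance by the runtime stride, not sizeof(T)
    return *this;
  }
  T& operator*() const { return *reinterpret_cast<T*>(ptr_); }

 private:
  uintptr_t ptr_;
  size_t stride_;
};

int main() {
  // Elements padded to a 16-byte stride, as if each carried trailing
  // pointer-size-dependent data.
  alignas(16) unsigned char storage[3 * 16] = {};
  for (int i = 0; i < 3; ++i) {
    *reinterpret_cast<int*>(storage + i * 16) = i + 1;
  }
  StrideIter<int> it(reinterpret_cast<int*>(storage), 16);
  StrideIter<int> end(reinterpret_cast<int*>(storage + sizeof(storage)), 16);
  for (; it != end; ++it) {
    std::printf("%d\n", *it);  // prints 1 2 3
  }
  return 0;
}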
+ field.VisitRoots(visitor); + if (kIsDebugBuild && IsResolved()) { + CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus(); } } - ArtField* const ifields = GetIFieldsUnchecked(); - if (ifields != nullptr) { - for (size_t i = 0, count = NumInstanceFields(); i < count; ++i) { - auto* f = &ifields[i]; - if (kIsDebugBuild && IsResolved()) { - CHECK_EQ(f->GetDeclaringClass(), this) << GetStatus(); - } - f->VisitRoots(visitor); + for (ArtField& field : GetIFieldsUnchecked()) { + // Visit roots first in case the declaring class gets moved. + field.VisitRoots(visitor); + if (kIsDebugBuild && IsResolved()) { + CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus(); } } - for (auto& m : GetDirectMethods(pointer_size)) { - m.VisitRoots(visitor); + for (ArtMethod& method : GetDirectMethods(pointer_size)) { + method.VisitRoots(visitor); } - for (auto& m : GetVirtualMethods(pointer_size)) { - m.VisitRoots(visitor); + for (ArtMethod& method : GetVirtualMethods(pointer_size)) { + method.VisitRoots(visitor); } } -inline StrideIterator<ArtMethod> Class::DirectMethodsBegin(size_t pointer_size) { +inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) { CheckPointerSize(pointer_size); - auto* methods = GetDirectMethodsPtrUnchecked(); - auto stride = ArtMethod::ObjectSize(pointer_size); - return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods), stride); + return MakeIterationRangeFromLengthPrefixedArray(GetDirectMethodsPtrUnchecked(), + ArtMethod::Size(pointer_size), + ArtMethod::Alignment(pointer_size)); } -inline StrideIterator<ArtMethod> Class::DirectMethodsEnd(size_t pointer_size) { +inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) { CheckPointerSize(pointer_size); - auto* methods = GetDirectMethodsPtrUnchecked(); - auto stride = ArtMethod::ObjectSize(pointer_size); - auto count = NumDirectMethods(); - return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods) + stride * count, stride); + return MakeIterationRangeFromLengthPrefixedArray(GetVirtualMethodsPtrUnchecked(), + ArtMethod::Size(pointer_size), + ArtMethod::Alignment(pointer_size)); } -inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) { - CheckPointerSize(pointer_size); - return MakeIterationRange(DirectMethodsBegin(pointer_size), DirectMethodsEnd(pointer_size)); +inline IterationRange<StrideIterator<ArtField>> Class::GetIFields() { + return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtr()); } -inline StrideIterator<ArtMethod> Class::VirtualMethodsBegin(size_t pointer_size) { - CheckPointerSize(pointer_size); - auto* methods = GetVirtualMethodsPtrUnchecked(); - auto stride = ArtMethod::ObjectSize(pointer_size); - return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods), stride); +inline IterationRange<StrideIterator<ArtField>> Class::GetSFields() { + return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtr()); } -inline StrideIterator<ArtMethod> Class::VirtualMethodsEnd(size_t pointer_size) { - CheckPointerSize(pointer_size); - auto* methods = GetVirtualMethodsPtrUnchecked(); - auto stride = ArtMethod::ObjectSize(pointer_size); - auto count = NumVirtualMethods(); - return StrideIterator<ArtMethod>(reinterpret_cast<uintptr_t>(methods) + stride * count, stride); +inline IterationRange<StrideIterator<ArtField>> Class::GetIFieldsUnchecked() { + return MakeIterationRangeFromLengthPrefixedArray(GetIFieldsPtrUnchecked()); } -inline 
IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) { - return MakeIterationRange(VirtualMethodsBegin(pointer_size), VirtualMethodsEnd(pointer_size)); +inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() { + return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked()); } inline MemberOffset Class::EmbeddedImTableOffset(size_t pointer_size) { @@ -898,6 +889,53 @@ inline void Class::CheckPointerSize(size_t pointer_size) { DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); } +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> +inline Class* Class::GetComponentType() { + return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset()); +} + +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> +inline bool Class::IsArrayClass() { + return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr; +} + +inline bool Class::IsAssignableFrom(Class* src) { + DCHECK(src != nullptr); + if (this == src) { + // Can always assign to things of the same type. + return true; + } else if (IsObjectClass()) { + // Can assign any reference to java.lang.Object. + return !src->IsPrimitive(); + } else if (IsInterface()) { + return src->Implements(this); + } else if (src->IsArrayClass()) { + return IsAssignableFromArray(src); + } else { + return !src->IsInterface() && src->IsSubClass(this); + } +} + +inline uint32_t Class::NumDirectMethods() { + LengthPrefixedArray<ArtMethod>* arr = GetDirectMethodsPtrUnchecked(); + return arr != nullptr ? arr->Length() : 0u; +} + +inline uint32_t Class::NumVirtualMethods() { + LengthPrefixedArray<ArtMethod>* arr = GetVirtualMethodsPtrUnchecked(); + return arr != nullptr ? arr->Length() : 0u; +} + +inline uint32_t Class::NumInstanceFields() { + LengthPrefixedArray<ArtField>* arr = GetIFieldsPtrUnchecked(); + return arr != nullptr ? arr->Length() : 0u; +} + +inline uint32_t Class::NumStaticFields() { + LengthPrefixedArray<ArtField>* arr = GetSFieldsPtrUnchecked(); + return arr != nullptr ? 
arr->Length() : 0u; +} + } // namespace mirror } // namespace art diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index f0b7bfddea..055b3e5110 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -471,7 +471,8 @@ ArtMethod* Class::FindDirectMethod( ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) { for (auto& method : GetVirtualMethods(pointer_size)) { - if (name == method.GetName() && method.GetSignature() == signature) { + ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size); + if (name == np_method->GetName() && np_method->GetSignature() == signature) { return &method; } } @@ -481,7 +482,8 @@ ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Strin ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) { for (auto& method : GetVirtualMethods(pointer_size)) { - if (name == method.GetName() && signature == method.GetSignature()) { + ArtMethod* const np_method = method.GetInterfaceMethodIfProxy(pointer_size); + if (name == np_method->GetName() && signature == np_method->GetSignature()) { return &method; } } @@ -822,24 +824,56 @@ void Class::PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], } } +class ReadBarrierOnNativeRootsVisitor { + public: + void operator()(mirror::Object* obj ATTRIBUTE_UNUSED, + MemberOffset offset ATTRIBUTE_UNUSED, + bool is_static ATTRIBUTE_UNUSED) const {} + + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (!root->IsNull()) { + VisitRoot(root); + } + } + + void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const + SHARED_REQUIRES(Locks::mutator_lock_) { + mirror::Object* old_ref = root->AsMirrorPtr(); + mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root); + if (old_ref != new_ref) { + // Update the field atomically. This may fail if mutator updates before us, but it's ok. + auto* atomic_root = + reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root); + atomic_root->CompareExchangeStrongSequentiallyConsistent( + mirror::CompressedReference<mirror::Object>::FromMirrorPtr(old_ref), + mirror::CompressedReference<mirror::Object>::FromMirrorPtr(new_ref)); + } + } +}; + // The pre-fence visitor for Class::CopyOf(). 
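ReadBarrierOnNativeRootsVisitor above repairs each native root with a single compare-exchange and deliberately ignores failure: if a mutator stored first, its value is also a to-space reference, so losing the race is benign. The same pattern with std::atomic, where Forward is a hypothetical stand-in for ReadBarrier::BarrierForRoot:

#include <atomic>
#include <cassert>

struct Obj {};

// Hypothetical forwarding function: maps a reference to its (possibly moved)
// to-space address.
static Obj* Forward(Obj* /*ref*/, Obj* to_space) { return to_space; }

static void UpdateRoot(std::atomic<Obj*>* root, Obj* to_space) {
  Obj* old_ref = root->load(std::memory_order_relaxed);
  Obj* new_ref = Forward(old_ref, to_space);
  if (old_ref != new_ref) {
    // May fail if a mutator updated first; that is fine, because the mutator
    // also wrote a current (to-space) reference.
    root->compare_exchange_strong(old_ref, new_ref, std::memory_order_seq_cst);
  }
}

int main() {
  Obj from, to;
  std::atomic<Obj*> root(&from);
  UpdateRoot(&root, &to);
  assert(root.load() == &to);
  return 0;
}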
class CopyClassVisitor { public: - explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length, - size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize], - size_t pointer_size) + CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig, size_t new_length, + size_t copy_bytes, ArtMethod* const (&imt)[mirror::Class::kImtSize], + size_t pointer_size) : self_(self), orig_(orig), new_length_(new_length), copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) { } void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self_); Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass())); mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_); mirror::Class::SetStatus(h_new_class_obj, Class::kStatusResolving, self_); h_new_class_obj->PopulateEmbeddedImtAndVTable(imt_, pointer_size_); h_new_class_obj->SetClassSize(new_length_); + // Visit all of the references to make sure there are no from-space references in the native + // roots. + static_cast<mirror::Object*>(h_new_class_obj.Get())->VisitReferences( + ReadBarrierOnNativeRootsVisitor(), VoidFunctor()); } private: diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index ba0a9fc4e5..3f375be9ca 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -41,7 +41,7 @@ class ArtField; class ArtMethod; struct ClassOffsets; template<class T> class Handle; -template<class T> class Handle; +template<typename T> class LengthPrefixedArray; class Signature; class StringPiece; template<size_t kNumReferences> class PACKED(4) StackHandleScope; @@ -127,7 +127,7 @@ class MANAGED Class FINAL : public Object { }; template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) { static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32"); return static_cast<Status>( GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_))); @@ -135,7 +135,7 @@ class MANAGED Class FINAL : public Object { // This is static because 'this' may be moved by GC. static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static MemberOffset StatusOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, status_); @@ -143,146 +143,155 @@ class MANAGED Class FINAL : public Object { // Returns true if the class has been retired. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsRetired() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() == kStatusRetired; } // Returns true if the class has failed to link. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsErroneous() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() == kStatusError; } // Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsIdxLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusIdx; } // Returns true if the class has been loaded. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusLoaded; } // Returns true if the class has been linked. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsResolved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusResolved; } // Returns true if the class was compile-time verified. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsCompileTimeVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime; } // Returns true if the class has been verified. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsVerified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusVerified; } // Returns true if the class is initializing. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsInitializing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() >= kStatusInitializing; } // Returns true if the class is initialized. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsInitialized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStatus<kVerifyFlags>() == kStatusInitialized; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset AccessFlagsOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_); } - void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the class is an interface. - ALWAYS_INLINE bool IsInterface() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccInterface) != 0; } // Returns true if the class is declared public. - ALWAYS_INLINE bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPublic) != 0; } // Returns true if the class is declared final. 
- ALWAYS_INLINE bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } - ALWAYS_INLINE bool IsFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccClassIsFinalizable) != 0; } - ALWAYS_INLINE void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); SetAccessFlags(flags | kAccClassIsFinalizable); } - ALWAYS_INLINE bool IsStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetField32(AccessFlagsOffset()) & kAccClassIsStringClass) != 0; } - ALWAYS_INLINE void SetStringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); SetAccessFlags(flags | kAccClassIsStringClass); } + ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) { + return (GetField32(AccessFlagsOffset()) & kAccClassIsClassLoaderClass) != 0; + } + + ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) { + uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); + SetAccessFlags(flags | kAccClassIsClassLoaderClass); + } + // Returns true if the class is abstract. - ALWAYS_INLINE bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccAbstract) != 0; } // Returns true if the class is an annotation. - ALWAYS_INLINE bool IsAnnotation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccAnnotation) != 0; } // Returns true if the class is synthetic. - ALWAYS_INLINE bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccSynthetic) != 0; } // Returns true if the class can avoid access checks. 
- bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccPreverified) != 0; } - void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPreverified() SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_)); SetAccessFlags(flags | kAccPreverified); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsTypeOfReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsReference) != 0; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsWeakReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsWeakReference) != 0; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsSoftReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccReferenceFlagsMask) == kAccClassIsReference; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsFinalizerReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsFinalizerReference) != 0; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPhantomReferenceClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags<kVerifyFlags>() & kAccClassIsPhantomReference) != 0; } @@ -291,7 +300,7 @@ class MANAGED Class FINAL : public Object { // For array classes, where all the classes are final due to there being no sub-classes, an // Object[] may be assigned to by a String[] but a String[] may not be assigned to by other // types as the component is final. - bool CannotBeAssignedFromOtherTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsArrayClass()) { return IsFinal(); } else { @@ -306,18 +315,19 @@ class MANAGED Class FINAL : public Object { // Returns true if this class is the placeholder and should retire and // be replaced with a class with the right size for embedded imt/vtable. - bool IsTemp() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) { Status s = GetStatus(); return s < Status::kStatusResolving && ShouldHaveEmbeddedImtAndVTable(); } - String* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns the cached name. - void SetName(String* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Sets the cached name. + String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name. + void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_); // Sets the cached name. // Computes the name, then sets the cached value. 
- static String* ComputeName(Handle<Class> h_this) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsProxyClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) { // Read access flags without using getter as whether something is a proxy can be checked in // any loaded state. // TODO: switch to a check if the super class is java.lang.reflect.Proxy? @@ -326,9 +336,9 @@ class MANAGED Class FINAL : public Object { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t)); int32_t v32 = static_cast<int32_t>(new_type); DCHECK_EQ(v32 & 0xFFFF, v32) << "upper 16 bits aren't zero"; @@ -338,83 +348,82 @@ class MANAGED Class FINAL : public Object { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the class is a primitive type. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitive() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveBoolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveByte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveChar() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveShort() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveInt() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType() == Primitive::kPrimInt; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveLong() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool 
IsPrimitiveFloat() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveDouble() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveVoid() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) { return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPrimitiveArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) { return IsArrayClass<kVerifyFlags>() && GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()-> IsPrimitive(); } // Depth of class from java.lang.Object - uint32_t Depth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr; - } + + bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_); - bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsReferenceClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset ComponentTypeOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, component_type_); @@ -422,11 +431,9 @@ class MANAGED Class FINAL : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset()); - } + Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_); - void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(GetComponentType() == nullptr); DCHECK(new_component_type != nullptr); // Component type is invariant: use non-transactional mode without check. 
@@ -434,43 +441,43 @@ class MANAGED Class FINAL : public Object { } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) { return 1U << GetComponentSizeShift(); } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t GetComponentSizeShift() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) { return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift(); } - bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) { return !IsPrimitive() && GetSuperClass() == nullptr; } - bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) { return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass(); } - bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) { return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || (IsAbstract() && IsArrayClass()); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsObjectArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetComponentType<kVerifyFlags>() != nullptr && !GetComponentType<kVerifyFlags>()->IsPrimitive(); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsIntArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); auto* component_type = GetComponentType<kVerifyFlags>(); return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>(); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLongArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis); auto* component_type = GetComponentType<kVerifyFlags>(); return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>(); @@ -479,16 +486,16 @@ class MANAGED Class FINAL : public Object { // Creates a raw object instance but does not invoke the default constructor. 
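GetComponentSize above reconstructs the component size as 1U << GetComponentSizeShift(), which works because every array component size is a power of two, so only the shift needs to be stored. A self-contained check of the encoding:

#include <cstddef>

constexpr size_t ComponentSizeFromShift(size_t shift) {
  return static_cast<size_t>(1) << shift;
}

static_assert(ComponentSizeFromShift(0) == 1, "boolean/byte arrays");
static_assert(ComponentSizeFromShift(1) == 2, "char/short arrays");
static_assert(ComponentSizeFromShift(2) == 4, "int/float arrays");
static_assert(ComponentSizeFromShift(3) == 8, "long/double arrays");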
template<bool kIsInstrumented, bool kCheckAddFinalizer = true> ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); Object* AllocObject(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); Object* AllocNonMovableObject(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsVariableSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) { // Classes, arrays, and strings vary in size, and so the object_size_ field cannot // be used to get their instance size. return IsClassClass<kVerifyFlags, kReadBarrierOption>() || @@ -497,17 +504,17 @@ class MANAGED Class FINAL : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_)); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_)); } void SetClassSize(uint32_t new_class_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Compute how many bytes would be used by a class with the given elements. static uint32_t ComputeClassSize(bool has_embedded_tables, @@ -533,31 +540,31 @@ class MANAGED Class FINAL : public Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - uint32_t GetObjectSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset ObjectSizeOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, object_size_); } - void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsVariableSize()); // Not called within a transaction. return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size); } void SetObjectSizeWithoutChecks(uint32_t new_object_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. return SetField32<false, false, kVerifyNone>( OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size); } // Returns true if this class is in the same package as that class. - bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_); static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2); // Returns true if this class can access that class. 
- bool CanAccess(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) { return that->IsPublic() || this->IsInSamePackage(that); } @@ -565,7 +572,7 @@ class MANAGED Class FINAL : public Object { // Note that access to the class isn't checked in case the declaring class is protected and the // method has been exposed by a public sub-class bool CanAccessMember(Class* access_to, uint32_t member_flags) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Classes can access all of their own members if (this == access_to) { return true; } @@ -593,49 +600,34 @@ class MANAGED Class FINAL : public Object { // referenced by the FieldId in the DexFile in case the declaring class is inaccessible. bool CanAccessResolvedField(Class* access_to, ArtField* field, DexCache* dex_cache, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool CheckResolvedFieldAccess(Class* access_to, ArtField* field, uint32_t field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this class access a resolved method? // Note that access to the method's class is checked and this may require looking up the class // referenced by the MethodId in the DexFile in case the declaring class is inaccessible. bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method, DexCache* dex_cache, uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <InvokeType throw_invoke_type> bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method, uint32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool IsSubClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); // Can src be assigned to this class? For example, String can be assigned to Object (by an // upcast), however, an Object cannot be assigned to a String as a potentially exception throwing // downcast would be necessary. Similarly for interfaces, a class that implements (or an interface // that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign // to themselves. Classes for primitive types may not assign to each other. - ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(src != nullptr); - if (this == src) { - // Can always assign to things of the same type. - return true; - } else if (IsObjectClass()) { - // Can assign any reference to java.lang.Object. - return !src->IsPrimitive(); - } else if (IsInterface()) { - return src->Implements(this); - } else if (src->IsArrayClass()) { - return IsAssignableFromArray(src); - } else { - return !src->IsInterface() && src->IsSubClass(this); - } - } + ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE Class* GetSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_); - void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetSuperClass(Class *new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) { // Super class is assigned once, except during class linker initialization. 
Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_)); DCHECK(old_super_class == nullptr || old_super_class == new_super_class); @@ -643,7 +635,7 @@ class MANAGED Class FINAL : public Object { SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class); } - bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetSuperClass() != nullptr; } @@ -651,9 +643,9 @@ return MemberOffset(OFFSETOF_MEMBER(Class, super_class_)); } - ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); - void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DexCacheOffset() { return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_)); @@ -665,83 +657,62 @@ class MANAGED Class FINAL : public Object { kDumpClassInitialized = (1 << 2), }; - void DumpClass(std::ostream& os, int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); // Also updates the dex_cache_strings_ variable from new_dex_cache. - void SetDexCache(DexCache* new_dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsBegin(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ALWAYS_INLINE StrideIterator<ArtMethod> DirectMethodsEnd(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* GetDirectMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LengthPrefixedArray<ArtMethod>* GetDirectMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_); - void SetDirectMethodsPtr(ArtMethod* new_direct_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDirectMethodsPtr(LengthPrefixedArray<ArtMethod>* new_direct_methods) + SHARED_REQUIRES(Locks::mutator_lock_); // Used by image writer. - void SetDirectMethodsPtrUnchecked(ArtMethod* new_direct_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDirectMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_direct_methods) + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Use only when we are allocating and populating the method arrays. ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of static, private, and constructor methods. 
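The recurring type change above, from raw ArtMethod* arrays to LengthPrefixedArray<ArtMethod>, stores the element count inline in front of the elements themselves; that is why the separate num_direct_methods_ and num_virtual_methods_ counters disappear from the field list later in this diff. A rough sketch of the layout (simplified; ART's real template also deals with element alignment and stride):

#include <cstdint>

template <typename T>
class LengthPrefixedArray {
 public:
  uint32_t size() const { return size_; }
  T& At(uint32_t index) { return data_[index]; }  // no bounds check in this sketch
 private:
  uint32_t size_;  // the length lives directly in front of the elements
  T data_[1];      // classic trailing-storage idiom: allocated as header plus 'size_' elements
};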
- ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_)); - } - void SetNumDirectMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_direct_methods_), num); - } + ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsBegin(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - ALWAYS_INLINE StrideIterator<ArtMethod> VirtualMethodsEnd(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetVirtualMethodsPtr() + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void SetVirtualMethodsPtr(ArtMethod* new_virtual_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetVirtualMethodsPtr(LengthPrefixedArray<ArtMethod>* new_virtual_methods) + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of non-inherited virtual methods. - ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_)); - } - void SetNumVirtualMethods(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_virtual_methods_), num); - } + ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetVirtualMethodDuringLinking(size_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE PointerArray* GetVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_); - void SetVTable(PointerArray* new_vtable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset VTableOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, vtable_); @@ -751,362 +722,355 @@ class MANAGED Class FINAL : public Object { return MemberOffset(sizeof(Class)); } - bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool ShouldHaveEmbeddedImtAndVTable() SHARED_REQUIRES(Locks::mutator_lock_) { return IsInstantiable(); } - bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset EmbeddedImTableEntryOffset(uint32_t i, size_t pointer_size); static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, size_t pointer_size); ArtMethod* GetEmbeddedImTableEntry(uint32_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + 
SHARED_REQUIRES(Locks::mutator_lock_); void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetVTableEntry(uint32_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_); - void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetEmbeddedVTableEntry(uint32_t i, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); inline void SetEmbeddedVTableEntryUnchecked(uint32_t i, ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void PopulateEmbeddedImtAndVTable(ArtMethod* const (&methods)[kImtSize], size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given a method implemented by this class but potentially from a super class, return the // specific implementation method for this class. ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given a method implemented by this class' super class, return the specific implementation // method for this class. ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Given a method implemented by this class, but potentially from a // super class or interface, return the specific implementation // method for this class. 
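The FindVirtualMethodFor* family declared here and below maps a resolved method to the receiver class's own implementation. For plain virtual dispatch the core is just a vtable slot lookup; a simplified sketch using accessors from this header (the helper itself is hypothetical):

// A virtual call indexes the receiver's vtable with the slot assigned at
// link time; a subclass that overrides the method has overwritten that slot.
ArtMethod* ResolveVirtualCall(mirror::Class* receiver_klass,
                              uint32_t vtable_index,
                              size_t pointer_size)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  return receiver_klass->GetVTableEntry(vtable_index, pointer_size);
}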
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE; + SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE; ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE int32_t GetIfTableCount() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE IfTable* GetIfTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_); // Get instance fields of the class (See also GetSFields). - ArtField* GetIFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LengthPrefixedArray<ArtField>* GetIFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_); - void SetIFields(ArtField* new_ifields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetIFields() + SHARED_REQUIRES(Locks::mutator_lock_); - // Unchecked edition has no verification flags. - void SetIFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - uint32_t NumInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_)); - } + void SetIFieldsPtr(LengthPrefixedArray<ArtField>* new_ifields) + SHARED_REQUIRES(Locks::mutator_lock_); - void SetNumInstanceFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_instance_fields_), num); - } + // Unchecked edition has no verification flags. + void SetIFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields) + SHARED_REQUIRES(Locks::mutator_lock_); - ArtField* GetInstanceField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_); + ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of instance fields containing reference types. - uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_)); } - uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_)); } - void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num); } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); void SetReferenceInstanceOffsets(uint32_t new_reference_offsets) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the offset of the first reference instance field. Other reference instance fields follow. 
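Packing all reference instance fields together from a known first offset, as the comment above describes, means the GC can scan an instance using only two numbers: that offset and num_reference_instance_fields_. A simplified sketch of such a scan (hypothetical visitor signature; ART actually stores compressed 32-bit HeapReference<> slots):

#include <cstdint>

void VisitInstanceReferences(uint8_t* raw_object,
                             uint32_t first_ref_offset,
                             uint32_t num_refs,
                             void (*visit)(void** slot)) {
  // Reference slots are contiguous, so no per-field metadata is needed.
  for (uint32_t i = 0; i < num_refs; ++i) {
    visit(reinterpret_cast<void**>(raw_object + first_ref_offset +
                                   i * sizeof(void*)));
  }
}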
MemberOffset GetFirstReferenceInstanceFieldOffset() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the number of static fields containing reference types. - uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsResolved() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_)); } - uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsLoaded() || IsErroneous() || IsRetired()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_)); } - void SetNumReferenceStaticFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num); } // Get the offset of the first reference static field. Other reference static fields follow. MemberOffset GetFirstReferenceStaticFieldOffset(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the offset of the first reference static field. Other reference static fields follow. MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Gets the static fields of the class. - ArtField* GetSFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LengthPrefixedArray<ArtField>* GetSFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_); + ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetSFields() + SHARED_REQUIRES(Locks::mutator_lock_); - void SetSFields(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetSFieldsPtr(LengthPrefixedArray<ArtField>* new_sfields) + SHARED_REQUIRES(Locks::mutator_lock_); // Unchecked edition has no verification flags. - void SetSFieldsUnchecked(ArtField* new_sfields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - uint32_t NumStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_)); - } + void SetSFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields) + SHARED_REQUIRES(Locks::mutator_lock_); - void SetNumStaticFields(uint32_t num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_static_fields_), num); - } + uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_); // TODO: uint16_t - ArtField* GetStaticField(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_); // Find a static or instance field using the JLS resolution order static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass. 
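The FindInstanceField overloads that follow implement the usual walk up the superclass chain, consulting only the fields each class declares itself; roughly (hypothetical free function, simplified from the declarations here):

ArtField* FindInstanceFieldSketch(mirror::Class* klass,
                                  const StringPiece& name,
                                  const StringPiece& type)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  for (mirror::Class* c = klass; c != nullptr; c = c->GetSuperClass()) {
    ArtField* field = c->FindDeclaredInstanceField(name, type);
    if (field != nullptr) {
      return field;
    }
  }
  return nullptr;
}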
ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given instance field in this class or a superclass, only searches classes that // have the same dex cache. ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given static field in this class or a superclass. static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Finds the given static field in this class or superclass, only searches classes that // have the same dex cache. static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(IsIdxLoaded() || IsErroneous()); return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_)); } - void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_); - Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Class* GetVerifyErrorClass() SHARED_REQUIRES(Locks::mutator_lock_) { // DCHECK(IsErroneous()); return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_)); } - uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_)); } - void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx); } - uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_)); } - void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) { // Not called within a transaction. 
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx); } - static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(HasJavaLangClass()); return java_lang_Class_.Read(); } - static bool HasJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) { return !java_lang_Class_.IsNull(); } // Can't call this SetClass or else gets called instead of Object::SetClass in places. - static void SetClassClass(Class* java_lang_Class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // VisitNativeRoots visits roots that are keyed off native pointers, such as ArtFields and // ArtMethods. template<class Visitor> void VisitNativeRoots(Visitor& visitor, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // When the class is verified, set the kAccPreverified flag on each method. void SetPreverifiedFlagOnAllMethods(size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - template <bool kVisitClass, typename Visitor> - void VisitReferences(mirror::Class* klass, const Visitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the descriptor of the class. In a few cases a std::string is required; rather than // always creating one, the storage argument is populated and its internal c_str() returned. We do // this to avoid memory allocation in the common case. 
- const char* GetDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetArrayDescriptor(std::string* storage) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_); - bool DescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::ClassDef* GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_); static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass, uint32_t idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_); - std::string GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile& GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_); - const DexFile::TypeList* GetInterfaceTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_); // Asserts we are initialized or initializing in the given thread. void AssertInitializedOrInitializingInThread(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); Class* CopyOf(Thread* self, int32_t new_length, ArtMethod* const (&imt)[mirror::Class::kImtSize], - size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t pointer_size) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // For proxy class only. - ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_); // For proxy class only. - ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_); // For reference class only. 
- MemberOffset GetDisableIntrinsicFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - MemberOffset GetSlowPathFlagOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool GetSlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetSlowPath(bool enabled) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_); + MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_); + bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_); + void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_); - ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<String>* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_); void SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static MemberOffset DexCacheStringsOffset() { return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_); } @@ -1114,7 +1078,7 @@ class MANAGED Class FINAL : public Object { // May cause thread suspension due to EqualParameters. ArtMethod* GetDeclaredConstructor( Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore // fence. @@ -1124,7 +1088,7 @@ class MANAGED Class FINAL : public Object { } void operator()(mirror::Object* obj, size_t usable_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: const uint32_t class_size_; @@ -1133,7 +1097,7 @@ class MANAGED Class FINAL : public Object { }; // Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader. 
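IsBootStrapClassLoaded below relies on a convention used throughout the runtime: the boot class loader has no managed ClassLoader object, so a null loader means "defined by the boot class path". One consequence, sketched with a hypothetical helper:

// Parent delegation always bottoms out at null, which stands in for the
// boot class loader.
mirror::ClassLoader* TopmostManagedLoader(mirror::ClassLoader* loader)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  while (loader != nullptr && loader->GetParent() != nullptr) {
    loader = loader->GetParent();
  }
  return loader;  // null if the class came from the boot class path
}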
- bool IsBootStrapClassLoaded() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) { return GetClassLoader() == nullptr; } @@ -1145,35 +1109,39 @@ class MANAGED Class FINAL : public Object { return pointer_size; } - ALWAYS_INLINE ArtMethod* GetDirectMethodsPtrUnchecked() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetDirectMethodsPtrUnchecked() + SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE ArtMethod* GetVirtualMethodsPtrUnchecked() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetVirtualMethodsPtrUnchecked() + SHARED_REQUIRES(Locks::mutator_lock_); private: - void SetVerifyErrorClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetVerifyErrorClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); template <bool throw_on_failure, bool use_referrers_cache> bool ResolvedFieldAccessTest(Class* access_to, ArtField* field, uint32_t field_idx, DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type> bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method, uint32_t method_idx, DexCache* dex_cache) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool Implements(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsArrayAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsAssignableFromArray(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - void CheckObjectAlloc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_); // Unchecked editions are for root visiting. - ArtField* GetSFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtField* GetIFieldsUnchecked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_); + IterationRange<StrideIterator<ArtField>> GetSFieldsUnchecked() + SHARED_REQUIRES(Locks::mutator_lock_); + LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_); + IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked() + SHARED_REQUIRES(Locks::mutator_lock_); - bool ProxyDescriptorEquals(const char* match) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_); // Check that the pointer size matches the one in the class linker. ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size); @@ -1181,6 +1149,10 @@ class MANAGED Class FINAL : public Object { static MemberOffset EmbeddedImTableOffset(size_t pointer_size); static MemberOffset EmbeddedVTableOffset(size_t pointer_size); + template <typename Visitor> + void VisitReferences(mirror::Class* klass, const Visitor& visitor) + SHARED_REQUIRES(Locks::mutator_lock_); + // Defining class loader, or null for the "bootstrap" system loader. HeapReference<ClassLoader> class_loader_; @@ -1232,7 +1204,7 @@ class MANAGED Class FINAL : public Object { // Note: Shuffled back. 
uint32_t access_flags_; - // static, private, and <init> methods. Pointer to an ArtMethod array. + // static, private, and <init> methods. Pointer to an ArtMethod length-prefixed array. uint64_t direct_methods_; // instance fields @@ -1242,13 +1214,15 @@ class MANAGED Class FINAL : public Object { // listed in ifields; fields declared by a superclass are listed in // the superclass's Class.ifields. // - // ArtField arrays are allocated as an array of fields, and not an array of fields pointers. + // ArtFields are allocated as a length prefixed ArtField array, and not an array of pointers to + // ArtFields. uint64_t ifields_; - // Static fields + // Static fields length-prefixed array. uint64_t sfields_; - // Virtual methods defined in this class; invoked through vtable. Pointer to an ArtMethod array. + // Virtual methods defined in this class; invoked through vtable. Pointer to an ArtMethod + // length-prefixed array. uint64_t virtual_methods_; // Total size of the Class instance; used when allocating storage on gc heap. @@ -1266,24 +1240,12 @@ class MANAGED Class FINAL : public Object { // TODO: really 16bits int32_t dex_type_idx_; - // Number of direct fields. - uint32_t num_direct_methods_; - - // Number of instance fields. - uint32_t num_instance_fields_; - // Number of instance fields that are object refs. uint32_t num_reference_instance_fields_; // Number of static fields that are object refs, uint32_t num_reference_static_fields_; - // Number of static fields. - uint32_t num_static_fields_; - - // Number of virtual methods. - uint32_t num_virtual_methods_; - // Total object size; used when allocating storage on gc heap. // (For interfaces and abstract classes this will be zero.) // See also class_size_. @@ -1317,6 +1279,7 @@ class MANAGED Class FINAL : public Object { static GcRoot<Class> java_lang_Class_; friend struct art::ClassOffsets; // for verifying offset information + friend class Object; // For VisitReferences DISALLOW_IMPLICIT_CONSTRUCTORS(Class); }; diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h new file mode 100644 index 0000000000..e22ddd7e90 --- /dev/null +++ b/runtime/mirror/class_loader-inl.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_INL_H_ +#define ART_RUNTIME_MIRROR_CLASS_LOADER_INL_H_ + +#include "class_loader.h" + +#include "base/mutex-inl.h" +#include "class_table-inl.h" + +namespace art { +namespace mirror { + +template <VerifyObjectFlags kVerifyFlags, typename Visitor> +inline void ClassLoader::VisitReferences(mirror::Class* klass, const Visitor& visitor) { + // Visit instance fields first. + VisitInstanceFieldsReferences(klass, visitor); + // Visit classes loaded after. 
+ ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); + ClassTable* const class_table = GetClassTable(); + if (class_table != nullptr) { + class_table->VisitRoots(visitor); + } +} + +} // namespace mirror +} // namespace art + +#endif // ART_RUNTIME_MIRROR_CLASS_LOADER_INL_H_ diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h index b10a296f18..f27b6155ce 100644 --- a/runtime/mirror/class_loader.h +++ b/runtime/mirror/class_loader.h @@ -22,9 +22,12 @@ namespace art { struct ClassLoaderOffsets; +class ClassTable; namespace mirror { +class Class; + // C++ mirror of java.lang.ClassLoader class MANAGED ClassLoader : public Object { public: @@ -32,17 +35,36 @@ class MANAGED ClassLoader : public Object { static constexpr uint32_t InstanceSize() { return sizeof(ClassLoader); } - ClassLoader* GetParent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_)); } + ClassTable* GetClassTable() SHARED_REQUIRES(Locks::mutator_lock_) { + return reinterpret_cast<ClassTable*>( + GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_))); + } + void SetClassTable(ClassTable* class_table) SHARED_REQUIRES(Locks::mutator_lock_) { + SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_), + reinterpret_cast<uint64_t>(class_table)); + } private: + // Visit instance fields of the class loader as well as its associated classes. + // Null class loader is handled by ClassLinker::VisitClassRoots. + template <VerifyObjectFlags kVerifyFlags, typename Visitor> + void VisitReferences(mirror::Class* klass, const Visitor& visitor) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::classlinker_classes_lock_); + // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". HeapReference<Object> packages_; HeapReference<ClassLoader> parent_; HeapReference<Object> proxyCache_; + // Native pointer to the class table; this needs to be zeroed out when writing the image. 
+ uint32_t padding_ ATTRIBUTE_UNUSED; + uint64_t class_table_; friend struct art::ClassLoaderOffsets; // for verifying offset information + friend class Object; // For VisitReferences DISALLOW_IMPLICIT_CONSTRUCTORS(ClassLoader); }; diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index 0ce83ec746..ba49a15f22 100644 --- a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -48,12 +48,12 @@ class MANAGED DexCache FINAL : public Object { void Init(const DexFile* dex_file, String* location, ObjectArray<String>* strings, ObjectArray<Class>* types, PointerArray* methods, PointerArray* fields, - size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_); void Fixup(ArtMethod* trampoline, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_)); } @@ -73,76 +73,76 @@ class MANAGED DexCache FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_); } - size_t NumStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) { return GetStrings()->GetLength(); } - size_t NumResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedTypes()->GetLength(); } - size_t NumResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedMethods()->GetLength(); } - size_t NumResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedFields()->GetLength(); } - String* GetResolvedString(uint32_t string_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetResolvedString(uint32_t string_idx) SHARED_REQUIRES(Locks::mutator_lock_) { return GetStrings()->Get(string_idx); } void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // TODO default transaction support. GetStrings()->Set(string_idx, resolved); } Class* GetResolvedType(uint32_t type_idx) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetResolvedTypes()->Get(type_idx); } void SetResolvedType(uint32_t type_idx, Class* resolved) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Pointer sized variant, used for patching. ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Pointer sized variant, used for patching. 
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, size_t ptr_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<ObjectArray<String>>(StringsOffset()); } - ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<ObjectArray<Class>>( OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_)); } - PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + PointerArray* GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<PointerArray>(ResolvedMethodsOffset()); } - PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + PointerArray* GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<PointerArray>(ResolvedFieldsOffset()); } - const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_)); } - void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE { return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file); } diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc index 02e4484a98..ff6847cf7a 100644 --- a/runtime/mirror/field.cc +++ b/runtime/mirror/field.cc @@ -61,10 +61,10 @@ ArtField* Field::GetArtField() { DCHECK_EQ(declaring_class->NumStaticFields(), 2U); // 0 == Class[] interfaces; 1 == Class[][] throws; if (GetDexFieldIndex() == 0) { - return &declaring_class->GetSFields()[0]; + return &declaring_class->GetSFieldsPtr()->At(0); } else { DCHECK_EQ(GetDexFieldIndex(), 1U); - return &declaring_class->GetSFields()[1]; + return &declaring_class->GetSFieldsPtr()->At(1); } } mirror::DexCache* const dex_cache = declaring_class->GetDexCache(); diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h index d927f0c258..edaddbd2e7 100644 --- a/runtime/mirror/field.h +++ b/runtime/mirror/field.h @@ -36,66 +36,66 @@ class String; // C++ mirror of java.lang.reflect.Field. 
class MANAGED Field : public AccessibleObject { public: - static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) { return static_class_.Read(); } - static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return array_class_.Read(); } - ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_)); } - mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_)); } - uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_)); } - bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccStatic) != 0; } - bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccFinal) != 0; } - bool IsVolatile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) { return (GetAccessFlags() & kAccVolatile) != 0; } ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetType()->GetPrimitiveType(); } - mirror::Class* GetType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_)); } - int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_)); } - static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); // Slow, try to use only for PrettyField and such. 
- ArtField* GetArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_); template <bool kTransactionActive = false> static mirror::Field* CreateFromArtField(Thread* self, ArtField* field, bool force_resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); private: HeapReference<mirror::Class> declaring_class_; @@ -105,27 +105,27 @@ class MANAGED Field : public AccessibleObject { int32_t offset_; template<bool kTransactionActive> - void SetDeclaringClass(mirror::Class* c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c); } template<bool kTransactionActive> - void SetType(mirror::Class* type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type); } template<bool kTransactionActive> - void SetAccessFlags(uint32_t flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) { SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags); } template<bool kTransactionActive> - void SetDexFieldIndex(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) { SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx); } template<bool kTransactionActive> - void SetOffset(uint32_t offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) { SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset); } diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h index 1ea5beeae3..b21ecdf6e8 100644 --- a/runtime/mirror/iftable.h +++ b/runtime/mirror/iftable.h @@ -25,34 +25,34 @@ namespace mirror { class MANAGED IfTable FINAL : public ObjectArray<Object> { public: - ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) { Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass(); DCHECK(interface != nullptr); return interface; } ALWAYS_INLINE void SetInterface(int32_t i, Class* interface) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - PointerArray* GetMethodArray(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) { auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray)); DCHECK(method_array != nullptr); return method_array; } - size_t GetMethodArrayCount(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) { auto* method_array = down_cast<PointerArray*>(Get((i * kMax) + kMethodArray)); return method_array == nullptr ? 
0u : method_array->GetLength(); } - void SetMethodArray(int32_t i, PointerArray* arr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(arr != nullptr); auto idx = i * kMax + kMethodArray; DCHECK(Get(idx) == nullptr); Set<false>(idx, arr); } - size_t Count() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) { return GetLength() / kMax; } diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h index 42c76c045c..0c28e4f580 100644 --- a/runtime/mirror/method.h +++ b/runtime/mirror/method.h @@ -29,25 +29,25 @@ class Class; class MANAGED Method : public AbstractMethod { public: static Method* CreateFromArtMethod(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) { return static_class_.Read(); } - static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_); - static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return array_class_.Read(); } - static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: static GcRoot<Class> static_class_; // java.lang.reflect.Method.class. 
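The dominant change across these hunks is a rename of the thread-safety annotation macros: SHARED_LOCKS_REQUIRED(x) becomes SHARED_REQUIRES(x), LOCKS_EXCLUDED(x) becomes the negative capability REQUIRES(!x), and allocating entry points such as CreateFromArtMethod gain REQUIRES(!Roles::uninterruptible_). As a rough map from macro to compiler attribute, here is a minimal sketch of how capability-style macros are commonly layered over Clang's thread safety analysis; the macro bodies below are illustrative assumptions, not ART's actual definitions.

// Minimal sketch (assumed definitions, not ART's macros): capability-style
// thread-safety macros over Clang's attributes. Other compilers see no-ops.
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // No-op outside Clang.
#endif

// Caller must hold the capability (e.g. a mutex) exclusively.
#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
// Caller must hold the capability at least in shared (reader) mode.
#define SHARED_REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

Under this scheme, an annotation such as REQUIRES(!Locks::thread_list_lock_) expresses the old LOCKS_EXCLUDED contract as a negative capability: the analysis warns if a caller can reach the function while holding that lock.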
@@ -60,25 +60,25 @@ class MANAGED Method : public AbstractMethod { class MANAGED Constructor: public AbstractMethod { public: static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - static mirror::Class* StaticClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) { return static_class_.Read(); } - static void SetClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_); - static mirror::Class* ArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) { return array_class_.Read(); } - static void SetArrayClass(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); - static void ResetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class. diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index e019d5aa72..586ae30d19 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -25,6 +25,7 @@ #include "array-inl.h" #include "class.h" #include "class_linker.h" +#include "class_loader-inl.h" #include "lock_word-inl.h" #include "monitor.h" #include "object_array-inl.h" @@ -477,7 +478,7 @@ inline int8_t Object::GetFieldByteVolatile(MemberOffset field_offset) { template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, bool kIsVolatile> inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -495,7 +496,7 @@ inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags, bool kIsVolatile> inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kCheckTransaction) { DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction()); } @@ -941,13 +942,10 @@ inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrie return success; } -template<bool kVisitClass, bool kIsStatic, typename Visitor> +template<bool kIsStatic, typename Visitor> inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) { if (!kIsStatic && (ref_offsets != mirror::Class::kClassWalkSuper)) { // Instance fields and not the slow-path. 
- if (kVisitClass) { - visitor(this, ClassOffset(), kIsStatic); - } uint32_t field_offset = mirror::kObjectHeaderSize; while (ref_offsets != 0) { if ((ref_offsets & 1) != 0) { @@ -973,9 +971,9 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v ? klass->GetFirstReferenceStaticFieldOffset( Runtime::Current()->GetClassLinker()->GetImagePointerSize()) : klass->GetFirstReferenceInstanceFieldOffset(); - for (size_t i = 0; i < num_reference_fields; ++i) { + for (size_t i = 0u; i < num_reference_fields; ++i) { // TODO: Do a simpler check? - if (kVisitClass || field_offset.Uint32Value() != ClassOffset().Uint32Value()) { + if (field_offset.Uint32Value() != ClassOffset().Uint32Value()) { visitor(this, field_offset, kIsStatic); } field_offset = MemberOffset(field_offset.Uint32Value() + @@ -985,34 +983,45 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v } } -template<bool kVisitClass, typename Visitor> +template<typename Visitor> inline void Object::VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) { - VisitFieldsReferences<kVisitClass, false>( - klass->GetReferenceInstanceOffsets<kVerifyNone>(), visitor); + VisitFieldsReferences<false>(klass->GetReferenceInstanceOffsets<kVerifyNone>(), visitor); } -template<bool kVisitClass, typename Visitor> +template<typename Visitor> inline void Object::VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) { DCHECK(!klass->IsTemp()); - klass->VisitFieldsReferences<kVisitClass, true>(0, visitor); + klass->VisitFieldsReferences<true>(0, visitor); +} + +template<VerifyObjectFlags kVerifyFlags> +inline bool Object::IsClassLoader() { + return GetClass<kVerifyFlags>()->IsClassLoaderClass(); +} + +template<VerifyObjectFlags kVerifyFlags> +inline mirror::ClassLoader* Object::AsClassLoader() { + DCHECK(IsClassLoader<kVerifyFlags>()); + return down_cast<mirror::ClassLoader*>(this); } -template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visitor, - typename JavaLangRefVisitor> +template <VerifyObjectFlags kVerifyFlags, typename Visitor, typename JavaLangRefVisitor> inline void Object::VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor) { mirror::Class* klass = GetClass<kVerifyFlags>(); + visitor(this, ClassOffset(), false); if (klass == Class::GetJavaLangClass()) { - AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor); + AsClass<kVerifyNone>()->VisitReferences(klass, visitor); } else if (klass->IsArrayClass() || klass->IsStringClass()) { if (klass->IsObjectArrayClass<kVerifyNone>()) { - AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor); - } else if (kVisitClass) { - visitor(this, ClassOffset(), false); + AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor); } + } else if (klass->IsClassLoaderClass()) { + mirror::ClassLoader* class_loader = AsClassLoader<kVerifyFlags>(); + class_loader->VisitReferences<kVerifyFlags>(klass, visitor); } else { DCHECK(!klass->IsVariableSize()); - VisitInstanceFieldsReferences<kVisitClass>(klass, visitor); + VisitInstanceFieldsReferences(klass, visitor); if (UNLIKELY(klass->IsTypeOfReferenceClass<kVerifyNone>())) { ref_visitor(klass, AsReference()); } diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index b177e2f579..4d941302f9 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -47,7 +47,7 @@ class CopyReferenceFieldsWithReadBarrierVisitor { : dest_obj_(dest_obj) {} void 
operator()(Object* obj, MemberOffset offset, bool /* is_static */) const - ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { // GetFieldObject() contains a RB. Object* ref = obj->GetFieldObject<Object>(offset); // No WB here as a large object space does not have a card table @@ -56,13 +56,18 @@ class CopyReferenceFieldsWithReadBarrierVisitor { } void operator()(mirror::Class* klass, mirror::Reference* ref) const - ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) { // Copy java.lang.ref.Reference.referent which isn't visited in // Object::VisitReferences(). DCHECK(klass->IsTypeOfReferenceClass()); this->operator()(ref, mirror::Reference::ReferentOffset(), false); } + // Unused since we don't copy class native roots. + void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {} + private: Object* const dest_obj_; }; @@ -80,7 +85,7 @@ Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* s // object above, copy references fields one by one again with a // RB. TODO: Optimize this later? CopyReferenceFieldsWithReadBarrierVisitor visitor(dest); - src->VisitReferences<true>(visitor, visitor); + src->VisitReferences(visitor, visitor); } gc::Heap* heap = Runtime::Current()->GetHeap(); // Perform write barriers on copied object references. @@ -102,12 +107,12 @@ Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* s // An allocation pre-fence visitor that copies the object. class CopyObjectVisitor { public: - explicit CopyObjectVisitor(Thread* self, Handle<Object>* orig, size_t num_bytes) + CopyObjectVisitor(Thread* self, Handle<Object>* orig, size_t num_bytes) : self_(self), orig_(orig), num_bytes_(num_bytes) { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Object::CopyObject(self_, obj, orig_->Get(), num_bytes_); } @@ -203,15 +208,13 @@ void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_val return; } for (Class* cur = c; cur != nullptr; cur = cur->GetSuperClass()) { - ArtField* fields = cur->GetIFields(); - for (size_t i = 0, count = cur->NumInstanceFields(); i < count; ++i) { + for (ArtField& field : cur->GetIFields()) { StackHandleScope<1> hs(Thread::Current()); Handle<Object> h_object(hs.NewHandle(new_value)); - ArtField* field = &fields[i]; - if (field->GetOffset().Int32Value() == field_offset.Int32Value()) { - CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot); + if (field.GetOffset().Int32Value() == field_offset.Int32Value()) { + CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot); // TODO: resolve the field type for moving GC. 
- mirror::Class* field_type = field->GetType<!kMovingCollector>(); + mirror::Class* field_type = field.GetType<!kMovingCollector>(); if (field_type != nullptr) { CHECK(field_type->IsAssignableFrom(new_value->GetClass())); } @@ -224,13 +227,11 @@ void Object::CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_val return; } if (IsClass()) { - ArtField* fields = AsClass()->GetSFields(); - for (size_t i = 0, count = AsClass()->NumStaticFields(); i < count; ++i) { - ArtField* field = &fields[i]; - if (field->GetOffset().Int32Value() == field_offset.Int32Value()) { - CHECK_NE(field->GetTypeAsPrimitiveType(), Primitive::kPrimNot); + for (ArtField& field : AsClass()->GetSFields()) { + if (field.GetOffset().Int32Value() == field_offset.Int32Value()) { + CHECK_NE(field.GetTypeAsPrimitiveType(), Primitive::kPrimNot); // TODO: resolve the field type for moving GC. - mirror::Class* field_type = field->GetType<!kMovingCollector>(); + mirror::Class* field_type = field.GetType<!kMovingCollector>(); if (field_type != nullptr) { CHECK(field_type->IsAssignableFrom(new_value->GetClass())); } diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index f1c96b5007..3cec29cd43 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -37,6 +37,7 @@ namespace mirror { class Array; class Class; +class ClassLoader; class FinalizerReference; template<class T> class ObjectArray; template<class T> class PrimitiveArray; @@ -84,40 +85,40 @@ class MANAGED LOCKABLE Object { template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - ALWAYS_INLINE Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_); - Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_); #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif - void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_); #ifndef USE_BAKER_OR_BROOKS_READ_BARRIER NO_RETURN #endif bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_); // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in // invoke-interface to detect incompatible interface types. 
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_); - Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); int32_t IdentityHashCode() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); static MemberOffset MonitorOffset() { return OFFSET_OF_OBJECT_MEMBER(Object, monitor_); @@ -126,298 +127,303 @@ class MANAGED LOCKABLE Object { // As_volatile can be false if the mutators are suspended. This is an optimization since it // avoids the barriers. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_); bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); uint32_t GetLockOwnerThreadId(); - mirror::Object* MonitorEnter(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) EXCLUSIVE_LOCK_FUNCTION(); - bool MonitorExit(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) UNLOCK_FUNCTION(); - void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); + void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, 
ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_); template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_); + + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + bool IsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> + ClassLoader* AsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsFloatArray() 
SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsString() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + String* AsString() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_); // Accessor for Java type fields. 
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false> ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, ReadBarrierOption kReadBarrierOption = kWithReadBarrier> ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset, Object* old_value, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE uint8_t 
GetFieldBooleanVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void 
SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value, int32_t new_value) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value, int32_t new_value) ALWAYS_INLINE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); 
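Object declares its compare-and-swap helpers in weak and strong flavors for both 32- and 64-bit fields. The weak variants may fail spuriously even when the field still holds old_value, so they belong inside retry loops; the strong variants suit one-shot updates where a spurious failure would be observable. A short sketch of the usual retry idiom built on the declarations above; the helper function and its name are a hypothetical example, not ART code:

// Illustrative helper (not part of ART): atomically increment a 32-bit field.
// The weak CAS can fail spuriously, hence the retry loop.
inline void IncrementField32(mirror::Object* obj, MemberOffset field_offset)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  int32_t old_value;
  do {
    // Volatile read so each iteration observes concurrent updates.
    old_value = obj->GetField32<kDefaultVerifyFlags, /*kIsVolatile=*/true>(field_offset);
  } while (!obj->CasFieldWeakSequentiallyConsistent32</*kTransactionActive=*/false>(
      field_offset, old_value, old_value + 1));
}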
template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> void SetFieldPtr(MemberOffset field_offset, T new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>( field_offset, new_value, sizeof(void*)); } @@ -426,7 +432,7 @@ class MANAGED LOCKABLE Object { VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T> ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size; if (pointer_size == 4) { intptr_t ptr = reinterpret_cast<intptr_t>(new_value); @@ -439,13 +445,14 @@ class MANAGED LOCKABLE Object { } } // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). - template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, - typename Visitor, typename JavaLangRefVisitor = VoidFunctor> + // SHARED_REQUIRES(Locks::mutator_lock_). + template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + typename Visitor, + typename JavaLangRefVisitor = VoidFunctor> void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor) NO_THREAD_SAFETY_ANALYSIS; - ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_); // Used by object_test. static void SetHashCodeSeed(uint32_t new_seed); @@ -456,13 +463,13 @@ class MANAGED LOCKABLE Object { // Accessors for non-Java type fields template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> T GetFieldPtr(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*)); } template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false> ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size; if (pointer_size == 4) { return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset)); @@ -475,30 +482,30 @@ class MANAGED LOCKABLE Object { } // TODO: Fixme when annotalysis works with visitors. 
- template<bool kVisitClass, bool kIsStatic, typename Visitor> + template<bool kIsStatic, typename Visitor> void VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) HOT_ATTR NO_THREAD_SAFETY_ANALYSIS; - template<bool kVisitClass, typename Visitor> + template<typename Visitor> void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - template<bool kVisitClass, typename Visitor> + SHARED_REQUIRES(Locks::mutator_lock_); + template<typename Visitor> void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: template<typename kSize, bool kIsVolatile> ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template<typename kSize, bool kIsVolatile> ALWAYS_INLINE kSize GetField(MemberOffset field_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Verify the type correctness of stores to fields. // TODO: This can cause thread suspension and isn't moving GC safe. void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckFieldAssignment(MemberOffset field_offset, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kCheckFieldAssignments) { CheckFieldAssignmentImpl(field_offset, new_value); } @@ -509,7 +516,7 @@ class MANAGED LOCKABLE Object { // Class::CopyOf(). static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src, size_t num_bytes) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static Atomic<uint32_t> hash_code_seed; diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h index 4a7e7b35da..5b73557941 100644 --- a/runtime/mirror/object_array-inl.h +++ b/runtime/mirror/object_array-inl.h @@ -269,11 +269,8 @@ inline MemberOffset ObjectArray<T>::OffsetOfElement(int32_t i) { (i * sizeof(HeapReference<Object>))); } -template<class T> template<const bool kVisitClass, typename Visitor> +template<class T> template<typename Visitor> void ObjectArray<T>::VisitReferences(const Visitor& visitor) { - if (kVisitClass) { - visitor(this, ClassOffset(), false); - } const size_t length = static_cast<size_t>(GetLength()); for (size_t i = 0; i < length; ++i) { visitor(this, OffsetOfElement(i), false); diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h index 5eddc18745..b45cafd2a3 100644 --- a/runtime/mirror/object_array.h +++ b/runtime/mirror/object_array.h @@ -32,21 +32,21 @@ class MANAGED ObjectArray: public Array { static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); - T* Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + T* Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_); // Returns true if the object can be stored into 
the array. If not, throws // an ArrayStoreException and returns false. - // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_). template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS; - ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_); + // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS; @@ -54,43 +54,44 @@ class MANAGED ObjectArray: public Array { // Set element without bound and element type checks, to be used in limited // circumstances, such as during boot image writing. // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS; // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). + // SHARED_REQUIRES(Locks::mutator_lock_). template<bool kTransactionActive, bool kCheckTransaction = true, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS; - ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_); // Copy src into this array (dealing with overlaps as memmove does) without assignability checks. void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos, - int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t count) SHARED_REQUIRES(Locks::mutator_lock_); // Copy src into this array assuming no overlap and without assignability checks. void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos, - int32_t count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t count) SHARED_REQUIRES(Locks::mutator_lock_); // Copy src into this array with assignability checks. void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos, int32_t count, bool throw_exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ObjectArray<T>* CopyOf(Thread* self, int32_t new_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - - // TODO fix thread safety analysis broken by the use of template. This should be - // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_). - template<const bool kVisitClass, typename Visitor> - void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static MemberOffset OffsetOfElement(int32_t i); private: + // TODO fix thread safety analysis broken by the use of template. This should be + // SHARED_REQUIRES(Locks::mutator_lock_). 
+ template<typename Visitor> + void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS; + + friend class Object; // For VisitReferences DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray); }; diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h index 055be8524c..2a5c88e29f 100644 --- a/runtime/mirror/object_reference.h +++ b/runtime/mirror/object_reference.h @@ -33,11 +33,11 @@ class Object; template<bool kPoisonReferences, class MirrorType> class MANAGED ObjectReference { public: - MirrorType* AsMirrorPtr() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) { return UnCompress(); } - void Assign(MirrorType* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) { reference_ = Compress(other); } @@ -56,18 +56,18 @@ class MANAGED ObjectReference { protected: ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : reference_(Compress(mirror_ptr)) { } // Compress reference to its bit representation. - static uint32_t Compress(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) { uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr); return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits); } // Uncompress an encoded reference from its bit representation. - MirrorType* UnCompress() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) { uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_; return reinterpret_cast<MirrorType*>(as_bits); } @@ -83,11 +83,11 @@ template<class MirrorType> class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> { public: static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return HeapReference<MirrorType>(mirror_ptr); } private: - HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {} }; @@ -95,16 +95,16 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr template<class MirrorType> class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> { public: - CompressedReference<MirrorType>() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_) : mirror::ObjectReference<false, MirrorType>(nullptr) {} static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return CompressedReference<MirrorType>(p); } private: - CompressedReference<MirrorType>(MirrorType* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_) : mirror::ObjectReference<false, MirrorType>(p) {} }; diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc index 85ea28f9f5..f5a04457e7 100644 --- a/runtime/mirror/object_test.cc +++ b/runtime/mirror/object_test.cc @@ -48,7 +48,7 @@ class 
ObjectTest : public CommonRuntimeTest { const char* utf8_in, const char* utf16_expected_le, int32_t expected_hash) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]); for (int32_t i = 0; i < expected_utf16_length; i++) { uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) | diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h index 4bbdb99553..51ae760515 100644 --- a/runtime/mirror/reference.h +++ b/runtime/mirror/reference.h @@ -62,49 +62,49 @@ class MANAGED Reference : public Object { return OFFSET_OF_OBJECT_MEMBER(Reference, referent_); } template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>( ReferentOffset()); } template<bool kTransactionActive> - void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent); } template<bool kTransactionActive> - void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr); } // Volatile read/write is not necessary since the java pending next is only accessed from // the java threads for cleared references. Once these cleared references have a null referent, // we never end up reading their pending next from the GC again. - Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<Reference>(PendingNextOffset()); } template<bool kTransactionActive> - void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetPendingNext(Reference* pending_next) SHARED_REQUIRES(Locks::mutator_lock_) { SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next); } - bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool IsEnqueued() SHARED_REQUIRES(Locks::mutator_lock_) { // Since the references are stored as cyclic lists it means that once enqueued, the pending // next is always non-null. return GetPendingNext() != nullptr; } - bool IsEnqueuable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsEnqueuable() SHARED_REQUIRES(Locks::mutator_lock_); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_ref_Reference_.IsNull()); return java_lang_ref_Reference_.Read<kReadBarrierOption>(); } static void SetClass(Class* klass); static void ResetClass(); - static void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: // Note: This avoids a read barrier, it should only be used by the GC. 
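The Compress/UnCompress pair in object_reference.h above implements optional reference poisoning: with kPoisonReferences set, the 32-bit slot stores the arithmetic negation of the pointer bits, so code that dereferences a compressed reference without decoding it faults immediately. A standalone round-trip sketch, assuming (as ART's 32-bit heap references do) that managed objects live in the low 4 GiB:

    #include <cstdint>

    template <bool kPoison>
    uint32_t Compress(void* mirror_ptr) {
      uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
      return static_cast<uint32_t>(kPoison ? -as_bits : as_bits);  // Modular negation.
    }

    template <bool kPoison>
    void* UnCompress(uint32_t reference) {
      // Unary minus on uint32_t negates modulo 2^32, mirroring the encode step.
      uintptr_t as_bits = kPoison ? -reference : reference;
      return reinterpret_cast<void*>(as_bits);
    }
    // For any pointer below 2^32, UnCompress<kPoison>(Compress<kPoison>(p)) == p.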
- HeapReference<Object>* GetReferentReferenceAddr() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset()); } @@ -130,10 +130,10 @@ class MANAGED FinalizerReference : public Reference { } template<bool kTransactionActive> - void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) { return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie); } - Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object>(ZombieOffset()); } diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h index dc7131e46e..1167391e23 100644 --- a/runtime/mirror/stack_trace_element.h +++ b/runtime/mirror/stack_trace_element.h @@ -31,32 +31,32 @@ namespace mirror { // C++ mirror of java.lang.StackTraceElement class MANAGED StackTraceElement FINAL : public Object { public: - String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_)); } - String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_)); } - String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_)); } - int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_)); } static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name, int32_t line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static void SetClass(Class* java_lang_StackTraceElement); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_); + static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_StackTraceElement_.IsNull()); return java_lang_StackTraceElement_.Read(); } @@ -71,7 +71,7 @@ class MANAGED StackTraceElement FINAL : public Object { template<bool kTransactionActive> void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name, int32_t line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static GcRoot<Class> java_lang_StackTraceElement_; diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index b689057426..3a39f587da 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -42,7 +42,7 @@ class SetStringCountVisitor { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + 
SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); @@ -61,7 +61,7 @@ class SetStringCountAndBytesVisitor { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); @@ -88,7 +88,7 @@ class SetStringCountAndValueVisitorFromCharArray { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); @@ -111,7 +111,7 @@ class SetStringCountAndValueVisitorFromString { } void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Avoid AsString as object is not yet in live bitmap or allocation stack. String* string = down_cast<String*>(obj); string->SetCount(count_); diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h index af06385401..eb2e1f6977 100644 --- a/runtime/mirror/string.h +++ b/runtime/mirror/string.h @@ -49,87 +49,87 @@ class MANAGED String FINAL : public Object { return OFFSET_OF_OBJECT_MEMBER(String, value_); } - uint16_t* GetValue() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) { return &value_[0]; } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_); template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) { return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_)); } - void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) { // Count is invariant so use non-transactional mode. Also disable check as we may run inside // a transaction. DCHECK_LE(0, new_count); SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count); } - int32_t GetHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_); // Computes, stores, and returns the hash code. 
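GetHashCode above is declared next to ComputeHashCode, the usual lazily cached, invariant hash pattern. A sketch of the likely shape of the accessor; the real body lives in string.cc and may differ:

    // Sketch: hash_code_ is 0 until first computed, so a string whose true hash
    // happens to be 0 simply recomputes on every call.
    int32_t String::GetHashCode() {
      int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_));
      if (UNLIKELY(result == 0)) {
        result = ComputeHashCode();  // Computes, stores via SetHashCode(), returns.
      }
      return result;
    }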
- int32_t ComputeHashCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_); - int32_t GetUtfLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_); - uint16_t CharAt(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_); - void SetCharAt(int32_t index, uint16_t c) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_); - String* Intern() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + String* Intern() SHARED_REQUIRES(Locks::mutator_lock_); template <bool kIsInstrumented, typename PreFenceVisitor> ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length, gc::AllocatorType allocator_type, const PreFenceVisitor& pre_fence_visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template <bool kIsInstrumented> ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length, Handle<ByteArray> array, int32_t offset, int32_t high_byte, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template <bool kIsInstrumented> ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count, Handle<CharArray> array, int32_t offset, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); template <bool kIsInstrumented> ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length, Handle<String> string, int32_t offset, gc::AllocatorType allocator_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromModifiedUtf8(Thread* self, const char* utf) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // TODO: This is only used in the interpreter to compare against // entries from a dex files constant pool (ArtField names). Should // we unify this with Equals(const StringPiece&); ? - bool Equals(const char* modified_utf8) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_); // TODO: This is only used to compare DexCache.location with // a dex_file's location (which is an std::string). Do we really // need this in mirror::String just for that one usage ? 
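Every allocating entry point above (Alloc, AllocFromByteArray, AllocFromUtf16, and friends) now also carries REQUIRES(!Roles::uninterruptible_): allocation can trigger a GC, and with it a thread suspension, so it must be rejected at compile time inside uninterruptible scopes. A sketch of how a "role" can be modeled as a Clang capability; the names UninterruptibleRole and AllocateManagedObject are hypothetical, ART's real definitions live in runtime/base:

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))

    class CAPABILITY("role") UninterruptibleRole {};   // Hypothetical name.

    struct Roles {
      static UninterruptibleRole uninterruptible_;     // "Held" while suspension is forbidden.
    };

    // Anything that may allocate, and therefore suspend for GC, forbids the role:
    void* AllocateManagedObject(size_t bytes) REQUIRES(!Roles::uninterruptible_);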
bool Equals(const StringPiece& modified_utf8) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - bool Equals(String* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_); // Compare UTF-16 code point values not in a locale-sensitive manner int Compare(int32_t utf16_length, const char* utf8_data_in); @@ -137,21 +137,22 @@ class MANAGED String FINAL : public Object { // TODO: do we need this overload? give it a more intention-revealing name. bool Equals(const uint16_t* that_chars, int32_t that_offset, int32_t that_length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Create a modified UTF-8 encoded std::string from a java/lang/String object. - std::string ToModifiedUtf8() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_); - int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_); - int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_); - CharArray* ToCharArray(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_String_.IsNull()); return java_lang_String_.Read(); } @@ -159,10 +160,10 @@ class MANAGED String FINAL : public Object { static void SetClass(Class* java_lang_String); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) { // Hash code is invariant so use non-transactional mode. Also disable check as we may run inside // a transaction. 
DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_))); diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc index 1c21edbc42..e8633def48 100644 --- a/runtime/mirror/throwable.cc +++ b/runtime/mirror/throwable.cc @@ -53,7 +53,7 @@ void Throwable::SetCause(Throwable* cause) { } } -void Throwable::SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(state != nullptr); if (Runtime::Current()->IsActiveTransaction()) { SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state); diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h index 9cc0b6f5c4..0f488dc46a 100644 --- a/runtime/mirror/throwable.h +++ b/runtime/mirror/throwable.h @@ -31,38 +31,38 @@ namespace mirror { // C++ mirror of java.lang.Throwable class MANAGED Throwable : public Object { public: - void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_); - String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_)); } - std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_); // This is a runtime version of initCause, you shouldn't use it if initCause may have been // overridden. Also it asserts rather than throwing exceptions. Currently this is only used // in cases like the verifier where the checks cannot fail and initCause isn't overridden. - void SetCause(Throwable* cause) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetStackState(Object* state) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_); + void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_); + bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_); - static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!java_lang_Throwable_.IsNull()); return java_lang_Throwable_.Read(); } - int32_t GetStackDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_); static void SetClass(Class* java_lang_Throwable); static void ResetClass(); static void VisitRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_)); } - Object* GetStackTrace() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_trace_)); } diff --git a/runtime/modifiers.h b/runtime/modifiers.h index 8586dd196b..8b363a686f 100644 --- a/runtime/modifiers.h +++ b/runtime/modifiers.h @@ -55,6 +55,9 @@ static constexpr uint32_t kAccDontInline = 0x00400000; // method (dex // Special runtime-only flags. 
// Note: if only kAccClassIsReference is set, we have a soft reference. +// class is ClassLoader or one of its subclasses +static constexpr uint32_t kAccClassIsClassLoaderClass = 0x10000000; + // class/ancestor overrides finalize() static constexpr uint32_t kAccClassIsFinalizable = 0x80000000; // class is a soft/weak/phantom ref diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 4be25d6946..da6ee259b5 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -298,7 +298,7 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) __attribute__((format(printf, 1, 2))); static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { va_list args; va_start(args, fmt); Thread* self = Thread::Current(); @@ -667,11 +667,9 @@ void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWo // Suspend the owner, inflate. First change to blocked and give up mutator_lock_. self->SetMonitorEnterObject(obj.Get()); bool timed_out; - Thread* owner; - { - ScopedThreadStateChange tsc(self, kBlocked); - owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out); - } + self->TransitionFromRunnableToSuspended(kBlocked); + Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out); + self->TransitionFromSuspendedToRunnable(); if (owner != nullptr) { // We succeeded in suspending the thread, check the lock's status didn't change. lock_word = obj->GetLockWord(true); @@ -1083,7 +1081,7 @@ bool Monitor::IsValidLockWord(LockWord lock_word) { } } -bool Monitor::IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) { MutexLock mu(Thread::Current(), monitor_lock_); return owner_ != nullptr; } @@ -1146,16 +1144,24 @@ void MonitorList::EnsureNewMonitorsDisallowed() { CHECK(!allow_new_monitors_); } +void MonitorList::BroadcastForNewMonitors() { + CHECK(kUseReadBarrier); + Thread* self = Thread::Current(); + MutexLock mu(self, monitor_list_lock_); + monitor_add_condition_.Broadcast(self); +} + void MonitorList::Add(Monitor* m) { Thread* self = Thread::Current(); MutexLock mu(self, monitor_list_lock_); - while (UNLIKELY(!allow_new_monitors_)) { + while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) || + (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { monitor_add_condition_.WaitHoldingLocks(self); } list_.push_front(m); } -void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) { +void MonitorList::SweepMonitorList(IsMarkedVisitor* visitor) { Thread* self = Thread::Current(); MutexLock mu(self, monitor_list_lock_); for (auto it = list_.begin(); it != list_.end(); ) { @@ -1163,7 +1169,7 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) { // Disable the read barrier in GetObject() as this is called by GC. mirror::Object* obj = m->GetObject<kWithoutReadBarrier>(); // The object of a monitor can be null if we have deflated it. - mirror::Object* new_obj = obj != nullptr ? callback(obj, arg) : nullptr; + mirror::Object* new_obj = obj != nullptr ? 
visitor->IsMarked(obj) : nullptr; if (new_obj == nullptr) { VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object " << obj; @@ -1176,29 +1182,30 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) { } } -struct MonitorDeflateArgs { - MonitorDeflateArgs() : self(Thread::Current()), deflate_count(0) {} - Thread* const self; - size_t deflate_count; -}; - -static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - MonitorDeflateArgs* args = reinterpret_cast<MonitorDeflateArgs*>(arg); - if (Monitor::Deflate(args->self, object)) { - DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked); - ++args->deflate_count; - // If we deflated, return null so that the monitor gets removed from the array. - return nullptr; +class MonitorDeflateVisitor : public IsMarkedVisitor { + public: + MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {} + + virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE + SHARED_REQUIRES(Locks::mutator_lock_) { + if (Monitor::Deflate(self_, object)) { + DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked); + ++deflate_count_; + // If we deflated, return null so that the monitor gets removed from the array. + return nullptr; + } + return object; // Monitor was not deflated. } - return object; // Monitor was not deflated. -} + + Thread* const self_; + size_t deflate_count_; +}; size_t MonitorList::DeflateMonitors() { - MonitorDeflateArgs args; - Locks::mutator_lock_->AssertExclusiveHeld(args.self); - SweepMonitorList(MonitorDeflateCallback, &args); - return args.deflate_count; + MonitorDeflateVisitor visitor; + Locks::mutator_lock_->AssertExclusiveHeld(visitor.self_); + SweepMonitorList(&visitor); + return visitor.deflate_count_; } MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(nullptr), entry_count_(0) { diff --git a/runtime/monitor.h b/runtime/monitor.h index 8f3a91d7f6..346e8662b2 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -62,34 +62,36 @@ class Monitor { static uint32_t GetLockOwnerThreadId(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // TODO: Reading lock owner without holding lock is racy. + // NO_THREAD_SAFETY_ANALYSIS for mon->Lock. static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj) EXCLUSIVE_LOCK_FUNCTION(obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; + + // NO_THREAD_SAFETY_ANALYSIS for mon->Unlock. static bool MonitorExit(Thread* thread, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(obj); + SHARED_REQUIRES(Locks::mutator_lock_) + UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS; - static void Notify(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { DoNotify(self, obj, false); } - static void NotifyAll(Thread* self, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { DoNotify(self, obj, true); } // Object.wait(). Also called for class init. + // NO_THREAD_SAFETY_ANALYSIS for mon->Wait. 
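Several monitor entry points above gain NO_THREAD_SAFETY_ANALYSIS with a comment naming the lock involved (mon->Lock, mon->Unlock, mon->Wait). The opt-out is needed because the capability being acquired, the monitor_lock_ of whichever Monitor the object currently carries, is chosen at runtime, which the static analysis cannot model. A toy illustration with hypothetical types:

    #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

    struct ToyMonitor { void Lock(); void Unlock(); };
    struct ToyObject  { ToyMonitor* GetMonitor(); };

    void NotifyOne(ToyObject* obj) NO_THREAD_SAFETY_ANALYSIS {
      ToyMonitor* mon = obj->GetMonitor();  // Which lock we take depends on obj,
      mon->Lock();                          // so no static capability can be named;
      // ... wake one waiter ...            // hence the analysis is suppressed here.
      mon->Unlock();
    }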
static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow, ThreadState why) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; static void DescribeWait(std::ostream& os, const Thread* thread) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Used to implement JDWP's ThreadReference.CurrentContendedMonitor. static mirror::Object* GetContendedMonitor(Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Calls 'callback' once for each lock held in the single stack frame represented by // the current state of 'stack_visitor'. @@ -97,12 +99,12 @@ class Monitor { // is necessary when we have already aborted but want to dump the stack as much as we can. static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*), void* callback_context, bool abort_on_failure = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsValidLockWord(LockWord lock_word); template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier> - mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetObject() SHARED_REQUIRES(Locks::mutator_lock_) { return obj_.Read<kReadBarrierOption>(); } @@ -114,7 +116,7 @@ class Monitor { int32_t GetHashCode(); - bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_lock_); bool HasHashCode() const { return hash_code_.LoadRelaxed() != 0; @@ -126,12 +128,13 @@ class Monitor { // Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check. static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word, - uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS; + uint32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_); + // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that + // does not allow a thread suspension in the middle. TODO: maybe make this exclusive. + // NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_. static bool Deflate(Thread* self, mirror::Object* obj) - // Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that - // does not allow a thread suspension in the middle. TODO: maybe make this exclusive. - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; #ifndef __LP64__ void* operator new(size_t size) { @@ -148,58 +151,59 @@ class Monitor { #endif private: - explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - explicit Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code, - MonitorId id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) + SHARED_REQUIRES(Locks::mutator_lock_); + Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code, MonitorId id) + SHARED_REQUIRES(Locks::mutator_lock_); // Install the monitor into its object, may fail if another thread installs a different monitor // first. 
bool Install(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this // routine. - void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); + void AppendToWaitSet(Thread* thread) REQUIRES(monitor_lock_); // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of // this routine. - void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_); + void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_); // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The // calling thread must own the lock or the owner must be suspended. There's a race with other // threads inflating the lock, installing hash codes and spurious failures. The caller should // re-read the lock word following the call. static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self) void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent, const char* owner_filename, uint32_t owner_line_number) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner, Monitor* mon) - LOCKS_EXCLUDED(Locks::thread_list_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void Lock(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); bool Unlock(Thread* thread) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify. void Notify(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void NotifyAll(Thread* self) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and @@ -222,15 +226,15 @@ class Monitor { // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end // of the 32-bit time epoch. void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why) - LOCKS_EXCLUDED(monitor_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!monitor_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Translates the provided method and pc into its declaring class' source file and line number. 
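Note the deliberate split in the converted annotations above: wait-set maintenance (AppendToWaitSet, RemoveFromWaitSet) takes REQUIRES(monitor_lock_) because the caller already owns the monitor, while Lock, Unlock, Notify, and Wait take REQUIRES(!monitor_lock_) because they acquire it themselves. A toy pair showing what the analysis then enforces, assuming an annotated Mutex and MutexLock in the ART style:

    class ToyMonitor {
     public:
      void TouchWaitSet() REQUIRES(lock_) { /* caller already holds lock_ */ }
      void Notify(Thread* self) REQUIRES(!lock_) {
        MutexLock mu(self, lock_);
        TouchWaitSet();   // OK: lock_ is held in this scope.
        // Notify(self);  // Rejected: lock_ is held, but !lock_ is required.
      }
     private:
      Mutex lock_;
    };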
void TranslateLocation(ArtMethod* method, uint32_t pc, const char** source_file, uint32_t* line_number) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - uint32_t GetOwnerThreadId(); + uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_); static bool (*is_sensitive_thread_hook_)(); static uint32_t lock_profiling_threshold_; @@ -285,16 +289,16 @@ class MonitorList { MonitorList(); ~MonitorList(); - void Add(Monitor* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Add(Monitor* m) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_); - void SweepMonitorList(IsMarkedCallback* callback, void* arg) - LOCKS_EXCLUDED(monitor_list_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DisallowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); - void AllowNewMonitors() LOCKS_EXCLUDED(monitor_list_lock_); - void EnsureNewMonitorsDisallowed() LOCKS_EXCLUDED(monitor_list_lock_); + void SweepMonitorList(IsMarkedVisitor* visitor) + REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + void DisallowNewMonitors() REQUIRES(!monitor_list_lock_); + void AllowNewMonitors() REQUIRES(!monitor_list_lock_); + void EnsureNewMonitorsDisallowed() REQUIRES(!monitor_list_lock_); + void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_); // Returns how many monitors were deflated. - size_t DeflateMonitors() LOCKS_EXCLUDED(monitor_list_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t DeflateMonitors() REQUIRES(!monitor_list_lock_) REQUIRES(Locks::mutator_lock_); typedef std::list<Monitor*, TrackingAllocator<Monitor*, kAllocatorTagMonitorList>> Monitors; @@ -317,7 +321,7 @@ class MonitorList { // For use only by the JDWP implementation. class MonitorInfo { public: - explicit MonitorInfo(mirror::Object* o) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); + explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_); Thread* owner_; size_t entry_count_; diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc index 4a364cab62..2832e32dd1 100644 --- a/runtime/monitor_pool.cc +++ b/runtime/monitor_pool.cc @@ -90,7 +90,7 @@ void MonitorPool::AllocateChunk() { Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // We are gonna allocate, so acquire the writer lock. MutexLock mu(self, *Locks::allocated_monitor_ids_lock_); diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h index 4ab4e86ac4..240ca61641 100644 --- a/runtime/monitor_pool.h +++ b/runtime/monitor_pool.h @@ -43,7 +43,7 @@ class MonitorPool { } static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { #ifndef __LP64__ Monitor* mon = new Monitor(self, owner, obj, hash_code); DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment); @@ -110,10 +110,10 @@ class MonitorPool { // analysis. 
MonitorPool() NO_THREAD_SAFETY_ANALYSIS; - void AllocateChunk() EXCLUSIVE_LOCKS_REQUIRED(Locks::allocated_monitor_ids_lock_); + void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_); Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ReleaseMonitorToPool(Thread* self, Monitor* monitor); void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors); diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc index 2a29c60a13..e1173bb026 100644 --- a/runtime/monitor_test.cc +++ b/runtime/monitor_test.cc @@ -60,7 +60,7 @@ static const size_t kMaxHandles = 1000000; // Use arbitrary large amount for no static void FillHeap(Thread* self, ClassLinker* class_linker, std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp, std::vector<MutableHandle<mirror::Object>>* handles) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB); hsp->reset(new StackHandleScope<kMaxHandles>(self)); @@ -106,8 +106,7 @@ static void FillHeap(Thread* self, ClassLinker* class_linker, class CreateTask : public Task { public: - explicit CreateTask(MonitorTest* monitor_test, uint64_t initial_sleep, int64_t millis, - bool expected) : + CreateTask(MonitorTest* monitor_test, uint64_t initial_sleep, int64_t millis, bool expected) : monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis), expected_(expected) {} diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index 1078492d08..8febb62a5f 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -257,21 +257,45 @@ static void VMDebug_infopoint(JNIEnv*, jclass, jint id) { static jlong VMDebug_countInstancesOfClass(JNIEnv* env, jclass, jclass javaClass, jboolean countAssignable) { ScopedObjectAccess soa(env); - gc::Heap* heap = Runtime::Current()->GetHeap(); - // We only want reachable instances, so do a GC. Heap::VisitObjects visits all of the heap - // objects in the all spaces and the allocation stack. - heap->CollectGarbage(false); + gc::Heap* const heap = Runtime::Current()->GetHeap(); + // Caller's responsibility to do GC if desired. mirror::Class* c = soa.Decode<mirror::Class*>(javaClass); if (c == nullptr) { return 0; } - std::vector<mirror::Class*> classes; - classes.push_back(c); + std::vector<mirror::Class*> classes {c}; uint64_t count = 0; heap->CountInstances(classes, countAssignable, &count); return count; } +static jlongArray VMDebug_countInstancesOfClasses(JNIEnv* env, jclass, jobjectArray javaClasses, + jboolean countAssignable) { + ScopedObjectAccess soa(env); + gc::Heap* const heap = Runtime::Current()->GetHeap(); + // Caller's responsibility to do GC if desired. + auto* decoded_classes = soa.Decode<mirror::ObjectArray<mirror::Class>*>(javaClasses); + if (decoded_classes == nullptr) { + return nullptr; + } + std::vector<mirror::Class*> classes; + for (size_t i = 0, count = decoded_classes->GetLength(); i < count; ++i) { + classes.push_back(decoded_classes->Get(i)); + } + std::vector<uint64_t> counts(classes.size(), 0u); + // Heap::CountInstances can handle null and will put 0 for these classes. 
+ heap->CountInstances(classes, countAssignable, &counts[0]); + auto* long_counts = mirror::LongArray::Alloc(soa.Self(), counts.size()); + if (long_counts == nullptr) { + soa.Self()->AssertPendingOOMException(); + return nullptr; + } + for (size_t i = 0; i < counts.size(); ++i) { + long_counts->Set(i, counts[i]); + } + return soa.AddLocalReference<jlongArray>(long_counts); +} + // We export the VM internal per-heap-space size/alloc/free metrics // for the zygote space, alloc space (application heap), and the large // object space for dumpsys meminfo. The other memory region data such @@ -452,6 +476,7 @@ static jobjectArray VMDebug_getRuntimeStatsInternal(JNIEnv* env, jclass) { static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"), + NATIVE_METHOD(VMDebug, countInstancesOfClasses, "([Ljava/lang/Class;Z)[J"), NATIVE_METHOD(VMDebug, crash, "()V"), NATIVE_METHOD(VMDebug, dumpHprofData, "(Ljava/lang/String;Ljava/io/FileDescriptor;)V"), NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"), diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 5dd354d4d6..9ea339a6bd 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -16,7 +16,7 @@ #include "dalvik_system_VMRuntime.h" -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ extern "C" void android_set_application_target_sdk_version(uint32_t version); #endif #include <limits.h> @@ -196,7 +196,7 @@ static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sd // Note that targetSdkVersion may be 0, meaning "current". Runtime::Current()->SetTargetSdkVersion(target_sdk_version); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // This part is letting libc/dynamic linker know about current app's // target sdk version to enable compatibility workarounds. android_set_application_target_sdk_version(static_cast<uint32_t>(target_sdk_version)); @@ -262,7 +262,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor { explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { } void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { mirror::String* string = root->AsString(); table_->operator[](string->ToModifiedUtf8()) = string; } @@ -274,7 +274,7 @@ class PreloadDexCachesStringsVisitor : public SingleRootVisitor { // Based on ClassLinker::ResolveString. static void PreloadDexCachesResolveString( Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::String* string = dex_cache->GetResolvedString(string_idx); if (string != nullptr) { return; @@ -292,7 +292,7 @@ static void PreloadDexCachesResolveString( // Based on ClassLinker::ResolveType. static void PreloadDexCachesResolveType( Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* klass = dex_cache->GetResolvedType(type_idx); if (klass != nullptr) { return; @@ -321,7 +321,7 @@ static void PreloadDexCachesResolveType( // Based on ClassLinker::ResolveField. 
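The platform guards in this and the following files move from HAVE_ANDROID_OS, a flag the old build system had to define, to __ANDROID__, which every Android compiler toolchain predefines. The idiom is unchanged; only the spelling is now toolchain-owned. A sketch, where MaybeSetTargetSdkVersion is a hypothetical wrapper:

    #ifdef __ANDROID__
    // Bionic-only hook; absent from host (glibc) builds.
    extern "C" void android_set_application_target_sdk_version(uint32_t version);
    #endif

    void MaybeSetTargetSdkVersion(uint32_t version) {
    #ifdef __ANDROID__
      android_set_application_target_sdk_version(version);
    #else
      (void)version;  // Host build: no dynamic-linker compatibility shims.
    #endif
    }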
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtField* field = dex_cache->GetResolvedField(field_idx, sizeof(void*)); if (field != nullptr) { return; @@ -349,7 +349,7 @@ static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uin // Based on ClassLinker::ResolveMethod. static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx, InvokeType invoke_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, sizeof(void*)); if (method != nullptr) { return; @@ -423,7 +423,7 @@ static void PreloadDexCachesStatsTotal(DexCacheStats* total) { } static void PreloadDexCachesStatsFilled(DexCacheStats* filled) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (!kPreloadDexCachesCollectStats) { return; } diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc index ee62755ae4..541eeb13c9 100644 --- a/runtime/native/dalvik_system_VMStack.cc +++ b/runtime/native/dalvik_system_VMStack.cc @@ -29,7 +29,7 @@ namespace art { static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { jobject trace = nullptr; if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) { trace = soa.Self()->CreateInternalStackTrace<false>(soa); @@ -87,7 +87,7 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass) { : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), class_loader(nullptr) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(class_loader == nullptr); mirror::Class* c = GetMethod()->GetDeclaringClass(); // c is null for runtime methods. diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index a41aed6f29..c337e91cf8 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -41,7 +41,7 @@ namespace art { ALWAYS_INLINE static inline mirror::Class* DecodeClass( const ScopedFastNativeObjectAccess& soa, jobject java_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* c = soa.Decode<mirror::Class*>(java_class); DCHECK(c != nullptr); DCHECK(c->IsClass()); @@ -108,22 +108,20 @@ static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { static mirror::ObjectArray<mirror::Field>* GetDeclaredFields( Thread* self, mirror::Class* klass, bool public_only, bool force_resolve) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { StackHandleScope<1> hs(self); - auto* ifields = klass->GetIFields(); - auto* sfields = klass->GetSFields(); - const auto num_ifields = klass->NumInstanceFields(); - const auto num_sfields = klass->NumStaticFields(); - size_t array_size = num_ifields + num_sfields; + IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields(); + IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields(); + size_t array_size = klass->NumInstanceFields() + klass->NumStaticFields(); if (public_only) { // Lets go subtract all the non public fields. 
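GetDeclaredFields above switches from raw ArtField*-plus-count pairs to IterationRange<StrideIterator<ArtField>>, so field walks become range-for loops over a LengthPrefixedArray. A minimal sketch of the idea behind a stride iterator, where element spacing is a runtime stride rather than sizeof(T); these simplified types are illustrative, not ART's:

    #include <cstddef>
    #include <cstdint>

    template <typename T>
    class StrideIterator {
     public:
      StrideIterator(uintptr_t ptr, size_t stride) : ptr_(ptr), stride_(stride) {}
      T& operator*() const { return *reinterpret_cast<T*>(ptr_); }
      StrideIterator& operator++() { ptr_ += stride_; return *this; }
      bool operator!=(const StrideIterator& rhs) const { return ptr_ != rhs.ptr_; }
     private:
      uintptr_t ptr_;
      size_t stride_;
    };

    template <typename T>
    struct IterationRange {
      StrideIterator<T> begin_, end_;
      StrideIterator<T> begin() const { return begin_; }
      StrideIterator<T> end() const { return end_; }
    };
    // A range-for over such a range visits Length() elements laid out stride bytes apart.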
- for (size_t i = 0; i < num_ifields; ++i) { - if (!ifields[i].IsPublic()) { + for (ArtField& field : ifields) { + if (!field.IsPublic()) { --array_size; } } - for (size_t i = 0; i < num_sfields; ++i) { - if (!sfields[i].IsPublic()) { + for (ArtField& field : sfields) { + if (!field.IsPublic()) { --array_size; } } @@ -134,34 +132,32 @@ static mirror::ObjectArray<mirror::Field>* GetDeclaredFields( if (object_array.Get() == nullptr) { return nullptr; } - for (size_t i = 0; i < num_ifields; ++i) { - auto* art_field = &ifields[i]; - if (!public_only || art_field->IsPublic()) { - auto* field = mirror::Field::CreateFromArtField(self, art_field, force_resolve); - if (field == nullptr) { + for (ArtField& field : ifields) { + if (!public_only || field.IsPublic()) { + auto* reflect_field = mirror::Field::CreateFromArtField(self, &field, force_resolve); + if (reflect_field == nullptr) { if (kIsDebugBuild) { self->AssertPendingException(); } // Maybe null due to OOME or type resolving exception. return nullptr; } - object_array->SetWithoutChecks<false>(array_idx++, field); + object_array->SetWithoutChecks<false>(array_idx++, reflect_field); } } - for (size_t i = 0; i < num_sfields; ++i) { - auto* art_field = &sfields[i]; - if (!public_only || art_field->IsPublic()) { - auto* field = mirror::Field::CreateFromArtField(self, art_field, force_resolve); - if (field == nullptr) { + for (ArtField& field : sfields) { + if (!public_only || field.IsPublic()) { + auto* reflect_field = mirror::Field::CreateFromArtField(self, &field, force_resolve); + if (reflect_field == nullptr) { if (kIsDebugBuild) { self->AssertPendingException(); } return nullptr; } - object_array->SetWithoutChecks<false>(array_idx++, field); + object_array->SetWithoutChecks<false>(array_idx++, reflect_field); } } - CHECK_EQ(array_idx, array_size); + DCHECK_EQ(array_idx, array_size); return object_array.Get(); } @@ -188,16 +184,19 @@ static jobjectArray Class_getPublicDeclaredFields(JNIEnv* env, jobject javaThis) // the dex cache for lookups? I think CompareModifiedUtf8ToUtf16AsCodePointValues should be fairly // fast. ALWAYS_INLINE static inline ArtField* FindFieldByName( - Thread* self ATTRIBUTE_UNUSED, mirror::String* name, ArtField* fields, size_t num_fields) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self ATTRIBUTE_UNUSED, mirror::String* name, LengthPrefixedArray<ArtField>* fields) + SHARED_REQUIRES(Locks::mutator_lock_) { + if (fields == nullptr) { + return nullptr; + } size_t low = 0; - size_t high = num_fields; + size_t high = fields->Length(); const uint16_t* const data = name->GetValue(); const size_t length = name->GetLength(); while (low < high) { auto mid = (low + high) / 2; - ArtField* const field = &fields[mid]; - int result = CompareModifiedUtf8ToUtf16AsCodePointValues(field->GetName(), data, length); + ArtField& field = fields->At(mid); + int result = CompareModifiedUtf8ToUtf16AsCodePointValues(field.GetName(), data, length); // Alternate approach, only a few % faster at the cost of more allocations. 
// int result = field->GetStringName(self, true)->CompareTo(name); if (result < 0) { @@ -205,12 +204,12 @@ ALWAYS_INLINE static inline ArtField* FindFieldByName( } else if (result > 0) { high = mid; } else { - return field; + return &field; } } if (kIsDebugBuild) { - for (size_t i = 0; i < num_fields; ++i) { - CHECK_NE(fields[i].GetName(), name->ToModifiedUtf8()); + for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) { + CHECK_NE(field.GetName(), name->ToModifiedUtf8()); } } return nullptr; @@ -218,14 +217,12 @@ ALWAYS_INLINE static inline ArtField* FindFieldByName( ALWAYS_INLINE static inline mirror::Field* GetDeclaredField( Thread* self, mirror::Class* c, mirror::String* name) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - auto* instance_fields = c->GetIFields(); - auto* art_field = FindFieldByName(self, name, instance_fields, c->NumInstanceFields()); + SHARED_REQUIRES(Locks::mutator_lock_) { + ArtField* art_field = FindFieldByName(self, name, c->GetIFieldsPtr()); if (art_field != nullptr) { return mirror::Field::CreateFromArtField(self, art_field, true); } - auto* static_fields = c->GetSFields(); - art_field = FindFieldByName(self, name, static_fields, c->NumStaticFields()); + art_field = FindFieldByName(self, name, c->GetSFieldsPtr()); if (art_field != nullptr) { return mirror::Field::CreateFromArtField(self, art_field, true); } @@ -274,7 +271,7 @@ static jobject Class_getDeclaredConstructorInternal( } static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(m != nullptr); return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor(); } diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc index abac8153b3..856a3e7d01 100644 --- a/runtime/native/java_lang_Runtime.cc +++ b/runtime/native/java_lang_Runtime.cc @@ -31,10 +31,10 @@ #include "verify_object-inl.h" #include <sstream> -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // This function is provided by android linker. 
extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path); -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ namespace art { @@ -53,7 +53,7 @@ NO_RETURN static void Runtime_nativeExit(JNIEnv*, jclass, jint status) { } static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPathJstr) { -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ if (javaLdLibraryPathJstr != nullptr) { ScopedUtfChars ldLibraryPath(env, javaLdLibraryPathJstr); if (ldLibraryPath.c_str() != nullptr) { diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc index 736b42b739..d9863c579e 100644 --- a/runtime/native/java_lang_System.cc +++ b/runtime/native/java_lang_System.cc @@ -36,7 +36,7 @@ namespace art { */ static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::string actualType(PrettyTypeOf(array)); Thread* self = Thread::Current(); self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;", @@ -105,15 +105,21 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, dstArray->AsShortSizedArray()->Memmove(dstPos, srcArray->AsShortSizedArray(), srcPos, count); return; case Primitive::kPrimInt: - case Primitive::kPrimFloat: DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U); dstArray->AsIntArray()->Memmove(dstPos, srcArray->AsIntArray(), srcPos, count); return; + case Primitive::kPrimFloat: + DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U); + dstArray->AsFloatArray()->Memmove(dstPos, srcArray->AsFloatArray(), srcPos, count); + return; case Primitive::kPrimLong: - case Primitive::kPrimDouble: DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U); dstArray->AsLongArray()->Memmove(dstPos, srcArray->AsLongArray(), srcPos, count); return; + case Primitive::kPrimDouble: + DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U); + dstArray->AsDoubleArray()->Memmove(dstPos, srcArray->AsDoubleArray(), srcPos, count); + return; case Primitive::kPrimNot: { mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>(); mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>(); diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc index 6569d833c5..c76f6eec73 100644 --- a/runtime/native/java_lang_Thread.cc +++ b/runtime/native/java_lang_Thread.cc @@ -17,7 +17,6 @@ #include "java_lang_Thread.h" #include "common_throws.h" -#include "debugger.h" #include "jni_internal.h" #include "monitor.h" #include "mirror/object.h" @@ -90,6 +89,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha case kWaitingInMainSignalCatcherLoop: return kJavaWaiting; case kWaitingForMethodTracingStart: return kJavaWaiting; case kWaitingForVisitObjects: return kJavaWaiting; + case kWaitingWeakGcRootRead: return kJavaWaiting; case kSuspended: return kJavaRunnable; // Don't add a 'default' here so the compiler can spot incompatible enum changes. 
} diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index ba898c6d2d..5bbb0dc45f 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -32,7 +32,7 @@ namespace art { template<bool kIsSet> ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field, mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (kIsSet && field->IsFinal()) { ThrowIllegalAccessException( StringPrintf("Cannot set %s field %s of class %s", @@ -60,7 +60,7 @@ ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* template<bool kAllowReferences> ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f, Primitive::Type field_type, JValue* value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_EQ(value->GetJ(), INT64_C(0)); MemberOffset offset(f->GetOffset()); const bool is_volatile = f->IsVolatile(); @@ -105,7 +105,7 @@ ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa, jobject j_rcvr, mirror::Field** f, mirror::Object** class_or_rcvr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { soa.Self()->AssertThreadSuspensionIsAllowable(); mirror::Class* declaringClass = (*f)->GetDeclaringClass(); if ((*f)->IsStatic()) { @@ -232,7 +232,7 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj) { ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f, Primitive::Type field_type, bool allow_references, const JValue& new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(f->GetDeclaringClass()->IsInitialized()); MemberOffset offset(f->GetOffset()); const bool is_volatile = f->IsVolatile(); @@ -253,9 +253,9 @@ ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* break; case Primitive::kPrimChar: if (is_volatile) { - o->SetFieldBooleanVolatile<false>(offset, new_value.GetC()); + o->SetFieldCharVolatile<false>(offset, new_value.GetC()); } else { - o->SetFieldBoolean<false>(offset, new_value.GetC()); + o->SetFieldChar<false>(offset, new_value.GetC()); } break; case Primitive::kPrimInt: diff --git a/runtime/native/scoped_fast_native_object_access.h b/runtime/native/scoped_fast_native_object_access.h index 57b873bc22..c4a33dfd14 100644 --- a/runtime/native/scoped_fast_native_object_access.h +++ b/runtime/native/scoped_fast_native_object_access.h @@ -27,7 +27,7 @@ namespace art { class ScopedFastNativeObjectAccess : public ScopedObjectAccessAlreadyRunnable { public: explicit ScopedFastNativeObjectAccess(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : ScopedObjectAccessAlreadyRunnable(env) { Locks::mutator_lock_->AssertSharedHeld(Self()); diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h index 7fe31300ab..2295cb4664 100644 --- a/runtime/nth_caller_visitor.h +++ b/runtime/nth_caller_visitor.h @@ -33,7 +33,7 @@ struct NthCallerVisitor : public StackVisitor { count(0), caller(nullptr) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = 
GetMethod(); bool do_count = false; if (m == nullptr || m->IsRuntimeMethod()) { diff --git a/runtime/oat.cc b/runtime/oat.cc index 1dd2aad611..5725b6ff6c 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -97,7 +97,7 @@ OatHeader::OatHeader(InstructionSet instruction_set, image_file_location_oat_checksum_ = image_file_location_oat_checksum; UpdateChecksum(&image_file_location_oat_checksum_, sizeof(image_file_location_oat_checksum_)); - CHECK(IsAligned<kPageSize>(image_file_location_oat_data_begin)); + CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize); image_file_location_oat_data_begin_ = image_file_location_oat_data_begin; UpdateChecksum(&image_file_location_oat_data_begin_, sizeof(image_file_location_oat_data_begin_)); diff --git a/runtime/oat.h b/runtime/oat.h index 000ae8ed5d..29dd76ce5e 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '0', '6', '4', '\0' }; + static constexpr uint8_t kOatVersion[] = { '0', '6', '8', '\0' }; static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h index 6b3b66643c..5df652579f 100644 --- a/runtime/oat_file-inl.h +++ b/runtime/oat_file-inl.h @@ -22,7 +22,7 @@ namespace art { inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const { - const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); if (code == nullptr) { return nullptr; } @@ -38,14 +38,6 @@ inline uint32_t OatFile::OatMethod::GetOatQuickMethodHeaderOffset() const { return reinterpret_cast<const uint8_t*>(method_header) - begin_; } -inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const { - const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); - if (code == nullptr) { - return 0u; - } - return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_; -} - inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const { const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader(); if (method_header == nullptr) { @@ -78,8 +70,8 @@ inline uint32_t OatFile::OatMethod::GetFpSpillMask() const { return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].frame_info_.FpSpillMask(); } -const uint8_t* OatFile::OatMethod::GetGcMap() const { - const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); +inline const uint8_t* OatFile::OatMethod::GetGcMap() const { + const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); if (code == nullptr) { return nullptr; } @@ -90,12 +82,12 @@ const uint8_t* OatFile::OatMethod::GetGcMap() const { return reinterpret_cast<const uint8_t*>(code) - offset; } -uint32_t OatFile::OatMethod::GetGcMapOffset() const { +inline uint32_t OatFile::OatMethod::GetGcMapOffset() const { const uint8_t* gc_map = GetGcMap(); return static_cast<uint32_t>(gc_map != nullptr ? 
gc_map - begin_ : 0u); } -uint32_t OatFile::OatMethod::GetGcMapOffsetOffset() const { +inline uint32_t OatFile::OatMethod::GetGcMapOffsetOffset() const { const OatQuickMethodHeader* method_header = GetOatQuickMethodHeader(); if (method_header == nullptr) { return 0u; @@ -130,7 +122,7 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const { } inline const uint8_t* OatFile::OatMethod::GetMappingTable() const { - const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); if (code == nullptr) { return nullptr; } @@ -142,7 +134,7 @@ inline const uint8_t* OatFile::OatMethod::GetMappingTable() const { } inline const uint8_t* OatFile::OatMethod::GetVmapTable() const { - const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode()); + const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); if (code == nullptr) { return nullptr; } @@ -153,6 +145,22 @@ inline const uint8_t* OatFile::OatMethod::GetVmapTable() const { return reinterpret_cast<const uint8_t*>(code) - offset; } +inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const { + const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_)); + if (code == nullptr) { + return 0u; + } + return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_; +} + +inline uint32_t OatFile::OatMethod::GetCodeOffset() const { + return (GetQuickCodeSize() == 0) ? 0 : code_offset_; +} + +inline const void* OatFile::OatMethod::GetQuickCode() const { + return GetOatPointer<const void*>(GetCodeOffset()); +} + } // namespace art #endif // ART_RUNTIME_OAT_FILE_INL_H_ diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index ad5741e475..a23d94d845 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -27,7 +27,7 @@ #include <sstream> // dlopen_ext support from bionic. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include "android/dlext.h" #endif @@ -41,6 +41,7 @@ #include "mem_map.h" #include "mirror/class.h" #include "mirror/object-inl.h" +#include "oat_file-inl.h" #include "os.h" #include "runtime.h" #include "utils.h" @@ -228,7 +229,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base, *error_msg = StringPrintf("Failed to find absolute path for '%s'", elf_filename.c_str()); return false; } -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ android_dlextinfo extinfo; extinfo.flags = ANDROID_DLEXT_FORCE_LOAD | ANDROID_DLEXT_FORCE_FIXED_VADDR; dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo); diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 1a782deb82..27f8677f03 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -100,13 +100,9 @@ class OatFile FINAL { public: void LinkMethod(ArtMethod* method) const; - uint32_t GetCodeOffset() const { - return code_offset_; - } + uint32_t GetCodeOffset() const; - const void* GetQuickCode() const { - return GetOatPointer<const void*>(code_offset_); - } + const void* GetQuickCode() const; // Returns size of quick code. 
uint32_t GetQuickCodeSize() const; @@ -219,7 +215,7 @@ class OatFile FINAL { const OatDexFile* GetOatDexFile(const char* dex_location, const uint32_t* const dex_location_checksum, bool exception_if_not_found = true) const - LOCKS_EXCLUDED(secondary_lookup_lock_); + REQUIRES(!secondary_lookup_lock_); const std::vector<const OatDexFile*>& GetOatDexFiles() const { return oat_dex_files_storage_; diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index b28adf9ceb..29b879ee80 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -165,6 +165,11 @@ bool OatFileAssistant::MakeUpToDate(std::string* error_msg) { } std::unique_ptr<OatFile> OatFileAssistant::GetBestOatFile() { + // The best oat files are, in descending order of bestness: + // 1. Properly relocated files. These may be opened executable. + // 2. Not out-of-date files that are already opened non-executable. + // 3. Not out-of-date files that we must reopen non-executable. + if (OatFileIsUpToDate()) { oat_file_released_ = true; return std::move(cached_oat_file_); @@ -175,26 +180,36 @@ std::unique_ptr<OatFile> OatFileAssistant::GetBestOatFile() { return std::move(cached_odex_file_); } - if (load_executable_) { - VLOG(oat) << "Oat File Assistant: No relocated oat file found," - << " attempting to fall back to interpreting oat file instead."; + VLOG(oat) << "Oat File Assistant: No relocated oat file found," + << " attempting to fall back to interpreting oat file instead."; + + if (!OatFileIsOutOfDate() && !OatFileIsExecutable()) { + oat_file_released_ = true; + return std::move(cached_oat_file_); + } + + if (!OdexFileIsOutOfDate() && !OdexFileIsExecutable()) { + oat_file_released_ = true; + return std::move(cached_odex_file_); + } + if (!OatFileIsOutOfDate()) { + load_executable_ = false; + ClearOatFileCache(); if (!OatFileIsOutOfDate()) { - load_executable_ = false; - ClearOatFileCache(); - if (!OatFileIsOutOfDate()) { - oat_file_released_ = true; - return std::move(cached_oat_file_); - } + CHECK(!OatFileIsExecutable()); + oat_file_released_ = true; + return std::move(cached_oat_file_); } + } + if (!OdexFileIsOutOfDate()) { + load_executable_ = false; + ClearOdexFileCache(); if (!OdexFileIsOutOfDate()) { - load_executable_ = false; - ClearOdexFileCache(); - if (!OdexFileIsOutOfDate()) { - oat_file_released_ = true; - return std::move(cached_odex_file_); - } + CHECK(!OdexFileIsExecutable()); + oat_file_released_ = true; + return std::move(cached_odex_file_); } } @@ -698,18 +713,13 @@ bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args, return false; } - ClassLinker* linker = runtime->GetClassLinker(); - CHECK(linker != nullptr) << "ClassLinker is not created yet"; - const OatFile* primary_oat_file = linker->GetPrimaryOatFile(); - const bool debuggable = primary_oat_file != nullptr && primary_oat_file->IsDebuggable(); - std::vector<std::string> argv; argv.push_back(runtime->GetCompilerExecutable()); argv.push_back("--runtime-arg"); argv.push_back("-classpath"); argv.push_back("--runtime-arg"); argv.push_back(runtime->GetClassPathString()); - if (debuggable) { + if (runtime->IsDebuggable()) { argv.push_back("--debuggable"); } runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv); @@ -873,6 +883,11 @@ const OatFile* OatFileAssistant::GetOdexFile() { return cached_odex_file_.get(); } +bool OatFileAssistant::OdexFileIsExecutable() { + const OatFile* odex_file = GetOdexFile(); + return (odex_file != nullptr && odex_file->IsExecutable()); +} + void 
OatFileAssistant::ClearOdexFileCache() { odex_file_load_attempted_ = false; cached_odex_file_.reset(); @@ -899,6 +914,11 @@ const OatFile* OatFileAssistant::GetOatFile() { return cached_oat_file_.get(); } +bool OatFileAssistant::OatFileIsExecutable() { + const OatFile* oat_file = GetOatFile(); + return (oat_file != nullptr && oat_file->IsExecutable()); +} + void OatFileAssistant::ClearOatFileCache() { oat_file_load_attempted_ = false; cached_oat_file_.reset(); diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index 7216fc7331..664db987d8 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -327,6 +327,9 @@ class OatFileAssistant { // The caller shouldn't clean up or free the returned pointer. const OatFile* GetOdexFile(); + // Returns true if the odex file is opened executable. + bool OdexFileIsExecutable(); + // Clear any cached information about the odex file that depends on the // contents of the file. void ClearOdexFileCache(); @@ -336,6 +339,9 @@ class OatFileAssistant { // The caller shouldn't clean up or free the returned pointer. const OatFile* GetOatFile(); + // Returns true if the oat file is opened executable. + bool OatFileIsExecutable(); + // Clear any cached information about the oat file that depends on the // contents of the file. void ClearOatFileCache(); diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index 570c59c037..03ad2d5ac8 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -38,6 +38,28 @@ namespace art { +// Some tests very occasionally fail: we expect to have an unrelocated non-pic +// odex file that is reported as needing relocation, but it is reported +// instead as being up to date (b/22599792). +// +// This function adds extra checks for diagnosing why the given oat file is +// reported up to date, when it should be non-pic needing relocation. +// These extra diagnostics checks should be removed once b/22599792 has been +// resolved. +static void DiagnoseFlakyTestFailure(const OatFile& oat_file) { + Runtime* runtime = Runtime::Current(); + const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace(); + ASSERT_TRUE(image_space != nullptr); + const ImageHeader& image_header = image_space->GetImageHeader(); + const OatHeader& oat_header = oat_file.GetOatHeader(); + EXPECT_FALSE(oat_file.IsPic()); + EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum()); + EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()), + oat_header.GetImageFileLocationOatDataBegin()); + EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta()); +} + + class OatFileAssistantTest : public CommonRuntimeTest { public: virtual void SetUp() { @@ -186,6 +208,7 @@ class OatFileAssistantTest : public CommonRuntimeTest { // Generate an odex file for the purposes of test. // If pic is true, generates a PIC odex. + // The generated odex file will be un-relocated. void GenerateOdexForTest(const std::string& dex_location, const std::string& odex_location, bool pic = false) { @@ -210,6 +233,16 @@ class OatFileAssistantTest : public CommonRuntimeTest { std::string error_msg; ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg; setenv("ANDROID_DATA", android_data_.c_str(), 1); + + // Verify the odex file was generated as expected. 
+ std::unique_ptr<OatFile> odex_file(OatFile::Open( + odex_location.c_str(), odex_location.c_str(), nullptr, nullptr, + false, dex_location.c_str(), &error_msg)); + ASSERT_TRUE(odex_file.get() != nullptr) << error_msg; + + if (!pic) { + DiagnoseFlakyTestFailure(*odex_file); + } } void GeneratePicOdexForTest(const std::string& dex_location, @@ -470,6 +503,12 @@ TEST_F(OatFileAssistantTest, DexOdexNoOat) { EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate()); EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate()); EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles()); + + // We should still be able to get the non-executable odex file to run from. + std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile(); + ASSERT_TRUE(oat_file.get() != nullptr); + + DiagnoseFlakyTestFailure(*oat_file); } // Case: We have a stripped DEX file and an ODEX file, but no OAT file. @@ -712,17 +751,7 @@ TEST_F(OatFileAssistantTest, OdexOatOverlap) { dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str()); EXPECT_EQ(1u, dex_files.size()); - // Add some extra checks to help diagnose apparently flaky test failures. - Runtime* runtime = Runtime::Current(); - const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace(); - ASSERT_TRUE(image_space != nullptr); - const ImageHeader& image_header = image_space->GetImageHeader(); - const OatHeader& oat_header = oat_file->GetOatHeader(); - EXPECT_FALSE(oat_file->IsPic()); - EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum()); - EXPECT_NE(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()), - oat_header.GetImageFileLocationOatDataBegin()); - EXPECT_NE(image_header.GetPatchDelta(), oat_header.GetImagePatchDelta()); + DiagnoseFlakyTestFailure(*oat_file); } // Case: We have a DEX file and a PIC ODEX file, but no OAT file. diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h index 8e99dbb286..4d726ecc4c 100644 --- a/runtime/object_callbacks.h +++ b/runtime/object_callbacks.h @@ -17,42 +17,34 @@ #ifndef ART_RUNTIME_OBJECT_CALLBACKS_H_ #define ART_RUNTIME_OBJECT_CALLBACKS_H_ -// For ostream. -#include <ostream> -// For uint32_t. -#include <stdint.h> -// For size_t. -#include <stdlib.h> - #include "base/macros.h" namespace art { namespace mirror { - class Class; class Object; template<class MirrorType> class HeapReference; - class Reference; } // namespace mirror -class StackVisitor; // A callback for visiting an object in the heap. typedef void (ObjectCallback)(mirror::Object* obj, void* arg); -// A callback used for marking an object, returns the new address of the object if the object moved. -typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED; - -typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg); -typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, - void* arg); - -// A callback for testing if an object is marked, returns null if not marked, otherwise the new -// address the object (if the object didn't move, returns the object input parameter). -typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED; - -// Returns true if the object in the heap reference is marked, if it is marked and has moved the -// callback updates the heap reference contain the new value. 
-typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object, - void* arg) WARN_UNUSED; -typedef void (ProcessMarkStackCallback)(void* arg); + +class IsMarkedVisitor { + public: + virtual ~IsMarkedVisitor() {} + // Return null if an object is not marked, otherwise returns the new address of that object. + // May return the same address as the input if the object did not move. + virtual mirror::Object* IsMarked(mirror::Object* obj) = 0; +}; + +class MarkObjectVisitor { + public: + virtual ~MarkObjectVisitor() {} + // Mark an object and return the new address of an object. + // May return the same address as the input if the object did not move. + virtual mirror::Object* MarkObject(mirror::Object* obj) = 0; + // Mark an object and update the value stored in the heap reference if the object moved. + virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj) = 0; +}; } // namespace art diff --git a/runtime/object_lock.h b/runtime/object_lock.h index acddc03e29..eb7cbd85d3 100644 --- a/runtime/object_lock.h +++ b/runtime/object_lock.h @@ -28,15 +28,15 @@ class Thread; template <typename T> class ObjectLock { public: - ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ObjectLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_); - ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ~ObjectLock() SHARED_REQUIRES(Locks::mutator_lock_); - void WaitIgnoringInterrupts() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void WaitIgnoringInterrupts() SHARED_REQUIRES(Locks::mutator_lock_); - void Notify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Notify() SHARED_REQUIRES(Locks::mutator_lock_); - void NotifyAll() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void NotifyAll() SHARED_REQUIRES(Locks::mutator_lock_); private: Thread* const self_; diff --git a/runtime/os.h b/runtime/os.h index 6248d5fc14..befe2e808a 100644 --- a/runtime/os.h +++ b/runtime/os.h @@ -35,7 +35,8 @@ class OS { // Open an existing file with read/write access. static File* OpenFileReadWrite(const char* name); - // Create an empty file with read/write access. + // Create an empty file with read/write access. This is a *new* file, that is, if the file + // already exists, it is *not* overwritten, but unlinked, and a new inode will be used. static File* CreateEmptyFile(const char* name); // Open a file with the specified open(2) flags. diff --git a/runtime/os_linux.cc b/runtime/os_linux.cc index 22827891b0..675699daea 100644 --- a/runtime/os_linux.cc +++ b/runtime/os_linux.cc @@ -36,6 +36,10 @@ File* OS::OpenFileReadWrite(const char* name) { } File* OS::CreateEmptyFile(const char* name) { + // In case the file exists, unlink it so we get a new file. This is necessary as the previous + // file may be in use and must not be changed. 
+ unlink(name); + return OpenFileWithFlags(name, O_RDWR | O_CREAT | O_TRUNC); } diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index d08af71e6e..25b5e49b3d 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -244,10 +244,11 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .AppendValues() .IntoKey(M::ImageCompilerOptions) .Define("-Xverify:_") - .WithType<bool>() - .WithValueMap({{"none", false}, - {"remote", true}, - {"all", true}}) + .WithType<verifier::VerifyMode>() + .WithValueMap({{"none", verifier::VerifyMode::kNone}, + {"remote", verifier::VerifyMode::kEnable}, + {"all", verifier::VerifyMode::kEnable}, + {"softfail", verifier::VerifyMode::kSoftFail}}) .IntoKey(M::Verify) .Define("-XX:NativeBridge=_") .WithType<std::string>() @@ -262,6 +263,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .Define("--cpu-abilist=_") .WithType<std::string>() .IntoKey(M::CpuAbiList) + .Define("-Xfingerprint:_") + .WithType<std::string>() + .IntoKey(M::Fingerprint) .Define({"-Xexperimental-lambdas", "-Xnoexperimental-lambdas"}) .WithType<bool>() .WithValues({true, false}) @@ -686,7 +690,7 @@ void ParsedOptions::Usage(const char* fmt, ...) { UsageMessage(stream, " -esa\n"); UsageMessage(stream, " -dsa\n"); UsageMessage(stream, " (-enablesystemassertions, -disablesystemassertions)\n"); - UsageMessage(stream, " -Xverify:{none,remote,all}\n"); + UsageMessage(stream, " -Xverify:{none,remote,all,softfail}\n"); UsageMessage(stream, " -Xrs\n"); UsageMessage(stream, " -Xint:portable, -Xint:fast, -Xint:jit\n"); UsageMessage(stream, " -Xdexopt:{none,verified,all,full}\n"); diff --git a/runtime/prebuilt_tools_test.cc b/runtime/prebuilt_tools_test.cc index 53bc87665a..a7f7bcd134 100644 --- a/runtime/prebuilt_tools_test.cc +++ b/runtime/prebuilt_tools_test.cc @@ -23,7 +23,7 @@ namespace art { // Run the tests only on host. -#ifndef HAVE_ANDROID_OS +#ifndef __ANDROID__ class PrebuiltToolsTest : public CommonRuntimeTest { }; @@ -61,6 +61,6 @@ TEST_F(PrebuiltToolsTest, CheckTargetTools) { } } -#endif // HAVE_ANDROID_OS +#endif // __ANDROID__ } // namespace art diff --git a/runtime/profiler.cc b/runtime/profiler.cc index 87b0d43451..33cfe08c33 100644 --- a/runtime/profiler.cc +++ b/runtime/profiler.cc @@ -28,7 +28,6 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "common_throws.h" -#include "debugger.h" #include "dex_file-inl.h" #include "instrumentation.h" #include "mirror/class-inl.h" @@ -59,13 +58,13 @@ class BoundedStackVisitor : public StackVisitor { public: BoundedStackVisitor(std::vector<std::pair<ArtMethod*, uint32_t>>* stack, Thread* thread, uint32_t max_depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), stack_(stack), max_depth_(max_depth), depth_(0) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; @@ -88,7 +87,7 @@ class BoundedStackVisitor : public StackVisitor { // This is called from either a thread list traversal or from a checkpoint. Regardless // of which caller, the mutator lock must be held. 
-static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) { BackgroundMethodSamplingProfiler* profiler = reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg); const ProfilerOptions profile_options = profiler->GetProfilerOptions(); diff --git a/runtime/profiler.h b/runtime/profiler.h index 7611487da2..30babe358d 100644 --- a/runtime/profiler.h +++ b/runtime/profiler.h @@ -104,8 +104,8 @@ class ProfileSampleResults { explicit ProfileSampleResults(Mutex& lock); ~ProfileSampleResults(); - void Put(ArtMethod* method); - void PutStack(const std::vector<InstructionLocation>& stack_dump); + void Put(ArtMethod* method) REQUIRES(!lock_); + void PutStack(const std::vector<InstructionLocation>& stack_dump) REQUIRES(!lock_); uint32_t Write(std::ostream &os, ProfileDataType type); void ReadPrevious(int fd, ProfileDataType type); void Clear(); @@ -168,17 +168,19 @@ class BackgroundMethodSamplingProfiler { // Start a profile thread with the user-supplied arguments. // Returns true if the profile was started or if it was already running. Returns false otherwise. static bool Start(const std::string& output_filename, const ProfilerOptions& options) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_, - Locks::profiler_lock_); - - static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_); - static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_); - - void RecordMethod(ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RecordStack(const std::vector<InstructionLocation>& stack) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool ProcessMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::profiler_lock_); + + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void Stop() REQUIRES(!Locks::profiler_lock_, !wait_lock_, !Locks::profiler_lock_) + NO_THREAD_SAFETY_ANALYSIS; + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void Shutdown() REQUIRES(!Locks::profiler_lock_) NO_THREAD_SAFETY_ANALYSIS; + + void RecordMethod(ArtMethod *method) SHARED_REQUIRES(Locks::mutator_lock_); + void RecordStack(const std::vector<InstructionLocation>& stack) + SHARED_REQUIRES(Locks::mutator_lock_); + bool ProcessMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); const ProfilerOptions& GetProfilerOptions() const { return options_; } Barrier& GetBarrier() { @@ -190,13 +192,15 @@ class BackgroundMethodSamplingProfiler { const std::string& output_filename, const ProfilerOptions& options); // The sampling interval in microseconds is passed as an argument. - static void* RunProfilerThread(void* arg) LOCKS_EXCLUDED(Locks::profiler_lock_); + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. 
+ static void* RunProfilerThread(void* arg) REQUIRES(!Locks::profiler_lock_) + NO_THREAD_SAFETY_ANALYSIS; - uint32_t WriteProfile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t WriteProfile() SHARED_REQUIRES(Locks::mutator_lock_); void CleanProfile(); - uint32_t DumpProfile(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static bool ShuttingDown(Thread* self) LOCKS_EXCLUDED(Locks::profiler_lock_); + uint32_t DumpProfile(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); + static bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_); static BackgroundMethodSamplingProfiler* profiler_ GUARDED_BY(Locks::profiler_lock_); diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc index f40c0f1130..bc9ba377bc 100644 --- a/runtime/proxy_test.cc +++ b/runtime/proxy_test.cc @@ -34,7 +34,7 @@ class ProxyTest : public CommonCompilerTest { mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader, const char* className, const std::vector<mirror::Class*>& interfaces) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"); CHECK(javaLangObject != nullptr); @@ -160,10 +160,9 @@ TEST_F(ProxyTest, ProxyFieldHelper) { ASSERT_TRUE(proxyClass->IsProxyClass()); ASSERT_TRUE(proxyClass->IsInitialized()); - ArtField* instance_fields = proxyClass->GetIFields(); - EXPECT_TRUE(instance_fields == nullptr); + EXPECT_TRUE(proxyClass->GetIFieldsPtr() == nullptr); - ArtField* static_fields = proxyClass->GetSFields(); + LengthPrefixedArray<ArtField>* static_fields = proxyClass->GetSFieldsPtr(); ASSERT_TRUE(static_fields != nullptr); ASSERT_EQ(2u, proxyClass->NumStaticFields()); @@ -175,7 +174,7 @@ TEST_F(ProxyTest, ProxyFieldHelper) { ASSERT_TRUE(throwsFieldClass.Get() != nullptr); // Test "Class[] interfaces" field. - ArtField* field = &static_fields[0]; + ArtField* field = &static_fields->At(0); EXPECT_STREQ("interfaces", field->GetName()); EXPECT_STREQ("[Ljava/lang/Class;", field->GetTypeDescriptor()); EXPECT_EQ(interfacesFieldClass.Get(), field->GetType<true>()); @@ -184,7 +183,7 @@ TEST_F(ProxyTest, ProxyFieldHelper) { EXPECT_FALSE(field->IsPrimitiveType()); // Test "Class[][] throws" field. 
- field = &static_fields[1]; + field = &static_fields->At(1); EXPECT_STREQ("throws", field->GetName()); EXPECT_STREQ("[[Ljava/lang/Class;", field->GetTypeDescriptor()); EXPECT_EQ(throwsFieldClass.Get(), field->GetType<true>()); @@ -215,30 +214,30 @@ TEST_F(ProxyTest, CheckArtMirrorFieldsOfProxyStaticFields) { ASSERT_TRUE(proxyClass1->IsProxyClass()); ASSERT_TRUE(proxyClass1->IsInitialized()); - ArtField* static_fields0 = proxyClass0->GetSFields(); + LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr(); ASSERT_TRUE(static_fields0 != nullptr); - ASSERT_EQ(2u, proxyClass0->NumStaticFields()); - ArtField* static_fields1 = proxyClass1->GetSFields(); + ASSERT_EQ(2u, static_fields0->Length()); + LengthPrefixedArray<ArtField>* static_fields1 = proxyClass1->GetSFieldsPtr(); ASSERT_TRUE(static_fields1 != nullptr); - ASSERT_EQ(2u, proxyClass1->NumStaticFields()); + ASSERT_EQ(2u, static_fields1->Length()); - EXPECT_EQ(static_fields0[0].GetDeclaringClass(), proxyClass0.Get()); - EXPECT_EQ(static_fields0[1].GetDeclaringClass(), proxyClass0.Get()); - EXPECT_EQ(static_fields1[0].GetDeclaringClass(), proxyClass1.Get()); - EXPECT_EQ(static_fields1[1].GetDeclaringClass(), proxyClass1.Get()); + EXPECT_EQ(static_fields0->At(0).GetDeclaringClass(), proxyClass0.Get()); + EXPECT_EQ(static_fields0->At(1).GetDeclaringClass(), proxyClass0.Get()); + EXPECT_EQ(static_fields1->At(0).GetDeclaringClass(), proxyClass1.Get()); + EXPECT_EQ(static_fields1->At(1).GetDeclaringClass(), proxyClass1.Get()); Handle<mirror::Field> field00 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0[0], true)); + hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(0), true)); Handle<mirror::Field> field01 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0[1], true)); + hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(1), true)); Handle<mirror::Field> field10 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1[0], true)); + hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(0), true)); Handle<mirror::Field> field11 = - hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1[1], true)); - EXPECT_EQ(field00->GetArtField(), &static_fields0[0]); - EXPECT_EQ(field01->GetArtField(), &static_fields0[1]); - EXPECT_EQ(field10->GetArtField(), &static_fields1[0]); - EXPECT_EQ(field11->GetArtField(), &static_fields1[1]); + hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(1), true)); + EXPECT_EQ(field00->GetArtField(), &static_fields0->At(0)); + EXPECT_EQ(field01->GetArtField(), &static_fields0->At(1)); + EXPECT_EQ(field10->GetArtField(), &static_fields1->At(0)); + EXPECT_EQ(field11->GetArtField(), &static_fields1->At(1)); } } // namespace art diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h index 0d39e22b34..dd3703cad5 100644 --- a/runtime/quick/inline_method_analyser.h +++ b/runtime/quick/inline_method_analyser.h @@ -39,6 +39,7 @@ enum InlineMethodOpcode : uint16_t { kIntrinsicFloatCvt, kIntrinsicReverseBits, kIntrinsicReverseBytes, + kIntrinsicNumberOfLeadingZeros, kIntrinsicAbsInt, kIntrinsicAbsLong, kIntrinsicAbsFloat, @@ -56,6 +57,7 @@ enum InlineMethodOpcode : uint16_t { kIntrinsicReferenceGetReferent, kIntrinsicCharAt, kIntrinsicCompareTo, + kIntrinsicEquals, kIntrinsicGetCharsNoCheck, kIntrinsicIsEmptyOrLength, kIntrinsicIndexOf, @@ -157,7 +159,7 @@ class 
InlineMethodAnalyser { * @return true if the method is a candidate for inlining, false otherwise. */ static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static constexpr bool IsInstructionIGet(Instruction::Code opcode) { return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT; @@ -182,16 +184,16 @@ class InlineMethodAnalyser { static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result); static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result); static bool AnalyseIGetMethod(verifier::MethodVerifier* verifier, InlineMethod* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool AnalyseIPutMethod(verifier::MethodVerifier* verifier, InlineMethod* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can we fast path instance field access in a verified accessor? // If yes, computes field's offset and volatility and whether the method is static or not. static bool ComputeSpecialAccessorInfo(uint32_t field_idx, bool is_put, verifier::MethodVerifier* verifier, InlineIGetIPutData* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); }; } // namespace art diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 02baad758f..d1a4081125 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -45,14 +45,14 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { public: CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception, QuickExceptionHandler* exception_handler) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), exception_(exception), exception_handler_(exception_handler) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = GetMethod(); exception_handler_->SetHandlerFrameDepth(GetFrameDepth()); if (method == nullptr) { @@ -83,7 +83,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor { private: bool HandleTryItems(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t dex_pc = DexFile::kDexNoIndex; if (!method->IsNative()) { dex_pc = GetDexPc(); @@ -159,7 +159,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) { class DeoptimizeStackVisitor FINAL : public StackVisitor { public: DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), exception_handler_(exception_handler), @@ -167,7 +167,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor { stacked_shadow_frame_pushed_(false) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { exception_handler_->SetHandlerFrameDepth(GetFrameDepth()); ArtMethod* method = GetMethod(); if (method == nullptr) { @@ -196,7 +196,7 @@ class DeoptimizeStackVisitor FINAL : 
public StackVisitor { return static_cast<VRegKind>(kinds.at(reg * 2)); } - bool HandleDeoptimization(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = m->GetCodeItem(); CHECK(code_item != nullptr); uint16_t num_regs = code_item->registers_size_; @@ -350,14 +350,14 @@ void QuickExceptionHandler::DeoptimizeStack() { class InstrumentationStackVisitor : public StackVisitor { public: InstrumentationStackVisitor(Thread* self, size_t frame_depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), frame_depth_(frame_depth), instrumentation_frames_to_pop_(0) { CHECK_NE(frame_depth_, kInvalidFrameDepth); } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { size_t current_frame_depth = GetFrameDepth(); if (current_frame_depth < frame_depth_) { CHECK(GetMethod() != nullptr); diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h index 8d7cd12216..ce9085d70a 100644 --- a/runtime/quick_exception_handler.h +++ b/runtime/quick_exception_handler.h @@ -36,17 +36,17 @@ class ShadowFrame; class QuickExceptionHandler { public: QuickExceptionHandler(Thread* self, bool is_deoptimization) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); NO_RETURN ~QuickExceptionHandler() { LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump. UNREACHABLE(); } - void FindCatch(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_); + void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_); + void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_); + NO_RETURN void DoLongJump() SHARED_REQUIRES(Locks::mutator_lock_); void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) { handler_quick_frame_ = handler_quick_frame; diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h index 55cef6826a..e7ad7316bd 100644 --- a/runtime/read_barrier.h +++ b/runtime/read_barrier.h @@ -49,7 +49,7 @@ class ReadBarrier { bool kMaybeDuringStartup = false> ALWAYS_INLINE static MirrorType* Barrier( mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // It's up to the implementation whether the given root gets updated // whereas the return value must be an updated reference. @@ -57,7 +57,7 @@ class ReadBarrier { bool kMaybeDuringStartup = false> ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root, GcRootSource* gc_root_source = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // It's up to the implementation whether the given root gets updated // whereas the return value must be an updated reference. 
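
A note for readers on the rename that dominates these hunks: SHARED_LOCKS_REQUIRED and LOCKS_EXCLUDED are the legacy spellings of Clang's thread-safety annotations; this commit moves to the capability-style SHARED_REQUIRES and to negated REQUIRES clauses, where REQUIRES(!lock) states that the caller must not already hold the lock (checked under -Wthread-safety-negative). Below is a minimal, self-contained sketch of the new style; the macro definitions are stand-ins assumed for the example, not ART's actual ones from base/macros.h.

// Stand-in macro spellings for illustration; ART defines its own equivalents.
#define CAPABILITY(x)        __attribute__((capability(x)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))

class CAPABILITY("mutex") Mutex {
 public:
  void ExclusiveLock() ACQUIRE() {}
  void ExclusiveUnlock() RELEASE() {}
};

Mutex gLock;
int gCounter GUARDED_BY(gLock);

// Old spelling: EXCLUSIVE_LOCKS_REQUIRED(gLock). Caller already holds the lock.
void Bump() REQUIRES(gLock) { ++gCounter; }

// Old spelling: SHARED_LOCKS_REQUIRED(gLock). A shared (reader) hold suffices.
int Read() SHARED_REQUIRES(gLock) { return gCounter; }

// Old spelling: LOCKS_EXCLUDED(gLock). The caller must not hold the lock,
// because this function acquires it itself.
void BumpFromOutside() REQUIRES(!gLock) {
  gLock.ExclusiveLock();
  Bump();
  gLock.ExclusiveUnlock();
}

Compiling with clang++ -std=c++11 -Wthread-safety -Wthread-safety-negative makes the analysis verify every call site against these contracts, which is why so many signatures in this commit change without any behavioral difference.
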
@@ -65,24 +65,24 @@ class ReadBarrier { bool kMaybeDuringStartup = false> ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root, GcRootSource* gc_root_source = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static bool IsDuringStartup(); // Without the holder object. static void AssertToSpaceInvariant(mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { AssertToSpaceInvariant(nullptr, MemberOffset(0), ref); } // With the holder object. static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // With GcRootSource. static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static mirror::Object* Mark(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); static mirror::Object* WhitePtr() { return reinterpret_cast<mirror::Object*>(white_ptr_); @@ -96,7 +96,7 @@ class ReadBarrier { ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj, uintptr_t* out_rb_ptr_high_bits) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them. static constexpr uintptr_t white_ptr_ = 0x0; // Not marked. diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h index 4f408dd5c1..710c21f03e 100644 --- a/runtime/read_barrier_c.h +++ b/runtime/read_barrier_c.h @@ -47,9 +47,4 @@ #error "Only one of Baker or Brooks can be enabled at a time." #endif -// A placeholder marker to indicate places to add read barriers in the -// assembly code. This is a development time aid and to be removed -// after read barriers are added. -#define THIS_LOAD_REQUIRES_READ_BARRIER - #endif // ART_RUNTIME_READ_BARRIER_C_H_ diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc index a31d8ac5ba..49b6a38b01 100644 --- a/runtime/reference_table.cc +++ b/runtime/reference_table.cc @@ -62,7 +62,7 @@ void ReferenceTable::Remove(mirror::Object* obj) { // If "obj" is an array, return the number of elements in the array. // Otherwise, return zero. -static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static size_t GetElementCount(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { // We assume the special cleared value isn't an array in the if statement below. DCHECK(!Runtime::Current()->GetClearedJniWeakGlobal()->IsArrayInstance()); if (obj == nullptr || !obj->IsArrayInstance()) { @@ -78,7 +78,7 @@ static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks:: // or equivalent to the original. 
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count, int identical, int equiv) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (obj == nullptr) { os << " null reference (count=" << equiv << ")\n"; return; diff --git a/runtime/reference_table.h b/runtime/reference_table.h index 94f16b66de..f90ccd1e51 100644 --- a/runtime/reference_table.h +++ b/runtime/reference_table.h @@ -41,22 +41,22 @@ class ReferenceTable { ReferenceTable(const char* name, size_t initial_size, size_t max_size); ~ReferenceTable(); - void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); - void Remove(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Remove(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); size_t Size() const; - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: typedef std::vector<GcRoot<mirror::Object>, TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table; static void Dump(std::ostream& os, Table& entries) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); friend class IndirectReferenceTable; // For Dump. std::string name_; diff --git a/runtime/reflection.cc b/runtime/reflection.cc index 11522d9914..2fe1e64fe7 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -36,7 +36,7 @@ namespace art { class ArgArray { public: - explicit ArgArray(const char* shorty, uint32_t shorty_len) + ArgArray(const char* shorty, uint32_t shorty_len) : shorty_(shorty), shorty_len_(shorty_len), num_bytes_(0) { size_t num_slots = shorty_len + 1; // +1 in case of receiver. 
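    // Editorial aside, not part of the commit: the condition below is a
    // small-buffer optimization. Append() advances num_bytes_ by 4, so each
    // slot is 32 bits and wide 'J'/'D' arguments occupy two slots; with the
    // +1 receiver slot, num_slots * 2 bounds the worst case. Only shorties
    // too long for the fixed kSmallArgArraySize buffer should pay for a heap
    // allocation (inferred from the visible condition; the buffer fields
    // themselves sit outside this hunk).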
if (LIKELY((num_slots * 2) < kSmallArgArraySize)) { @@ -72,7 +72,7 @@ class ArgArray { num_bytes_ += 4; } - void Append(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void Append(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue()); } @@ -96,7 +96,7 @@ class ArgArray { void BuildArgArrayFromVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver, va_list ap) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Set receiver if non-null (method is not static) if (receiver != nullptr) { Append(receiver); @@ -132,7 +132,7 @@ class ArgArray { void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* receiver, jvalue* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Set receiver if non-null (method is not static) if (receiver != nullptr) { Append(receiver); @@ -171,7 +171,7 @@ class ArgArray { } void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Set receiver if non-null (method is not static) size_t cur_arg = arg_offset; if (!shadow_frame->GetMethod()->IsStatic()) { @@ -206,7 +206,7 @@ class ArgArray { static void ThrowIllegalPrimitiveArgumentException(const char* expected, const char* found_descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ThrowIllegalArgumentException( StringPrintf("Invalid primitive conversion from %s to %s", expected, PrettyDescriptor(found_descriptor).c_str()).c_str()); @@ -214,7 +214,7 @@ class ArgArray { bool BuildArgArrayFromObjectArray(mirror::Object* receiver, mirror::ObjectArray<mirror::Object>* args, ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::TypeList* classes = m->GetParameterTypeList(); // Set receiver if non-null (method is not static) if (receiver != nullptr) { @@ -343,7 +343,7 @@ class ArgArray { }; static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const DexFile::TypeList* params = m->GetParameterTypeList(); if (params == nullptr) { return; // No arguments so nothing to check. 
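
The ArgArray machinery above is driven by ART's "shorty" strings: shorty[0] encodes the method's return type and each following character one parameter, with 'J' (long) and 'D' (double) occupying two 32-bit slots. A standalone sketch of that packing discipline, under invented names (MiniArgArray is illustrative only, not ART code):

#include <cstdint>
#include <vector>

class MiniArgArray {
 public:
  explicit MiniArgArray(const char* shorty) : shorty_(shorty) {}

  // Narrow types (Z, B, C, S, I, F and references) take one 32-bit slot.
  void AppendNarrow(uint32_t value) { slots_.push_back(value); }

  // Wide types (J, D) are split low-word-first across two slots.
  void AppendWide(uint64_t value) {
    slots_.push_back(static_cast<uint32_t>(value));
    slots_.push_back(static_cast<uint32_t>(value >> 32));
  }

  // Pack one raw value per parameter, skipping the return-type character.
  void BuildFromRawValues(const std::vector<uint64_t>& values) {
    size_t i = 0;
    for (const char* p = shorty_ + 1; *p != '\0'; ++p, ++i) {
      if (*p == 'J' || *p == 'D') {
        AppendWide(values[i]);
      } else {
        AppendNarrow(static_cast<uint32_t>(values[i]));
      }
    }
  }

  const std::vector<uint32_t>& slots() const { return slots_; }

 private:
  const char* const shorty_;
  std::vector<uint32_t> slots_;
};

For a method long f(int, long) the shorty is "JIJ"; BuildFromRawValues({42, 0x123456789}) yields the slots {42, 0x23456789, 0x1}, roughly the shape the real ArgArray hands on to the invoke stubs.
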
@@ -418,7 +418,7 @@ static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args) } static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, sizeof(void*)); } @@ -426,7 +426,7 @@ static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method) static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa, ArtMethod* method, ArgArray* arg_array, JValue* result, const char* shorty) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { uint32_t* args = arg_array->GetArray(); if (UNLIKELY(soa.Env()->check_jni)) { CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(sizeof(void*)), args); @@ -436,7 +436,7 @@ static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa, JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // We want to make sure that the stack is not within a small distance from the // protected region in case we are calling into a leaf function whose stack // check has been elided. @@ -730,7 +730,7 @@ mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) { } static std::string UnboxingFailureKind(ArtField* f) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (f != nullptr) { return "field " + PrettyField(f, false); } @@ -740,7 +740,7 @@ static std::string UnboxingFailureKind(ArtField* f) static bool UnboxPrimitive(mirror::Object* o, mirror::Class* dst_class, ArtField* f, JValue* unboxed_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { bool unbox_for_result = (f == nullptr); if (!dst_class->IsPrimitive()) { if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) { @@ -780,7 +780,7 @@ static bool UnboxPrimitive(mirror::Object* o, mirror::Class* klass = o->GetClass(); mirror::Class* src_class = nullptr; ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); - ArtField* primitive_field = &klass->GetIFields()[0]; + ArtField* primitive_field = &klass->GetIFieldsPtr()->At(0); if (klass->DescriptorEquals("Ljava/lang/Boolean;")) { src_class = class_linker->FindPrimitiveClass('Z'); boxed_value.SetZ(primitive_field->GetBoolean(o)); diff --git a/runtime/reflection.h b/runtime/reflection.h index 825a7213ce..d9c38c1064 100644 --- a/runtime/reflection.h +++ b/runtime/reflection.h @@ -33,60 +33,60 @@ class ScopedObjectAccessAlreadyRunnable; class ShadowFrame; mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f, JValue* unboxed_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result, Primitive::Type src_class, Primitive::Type dst_class, const JValue& src, JValue* dst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + 
SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, jvalue* args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid, va_list args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // num_frames is number of frames we look up for access check. jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver, jobject args, size_t num_frames = 1) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags, mirror::Class** calling_class, size_t num_frames) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // This version takes a known calling class. bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class, uint32_t access_flags, mirror::Class* calling_class) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the calling class by using a stack visitor, may return null for unattached native threads. mirror::Class* GetCallingClass(Thread* self, size_t num_frames) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void InvalidReceiverError(mirror::Object* o, mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void UpdateReference(Thread* self, jobject obj, mirror::Object* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace art diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc index 9707fb8e42..bd89be5d17 100644 --- a/runtime/reflection_test.cc +++ b/runtime/reflection_test.cc @@ -85,7 +85,7 @@ class ReflectionTest : public CommonCompilerTest { mirror::Object** receiver, bool is_static, const char* method_name, const char* method_signature) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { const char* class_name = is_static ? 
"StaticLeafMethods" : "NonStaticLeafMethods"; jobject jclass_loader(LoadDex(class_name)); Thread* self = Thread::Current(); diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h index 68d5ad2f6e..380e72b5dd 100644 --- a/runtime/runtime-inl.h +++ b/runtime/runtime-inl.h @@ -66,13 +66,13 @@ inline ArtMethod* Runtime::GetImtUnimplementedMethod() { } inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(HasCalleeSaveMethod(type)); return GetCalleeSaveMethodUnchecked(type); } inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]); } diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 5fef8aea2b..1912314d17 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -26,7 +26,7 @@ #include <cutils/trace.h> #include <signal.h> #include <sys/syscall.h> -#include <valgrind.h> +#include "base/memory_tool.h" #include <cstdio> #include <cstdlib> @@ -75,6 +75,7 @@ #include "jit/jit.h" #include "jni_internal.h" #include "linear_alloc.h" +#include "lambda/box_table.h" #include "mirror/array.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" @@ -177,7 +178,7 @@ Runtime::Runtime() exit_(nullptr), abort_(nullptr), stats_enabled_(false), - running_on_valgrind_(RUNNING_ON_VALGRIND > 0), + is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL), profiler_started_(false), instrumentation_(), main_thread_group_(nullptr), @@ -185,7 +186,7 @@ Runtime::Runtime() system_class_loader_(nullptr), dump_gc_performance_on_shutdown_(false), preinitialization_transaction_(nullptr), - verify_(false), + verify_(verifier::VerifyMode::kNone), allow_dex_file_fallback_(true), target_sdk_version_(0), implicit_null_checks_(false), @@ -403,11 +404,12 @@ void Runtime::CallExitHook(jint status) { } } -void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) { - GetInternTable()->SweepInternTableWeaks(visitor, arg); - GetMonitorList()->SweepMonitorList(visitor, arg); - GetJavaVM()->SweepJniWeakGlobals(visitor, arg); - GetHeap()->SweepAllocationRecords(visitor, arg); +void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) { + GetInternTable()->SweepInternTableWeaks(visitor); + GetMonitorList()->SweepMonitorList(visitor); + GetJavaVM()->SweepJniWeakGlobals(visitor); + GetHeap()->SweepAllocationRecords(visitor); + GetLambdaBoxTable()->SweepWeakBoxedLambdas(visitor); } bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) { @@ -588,7 +590,7 @@ bool Runtime::Start() { return true; } -void Runtime::EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { +void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) { DCHECK_GT(threads_being_born_, 0U); threads_being_born_--; if (shutting_down_started_ && threads_being_born_ == 0) { @@ -605,14 +607,14 @@ bool Runtime::InitZygote() { // See storage config details at http://source.android.com/tech/storage/ // Create private mount namespace shared by all children if (unshare(CLONE_NEWNS) == -1) { - PLOG(WARNING) << "Failed to unshare()"; + PLOG(ERROR) << "Failed to unshare()"; return false; } // Mark rootfs as being a slave so that changes from default // namespace only flow into our children. 
if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) { - PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE"; + PLOG(ERROR) << "Failed to mount() rootfs as MS_SLAVE"; return false; } @@ -623,7 +625,7 @@ bool Runtime::InitZygote() { if (target_base != nullptr) { if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV, "uid=0,gid=1028,mode=0751") == -1) { - LOG(WARNING) << "Failed to mount tmpfs to " << target_base; + PLOG(ERROR) << "Failed to mount tmpfs to " << target_base; return false; } } @@ -680,6 +682,11 @@ bool Runtime::IsShuttingDown(Thread* self) { return IsShuttingDownLocked(); } +bool Runtime::IsDebuggable() const { + const OatFile* oat_file = GetClassLinker()->GetPrimaryOatFile(); + return oat_file != nullptr && oat_file->IsDebuggable(); +} + void Runtime::StartDaemonThreads() { VLOG(startup) << "Runtime::StartDaemonThreads entering"; @@ -845,6 +852,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_); + fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint); + if (runtime_options.GetOrDefault(Opt::Interpret)) { GetInstrumentation()->ForceInterpretOnly(); } @@ -907,6 +916,9 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) jit_options_->SetUseJIT(false); } + // Allocate a global table of boxed lambda objects <-> closures. + lambda_box_table_ = MakeUnique<lambda::BoxTable>(); + // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but // can't be trimmed as easily. const bool use_malloc = IsAotCompiler(); @@ -929,9 +941,11 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) case kX86: case kArm64: case kX86_64: + case kMips: + case kMips64: implicit_null_checks_ = true; // Installing stack protection does not play well with valgrind. - implicit_so_checks_ = (RUNNING_ON_VALGRIND == 0); + implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind); break; default: // Keep the defaults. @@ -1490,25 +1504,34 @@ ArtMethod* Runtime::CreateCalleeSaveMethod() { void Runtime::DisallowNewSystemWeaks() { monitor_list_->DisallowNewMonitors(); - intern_table_->DisallowNewInterns(); + intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites); java_vm_->DisallowNewWeakGlobals(); heap_->DisallowNewAllocationRecords(); + lambda_box_table_->DisallowNewWeakBoxedLambdas(); } void Runtime::AllowNewSystemWeaks() { monitor_list_->AllowNewMonitors(); - intern_table_->AllowNewInterns(); + intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping. java_vm_->AllowNewWeakGlobals(); heap_->AllowNewAllocationRecords(); + lambda_box_table_->AllowNewWeakBoxedLambdas(); } void Runtime::EnsureNewSystemWeaksDisallowed() { // Lock and unlock the system weak locks once to ensure that no // threads are still in the middle of adding new system weaks. 
monitor_list_->EnsureNewMonitorsDisallowed(); - intern_table_->EnsureNewInternsDisallowed(); + intern_table_->EnsureNewWeakInternsDisallowed(); java_vm_->EnsureNewWeakGlobalsDisallowed(); - heap_->EnsureNewAllocationRecordsDisallowed(); + lambda_box_table_->EnsureNewWeakBoxedLambdasDisallowed(); +} + +void Runtime::BroadcastForNewSystemWeaks() { + CHECK(kUseReadBarrier); + monitor_list_->BroadcastForNewMonitors(); + intern_table_->BroadcastForNewInterns(); + java_vm_->BroadcastForNewWeakGlobals(); } void Runtime::SetInstructionSet(InstructionSet instruction_set) { @@ -1744,4 +1767,12 @@ void Runtime::SetImtUnimplementedMethod(ArtMethod* method) { imt_unimplemented_method_ = method; } +bool Runtime::IsVerificationEnabled() const { + return verify_ == verifier::VerifyMode::kEnable; +} + +bool Runtime::IsVerificationSoftFail() const { + return verify_ == verifier::VerifyMode::kSoftFail; +} + } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index bcc7118db0..4577b75397 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -53,6 +53,10 @@ namespace jit { class JitOptions; } // namespace jit +namespace lambda { + class BoxTable; +} // namespace lambda + namespace mirror { class ClassLoader; class Array; @@ -64,6 +68,7 @@ namespace mirror { } // namespace mirror namespace verifier { class MethodVerifier; + enum class VerifyMode : int8_t; } // namespace verifier class ArenaPool; class ArtMethod; @@ -179,19 +184,19 @@ class Runtime { bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_); bool IsShuttingDown(Thread* self); - bool IsShuttingDownLocked() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { + bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) { return shutting_down_; } - size_t NumberOfThreadsBeingBorn() const EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { + size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) { return threads_being_born_; } - void StartThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) { + void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) { threads_being_born_++; } - void EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); + void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_); bool IsStarted() const { return started_; @@ -207,7 +212,7 @@ class Runtime { // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most // callers should prefer. - NO_RETURN static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_); + NO_RETURN static void Abort() REQUIRES(!Locks::abort_lock_); // Returns the "main" ThreadGroup, used when attaching user threads. jobject GetMainThreadGroup() const; @@ -225,7 +230,7 @@ class Runtime { void CallExitHook(jint status); // Detaches the current native thread from the runtime. - void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_); + void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os); void DumpLockHolders(std::ostream& os); @@ -274,15 +279,15 @@ class Runtime { } // Is the given object the special object used to mark a cleared JNI weak global? - bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Get the special object used to mark a cleared JNI weak global. 
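A hypothetical caller-side sketch of the sentinel's contract (weak_global_ref and self are assumed locals; DecodeJObject may hand back the sentinel for a collected weak global):

    mirror::Object* obj = self->DecodeJObject(weak_global_ref);
    if (Runtime::Current()->IsClearedJniWeakGlobal(obj)) {
      obj = nullptr;  // referent was collected; treat the reference as cleared
    }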
- mirror::Object* GetClearedJniWeakGlobal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_); mirror::Throwable* GetPreAllocatedNoClassDefFoundError() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const std::vector<std::string>& GetProperties() const { return properties_; @@ -296,76 +301,77 @@ class Runtime { return "2.1.0"; } - void DisallowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void AllowNewSystemWeaks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void EnsureNewSystemWeaksDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_); + void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_); + void EnsureNewSystemWeaksDisallowed() SHARED_REQUIRES(Locks::mutator_lock_); + void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_); // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If // clean_dirty is true then dirty roots will be marked as non-dirty after visiting. void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit image roots, only used for hprof since the GC uses the image space mod union table // instead. - void VisitImageRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); // Visit all of the roots we can safely visit concurrently. void VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit all of the non-thread roots; we can do this with mutators unpaused. void VisitNonThreadRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VisitTransactionRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Visit all of the thread roots. - void VisitThreadRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); // Flip thread roots from from-space refs to to-space refs. size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback, gc::collector::GarbageCollector* collector) - LOCKS_EXCLUDED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_); // Visit all other roots which must be done with mutators suspended. void VisitNonConcurrentRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the // system weak is updated to be the visitor's returned value. - void SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SweepSystemWeaks(IsMarkedVisitor* visitor) + SHARED_REQUIRES(Locks::mutator_lock_); // Constant roots are the roots which never change after the runtime is initialized; they only // need to be visited once per GC cycle.
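As a sketch of the visitor side of these APIs, a hypothetical root counter; it assumes ART's SingleRootVisitor convenience base, whose exact signature should be checked against gc_root.h:

    class CountRootsVisitor : public SingleRootVisitor {
     public:
      void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
          OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
        if (root != nullptr) {
          ++count_;
        }
      }
      size_t count_ = 0;
    };
    // Usage: CountRootsVisitor v; Runtime::Current()->VisitConstantRoots(&v);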
void VisitConstantRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a special method that calls into a trampoline for runtime method resolution - ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_); bool HasResolutionMethod() const { return resolution_method_ != nullptr; } - void SetResolutionMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_); // Returns a special method that calls into a trampoline for runtime imt conflicts. - ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ArtMethod* GetImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_); + ArtMethod* GetImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_); bool HasImtConflictMethod() const { return imt_conflict_method_ != nullptr; } - void SetImtConflictMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetImtUnimplementedMethod(ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); + void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateImtConflictMethod() SHARED_REQUIRES(Locks::mutator_lock_); // Returns a special method that describes all callee saves being spilled to the stack. 
enum CalleeSaveType { @@ -380,17 +386,17 @@ class Runtime { } ArtMethod* GetCalleeSaveMethod(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const { return callee_save_method_frame_infos_[type]; } QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) { return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]); @@ -404,7 +410,7 @@ class Runtime { void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type); - ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_); int32_t GetStat(int kind); @@ -418,8 +424,8 @@ class Runtime { void ResetStats(int kinds); - void SetStatsEnabled(bool new_state) LOCKS_EXCLUDED(Locks::instrument_entrypoints_lock_, - Locks::mutator_lock_); + void SetStatsEnabled(bool new_state) + REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_); enum class NativeBridgeAction { // private kUnload, @@ -457,9 +463,9 @@ class Runtime { bool IsTransactionAborted() const; void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowTransactionAbortError(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value, bool is_volatile) const; @@ -476,17 +482,17 @@ class Runtime { void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset, mirror::Object* value, bool is_volatile) const; void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void RecordStrongStringInsertion(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); void RecordWeakStringInsertion(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); void RecordStrongStringRemoval(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); void RecordWeakStringRemoval(mirror::String* s) const - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); + REQUIRES(Locks::intern_table_lock_); - void SetFaultMessage(const std::string& message); + void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_); // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations // with the unexpected_signal_lock_. 
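The annotation swap running through this change (LOCKS_EXCLUDED(x) becomes REQUIRES(!x), and the *_LOCKS_REQUIRED forms become REQUIRES/SHARED_REQUIRES) adopts Clang's capability analysis; with -Wthread-safety-negative, a REQUIRES(!x) precondition such as the one just added to SetFaultMessage is checked at every call site instead of merely documented. A toy sketch with the same macros (class and names hypothetical):

    class FaultLog {
     public:
      void Set(const std::string& msg) REQUIRES(!lock_) {
        MutexLock mu(Thread::Current(), lock_);  // OK: lock_ provably not held on entry
        message_ = msg;
      }
      void Bad() REQUIRES(lock_) {
        // Set("x");  // analysis error: Set() requires '!lock_', but lock_ is held here
      }
     private:
      Mutex lock_{"fault log lock"};
      std::string message_ GUARDED_BY(lock_);
    };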
const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS { @@ -499,9 +505,8 @@ class Runtime { return !implicit_so_checks_; } - bool IsVerificationEnabled() const { - return verify_; - } + bool IsVerificationEnabled() const; + bool IsVerificationSoftFail() const; bool IsDexFileFallbackEnabled() const { return allow_dex_file_fallback_; @@ -511,8 +516,8 @@ class Runtime { return cpu_abilist_; } - bool RunningOnValgrind() const { - return running_on_valgrind_; + bool IsRunningOnMemoryTool() const { + return is_running_on_memory_tool_; } void SetTargetSdkVersion(int32_t version) { @@ -531,6 +536,10 @@ class Runtime { return experimental_lambdas_; } + lambda::BoxTable* GetLambdaBoxTable() const { + return lambda_box_table_.get(); + } + // Create the JIT and instrumentation and code cache. void CreateJit(); @@ -552,6 +561,13 @@ class Runtime { return method_ref_string_init_reg_map_; } + bool IsDebuggable() const; + + // Returns the build fingerprint, if set. Otherwise an empty string is returned. + std::string GetFingerprint() { + return fingerprint_; + } + private: static void InitPlatformSignalHandlers(); @@ -561,7 +577,7 @@ class Runtime { bool Init(const RuntimeOptions& options, bool ignore_unrecognized) SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_); - void InitNativeMethods() LOCKS_EXCLUDED(Locks::mutator_lock_); + void InitNativeMethods() REQUIRES(!Locks::mutator_lock_); void InitThreadGroups(Thread* self); void RegisterRuntimeNativeMethods(JNIEnv* env); @@ -643,6 +659,8 @@ class Runtime { std::unique_ptr<jit::Jit> jit_; std::unique_ptr<jit::JitOptions> jit_options_; + std::unique_ptr<lambda::BoxTable> lambda_box_table_; + // Fault message, printed when we get a SIGSEGV. Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::string fault_message_ GUARDED_BY(fault_message_lock_); @@ -675,7 +693,7 @@ class Runtime { bool stats_enabled_; RuntimeStats stats_; - const bool running_on_valgrind_; + const bool is_running_on_memory_tool_; std::string profile_output_filename_; ProfilerOptions profiler_options_; @@ -697,8 +715,8 @@ class Runtime { // Transaction used for pre-initializing classes at compilation time. Transaction* preinitialization_transaction_; - // If false, verification is disabled. True by default. - bool verify_; + // If kNone, verification is disabled. kEnable by default. + verifier::VerifyMode verify_; // If true, the runtime may use dex files directly with the interpreter if an oat file is not // available/usable. @@ -744,6 +762,9 @@ class Runtime { MethodRefToStringInitRegMap method_ref_string_init_reg_map_; + // Contains the build fingerprint, if given as a parameter. + std::string fingerprint_; + DISALLOW_COPY_AND_ASSIGN(Runtime); }; std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs); diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index dc4c0c7493..02ed3a2553 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -106,11 +106,13 @@ RUNTIME_OPTIONS_KEY (std::vector<std::string>, \ CompilerOptions) // -Xcompiler-option ... RUNTIME_OPTIONS_KEY (std::vector<std::string>, \ ImageCompilerOptions) // -Ximage-compiler-option ... 
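The Verify key just below switches from bool to verifier::VerifyMode. A plausible shape for the new enum, inferred from the three values this change references and the int8_t backing type in runtime.h's forward declaration (the per-value comments are assumptions):

    namespace art {
    namespace verifier {

    enum class VerifyMode : int8_t {
      kNone,      // verification disabled
      kEnable,    // normal verification; the default
      kSoftFail,  // force a soft failure, deferring checks to runtime
    };

    }  // namespace verifier
    }  // namespace art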
-RUNTIME_OPTIONS_KEY (bool, Verify, true) +RUNTIME_OPTIONS_KEY (verifier::VerifyMode, \ + Verify, verifier::VerifyMode::kEnable) RUNTIME_OPTIONS_KEY (std::string, NativeBridge) RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10) RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback) RUNTIME_OPTIONS_KEY (std::string, CpuAbiList) +RUNTIME_OPTIONS_KEY (std::string, Fingerprint) RUNTIME_OPTIONS_KEY (bool, ExperimentalLambdas, false) // -X[no]experimental-lambdas // Not parse-able from command line, but can be provided explicitly. diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h index 7e59000e09..88ac00a672 100644 --- a/runtime/runtime_options.h +++ b/runtime/runtime_options.h @@ -32,6 +32,7 @@ #include "gc/space/large_object_space.h" #include "profiler_options.h" #include "arch/instruction_set.h" +#include "verifier/verify_mode.h" #include <stdio.h> #include <stdarg.h> diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h index 1cc2df65ba..b90aa0ec0e 100644 --- a/runtime/scoped_thread_state_change.h +++ b/runtime/scoped_thread_state_change.h @@ -34,7 +34,7 @@ namespace art { class ScopedThreadStateChange { public: ScopedThreadStateChange(Thread* self, ThreadState new_thread_state) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : self_(self), thread_state_(new_thread_state), expected_has_no_thread_(false) { if (UNLIKELY(self_ == nullptr)) { // Value chosen arbitrarily and won't be used in the destructor since thread_ == null. @@ -59,7 +59,7 @@ class ScopedThreadStateChange { } } - ~ScopedThreadStateChange() LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE { + ~ScopedThreadStateChange() REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE { if (UNLIKELY(self_ == nullptr)) { if (!expected_has_no_thread_) { Runtime* runtime = Runtime::Current(); @@ -130,7 +130,7 @@ class ScopedObjectAccessAlreadyRunnable { * it's best if we don't grab a mutex. */ template<typename T> - T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + T AddLocalReference(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal()); @@ -139,32 +139,32 @@ class ScopedObjectAccessAlreadyRunnable { template<typename T> T Decode(jobject obj) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return down_cast<T>(Self()->DecodeJObject(obj)); } ArtField* DecodeField(jfieldID fid) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return reinterpret_cast<ArtField*>(fid); } - jfieldID EncodeField(ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jfieldID EncodeField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. 
return reinterpret_cast<jfieldID>(field); } - ArtMethod* DecodeMethod(jmethodID mid) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* DecodeMethod(jmethodID mid) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return reinterpret_cast<ArtMethod*>(mid); } - jmethodID EncodeMethod(ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jmethodID EncodeMethod(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) { Locks::mutator_lock_->AssertSharedHeld(Self()); DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. return reinterpret_cast<jmethodID>(method); @@ -176,12 +176,12 @@ class ScopedObjectAccessAlreadyRunnable { protected: explicit ScopedObjectAccessAlreadyRunnable(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : self_(ThreadForEnv(env)), env_(down_cast<JNIEnvExt*>(env)), vm_(env_->vm) { } explicit ScopedObjectAccessAlreadyRunnable(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : self_(self), env_(down_cast<JNIEnvExt*>(self->GetJniEnv())), vm_(env_ != nullptr ? env_->vm : nullptr) { } @@ -220,14 +220,14 @@ class ScopedObjectAccessAlreadyRunnable { class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable { public: explicit ScopedObjectAccessUnchecked(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : ScopedObjectAccessAlreadyRunnable(env), tsc_(Self(), kRunnable) { Self()->VerifyStack(); Locks::mutator_lock_->AssertSharedHeld(Self()); } explicit ScopedObjectAccessUnchecked(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) ALWAYS_INLINE + REQUIRES(!Locks::thread_suspend_count_lock_) ALWAYS_INLINE : ScopedObjectAccessAlreadyRunnable(self), tsc_(self, kRunnable) { Self()->VerifyStack(); Locks::mutator_lock_->AssertSharedHeld(Self()); @@ -250,13 +250,13 @@ class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable { class ScopedObjectAccess : public ScopedObjectAccessUnchecked { public: explicit ScopedObjectAccess(JNIEnv* env) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : ScopedObjectAccessUnchecked(env) { } explicit ScopedObjectAccess(Thread* self) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE : ScopedObjectAccessUnchecked(self) { } diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc index 9f8c55c980..6cb795061d 100644 --- a/runtime/signal_catcher.cc +++ b/runtime/signal_catcher.cc @@ -133,8 +133,11 @@ void SignalCatcher::HandleSigQuit() { DumpCmdLine(os); - // Note: The string "ABI:" is chosen to match the format used by debuggerd. - os << "ABI: " << GetInstructionSetString(runtime->GetInstructionSet()) << "\n"; + // Note: The strings "Build fingerprint:" and "ABI:" are chosen to match the format used by + // debuggerd. This allows, for example, the stack tool to work. + std::string fingerprint = runtime->GetFingerprint(); + os << "Build fingerprint: '" << (fingerprint.empty() ? 
"unknown" : fingerprint) << "'\n"; + os << "ABI: '" << GetInstructionSetString(runtime->GetInstructionSet()) << "'\n"; os << "Build type: " << (kIsDebugBuild ? "debug" : "optimized") << "\n"; diff --git a/runtime/signal_catcher.h b/runtime/signal_catcher.h index 43bbef48ca..de6a212df4 100644 --- a/runtime/signal_catcher.h +++ b/runtime/signal_catcher.h @@ -35,19 +35,19 @@ class SignalCatcher { explicit SignalCatcher(const std::string& stack_trace_file); ~SignalCatcher(); - void HandleSigQuit() LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + void HandleSigQuit() REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); private: - static void* Run(void* arg); + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void* Run(void* arg) NO_THREAD_SAFETY_ANALYSIS; void HandleSigUsr1(); void Output(const std::string& s); - void SetHaltFlag(bool new_value); - bool ShouldHalt(); - int WaitForSignal(Thread* self, SignalSet& signals); + void SetHaltFlag(bool new_value) REQUIRES(!lock_); + bool ShouldHalt() REQUIRES(!lock_); + int WaitForSignal(Thread* self, SignalSet& signals) REQUIRES(!lock_); std::string stack_trace_file_; diff --git a/runtime/stack.cc b/runtime/stack.cc index 11c94dbbb8..b07b244282 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -150,7 +150,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const { } extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); mirror::Object* StackVisitor::GetThisObject() const { DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); @@ -655,7 +655,7 @@ bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next next_dex_pc_(0) { } - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { if (found_frame_) { ArtMethod* method = GetMethod(); if (method != nullptr && !method->IsRuntimeMethod()) { @@ -688,7 +688,7 @@ void StackVisitor::DescribeStack(Thread* thread) { explicit DescribeStackVisitor(Thread* thread_in) : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation(); return true; } @@ -857,7 +857,6 @@ void StackVisitor::WalkStack(bool include_transitions) { << " native=" << method->IsNative() << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode() << "," << method->GetEntryPointFromJni() - << "," << method->GetEntryPointFromInterpreter() << " next=" << *cur_quick_frame_; } @@ -905,7 +904,7 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size); } } - DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U); + DCHECK_ALIGNED(frame_size, kStackAlignment); DCHECK_NE(reg, -1); int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa) + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa) diff --git a/runtime/stack.h b/runtime/stack.h index d60714f7a3..25627383fe 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -21,7 +21,6 @@ #include <string> #include "arch/instruction_set.h" 
-#include "base/bit_utils.h" #include "dex_file.h" #include "gc_root.h" #include "mirror/object_reference.h" @@ -155,7 +154,7 @@ class ShadowFrame { // If this returns non-null then this does not mean the vreg is currently a reference // on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain. template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - mirror::Object* GetVRegReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_LT(i, NumberOfVRegs()); mirror::Object* ref; if (HasReferenceArray()) { @@ -229,7 +228,7 @@ class ShadowFrame { } template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags> - void SetVRegReference(size_t i, mirror::Object* val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK_LT(i, NumberOfVRegs()); if (kVerifyFlags & kVerifyWrites) { VerifyObject(val); @@ -244,14 +243,14 @@ class ShadowFrame { } } - ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(method_ != nullptr); return method_; } - mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_); bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const { if (HasReferenceArray()) { @@ -333,7 +332,7 @@ class JavaFrameRootInfo : public RootInfo { : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) { } virtual void Describe(std::ostream& os) const OVERRIDE - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: const StackVisitor* const stack_visitor_; @@ -410,7 +409,7 @@ class PACKED(4) ManagedStack { return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_); } - size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_); bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const; @@ -431,31 +430,31 @@ class StackVisitor { protected: StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); public: virtual ~StackVisitor() {} // Return 'true' if we should continue to visit more frames, 'false' to stop. 
- virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0; void WalkStack(bool include_transitions = false) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_); bool IsShadowFrame() const { return cur_shadow_frame_ != nullptr; } - uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetThisObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_); - size_t GetNativePcOffset() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_); uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Callee saves are held at the top of the frame DCHECK(GetMethod() != nullptr); uint8_t* save_addr = @@ -467,46 +466,46 @@ class StackVisitor { } // Returns the height of the stack in the managed stack frames, including transitions. - size_t GetFrameHeight() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) { return GetNumFrames() - cur_depth_ - 1; } // Returns a frame ID for JDWP use, starting from 1. - size_t GetFrameId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) { return GetFrameHeight() + 1; } - size_t GetNumFrames() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) { if (num_frames_ == 0) { num_frames_ = ComputeNumFrames(thread_, walk_kind_); } return num_frames_; } - size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) { return cur_depth_; } // Get the method and dex pc immediately after the one that's currently being visited. 
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsReferenceVReg(ArtMethod* m, uint16_t vreg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); uintptr_t* GetGPRAddress(uint32_t reg) const; @@ -522,9 +521,9 @@ class StackVisitor { return reinterpret_cast<uint32_t*>(vreg_addr); } - uintptr_t GetReturnPc() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_); - void SetReturnPc(uintptr_t new_ret_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_); /* * Return sp-relative offset for a Dalvik virtual register, compiler @@ -606,17 +605,17 @@ class StackVisitor { return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size); } - std::string DescribeLocation() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_); static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_); private: // Private constructor known in the case that num_frames_ has already been computed. StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsAccessibleRegister(uint32_t reg, bool is_float) const { return is_float ? 
IsAccessibleFPR(reg) : IsAccessibleGPR(reg); @@ -644,40 +643,40 @@ class StackVisitor { bool GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo, uint64_t* val) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, uint64_t new_value, bool is_float) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_); - InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_); Thread* const thread_; const StackWalkKind walk_kind_; diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc index 741cd906a8..5544507c06 100644 --- a/runtime/stack_map.cc +++ b/runtime/stack_map.cc @@ -27,16 +27,17 @@ constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex; constexpr uint32_t StackMap::kNoDexRegisterMap; constexpr uint32_t StackMap::kNoInlineInfo; -DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(uint16_t dex_register_number, - uint16_t number_of_dex_registers, - const CodeInfo& code_info, - const StackMapEncoding& enc) const { +DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind( + uint16_t dex_register_number, + uint16_t number_of_dex_registers, + const CodeInfo& code_info, + const StackMapEncoding& enc) const { DexRegisterLocationCatalog dex_register_location_catalog = code_info.GetDexRegisterLocationCatalog(enc); size_t location_catalog_entry_index = GetLocationCatalogEntryIndex( dex_register_number, number_of_dex_registers, - code_info.GetNumberOfDexRegisterLocationCatalogEntries()); + code_info.GetNumberOfLocationCatalogEntries()); return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index); } @@ -49,7 +50,7 @@ DexRegisterLocation 
DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register size_t location_catalog_entry_index = GetLocationCatalogEntryIndex( dex_register_number, number_of_dex_registers, - code_info.GetNumberOfDexRegisterLocationCatalogEntries()); + code_info.GetNumberOfLocationCatalogEntries()); return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index); } @@ -95,40 +96,37 @@ static void DumpRegisterMapping(std::ostream& os, DexRegisterLocation location, const std::string& prefix = "v", const std::string& suffix = "") { - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); - indented_os << prefix << dex_register_num << ": " - << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind()) - << " (" << location.GetValue() << ")" << suffix << '\n'; + os << prefix << dex_register_num << ": " + << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind()) + << " (" << location.GetValue() << ")" << suffix << '\n'; } -void CodeInfo::Dump(std::ostream& os, +void CodeInfo::Dump(VariableIndentationOutputStream* vios, uint32_t code_offset, uint16_t number_of_dex_registers, bool dump_stack_maps) const { StackMapEncoding encoding = ExtractEncoding(); uint32_t code_info_size = GetOverallSize(); size_t number_of_stack_maps = GetNumberOfStackMaps(); - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); - indented_os << "Optimized CodeInfo (size=" << code_info_size - << ", number_of_dex_registers=" << number_of_dex_registers - << ", number_of_stack_maps=" << number_of_stack_maps - << ", has_inline_info=" << encoding.HasInlineInfo() - << ", number_of_bytes_for_inline_info=" << encoding.NumberOfBytesForInlineInfo() - << ", number_of_bytes_for_dex_register_map=" - << encoding.NumberOfBytesForDexRegisterMap() - << ", number_of_bytes_for_dex_pc=" << encoding.NumberOfBytesForDexPc() - << ", number_of_bytes_for_native_pc=" << encoding.NumberOfBytesForNativePc() - << ", number_of_bytes_for_register_mask=" << encoding.NumberOfBytesForRegisterMask() - << ")\n"; + vios->Stream() + << "Optimized CodeInfo (size=" << code_info_size + << ", number_of_dex_registers=" << number_of_dex_registers + << ", number_of_stack_maps=" << number_of_stack_maps + << ", has_inline_info=" << encoding.HasInlineInfo() + << ", number_of_bytes_for_inline_info=" << encoding.NumberOfBytesForInlineInfo() + << ", number_of_bytes_for_dex_register_map=" << encoding.NumberOfBytesForDexRegisterMap() + << ", number_of_bytes_for_dex_pc=" << encoding.NumberOfBytesForDexPc() + << ", number_of_bytes_for_native_pc=" << encoding.NumberOfBytesForNativePc() + << ", number_of_bytes_for_register_mask=" << encoding.NumberOfBytesForRegisterMask() + << ")\n"; + ScopedIndentation indent1(vios); // Display the Dex register location catalog. - GetDexRegisterLocationCatalog(encoding).Dump(indented_os, *this); + GetDexRegisterLocationCatalog(encoding).Dump(vios, *this); // Display stack maps along with (live) Dex register maps. if (dump_stack_maps) { for (size_t i = 0; i < number_of_stack_maps; ++i) { StackMap stack_map = GetStackMapAt(i, encoding); - stack_map.Dump(indented_os, + stack_map.Dump(vios, *this, encoding, code_offset, @@ -140,30 +138,26 @@ void CodeInfo::Dump(std::ostream& os, // we need to know the number of dex registers for each inlined method. 
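The vios/ScopedIndentation pair used throughout this rewrite replaces the per-function Indenter stream filters with one RAII helper. A sketch of the pattern (the constructor argument shape is assumed):

    void DumpExample(std::ostream& os) {
      VariableIndentationOutputStream vios(&os);  // assumed to wrap a std::ostream*
      vios.Stream() << "Optimized CodeInfo (...)\n";
      {
        ScopedIndentation indent1(&vios);         // push one indentation level
        vios.Stream() << "StackMap 0 (...)\n";    // printed one level deeper
      }                                           // level popped on scope exit
    }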
} -void DexRegisterLocationCatalog::Dump(std::ostream& os, const CodeInfo& code_info) { +void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios, + const CodeInfo& code_info) { StackMapEncoding encoding = code_info.ExtractEncoding(); - size_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); + size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding); - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); - indented_os + vios->Stream() << "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries << ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n"; for (size_t i = 0; i < number_of_location_catalog_entries; ++i) { DexRegisterLocation location = GetDexRegisterLocation(i); - DumpRegisterMapping(indented_os, i, location, "entry "); + ScopedIndentation indent1(vios); + DumpRegisterMapping(vios->Stream(), i, location, "entry "); } } -void DexRegisterMap::Dump(std::ostream& os, +void DexRegisterMap::Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, uint16_t number_of_dex_registers) const { StackMapEncoding encoding = code_info.ExtractEncoding(); - size_t number_of_location_catalog_entries = - code_info.GetNumberOfDexRegisterLocationCatalogEntries(); - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); + size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(); // TODO: Display the bit mask of live Dex registers. for (size_t j = 0; j < number_of_dex_registers; ++j) { if (IsDexRegisterLive(j)) { @@ -173,70 +167,70 @@ void DexRegisterMap::Dump(std::ostream& os, number_of_dex_registers, code_info, encoding); + ScopedIndentation indent1(vios); DumpRegisterMapping( - indented_os, j, location, "v", + vios->Stream(), j, location, "v", "\t[entry " + std::to_string(static_cast<int>(location_catalog_entry_index)) + "]"); } } } -void StackMap::Dump(std::ostream& os, +void StackMap::Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, const StackMapEncoding& encoding, uint32_t code_offset, uint16_t number_of_dex_registers, const std::string& header_suffix) const { - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); - indented_os << "StackMap" << header_suffix - << std::hex - << " [native_pc=0x" << code_offset + GetNativePcOffset(encoding) << "]" - << " (dex_pc=0x" << GetDexPc(encoding) - << ", native_pc_offset=0x" << GetNativePcOffset(encoding) - << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(encoding) - << ", inline_info_offset=0x" << GetInlineDescriptorOffset(encoding) - << ", register_mask=0x" << GetRegisterMask(encoding) - << std::dec - << ", stack_mask=0b"; + vios->Stream() + << "StackMap" << header_suffix + << std::hex + << " [native_pc=0x" << code_offset + GetNativePcOffset(encoding) << "]" + << " (dex_pc=0x" << GetDexPc(encoding) + << ", native_pc_offset=0x" << GetNativePcOffset(encoding) + << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(encoding) + << ", inline_info_offset=0x" << GetInlineDescriptorOffset(encoding) + << ", register_mask=0x" << GetRegisterMask(encoding) + << std::dec + << ", stack_mask=0b"; MemoryRegion stack_mask = GetStackMask(encoding); for (size_t i = 0, e = 
stack_mask.size_in_bits(); i < e; ++i) { - indented_os << stack_mask.LoadBit(e - i - 1); + vios->Stream() << stack_mask.LoadBit(e - i - 1); } - indented_os << ")\n"; + vios->Stream() << ")\n"; if (HasDexRegisterMap(encoding)) { DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf( *this, encoding, number_of_dex_registers); - dex_register_map.Dump(os, code_info, number_of_dex_registers); + dex_register_map.Dump(vios, code_info, number_of_dex_registers); } if (HasInlineInfo(encoding)) { InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding); // We do not know the length of the dex register maps of inlined frames // at this level, so we just pass null to `InlineInfo::Dump` to tell // it not to look at these maps. - inline_info.Dump(os, code_info, nullptr); + inline_info.Dump(vios, code_info, nullptr); } } -void InlineInfo::Dump(std::ostream& os, +void InlineInfo::Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, uint16_t number_of_dex_registers[]) const { - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indented_os(&indent_filter); - indented_os << "InlineInfo with depth " << static_cast<uint32_t>(GetDepth()) << "\n"; + vios->Stream() << "InlineInfo with depth " << static_cast<uint32_t>(GetDepth()) << "\n"; for (size_t i = 0; i < GetDepth(); ++i) { - indented_os << " At depth " << i - << std::hex - << " (dex_pc=0x" << GetDexPcAtDepth(i) - << std::dec - << ", method_index=" << GetMethodIndexAtDepth(i) - << ", invoke_type=" << static_cast<InvokeType>(GetInvokeTypeAtDepth(i)) - << ")\n"; + vios->Stream() + << " At depth " << i + << std::hex + << " (dex_pc=0x" << GetDexPcAtDepth(i) + << std::dec + << ", method_index=" << GetMethodIndexAtDepth(i) + << ", invoke_type=" << static_cast<InvokeType>(GetInvokeTypeAtDepth(i)) + << ")\n"; if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) { StackMapEncoding encoding = code_info.ExtractEncoding(); DexRegisterMap dex_register_map = code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]); - dex_register_map.Dump(indented_os, code_info, number_of_dex_registers[i]); + ScopedIndentation indent1(vios); + dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]); } } } diff --git a/runtime/stack_map.h b/runtime/stack_map.h index 4e420084d1..0d3816b97e 100644 --- a/runtime/stack_map.h +++ b/runtime/stack_map.h @@ -23,6 +23,14 @@ namespace art { +#define ELEMENT_BYTE_OFFSET_AFTER(PreviousElement) \ + k ## PreviousElement ## Offset + sizeof(PreviousElement ## Type) + +#define ELEMENT_BIT_OFFSET_AFTER(PreviousElement) \ + k ## PreviousElement ## BitOffset + PreviousElement ## BitSize + +class VariableIndentationOutputStream; + // Size of a frame slot, in bytes. This constant is a signed value, // to please the compiler in arithmetic operations involving int32_t // (signed) values. @@ -31,6 +39,11 @@ static constexpr ssize_t kFrameSlotSize = 4; // Size of Dex virtual registers. static constexpr size_t kVRegSize = 4; +// We encode the number of bytes needed for writing a value on 3 bits +// (i.e. up to 8 values), for values that we know are maximum 32-bit +// long. +static constexpr size_t kNumberOfBitForNumberOfBytesForEncoding = 3; + class CodeInfo; class StackMapEncoding; @@ -188,7 +201,9 @@ class DexRegisterLocation { /** * Store information on unique Dex register locations used in a method. * The information is of the form: - * [DexRegisterLocation+]. + * + * [DexRegisterLocation+]. 
+ * * DexRegisterLocations are either 1- or 5-byte wide (see art::DexRegisterLocation::Kind). */ class DexRegisterLocationCatalog { @@ -357,7 +372,7 @@ class DexRegisterLocationCatalog { return region_.size(); } - void Dump(std::ostream& os, const CodeInfo& code_info); + void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info); // Special (invalid) Dex register location catalog entry index meaning // that there is no location for a given Dex register (i.e., it is @@ -425,7 +440,9 @@ class DexRegisterLocationCatalog { /* Information on Dex register locations for a specific PC, mapping a * stack map's Dex register to a location entry in a DexRegisterLocationCatalog. * The information is of the form: - * [live_bit_mask, entries*] + * + * [live_bit_mask, entries*] + * * where entries are concatenated unsigned integer values encoded on a number * of bits (fixed per DexRegisterMap instances of a CodeInfo object) depending * on the number of entries in the Dex register location catalog @@ -610,7 +627,8 @@ class DexRegisterMap { return region_.size(); } - void Dump(std::ostream& o, const CodeInfo& code_info, uint16_t number_of_dex_registers) const; + void Dump(VariableIndentationOutputStream* vios, + const CodeInfo& code_info, uint16_t number_of_dex_registers) const; private: // Return the index in the Dex register map corresponding to the Dex @@ -749,8 +767,9 @@ class StackMapEncoding { * - Knowing the values of dex registers. * * The information is of the form: - * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask, - * stack_mask]. + * + * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask, + * stack_mask]. */ class StackMap { public: @@ -837,7 +856,7 @@ class StackMap { && region_.size() == other.region_.size(); } - void Dump(std::ostream& os, + void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, const StackMapEncoding& encoding, uint32_t code_offset, @@ -853,8 +872,6 @@ class StackMap { static constexpr uint32_t kNoInlineInfo = -1; private: - // TODO: Instead of plain types such as "uint32_t", introduce - // typedefs (and document the memory layout of StackMap). static constexpr int kFixedSize = 0; // Loads `number_of_bytes` at the given `offset` and assemble a uint32_t. If `check_max` is true, @@ -869,61 +886,74 @@ class StackMap { /** * Inline information for a specific PC. The information is of the form: - * [inlining_depth, [dex_pc, method_index, dex_register_map_offset]+] + * + * [inlining_depth, entry+] + * + * where `entry` is of the form: + * + * [dex_pc, method_index, dex_register_map_offset]. */ class InlineInfo { public: + // Memory layout: fixed contents. + typedef uint8_t DepthType; + // Memory layout: single entry contents. 
+ typedef uint32_t MethodIndexType; + typedef uint32_t DexPcType; + typedef uint8_t InvokeTypeType; + typedef uint32_t DexRegisterMapType; + explicit InlineInfo(MemoryRegion region) : region_(region) {} - uint8_t GetDepth() const { - return region_.LoadUnaligned<uint8_t>(kDepthOffset); + DepthType GetDepth() const { + return region_.LoadUnaligned<DepthType>(kDepthOffset); } - void SetDepth(uint8_t depth) { - region_.StoreUnaligned<uint8_t>(kDepthOffset, depth); + void SetDepth(DepthType depth) { + region_.StoreUnaligned<DepthType>(kDepthOffset, depth); } - uint32_t GetMethodIndexAtDepth(uint8_t depth) const { - return region_.LoadUnaligned<uint32_t>( + MethodIndexType GetMethodIndexAtDepth(DepthType depth) const { + return region_.LoadUnaligned<MethodIndexType>( kFixedSize + depth * SingleEntrySize() + kMethodIndexOffset); } - void SetMethodIndexAtDepth(uint8_t depth, uint32_t index) { - region_.StoreUnaligned<uint32_t>( + void SetMethodIndexAtDepth(DepthType depth, MethodIndexType index) { + region_.StoreUnaligned<MethodIndexType>( kFixedSize + depth * SingleEntrySize() + kMethodIndexOffset, index); } - uint32_t GetDexPcAtDepth(uint8_t depth) const { - return region_.LoadUnaligned<uint32_t>( + DexPcType GetDexPcAtDepth(DepthType depth) const { + return region_.LoadUnaligned<DexPcType>( kFixedSize + depth * SingleEntrySize() + kDexPcOffset); } - void SetDexPcAtDepth(uint8_t depth, uint32_t dex_pc) { - region_.StoreUnaligned<uint32_t>( + void SetDexPcAtDepth(DepthType depth, DexPcType dex_pc) { + region_.StoreUnaligned<DexPcType>( kFixedSize + depth * SingleEntrySize() + kDexPcOffset, dex_pc); } - uint8_t GetInvokeTypeAtDepth(uint8_t depth) const { - return region_.LoadUnaligned<uint8_t>( + InvokeTypeType GetInvokeTypeAtDepth(DepthType depth) const { + return region_.LoadUnaligned<InvokeTypeType>( kFixedSize + depth * SingleEntrySize() + kInvokeTypeOffset); } - void SetInvokeTypeAtDepth(uint8_t depth, uint8_t invoke_type) { - region_.StoreUnaligned<uint8_t>( + void SetInvokeTypeAtDepth(DepthType depth, InvokeTypeType invoke_type) { + region_.StoreUnaligned<InvokeTypeType>( kFixedSize + depth * SingleEntrySize() + kInvokeTypeOffset, invoke_type); } - uint32_t GetDexRegisterMapOffsetAtDepth(uint8_t depth) const { - return region_.LoadUnaligned<uint32_t>( + DexRegisterMapType GetDexRegisterMapOffsetAtDepth(DepthType depth) const { + return region_.LoadUnaligned<DexRegisterMapType>( kFixedSize + depth * SingleEntrySize() + kDexRegisterMapOffset); } - void SetDexRegisterMapOffsetAtDepth(uint8_t depth, uint32_t offset) { - region_.StoreUnaligned<uint32_t>( + void SetDexRegisterMapOffsetAtDepth(DepthType depth, DexRegisterMapType offset) { + region_.StoreUnaligned<DexRegisterMapType>( kFixedSize + depth * SingleEntrySize() + kDexRegisterMapOffset, offset); } - bool HasDexRegisterMapAtDepth(uint8_t depth) const { + bool HasDexRegisterMapAtDepth(DepthType depth) const { return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoDexRegisterMap; } @@ -931,19 +961,19 @@ class InlineInfo { return kFixedEntrySize; } - void Dump(std::ostream& os, const CodeInfo& info, uint16_t* number_of_dex_registers) const; + void Dump(VariableIndentationOutputStream* vios, + const CodeInfo& info, uint16_t* number_of_dex_registers) const; + private: - // TODO: Instead of plain types such as "uint8_t", introduce - // typedefs (and document the memory layout of InlineInfo). 
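Expanding ELEMENT_BYTE_OFFSET_AFTER by hand for the constants that follow shows how the offsets chain (sizes taken from the typedefs above):

    // Fixed part:   kFixedSize            = kDepthOffset + sizeof(DepthType) = 0 + 1 = 1
    // Entry layout: kMethodIndexOffset    = 0
    //               kDexPcOffset          = 0 + sizeof(MethodIndexType)    = 4
    //               kInvokeTypeOffset     = 4 + sizeof(DexPcType)          = 8
    //               kDexRegisterMapOffset = 8 + sizeof(InvokeTypeType)     = 9
    //               kFixedEntrySize       = 9 + sizeof(DexRegisterMapType) = 13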
static constexpr int kDepthOffset = 0; - static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t); + static constexpr int kFixedSize = ELEMENT_BYTE_OFFSET_AFTER(Depth); static constexpr int kMethodIndexOffset = 0; - static constexpr int kDexPcOffset = kMethodIndexOffset + sizeof(uint32_t); - static constexpr int kInvokeTypeOffset = kDexPcOffset + sizeof(uint32_t); - static constexpr int kDexRegisterMapOffset = kInvokeTypeOffset + sizeof(uint8_t); - static constexpr int kFixedEntrySize = kDexRegisterMapOffset + sizeof(uint32_t); + static constexpr int kDexPcOffset = ELEMENT_BYTE_OFFSET_AFTER(MethodIndex); + static constexpr int kInvokeTypeOffset = ELEMENT_BYTE_OFFSET_AFTER(DexPc); + static constexpr int kDexRegisterMapOffset = ELEMENT_BYTE_OFFSET_AFTER(InvokeType); + static constexpr int kFixedEntrySize = ELEMENT_BYTE_OFFSET_AFTER(DexRegisterMap); MemoryRegion region_; @@ -955,11 +985,32 @@ class InlineInfo { /** * Wrapper around all compiler information collected for a method. * The information is of the form: - * [overall_size, number_of_location_catalog_entries, number_of_stack_maps, stack_mask_size, - * DexRegisterLocationCatalog+, StackMap+, DexRegisterMap+, InlineInfo*]. + * + * [overall_size, encoding_info, number_of_location_catalog_entries, number_of_stack_maps, + * stack_mask_size, DexRegisterLocationCatalog+, StackMap+, DexRegisterMap+, InlineInfo*] + * + * where `encoding_info` is of the form: + * + * [has_inline_info, inline_info_size_in_bytes, dex_register_map_size_in_bytes, + * dex_pc_size_in_bytes, native_pc_size_in_bytes, register_mask_size_in_bytes]. */ class CodeInfo { public: + // Memory layout: fixed contents. + typedef uint32_t OverallSizeType; + typedef uint16_t EncodingInfoType; + typedef uint32_t NumberOfLocationCatalogEntriesType; + typedef uint32_t NumberOfStackMapsType; + typedef uint32_t StackMaskSizeType; + + // Memory (bit) layout: encoding info. + static constexpr int HasInlineInfoBitSize = 1; + static constexpr int InlineInfoBitSize = kNumberOfBitForNumberOfBytesForEncoding; + static constexpr int DexRegisterMapBitSize = kNumberOfBitForNumberOfBytesForEncoding; + static constexpr int DexPcBitSize = kNumberOfBitForNumberOfBytesForEncoding; + static constexpr int NativePcBitSize = kNumberOfBitForNumberOfBytesForEncoding; + static constexpr int RegisterMaskBitSize = kNumberOfBitForNumberOfBytesForEncoding; + explicit CodeInfo(MemoryRegion region) : region_(region) {} explicit CodeInfo(const void* data) { @@ -987,17 +1038,11 @@ class CodeInfo { } void SetEncodingAt(size_t bit_offset, size_t number_of_bytes) { - // We encode the number of bytes needed for writing a value on 3 bits, - // for values that we know are maximum 32bits. 
- region_.StoreBit(bit_offset, (number_of_bytes & 1)); - region_.StoreBit(bit_offset + 1, (number_of_bytes & 2)); - region_.StoreBit(bit_offset + 2, (number_of_bytes & 4)); + region_.StoreBits(bit_offset, number_of_bytes, kNumberOfBitForNumberOfBytesForEncoding); } size_t GetNumberOfBytesForEncoding(size_t bit_offset) const { - return region_.LoadBit(bit_offset) - + (region_.LoadBit(bit_offset + 1) << 1) - + (region_.LoadBit(bit_offset + 2) << 2); + return region_.LoadBits(bit_offset, kNumberOfBitForNumberOfBytesForEncoding); } bool HasInlineInfo() const { @@ -1015,33 +1060,35 @@ class CodeInfo { return StackMap(GetStackMaps(encoding).Subregion(i * stack_map_size, stack_map_size)); } - uint32_t GetOverallSize() const { - return region_.LoadUnaligned<uint32_t>(kOverallSizeOffset); + OverallSizeType GetOverallSize() const { + return region_.LoadUnaligned<OverallSizeType>(kOverallSizeOffset); } - void SetOverallSize(uint32_t size) { - region_.StoreUnaligned<uint32_t>(kOverallSizeOffset, size); + void SetOverallSize(OverallSizeType size) { + region_.StoreUnaligned<OverallSizeType>(kOverallSizeOffset, size); } - uint32_t GetNumberOfDexRegisterLocationCatalogEntries() const { - return region_.LoadUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset); + NumberOfLocationCatalogEntriesType GetNumberOfLocationCatalogEntries() const { + return region_.LoadUnaligned<NumberOfLocationCatalogEntriesType>( + kNumberOfLocationCatalogEntriesOffset); } - void SetNumberOfDexRegisterLocationCatalogEntries(uint32_t num_entries) { - region_.StoreUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset, num_entries); + void SetNumberOfLocationCatalogEntries(NumberOfLocationCatalogEntriesType num_entries) { + region_.StoreUnaligned<NumberOfLocationCatalogEntriesType>( + kNumberOfLocationCatalogEntriesOffset, num_entries); } uint32_t GetDexRegisterLocationCatalogSize(const StackMapEncoding& encoding) const { return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(encoding), - GetNumberOfDexRegisterLocationCatalogEntries()); + GetNumberOfLocationCatalogEntries()); } - size_t GetNumberOfStackMaps() const { - return region_.LoadUnaligned<uint32_t>(kNumberOfStackMapsOffset); + NumberOfStackMapsType GetNumberOfStackMaps() const { + return region_.LoadUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset); } - void SetNumberOfStackMaps(uint32_t number_of_stack_maps) { - region_.StoreUnaligned<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps); + void SetNumberOfStackMaps(NumberOfStackMapsType number_of_stack_maps) { + region_.StoreUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset, number_of_stack_maps); } // Get the size all the stack maps of this CodeInfo object, in bytes. @@ -1120,29 +1167,34 @@ class CodeInfo { // number of Dex virtual registers used in this method. If // `dump_stack_maps` is true, also dump the stack maps and the // associated Dex register maps. - void Dump(std::ostream& os, + void Dump(VariableIndentationOutputStream* vios, uint32_t code_offset, uint16_t number_of_dex_registers, bool dump_stack_maps) const; private: - // TODO: Instead of plain types such as "uint32_t", introduce - // typedefs (and document the memory layout of CodeInfo). 
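With the typedefs above, the fixed header layout and the encoding-info bit budget can be checked by hand:

    // Byte offsets (via ELEMENT_BYTE_OFFSET_AFTER):
    //   kOverallSizeOffset                    = 0
    //   kEncodingInfoOffset                   = 0 + sizeof(uint32_t)  = 4
    //   kNumberOfLocationCatalogEntriesOffset = 4 + sizeof(uint16_t)  = 6
    //   kNumberOfStackMapsOffset              = 6 + sizeof(uint32_t)  = 10
    //   kStackMaskSizeOffset                  = 10 + sizeof(uint32_t) = 14
    //   kFixedSize                            = 14 + sizeof(uint32_t) = 18
    // Encoding info: 1 (has_inline_info) + 5 * 3 (byte counts) = 16 bits, exactly
    // filling EncodingInfoType (uint16_t); the static_assert below guards this.
    // Round trip: SetEncodingAt(off, 5) stores bits 1,0,1 and
    // GetNumberOfBytesForEncoding(off) reassembles 1 + (0 << 1) + (1 << 2) = 5.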
static constexpr int kOverallSizeOffset = 0; - static constexpr int kEncodingInfoOffset = kOverallSizeOffset + sizeof(uint32_t); - static constexpr int kNumberOfDexRegisterLocationCatalogEntriesOffset = - kEncodingInfoOffset + sizeof(uint16_t); + static constexpr int kEncodingInfoOffset = ELEMENT_BYTE_OFFSET_AFTER(OverallSize); + static constexpr int kNumberOfLocationCatalogEntriesOffset = + ELEMENT_BYTE_OFFSET_AFTER(EncodingInfo); static constexpr int kNumberOfStackMapsOffset = - kNumberOfDexRegisterLocationCatalogEntriesOffset + sizeof(uint32_t); - static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t); - static constexpr int kFixedSize = kStackMaskSizeOffset + sizeof(uint32_t); + ELEMENT_BYTE_OFFSET_AFTER(NumberOfLocationCatalogEntries); + static constexpr int kStackMaskSizeOffset = ELEMENT_BYTE_OFFSET_AFTER(NumberOfStackMaps); + static constexpr int kFixedSize = ELEMENT_BYTE_OFFSET_AFTER(StackMaskSize); + + static constexpr int kHasInlineInfoBitOffset = kEncodingInfoOffset * kBitsPerByte; + static constexpr int kInlineInfoBitOffset = ELEMENT_BIT_OFFSET_AFTER(HasInlineInfo); + static constexpr int kDexRegisterMapBitOffset = ELEMENT_BIT_OFFSET_AFTER(InlineInfo); + static constexpr int kDexPcBitOffset = ELEMENT_BIT_OFFSET_AFTER(DexRegisterMap); + static constexpr int kNativePcBitOffset = ELEMENT_BIT_OFFSET_AFTER(DexPc); + static constexpr int kRegisterMaskBitOffset = ELEMENT_BIT_OFFSET_AFTER(NativePc); - static constexpr int kHasInlineInfoBitOffset = (kEncodingInfoOffset * kBitsPerByte); - static constexpr int kInlineInfoBitOffset = kHasInlineInfoBitOffset + 1; - static constexpr int kDexRegisterMapBitOffset = kInlineInfoBitOffset + 3; - static constexpr int kDexPcBitOffset = kDexRegisterMapBitOffset + 3; - static constexpr int kNativePcBitOffset = kDexPcBitOffset + 3; - static constexpr int kRegisterMaskBitOffset = kNativePcBitOffset + 3; + static constexpr int kEncodingInfoPastTheEndBitOffset = ELEMENT_BIT_OFFSET_AFTER(RegisterMask); + static constexpr int kEncodingInfoOverallBitSize = + kEncodingInfoPastTheEndBitOffset - kHasInlineInfoBitOffset; + + static_assert(kEncodingInfoOverallBitSize <= (sizeof(EncodingInfoType) * kBitsPerByte), + "art::CodeInfo::EncodingInfoType is too short to hold all encoding info elements."); MemoryRegion GetStackMaps(const StackMapEncoding& encoding) const { return region_.size() == 0 @@ -1165,7 +1217,7 @@ class CodeInfo { size_t number_of_live_dex_registers = dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers); size_t location_mapping_data_size_in_bits = - DexRegisterMap::SingleEntrySizeInBits(GetNumberOfDexRegisterLocationCatalogEntries()) + DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries()) * number_of_live_dex_registers; size_t location_mapping_data_size_in_bytes = RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte; @@ -1207,6 +1259,9 @@ class CodeInfo { friend class StackMapStream; }; +#undef ELEMENT_BYTE_OFFSET_AFTER +#undef ELEMENT_BIT_OFFSET_AFTER + } // namespace art #endif // ART_RUNTIME_STACK_MAP_H_ diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h index d8d21aa5af..a9da51ba29 100644 --- a/runtime/stride_iterator.h +++ b/runtime/stride_iterator.h @@ -29,11 +29,12 @@ class StrideIterator : public std::iterator<std::forward_iterator_tag, T> { StrideIterator& operator=(const StrideIterator&) = default; StrideIterator& operator=(StrideIterator&&) = default; - StrideIterator(uintptr_t ptr, size_t stride) - : ptr_(ptr), 
stride_(stride) { - } + StrideIterator(T* ptr, size_t stride) + : ptr_(reinterpret_cast<uintptr_t>(ptr)), + stride_(stride) {} bool operator==(const StrideIterator& other) const { + DCHECK_EQ(stride_, other.stride_); return ptr_ == other.ptr_; } @@ -47,11 +48,22 @@ class StrideIterator : public std::iterator<std::forward_iterator_tag, T> { } StrideIterator operator++(int) { - auto temp = *this; + StrideIterator<T> temp = *this; ptr_ += stride_; return temp; } + StrideIterator operator+(ssize_t delta) const { + StrideIterator<T> temp = *this; + temp += delta; + return temp; + } + + StrideIterator& operator+=(ssize_t delta) { + ptr_ += static_cast<ssize_t>(stride_) * delta; + return *this; + } + T& operator*() const { return *reinterpret_cast<T*>(ptr_); } diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h index f7ef8942e6..8bf241b66d 100644 --- a/runtime/thread-inl.h +++ b/runtime/thread-inl.h @@ -19,12 +19,17 @@ #include "thread.h" +#ifdef __ANDROID__ +#include <bionic_tls.h> // Access to our own TLS slot. +#endif + #include <pthread.h> #include "base/casts.h" #include "base/mutex-inl.h" #include "gc/heap.h" #include "jni_env_ext.h" +#include "thread_pool.h" namespace art { @@ -40,7 +45,11 @@ inline Thread* Thread::Current() { if (!is_started_) { return nullptr; } else { +#ifdef __ANDROID__ + void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF]; +#else void* thread = pthread_getspecific(Thread::pthread_key_self_); +#endif return reinterpret_cast<Thread*>(thread); } } @@ -66,8 +75,10 @@ inline void Thread::CheckSuspend() { } inline ThreadState Thread::SetState(ThreadState new_state) { - // Cannot use this code to change into Runnable as changing to Runnable should fail if - // old_state_and_flags.suspend_request is true. + // Should only be used to change between suspended states. + // Cannot use this code to change into or from Runnable as changing to Runnable should + // fail if old_state_and_flags.suspend_request is true and changing from Runnable might + // miss passing an active suspend barrier. DCHECK_NE(new_state, kRunnable); if (kIsDebugBuild && this != Thread::Current()) { std::string name; @@ -77,6 +88,7 @@ inline ThreadState Thread::SetState(ThreadState new_state) { } union StateAndFlags old_state_and_flags; old_state_and_flags.as_int = tls32_.state_and_flags.as_int; + CHECK_NE(old_state_and_flags.as_struct.state, kRunnable); tls32_.state_and_flags.as_struct.state = new_state; return static_cast<ThreadState>(old_state_and_flags.as_struct.state); } @@ -125,20 +137,34 @@ inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) { new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags; new_state_and_flags.as_struct.state = new_state; - // CAS the value without a memory ordering as that is given by the lock release below. + // CAS the value with a memory ordering. bool done = - tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelaxed(old_state_and_flags.as_int, + tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelease(old_state_and_flags.as_int, new_state_and_flags.as_int); if (LIKELY(done)) { break; } } - // Release share on mutator_lock_. - Locks::mutator_lock_->SharedUnlock(this); + + // Change to non-runnable state, thereby appearing suspended to the system. + // Mark the release of the share of the mutator_lock_. 
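// Usage sketch for the strengthened StrideIterator above: walk `count`
// elements whose stride is only known at runtime (as with ArtMethod arrays,
// whose element size depends on the image pointer size). `Element` is a
// hypothetical payload type for illustration only.
#include <cstdint>
#include <cstdio>
#include <sys/types.h>

struct Element {
  uint32_t value;
};

void VisitAll(Element* first, size_t stride, size_t count) {
  StrideIterator<Element> it(first, stride);
  // The new operator+ lets us form the end iterator directly; operator==
  // DCHECKs that both sides use the same stride.
  StrideIterator<Element> end = it + static_cast<ssize_t>(count);
  for (; it != end; ++it) {
    std::printf("%u\n", static_cast<unsigned>((*it).value));
  }
}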
+  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
+
+  // Once suspended, check for an active suspend barrier and pass it if set.
+  while (true) {
+    uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
+    if (LIKELY((current_flags & (kCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
+      break;
+    } else if ((current_flags & kActiveSuspendBarrier) != 0) {
+      PassActiveSuspendBarriers(this);
+    } else {
+      // Impossible: any pending checkpoint runs before the state change.
+      LOG(FATAL) << "Thread transitioned into suspended without running its checkpoint";
+    }
+  }
 }

 inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
-  bool done = false;
   union StateAndFlags old_state_and_flags;
   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
   int16_t old_state = old_state_and_flags.as_struct.state;
@@ -147,7 +173,26 @@
     Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC..
     old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
     DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
-    if (UNLIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0)) {
+    if (LIKELY(old_state_and_flags.as_struct.flags == 0)) {
+      // Optimize for the return-from-native-code case, which is the fast path.
+      // Atomically change from suspended to runnable if no suspend request is pending.
+      union StateAndFlags new_state_and_flags;
+      new_state_and_flags.as_int = old_state_and_flags.as_int;
+      new_state_and_flags.as_struct.state = kRunnable;
+      // CAS the value with acquire ordering; this pairs with the release CAS in
+      // TransitionFromRunnableToSuspended().
+      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(
+              old_state_and_flags.as_int,
+              new_state_and_flags.as_int))) {
+        // Mark the acquisition of a share of the mutator_lock_.
+        Locks::mutator_lock_->TransitionFromSuspendedToRunnable(this);
+        break;
+      }
+    } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
+      PassActiveSuspendBarriers(this);
+    } else if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
+      // Impossible: checkpoints are only requested of runnable threads.
+      LOG(FATAL) << "Unexpected checkpoint flag on a suspended thread";
+    } else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
       // Wait while our suspend count is non-zero.
       MutexLock mu(this, *Locks::thread_suspend_count_lock_);
       old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
@@ -160,32 +205,13 @@
       }
       DCHECK_EQ(GetSuspendCount(), 0);
     }
-    // Re-acquire shared mutator_lock_ access.
-    Locks::mutator_lock_->SharedLock(this);
-    // Atomically change from suspended to runnable if no suspend request pending.
-    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
-    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
-    if (LIKELY((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0)) {
-      union StateAndFlags new_state_and_flags;
-      new_state_and_flags.as_int = old_state_and_flags.as_int;
-      new_state_and_flags.as_struct.state = kRunnable;
-      // CAS the value without a memory ordering as that is given by the lock acquisition above.
-      done =
-          tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelaxed(old_state_and_flags.as_int,
-                                                                          new_state_and_flags.as_int);
-    }
-    if (UNLIKELY(!done)) {
-      // Failed to transition to Runnable. Release shared mutator_lock_ access and try again.
-      Locks::mutator_lock_->SharedUnlock(this);
-    } else {
-      // Run the flip function, if set.
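// Distilled memory-ordering shape of the two transitions above, using
// std::atomic in place of ART's Atomic wrapper (sketch only; the function
// names are illustrative): the release CAS when leaving Runnable pairs with
// the acquire CAS when re-entering it, so the thread that becomes Runnable
// observes everything published before the matching suspension.
#include <atomic>
#include <cstdint>

std::atomic<int32_t> state_and_flags_word{0};  // Packed state + flags.

bool TryPublishSuspended(int32_t old_word, int32_t new_word) {
  // Release: writes made while Runnable become visible to acquirers.
  return state_and_flags_word.compare_exchange_weak(
      old_word, new_word, std::memory_order_release, std::memory_order_relaxed);
}

bool TryBecomeRunnable(int32_t old_word, int32_t new_word) {
  // Acquire: pairs with the release above.
  return state_and_flags_word.compare_exchange_weak(
      old_word, new_word, std::memory_order_acquire, std::memory_order_relaxed);
}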
- Closure* flip_func = GetFlipFunction(); - if (flip_func != nullptr) { - flip_func->Run(this); - } - return static_cast<ThreadState>(old_state); - } } while (true); + // Run the flip function, if set. + Closure* flip_func = GetFlipFunction(); + if (flip_func != nullptr) { + flip_func->Run(this); + } + return static_cast<ThreadState>(old_state); } inline void Thread::VerifyStack() { diff --git a/runtime/thread.cc b/runtime/thread.cc index 37a86f1218..a33e150b93 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -35,6 +35,7 @@ #include "art_field-inl.h" #include "art_method-inl.h" #include "base/bit_utils.h" +#include "base/memory_tool.h" #include "base/mutex.h" #include "base/timing_logger.h" #include "base/to_str.h" @@ -74,6 +75,14 @@ #include "vmap_table.h" #include "well_known_classes.h" +#if ART_USE_FUTEXES +#include "linux/futex.h" +#include "sys/syscall.h" +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#endif // ART_USE_FUTEXES + namespace art { bool Thread::is_started_ = false; @@ -81,6 +90,12 @@ pthread_key_t Thread::pthread_key_self_; ConditionVariable* Thread::resume_cond_ = nullptr; const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA); +// For implicit overflow checks we reserve an extra piece of memory at the bottom +// of the stack (lowest memory). The higher portion of the memory +// is protected against reads and the lower is available for use while +// throwing the StackOverflow exception. +constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB; + static const char* kThreadNameDuringStartup = "<native thread without managed peer>"; void Thread::InitCardTable() { @@ -351,6 +366,10 @@ uint8_t dont_optimize_this; // to make sure the pages for the stack are mapped in before we call mprotect. We do // this by reading every page from the stack bottom (highest address) to the stack top. // We then madvise this away. + +// AddressSanitizer does not like the part of this functions that reads every stack page. +// Looks a lot like an out-of-bounds access. +ATTRIBUTE_NO_SANITIZE_ADDRESS void Thread::InstallImplicitProtection() { uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; uint8_t* stack_himem = tlsPtr_.stack_end; @@ -388,6 +407,24 @@ void Thread::InstallImplicitProtection() { void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) { CHECK(java_peer != nullptr); Thread* self = static_cast<JNIEnvExt*>(env)->self; + + if (VLOG_IS_ON(threads)) { + ScopedObjectAccess soa(env); + + ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); + mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject( + soa.Decode<mirror::Object*>(java_peer))); + std::string thread_name; + if (java_name != nullptr) { + thread_name = java_name->ToModifiedUtf8(); + } else { + thread_name = "(Unnamed)"; + } + + VLOG(threads) << "Creating native thread for " << thread_name; + self->Dump(LOG(INFO)); + } + Runtime* runtime = Runtime::Current(); // Atomically start the birth of the thread ensuring the runtime isn't shutting down. 
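// Sketch of the implicit stack-overflow protection referenced above: a region
// at the low end of the stack is mprotect()ed away so an overflow faults
// deterministically, and the fault handler then raises StackOverflowError.
// Illustrative only; the real InstallImplicitProtection() additionally reads
// every page first so the mapping exists before mprotect(), which is the part
// AddressSanitizer has to be told to ignore.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

bool ProtectStackGuardRegion(uint8_t* stack_begin, size_t protected_size) {
  uint8_t* pregion = stack_begin - protected_size;  // Below the usable stack.
  return mprotect(pregion, protected_size, PROT_NONE) == 0;
}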
@@ -490,7 +527,11 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en InitCardTable(); InitTid(); +#ifdef __ANDROID__ + __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this; +#else CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self"); +#endif DCHECK_EQ(Thread::Current(), this); tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this); @@ -556,6 +597,16 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g } } + if (VLOG_IS_ON(threads)) { + if (thread_name != nullptr) { + VLOG(threads) << "Attaching thread " << thread_name; + } else { + VLOG(threads) << "Attaching unnamed thread."; + } + ScopedObjectAccess soa(self); + self->Dump(LOG(INFO)); + } + { ScopedObjectAccess soa(self); Dbg::PostThreadStart(self); @@ -758,7 +809,8 @@ static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREA LOG(FATAL) << ss.str(); } -void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) { +bool Thread::ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, + bool for_debugger) { if (kIsDebugBuild) { DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count) << delta << " " << tls32_.debug_suspend_count << " " << this; @@ -770,7 +822,24 @@ void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) { } if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) { UnsafeLogFatalForSuspendCount(self, this); - return; + return false; + } + + uint16_t flags = kSuspendRequest; + if (delta > 0 && suspend_barrier != nullptr) { + uint32_t available_barrier = kMaxSuspendBarriers; + for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { + if (tlsPtr_.active_suspend_barriers[i] == nullptr) { + available_barrier = i; + break; + } + } + if (available_barrier == kMaxSuspendBarriers) { + // No barrier spaces available, we can't add another. + return false; + } + tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier; + flags |= kActiveSuspendBarrier; } tls32_.suspend_count += delta; @@ -781,9 +850,76 @@ void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) { if (tls32_.suspend_count == 0) { AtomicClearFlag(kSuspendRequest); } else { - AtomicSetFlag(kSuspendRequest); + // Two bits might be set simultaneously. + tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags); TriggerSuspend(); } + return true; +} + +bool Thread::PassActiveSuspendBarriers(Thread* self) { + // Grab the suspend_count lock and copy the current set of + // barriers. Then clear the list and the flag. The ModifySuspendCount + // function requires the lock so we prevent a race between setting + // the kActiveSuspendBarrier flag and clearing it. + AtomicInteger* pass_barriers[kMaxSuspendBarriers]; + { + MutexLock mu(self, *Locks::thread_suspend_count_lock_); + if (!ReadFlag(kActiveSuspendBarrier)) { + // quick exit test: the barriers have already been claimed - this is + // possible as there may be a race to claim and it doesn't matter + // who wins. + // All of the callers of this function (except the SuspendAllInternal) + // will first test the kActiveSuspendBarrier flag without lock. Here + // double-check whether the barrier has been passed with the + // suspend_count lock. 
+ return false; + } + + for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { + pass_barriers[i] = tlsPtr_.active_suspend_barriers[i]; + tlsPtr_.active_suspend_barriers[i] = nullptr; + } + AtomicClearFlag(kActiveSuspendBarrier); + } + + uint32_t barrier_count = 0; + for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) { + AtomicInteger* pending_threads = pass_barriers[i]; + if (pending_threads != nullptr) { + bool done = false; + do { + int32_t cur_val = pending_threads->LoadRelaxed(); + CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val; + // Reduce value by 1. + done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1); +#if ART_USE_FUTEXES + if (done && (cur_val - 1) == 0) { // Weak CAS may fail spuriously. + futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0); + } +#endif + } while (!done); + ++barrier_count; + } + } + CHECK_GT(barrier_count, 0U); + return true; +} + +void Thread::ClearSuspendBarrier(AtomicInteger* target) { + CHECK(ReadFlag(kActiveSuspendBarrier)); + bool clear_flag = true; + for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { + AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i]; + if (ptr == target) { + tlsPtr_.active_suspend_barriers[i] = nullptr; + } else if (ptr != nullptr) { + clear_flag = false; + } + } + if (LIKELY(clear_flag)) { + AtomicClearFlag(kActiveSuspendBarrier); + } } void Thread::RunCheckpointFunction() { @@ -1027,7 +1163,7 @@ void Thread::DumpState(std::ostream& os) const { struct StackDumpVisitor : public StackVisitor { StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), os(os_in), thread(thread_in), @@ -1043,11 +1179,12 @@ struct StackDumpVisitor : public StackVisitor { } } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { return true; } + m = m->GetInterfaceMethodIfProxy(sizeof(void*)); const int kMaxRepetition = 3; mirror::Class* c = m->GetDeclaringClass(); mirror::DexCache* dex_cache = c->GetDexCache(); @@ -1090,7 +1227,7 @@ struct StackDumpVisitor : public StackVisitor { } static void DumpLockedObject(mirror::Object* o, void* context) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { std::ostream& os = *reinterpret_cast<std::ostream*>(context); os << " - locked "; if (o == nullptr) { @@ -1122,7 +1259,7 @@ struct StackDumpVisitor : public StackVisitor { }; static bool ShouldShowNativeStack(const Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ThreadState state = thread->GetState(); // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting. 
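// The FUTEX_WAKE call in PassActiveSuspendBarriers() above (and the
// FUTEX_WAIT in SuspendAllInternal() below) go through a raw syscall shim,
// since libc exposes no futex() wrapper. ART defines one alongside its Mutex
// implementation; it has roughly this shape (sketch):
#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static inline int futex(volatile int* uaddr, int op, int val,
                        const struct timespec* timeout,
                        volatile int* uaddr2, int val3) {
  // Thin wrapper: all futex operations multiplex through one syscall.
  return static_cast<int>(syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3));
}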
@@ -1216,7 +1353,11 @@ void Thread::ThreadExitCallback(void* arg) { LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's " "going to use a pthread_key_create destructor?): " << *self; CHECK(is_started_); +#ifdef __ANDROID__ + __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self; +#else CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); +#endif self->tls32_.thread_exit_check_count = 1; } else { LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; @@ -1288,7 +1429,11 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte for (uint32_t i = 0; i < kMaxCheckpoints; ++i) { tlsPtr_.checkpoint_functions[i] = nullptr; } + for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { + tlsPtr_.active_suspend_barriers[i] = nullptr; + } tlsPtr_.flip_function = nullptr; + tlsPtr_.thread_local_mark_stack = nullptr; tls32_.suspended_at_suspend_check = false; } @@ -1407,6 +1552,9 @@ void Thread::Destroy() { { ScopedObjectAccess soa(self); Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this); + if (kUseReadBarrier) { + Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this); + } } } @@ -1620,11 +1768,11 @@ void Thread::SetClassLoaderOverride(jobject class_loader_override) { class CountStackDepthVisitor : public StackVisitor { public: explicit CountStackDepthVisitor(Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), depth_(0), skip_depth_(0), skipping_(true) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { // We want to skip frames up to and including the exception's constructor. // Note we also skip the frame if it doesn't have a method (namely the callee // save frame) @@ -1660,7 +1808,7 @@ class CountStackDepthVisitor : public StackVisitor { template<bool kTransactionActive> class BuildInternalStackTraceVisitor : public StackVisitor { public: - explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) + BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), self_(self), skip_depth_(skip_depth), @@ -1668,29 +1816,26 @@ class BuildInternalStackTraceVisitor : public StackVisitor { trace_(nullptr), pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} - bool Init(int depth) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { // Allocate method trace with format [method pointers][pcs]. auto* cl = Runtime::Current()->GetClassLinker(); trace_ = cl->AllocPointerArray(self_, depth * 2); + const char* last_no_suspend_cause = + self_->StartAssertNoThreadSuspension("Building internal stack trace"); if (trace_ == nullptr) { self_->AssertPendingOOMException(); return false; } // If We are called from native, use non-transactional mode. 
- const char* last_no_suspend_cause = - self_->StartAssertNoThreadSuspension("Building internal stack trace"); CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; return true; } - virtual ~BuildInternalStackTraceVisitor() { - if (trace_ != nullptr) { - self_->EndAssertNoThreadSuspension(nullptr); - } + virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { + self_->EndAssertNoThreadSuspension(nullptr); } - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (trace_ == nullptr) { return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. } @@ -1872,7 +2017,7 @@ void Thread::ThrowNewException(const char* exception_class_descriptor, } static mirror::ClassLoader* GetCurrentClassLoader(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* method = self->GetCurrentMethod(nullptr); return method != nullptr ? method->GetDeclaringClass()->GetClassLoader() @@ -2002,7 +2147,7 @@ void Thread::DumpFromGdb() const { std::string str(ss.str()); // log to stderr for debugging command line processes std::cerr << str; -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ // log to logcat for debugging frameworks processes LOG(INFO) << str; #endif @@ -2167,6 +2312,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer) QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder) QUICK_ENTRY_POINT_INFO(pReadBarrierJni) + QUICK_ENTRY_POINT_INFO(pReadBarrierSlow) #undef QUICK_ENTRY_POINT_INFO os << offset; @@ -2205,13 +2351,13 @@ Context* Thread::GetLongJumpContext() { // so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack. struct CurrentMethodVisitor FINAL : public StackVisitor { CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_object_(nullptr), method_(nullptr), dex_pc_(0), abort_on_error_(abort_on_error) {} - bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); if (m->IsRuntimeMethod()) { // Continue if this is a runtime method. @@ -2251,13 +2397,13 @@ template <typename RootVisitor> class ReferenceMapVisitor : public StackVisitor { public: ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) // We are visiting the references in compiled frames, so we do not need // to know the inlined frames. 
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames), visitor_(visitor) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (false) { LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod()) << StringPrintf("@ PC:%04x", GetDexPc()); @@ -2271,8 +2417,9 @@ class ReferenceMapVisitor : public StackVisitor { return true; } - void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = shadow_frame->GetMethod(); + VisitDeclaringClass(m); DCHECK(m != nullptr); size_t num_regs = shadow_frame->NumberOfVRegs(); if (m->IsNative() || shadow_frame->HasReferenceArray()) { @@ -2313,10 +2460,25 @@ class ReferenceMapVisitor : public StackVisitor { } private: - void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - auto* cur_quick_frame = GetCurrentQuickFrame(); + // Visiting the declaring class is necessary so that we don't unload the class of a method that + // is executing. We need to ensure that the code stays mapped. + void VisitDeclaringClass(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) { + mirror::Class* klass = method->GetDeclaringClassNoBarrier(); + // klass can be null for runtime methods. + if (klass != nullptr) { + mirror::Object* new_ref = klass; + visitor_(&new_ref, -1, this); + if (new_ref != klass) { + method->CASDeclaringClass(klass, new_ref->AsClass()); + } + } + } + + void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); DCHECK(cur_quick_frame != nullptr); - auto* m = *cur_quick_frame; + ArtMethod* m = *cur_quick_frame; + VisitDeclaringClass(m); // Process register map (which native and runtime methods don't have) if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) { @@ -2417,7 +2579,7 @@ class RootCallbackVisitor { RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {} void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg)); } @@ -2480,7 +2642,7 @@ void Thread::VisitRoots(RootVisitor* visitor) { class VerifyRootVisitor : public SingleRootVisitor { public: void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) - OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { VerifyObject(root); } }; @@ -2595,4 +2757,12 @@ void Thread::PopVerifier(verifier::MethodVerifier* verifier) { tlsPtr_.method_verifier = verifier->link_; } +size_t Thread::NumberOfHeldMutexes() const { + size_t count = 0; + for (BaseMutex* mu : tlsPtr_.held_mutexes) { + count += mu != nullptr ? 1 : 0; + } + return count; +} + } // namespace art diff --git a/runtime/thread.h b/runtime/thread.h index 0e71c08b07..e4ad7f36db 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -46,6 +46,9 @@ namespace art { namespace gc { +namespace accounting { + template<class T> class AtomicStack; +} // namespace accounting namespace collector { class SemiSpace; } // namespace collector @@ -98,7 +101,8 @@ enum ThreadPriority { enum ThreadFlag { kSuspendRequest = 1, // If set implies that suspend_count_ > 0 and the Thread should enter the // safepoint handler. 
- kCheckpointRequest = 2 // Request that the thread do some checkpoint work and then continue. + kCheckpointRequest = 2, // Request that the thread do some checkpoint work and then continue. + kActiveSuspendBarrier = 4 // Register that at least 1 suspend barrier needs to be passed. }; enum class StackedShadowFrameType { @@ -138,11 +142,6 @@ static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34; class Thread { public: - // For implicit overflow checks we reserve an extra piece of memory at the bottom - // of the stack (lowest memory). The higher portion of the memory - // is protected against reads and the lower is available for use while - // throwing the StackOverflow exception. - static constexpr size_t kStackOverflowProtectedSize = 4 * KB; static const size_t kStackOverflowImplicitCheckSize; // Creates a new native thread corresponding to the given managed peer. @@ -163,20 +162,18 @@ class Thread { static Thread* Current(); // On a runnable thread, check for pending thread suspension request and handle if pending. - void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_); // Process pending thread suspension request and handle if pending. - void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, mirror::Object* thread_peer) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Translates 172 to pAllocArrayFromCode and so on. template<size_t size_of_pointers> @@ -187,18 +184,18 @@ class Thread { // Dumps the detailed thread state and the thread stack (used for SIGQUIT). void Dump(std::ostream& os) const - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void DumpJavaStack(std::ostream& os) const - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which // case we use 'tid' to identify the thread, and we'll include as much information as we can. 
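// The annotation renames running through these declarations follow Clang's
// thread-safety analysis: SHARED_LOCKS_REQUIRED becomes SHARED_REQUIRES,
// EXCLUSIVE_LOCKS_REQUIRED becomes REQUIRES, and LOCKS_EXCLUDED becomes the
// negative capability REQUIRES(!...). Minimal illustration with the
// documented attribute spellings (ART's own macro definitions live in
// base/macros.h and may differ in detail):
#define REQUIRES_SKETCH(...) \
  __attribute__((requires_capability(__VA_ARGS__)))
#define SHARED_REQUIRES_SKETCH(...) \
  __attribute__((requires_shared_capability(__VA_ARGS__)))

class __attribute__((capability("mutex"))) SketchMutex {};
extern SketchMutex sketch_mu;

void WriterOp() REQUIRES_SKETCH(sketch_mu);         // Must hold exclusively.
void ReaderOp() SHARED_REQUIRES_SKETCH(sketch_mu);  // A shared hold suffices.
void SelfLockingOp() REQUIRES_SKETCH(!sketch_mu);   // Must NOT hold it.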
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); ThreadState GetState() const { DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated); @@ -208,11 +205,11 @@ class Thread { ThreadState SetState(ThreadState new_state); - int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + int GetSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) { return tls32_.suspend_count; } - int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) { + int GetDebugSuspendCount() const REQUIRES(Locks::thread_suspend_count_lock_) { return tls32_.debug_suspend_count; } @@ -223,36 +220,46 @@ class Thread { (state_and_flags.as_struct.flags & kSuspendRequest) != 0; } - void ModifySuspendCount(Thread* self, int delta, bool for_debugger) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); + bool ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, bool for_debugger) + REQUIRES(Locks::thread_suspend_count_lock_); bool RequestCheckpoint(Closure* function) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_); + REQUIRES(Locks::thread_suspend_count_lock_); void SetFlipFunction(Closure* function); Closure* GetFlipFunction(); + gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() { + CHECK(kUseReadBarrier); + return tlsPtr_.thread_local_mark_stack; + } + void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) { + CHECK(kUseReadBarrier); + tlsPtr_.thread_local_mark_stack = stack; + } + // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero. void FullSuspendCheck() - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Transition from non-runnable to runnable state acquiring share on mutator_lock_. ThreadState TransitionFromSuspendedToRunnable() - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE; // Transition from runnable into a state where mutator privileges are denied. Releases share of // mutator lock. void TransitionFromRunnableToSuspended(ThreadState new_state) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) + REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_) UNLOCK_FUNCTION(Locks::mutator_lock_) ALWAYS_INLINE; // Once called thread suspension will cause an assertion failure. - const char* StartAssertNoThreadSuspension(const char* cause) { + const char* StartAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) { + Roles::uninterruptible_.Acquire(); // No-op. if (kIsDebugBuild) { CHECK(cause != nullptr); const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause; @@ -265,13 +272,14 @@ class Thread { } // End region where no thread suspension is expected. 
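// Usage shape for the Start/EndAssertNoThreadSuspension pair declared here
// (sketch): ScopedAssertNoThreadSuspension (see below) wraps them in RAII
// form, and the Roles::uninterruptible_ acquire/release lets the static
// analysis flag calls into REQUIRES(!Roles::uninterruptible_) methods such
// as the Throw* family.
void FillBufferNoSuspend(Thread* self) {
  const char* old_cause = self->StartAssertNoThreadSuspension("Filling buffer");
  // ... code that must not allocate or otherwise suspend goes here ...
  self->EndAssertNoThreadSuspension(old_cause);
}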
- void EndAssertNoThreadSuspension(const char* old_cause) { + void EndAssertNoThreadSuspension(const char* old_cause) RELEASE(Roles::uninterruptible_) { if (kIsDebugBuild) { CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1); CHECK_GT(tls32_.no_thread_suspension, 0U); tls32_.no_thread_suspension--; tlsPtr_.last_no_thread_suspension_cause = old_cause; } + Roles::uninterruptible_.Release(); // No-op. } void AssertThreadSuspensionIsAllowable(bool check_locks = true) const; @@ -280,7 +288,9 @@ class Thread { return tls32_.daemon; } - bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + size_t NumberOfHeldMutexes() const; + + bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_); /* * Changes the priority of this thread to match that of the java.lang.Thread object. @@ -308,19 +318,19 @@ class Thread { // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer. mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, // allocation, or locking. void GetThreadName(std::string& name) const; // Sets the thread's name. - void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_); // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable. uint64_t GetCpuMicroTime() const; - mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(tlsPtr_.jpeer == nullptr); return tlsPtr_.opeer; } @@ -339,28 +349,28 @@ class Thread { return tlsPtr_.exception != nullptr; } - mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) { return tlsPtr_.exception; } void AssertPendingException() const; - void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_); void AssertNoPendingException() const; void AssertNoPendingExceptionForNewException(const char* msg) const; void SetException(mirror::Throwable* new_exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(new_exception != nullptr); // TODO: DCHECK(!IsExceptionPending()); tlsPtr_.exception = new_exception; } - void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) { tlsPtr_.exception = nullptr; } // Find catch block and perform long jump to appropriate exception handle - NO_RETURN void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_); Context* GetLongJumpContext(); void ReleaseLongJumpContext(Context* context) { @@ -382,12 +392,12 @@ class Thread { // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will // abort the runtime iff abort_on_error is true. 
ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns whether the given exception was thrown by the current Java method being executed // (Note that this includes native Java methods). bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetTopOfStack(ArtMethod** top_method) { tlsPtr_.managed_stack.SetTopQuickFrame(top_method); @@ -404,23 +414,24 @@ class Thread { // If 'msg' is null, no detail message is set. void ThrowNewException(const char* exception_class_descriptor, const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // If 'msg' is null, no detail message is set. An exception must be pending, and will be // used as the new exception's cause. void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) __attribute__((format(printf, 3, 4))) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // OutOfMemoryError is special, because we need to pre-allocate an instance. // Only the GC should call this. - void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); static void Startup(); static void FinishStartup(); @@ -432,50 +443,49 @@ class Thread { } // Convert a jobject into a Object* - mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_); - mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) { return tlsPtr_.monitor_enter_object; } - void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { tlsPtr_.monitor_enter_object = obj; } // Implements java.lang.Thread.interrupted. - bool Interrupted() LOCKS_EXCLUDED(wait_mutex_); + bool Interrupted() REQUIRES(!*wait_mutex_); // Implements java.lang.Thread.isInterrupted. 
- bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_); - bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + bool IsInterrupted() REQUIRES(!*wait_mutex_); + bool IsInterruptedLocked() REQUIRES(wait_mutex_) { return interrupted_; } - void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_); - void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + void Interrupt(Thread* self) REQUIRES(!*wait_mutex_); + void SetInterruptedLocked(bool i) REQUIRES(wait_mutex_) { interrupted_ = i; } - void Notify() LOCKS_EXCLUDED(wait_mutex_); + void Notify() REQUIRES(!*wait_mutex_); private: - void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_); + void NotifyLocked(Thread* self) REQUIRES(wait_mutex_); public: Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) { return wait_mutex_; } - ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + ConditionVariable* GetWaitConditionVariable() const REQUIRES(wait_mutex_) { return wait_cond_; } - Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + Monitor* GetWaitMonitor() const REQUIRES(wait_mutex_) { return wait_monitor_; } - void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) { + void SetWaitMonitor(Monitor* mon) REQUIRES(wait_mutex_) { wait_monitor_ = mon; } - // Waiter link-list support. Thread* GetWaitNext() const { return tlsPtr_.wait_next; @@ -495,7 +505,7 @@ class Thread { // and space efficient to compute than the StackTraceElement[]. template<bool kTransactionActive> jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many @@ -504,11 +514,11 @@ class Thread { static jobjectArray InternalStackTraceToStackTraceElementArray( const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array = nullptr, int* stack_depth = nullptr) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); - ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_); // // Offsets of various members of native Thread class, used by compiled code. @@ -639,7 +649,7 @@ class Thread { } // Set the stack end to that to be used during a stack overflow - void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_); // Set the stack end to that to be used during regular execution void ResetDefaultStackEnd() { @@ -702,7 +712,7 @@ class Thread { } // Number of references allocated in JNI ShadowFrames on this thread. - size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) { return tlsPtr_.managed_stack.NumJniShadowFrameReferences(); } @@ -710,7 +720,7 @@ class Thread { size_t NumHandleReferences(); // Number of references allocated in handle scopes & JNI shadow frames on this thread. 
- size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) { return NumHandleReferences() + NumJniShadowFrameReferences(); } @@ -718,7 +728,7 @@ class Thread { bool HandleScopeContains(jobject obj) const; void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); HandleScope* GetTopHandleScope() { return tlsPtr_.top_handle_scope; @@ -772,6 +782,16 @@ class Thread { tls32_.debug_method_entry_ = false; } + bool GetWeakRefAccessEnabled() const { + CHECK(kUseReadBarrier); + return tls32_.weak_ref_access_enabled; + } + + void SetWeakRefAccessEnabled(bool enabled) { + CHECK(kUseReadBarrier); + tls32_.weak_ref_access_enabled = enabled; + } + // Activates single step control for debugging. The thread takes the // ownership of the given SingleStepControl*. It is deleted by a call // to DeactivateSingleStepControl or upon thread destruction. @@ -846,6 +866,12 @@ class Thread { void RunCheckpointFunction(); + bool PassActiveSuspendBarriers(Thread* self) + REQUIRES(!Locks::thread_suspend_count_lock_); + + void ClearSuspendBarrier(AtomicInteger* target) + REQUIRES(Locks::thread_suspend_count_lock_); + bool ReadFlag(ThreadFlag flag) const { return (tls32_.state_and_flags.as_struct.flags & flag) != 0; } @@ -894,7 +920,7 @@ class Thread { // Push an object onto the allocation stack. bool PushOnThreadLocalAllocationStack(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Set the thread local allocation pointers to the given pointers. void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start, @@ -948,8 +974,7 @@ class Thread { private: explicit Thread(bool daemon); - ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_suspend_count_lock_); + ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_); void Destroy(); void CreatePeer(const char* name, bool as_daemon, jobject thread_group); @@ -957,33 +982,38 @@ class Thread { template<bool kTransactionActive> void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group, jobject thread_name, jint thread_priority) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and // Dbg::Disconnected. ThreadState SetStateUnsafe(ThreadState new_state) { ThreadState old_state = GetState(); tls32_.state_and_flags.as_struct.state = new_state; + // if transit to a suspended state, check the pass barrier request. + if (UNLIKELY((new_state != kRunnable) && + (tls32_.state_and_flags.as_struct.flags & kActiveSuspendBarrier))) { + PassActiveSuspendBarriers(this); + } return old_state; } - void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_); - void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_); void DumpStack(std::ostream& os) const - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Out-of-line conveniences for debugging in gdb. static Thread* CurrentFromGdb(); // Like Thread::Current. // Like Thread::Dump(std::cerr). 
- void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_); static void* CreateCallback(void* arg); void HandleUncaughtExceptions(ScopedObjectAccess& soa) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_); // Initialize a thread. // @@ -993,7 +1023,7 @@ class Thread { // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value // of false). bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr) - EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); + REQUIRES(Locks::runtime_shutdown_lock_); void InitCardTable(); void InitCpu(); void CleanupCpu(); @@ -1034,6 +1064,9 @@ class Thread { // Maximum number of checkpoint functions. static constexpr uint32_t kMaxCheckpoints = 3; + // Maximum number of suspend barriers. + static constexpr uint32_t kMaxSuspendBarriers = 3; + // Has Thread::Startup been called? static bool is_started_; @@ -1060,7 +1093,7 @@ class Thread { daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0), thread_exit_check_count(0), handling_signal_(false), deoptimization_return_value_is_reference(false), suspended_at_suspend_check(false), - ready_for_debug_invoke(false), debug_method_entry_(false) { + ready_for_debug_invoke(false), debug_method_entry_(false), weak_ref_access_enabled(true) { } union StateAndFlags state_and_flags; @@ -1117,6 +1150,15 @@ class Thread { // True if the thread enters a method. This is used to detect method entry // event for the debugger. bool32_t debug_method_entry_; + + // True if the thread is allowed to access a weak ref (Reference::GetReferent() and system + // weaks) and to potentially mark an object alive/gray. This is used for concurrent reference + // processing of the CC collector only. This is thread local so that we can enable/disable weak + // ref access by using a checkpoint and avoid a race around the time weak ref access gets + // disabled and concurrent reference processing begins (if weak ref access is disabled during a + // pause, this is not an issue.) Other collectors use Runtime::DisallowNewSystemWeaks() and + // ReferenceProcessor::EnableSlowPath(). + bool32_t weak_ref_access_enabled; } tls32_; struct PACKED(8) tls_64bit_sized_values { @@ -1238,6 +1280,12 @@ class Thread { // Locks::thread_suspend_count_lock_. Closure* checkpoint_functions[kMaxCheckpoints]; + // Pending barriers that require passing or NULL if non-pending. Installation guarding by + // Locks::thread_suspend_count_lock_. + // They work effectively as art::Barrier, but implemented directly using AtomicInteger and futex + // to avoid additional cost of a mutex and a condition variable, as used in art::Barrier. + AtomicInteger* active_suspend_barriers[kMaxSuspendBarriers]; + // Entrypoint function pointers. // TODO: move this to more of a global offset table model to avoid per-thread duplication. InterpreterEntryPoints interpreter_entrypoints; @@ -1268,6 +1316,9 @@ class Thread { // Current method verifier, used for root marking. verifier::MethodVerifier* method_verifier; + + // Thread-local mark stack for the concurrent copying collector. + gc::accounting::AtomicStack<mirror::Object>* thread_local_mark_stack; } tlsPtr_; // Guards the 'interrupted_' and 'wait_monitor_' members. 
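// The AtomicInteger-plus-futex barrier described above, distilled: the
// suspender initializes the counter to the number of target threads and
// futex-waits while it is non-zero; each target decrements it when it
// suspends and wakes the suspender once the counter reaches zero. Sketch
// with std::atomic and the raw futex() shim shown earlier:
#include <atomic>
#include <cstdint>
#include <linux/futex.h>

void PassBarrierSketch(std::atomic<int32_t>* pending) {
  // The last thread through wakes the waiter.
  if (pending->fetch_sub(1, std::memory_order_seq_cst) == 1) {
    futex(reinterpret_cast<volatile int*>(pending), FUTEX_WAKE, -1,
          nullptr, nullptr, 0);
  }
}

void WaitBarrierSketch(std::atomic<int32_t>* pending) {
  int32_t cur;
  while ((cur = pending->load(std::memory_order_relaxed)) > 0) {
    // Sleeps only if the counter still equals cur; EAGAIN/EINTR just re-check.
    futex(reinterpret_cast<volatile int*>(pending), FUTEX_WAIT, cur,
          nullptr, nullptr, 0);
  }
}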
@@ -1294,12 +1345,12 @@ class Thread { DISALLOW_COPY_AND_ASSIGN(Thread); }; -class ScopedAssertNoThreadSuspension { +class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension { public: - ScopedAssertNoThreadSuspension(Thread* self, const char* cause) + ScopedAssertNoThreadSuspension(Thread* self, const char* cause) ACQUIRE(Roles::uninterruptible_) : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) { } - ~ScopedAssertNoThreadSuspension() { + ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) { self_->EndAssertNoThreadSuspension(old_cause_); } Thread* Self() { diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc index 9d54eba347..9563b994f8 100644 --- a/runtime/thread_linux.cc +++ b/runtime/thread_linux.cc @@ -44,7 +44,7 @@ static constexpr int kHostAltSigStackSize = void Thread::SetUpAlternateSignalStack() { // Create and set an alternate signal stack. -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ LOG(FATAL) << "Invalid use of alternate signal stack on Android"; #endif stack_t ss; diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index b697b43a77..d449f42701 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -28,11 +28,11 @@ #include <sstream> #include "base/histogram-inl.h" -#include "base/mutex.h" #include "base/mutex-inl.h" #include "base/time_utils.h" #include "base/timing_logger.h" #include "debugger.h" +#include "gc/collector/concurrent_copying.h" #include "jni_internal.h" #include "lock_word.h" #include "monitor.h" @@ -41,6 +41,14 @@ #include "trace.h" #include "well_known_classes.h" +#if ART_USE_FUTEXES +#include "linux/futex.h" +#include "sys/syscall.h" +#ifndef SYS_futex +#define SYS_futex __NR_futex +#endif +#endif // ART_USE_FUTEXES + namespace art { static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5); @@ -70,6 +78,13 @@ ThreadList::~ThreadList() { Runtime::Current()->DetachCurrentThread(); } WaitForOtherNonDaemonThreadsToExit(); + // Disable GC and wait for GC to complete in case there are still daemon threads doing + // allocations. + gc::Heap* const heap = Runtime::Current()->GetHeap(); + heap->DisableGCForShutdown(); + // In case a GC is in progress, wait for it to finish. + heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current()); + // TODO: there's an unaddressed race here where a thread may attach during shutdown, see // Thread::Init. SuspendAllDaemonThreads(); @@ -279,7 +294,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) { // Spurious fail, try again. continue; } - thread->ModifySuspendCount(self, +1, false); + thread->ModifySuspendCount(self, +1, nullptr, false); suspended_count_modified_threads.push_back(thread); break; } @@ -317,7 +332,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) { checkpoint_function->Run(thread); { MutexLock mu2(self, *Locks::thread_suspend_count_lock_); - thread->ModifySuspendCount(self, -1, false); + thread->ModifySuspendCount(self, -1, nullptr, false); } } @@ -373,23 +388,7 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_c Locks::thread_suspend_count_lock_->AssertNotHeld(self); CHECK_NE(self->GetState(), kRunnable); - std::vector<Thread*> runnable_threads; - std::vector<Thread*> other_threads; - - // Suspend all threads once. - { - MutexLock mu(self, *Locks::thread_list_lock_); - MutexLock mu2(self, *Locks::thread_suspend_count_lock_); - // Update global suspend all state for attaching threads. - ++suspend_all_count_; - // Increment everybody's suspend count (except our own). 
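// Shape of a checkpoint closure for RunCheckpoint() above (sketch): the
// closure runs either on the target thread itself or, for already-suspended
// targets, on the requesting thread on the target's behalf, which is why it
// receives the target as an argument. The class name is hypothetical.
class DumpCheckpointSketch FINAL : public Closure {
 public:
  void Run(Thread* thread) OVERRIDE {
    // `thread` is the thread being inspected, not necessarily the runner.
    thread->Dump(LOG(INFO));
  }
};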
- for (const auto& thread : list_) { - if (thread == self) { - continue; - } - thread->ModifySuspendCount(self, +1, false); - } - } + SuspendAllInternal(self, self, nullptr); // Run the flip callback for the collector. Locks::mutator_lock_->ExclusiveLock(self); @@ -398,6 +397,8 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_c collector->RegisterPause(NanoTime() - start_time); // Resume runnable threads. + std::vector<Thread*> runnable_threads; + std::vector<Thread*> other_threads; { MutexLock mu(self, *Locks::thread_list_lock_); MutexLock mu2(self, *Locks::thread_suspend_count_lock_); @@ -414,7 +415,7 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_c thread->SetFlipFunction(thread_flip_visitor); if (thread->IsSuspendedAtSuspendCheck()) { // The thread will resume right after the broadcast. - thread->ModifySuspendCount(self, -1, false); + thread->ModifySuspendCount(self, -1, nullptr, false); runnable_threads.push_back(thread); } else { other_threads.push_back(thread); @@ -440,7 +441,7 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_c { MutexLock mu2(self, *Locks::thread_suspend_count_lock_); for (const auto& thread : other_threads) { - thread->ModifySuspendCount(self, -1, false); + thread->ModifySuspendCount(self, -1, nullptr, false); } Thread::resume_cond_->Broadcast(self); } @@ -459,28 +460,9 @@ void ThreadList::SuspendAll(const char* cause, bool long_suspend) { ATRACE_BEGIN("Suspending mutator threads"); const uint64_t start_time = NanoTime(); - Locks::mutator_lock_->AssertNotHeld(self); - Locks::thread_list_lock_->AssertNotHeld(self); - Locks::thread_suspend_count_lock_->AssertNotHeld(self); - if (kDebugLocking && self != nullptr) { - CHECK_NE(self->GetState(), kRunnable); - } - { - MutexLock mu(self, *Locks::thread_list_lock_); - MutexLock mu2(self, *Locks::thread_suspend_count_lock_); - // Update global suspend all state for attaching threads. - ++suspend_all_count_; - // Increment everybody's suspend count (except our own). - for (const auto& thread : list_) { - if (thread == self) { - continue; - } - VLOG(threads) << "requesting thread suspend: " << *thread; - thread->ModifySuspendCount(self, +1, false); - } - } - - // Block on the mutator lock until all Runnable threads release their share of access. + SuspendAllInternal(self, self); + // All threads are known to have suspended (but a thread may still own the mutator lock) + // Make sure this thread grabs exclusive access to the mutator lock and its protected data. #if HAVE_TIMED_RWLOCK while (true) { if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) { @@ -520,6 +502,114 @@ void ThreadList::SuspendAll(const char* cause, bool long_suspend) { } } +// Ensures all threads running Java suspend and that those not running Java don't start. +// Debugger thread might be set to kRunnable for a short period of time after the +// SuspendAllInternal. This is safe because it will be set back to suspended state before +// the SuspendAll returns. +void ThreadList::SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2, + bool debug_suspend) { + Locks::mutator_lock_->AssertNotExclusiveHeld(self); + Locks::thread_list_lock_->AssertNotHeld(self); + Locks::thread_suspend_count_lock_->AssertNotHeld(self); + if (kDebugLocking && self != nullptr) { + CHECK_NE(self->GetState(), kRunnable); + } + + // First request that all threads suspend, then wait for them to suspend before + // returning. 
This suspension scheme also relies on other behaviour: + // 1. Threads cannot be deleted while they are suspended or have a suspend- + // request flag set - (see Unregister() below). + // 2. When threads are created, they are created in a suspended state (actually + // kNative) and will never begin executing Java code without first checking + // the suspend-request flag. + + // The atomic counter for number of threads that need to pass the barrier. + AtomicInteger pending_threads; + uint32_t num_ignored = 0; + if (ignore1 != nullptr) { + ++num_ignored; + } + if (ignore2 != nullptr && ignore1 != ignore2) { + ++num_ignored; + } + { + MutexLock mu(self, *Locks::thread_list_lock_); + MutexLock mu2(self, *Locks::thread_suspend_count_lock_); + // Update global suspend all state for attaching threads. + ++suspend_all_count_; + if (debug_suspend) + ++debug_suspend_all_count_; + pending_threads.StoreRelaxed(list_.size() - num_ignored); + // Increment everybody's suspend count (except those that should be ignored). + for (const auto& thread : list_) { + if (thread == ignore1 || thread == ignore2) { + continue; + } + VLOG(threads) << "requesting thread suspend: " << *thread; + while (true) { + if (LIKELY(thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend))) { + break; + } else { + // Failure means the list of active_suspend_barriers is full, so we must release the + // thread_suspend_count_lock_ (to avoid deadlock) and wait until the target thread has + // executed Thread::PassActiveSuspendBarriers(). Note that we cannot simply wait for + // the thread to change to a suspended state, because it might need to run a checkpoint + // function before the state change, which also needs thread_suspend_count_lock_. + + // This is very unlikely to happen since more than kMaxSuspendBarriers threads need to + // execute SuspendAllInternal() simultaneously, and the target thread stays in kRunnable + // in the meantime. + Locks::thread_suspend_count_lock_->ExclusiveUnlock(self); + NanoSleep(100000); + Locks::thread_suspend_count_lock_->ExclusiveLock(self); + } + } + + // Must install the pending_threads counter first, then check thread->IsSuspended() and clear + // the counter. Otherwise there's a race with Thread::TransitionFromRunnableToSuspended() + // that can lead a thread to miss a call to PassActiveSuspendBarriers(). + if (thread->IsSuspended()) { + // Only clear the counter for the current thread. + thread->ClearSuspendBarrier(&pending_threads); + pending_threads.FetchAndSubSequentiallyConsistent(1); + } + } + } + + // Wait for the barrier to be passed by all runnable threads. This wait + // is done with a timeout so that we can detect problems. +#if ART_USE_FUTEXES + timespec wait_timeout; + InitTimeSpec(true, CLOCK_MONOTONIC, 10000, 0, &wait_timeout); +#endif + while (true) { + int32_t cur_val = pending_threads.LoadRelaxed(); + if (LIKELY(cur_val > 0)) { +#if ART_USE_FUTEXES + if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) { + // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. + if ((errno != EAGAIN) && (errno != EINTR)) { + if (errno == ETIMEDOUT) { + LOG(kIsDebugBuild ? FATAL : ERROR) << "Unexpected timeout during suspend all."; + } else { + PLOG(FATAL) << "futex wait failed for SuspendAllInternal()"; + } + } + } else { + cur_val = pending_threads.LoadRelaxed(); + CHECK_EQ(cur_val, 0); + break; + } +#else + // Spin wait. This is likely to be slow, but on most architectures ART_USE_FUTEXES is set.
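The wait loop just above is a countdown barrier built directly on futex(2): each suspending thread decrements pending_threads as it passes its suspend barrier and wakes the suspender, while the suspender sleeps as long as the counter is positive and re-checks after every wake-up or spurious return. A stripped-down Linux-only sketch of the same shape, with std::atomic standing in for ART's AtomicInteger (the helper names are illustrative):

    #include <atomic>
    #include <cerrno>
    #include <cstdint>
    #include <ctime>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // futex(2) has no libc wrapper; issue it via syscall(2). Assumes the
    // 32-bit atomic is lock-free, so its storage is a plain futex word.
    static long Futex(std::atomic<int32_t>* word, int op, int32_t val,
                      const timespec* timeout) {
      return syscall(SYS_futex, reinterpret_cast<int32_t*>(word), op, val,
                     timeout, nullptr, 0);
    }

    // Suspender side: block until every pending thread has checked in.
    void WaitForAllToSuspend(std::atomic<int32_t>& pending,
                             const timespec* timeout) {
      while (true) {
        int32_t cur = pending.load(std::memory_order_relaxed);
        if (cur <= 0) {
          break;  // barrier passed by everyone
        }
        // Sleeps only if the word still holds `cur`; EAGAIN means the value
        // changed under us and EINTR means a signal, both just mean "retry".
        if (Futex(&pending, FUTEX_WAIT, cur, timeout) != 0 &&
            errno != EAGAIN && errno != EINTR) {
          break;  // e.g. ETIMEDOUT: some thread failed to suspend in time
        }
      }
    }

    // Suspending-thread side: check in; the last one wakes the suspender.
    void PassSuspendBarrier(std::atomic<int32_t>& pending) {
      if (pending.fetch_sub(1, std::memory_order_seq_cst) == 1) {
        Futex(&pending, FUTEX_WAKE, 1, nullptr);  // one suspender is waiting
      }
    }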
+#endif + } else { + CHECK_EQ(cur_val, 0); + break; + } + } +} + void ThreadList::ResumeAll() { Thread* self = Thread::Current(); @@ -550,7 +640,7 @@ void ThreadList::ResumeAll() { if (thread == self) { continue; } - thread->ModifySuspendCount(self, -1, false); + thread->ModifySuspendCount(self, -1, nullptr, false); } // Broadcast a notification to all suspended threads, some or all of @@ -593,7 +683,7 @@ void ThreadList::Resume(Thread* thread, bool for_debugger) { << ") thread not within thread list"; return; } - thread->ModifySuspendCount(self, -1, for_debugger); + thread->ModifySuspendCount(self, -1, nullptr, for_debugger); } { @@ -645,7 +735,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension, // If we incremented the suspend count but the thread reset its peer, we need to // re-decrement it since it is shutting down and may deadlock the runtime in // ThreadList::WaitForOtherNonDaemonThreadsToExit. - suspended_thread->ModifySuspendCount(soa.Self(), -1, debug_suspension); + suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension); } ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer); return nullptr; @@ -668,7 +758,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension, } CHECK(suspended_thread == nullptr); suspended_thread = thread; - suspended_thread->ModifySuspendCount(self, +1, debug_suspension); + suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension); request_suspension = false; } else { // If the caller isn't requesting suspension, a suspension should have already occurred. @@ -697,7 +787,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension, ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer); if (suspended_thread != nullptr) { CHECK_EQ(suspended_thread, thread); - suspended_thread->ModifySuspendCount(soa.Self(), -1, debug_suspension); + suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension); } *timed_out = true; return nullptr; @@ -766,7 +856,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe // which will allow this thread to be suspended. continue; } - thread->ModifySuspendCount(self, +1, debug_suspension); + thread->ModifySuspendCount(self, +1, nullptr, debug_suspension); suspended_thread = thread; } else { CHECK_EQ(suspended_thread, thread); @@ -795,7 +885,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) { ThreadSuspendByThreadIdWarning(WARNING, "Thread suspension timed out", thread_id); if (suspended_thread != nullptr) { - thread->ModifySuspendCount(soa.Self(), -1, debug_suspension); + thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension); } *timed_out = true; return nullptr; @@ -832,25 +922,7 @@ void ThreadList::SuspendAllForDebugger() { VLOG(threads) << *self << " SuspendAllForDebugger starting..."; - { - MutexLock thread_list_mu(self, *Locks::thread_list_lock_); - { - MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_); - // Update global suspend all state for attaching threads. - DCHECK_GE(suspend_all_count_, debug_suspend_all_count_); - ++suspend_all_count_; - ++debug_suspend_all_count_; - // Increment everybody's suspend count (except our own). 
- for (const auto& thread : list_) { - if (thread == self || thread == debug_thread) { - continue; - } - VLOG(threads) << "requesting thread suspend: " << *thread; - thread->ModifySuspendCount(self, +1, true); - } - } - } - + SuspendAllInternal(self, self, debug_thread, true); // Block on the mutator lock until all Runnable threads release their share of access then // immediately unlock again. #if HAVE_TIMED_RWLOCK @@ -888,7 +960,7 @@ void ThreadList::SuspendSelfForDebugger() { // to ensure that we're the only one fiddling with the suspend count // though. MutexLock mu(self, *Locks::thread_suspend_count_lock_); - self->ModifySuspendCount(self, +1, true); + self->ModifySuspendCount(self, +1, nullptr, true); CHECK_GT(self->GetSuspendCount(), 0); VLOG(threads) << *self << " self-suspending (debugger)"; @@ -972,7 +1044,7 @@ void ThreadList::ResumeAllForDebugger() { continue; } VLOG(threads) << "requesting thread resume: " << *thread; - thread->ModifySuspendCount(self, -1, true); + thread->ModifySuspendCount(self, -1, nullptr, true); } } } @@ -1001,7 +1073,7 @@ void ThreadList::UndoDebuggerSuspensions() { if (thread == self || thread->GetDebugSuspendCount() == 0) { continue; } - thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), true); + thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true); } } @@ -1054,7 +1126,7 @@ void ThreadList::SuspendAllDaemonThreads() { // daemons. CHECK(thread->IsDaemon()) << *thread; if (thread != self) { - thread->ModifySuspendCount(self, +1, false); + thread->ModifySuspendCount(self, +1, nullptr, false); } } } @@ -1095,13 +1167,19 @@ void ThreadList::Register(Thread* self) { // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While // this isn't particularly efficient the suspend counts are most commonly 0 or 1. for (int delta = debug_suspend_all_count_; delta > 0; delta--) { - self->ModifySuspendCount(self, +1, true); + self->ModifySuspendCount(self, +1, nullptr, true); } for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) { - self->ModifySuspendCount(self, +1, false); + self->ModifySuspendCount(self, +1, nullptr, false); } CHECK(!Contains(self)); list_.push_back(self); + if (kUseReadBarrier) { + // Initialize this according to the state of the CC collector. + bool weak_ref_access_enabled = + Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled(); + self->SetWeakRefAccessEnabled(weak_ref_access_enabled); + } } void ThreadList::Unregister(Thread* self) { @@ -1157,7 +1235,11 @@ void ThreadList::Unregister(Thread* self) { // Clear the TLS data, so that the underlying native thread is recognizably detached. // (It may wish to reattach later.) +#ifdef __ANDROID__ + __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr; +#else CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self"); +#endif // Signal that a thread just detached. MutexLock mu(nullptr, *Locks::thread_list_lock_); diff --git a/runtime/thread_list.h b/runtime/thread_list.h index 2c1f8135ea..4c50181891 100644 --- a/runtime/thread_list.h +++ b/runtime/thread_list.h @@ -46,27 +46,25 @@ class ThreadList { ~ThreadList(); void DumpForSigQuit(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::mutator_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_); // For thread suspend timeout dumps. 
void Dump(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); pid_t GetLockOwner(); // For SignalCatcher. // Thread suspension support. void ResumeAll() UNLOCK_FUNCTION(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void Resume(Thread* thread, bool for_debugger = false) - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_); // Suspends all threads and gets exclusive access to the mutator_lock_. // If long suspend is true, then other people who try to suspend will never time out. Long suspend // is currently used for hprof since large heaps take a long time. void SuspendAll(const char* cause, bool long_suspend = false) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success, @@ -76,18 +74,16 @@ class ThreadList { // is set to true. Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension, bool* timed_out) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the // thread on success, else null. The thread id is used to identify the thread to avoid races with // the thread terminating. Note that as thread ids are recycled this may not suspend the expected // thread, which may be terminating. If the suspension times out then *timed_out is set to true. Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); // Find an already suspended thread (or self) by its id. Thread* FindThreadByThreadId(uint32_t thin_lock_id); @@ -95,80 +91,78 @@ // Run a checkpoint on threads; running threads are not suspended but run the checkpoint inside // of the suspend check. Returns how many checkpoints we should expect to run. size_t RunCheckpoint(Closure* checkpoint_function) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // Flip thread roots from from-space refs to to-space refs. Used by // the concurrent copying collector.
size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback, gc::collector::GarbageCollector* collector) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); // Suspends all threads void SuspendAllForDebugger() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); void SuspendSelfForDebugger() - LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_suspend_count_lock_); // Resume all threads void ResumeAllForDebugger() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void UndoDebuggerSuspensions() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); // Iterates over all the threads. void ForEach(void (*callback)(Thread*, void*), void* context) - EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + REQUIRES(Locks::thread_list_lock_); // Add/remove current thread from list. void Register(Thread* self) - EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) - LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_); - void Unregister(Thread* self) LOCKS_EXCLUDED(Locks::mutator_lock_, Locks::thread_list_lock_); + REQUIRES(Locks::runtime_shutdown_lock_, !Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); + void Unregister(Thread* self) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, + !Locks::thread_suspend_count_lock_); void VisitRoots(RootVisitor* visitor) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return a copy of the thread list. 
- std::list<Thread*> GetList() EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) { + std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) { return list_; } void DumpNativeStacks(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); private: uint32_t AllocThreadId(Thread* self); - void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_); + void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_); - bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); - bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_); + bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_); + bool Contains(pid_t tid) REQUIRES(Locks::thread_list_lock_); + size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void DumpUnattachedThreads(std::ostream& os) - LOCKS_EXCLUDED(Locks::thread_list_lock_); + REQUIRES(!Locks::thread_list_lock_); void SuspendAllDaemonThreads() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void WaitForOtherNonDaemonThreadsToExit() - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); + + void SuspendAllInternal(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr, + bool debug_suspend = false) + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr) - LOCKS_EXCLUDED(Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_); + REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_); diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index 1e84c9ddb3..d8f80fa690 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -201,112 +201,4 @@ size_t ThreadPool::GetTaskCount(Thread* self) { return tasks_.size(); } -WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, - size_t stack_size) - : ThreadPoolWorker(thread_pool, name, stack_size), task_(nullptr) {} - -void WorkStealingWorker::Run() { - Thread* self = Thread::Current(); - Task* task = nullptr; - WorkStealingThreadPool* thread_pool = down_cast<WorkStealingThreadPool*>(thread_pool_); - while ((task = thread_pool_->GetTask(self)) != nullptr) { - WorkStealingTask* stealing_task = down_cast<WorkStealingTask*>(task); - - { - CHECK(task_ == nullptr); - MutexLock mu(self, thread_pool->work_steal_lock_); - // Register that we are running the task - ++stealing_task->ref_count_; - task_ = stealing_task; - } - stealing_task->Run(self); - // Mark ourselves as not running a task so that nobody tries to steal from us. - // There is a race condition that someone starts stealing from us at this point. This is okay - // due to the reference counting. - task_ = nullptr; - - bool finalize; - - // Steal work from tasks until there is none left to steal. Note: There is a race, but - // all that happens when the race occurs is that we steal some work instead of processing a - // task from the queue. 
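The race described in the comment above was benign only because of the reference counting the deleted pool maintained under work_steal_lock_: a task stays alive while any worker or stealer still holds a reference, and whichever holder drops the last reference finalizes it. Reduced to its core, the pattern looks roughly like this (illustrative std:: types, not the classes being removed):

    #include <mutex>

    struct StealableTask {
      int ref_count = 0;                // guarded by steal_lock below
      void Finalize() { delete this; }  // runs exactly once, after the last release
    };

    std::mutex steal_lock;

    // A stealer publishes its interest before touching the task...
    void AddRef(StealableTask* task) {
      std::lock_guard<std::mutex> lock(steal_lock);
      ++task->ref_count;
    }

    // ...and the last holder, owner or stealer, finalizes outside the lock.
    void Release(StealableTask* task) {
      bool finalize;
      {
        std::lock_guard<std::mutex> lock(steal_lock);
        finalize = (--task->ref_count == 0);
      }
      if (finalize) {
        task->Finalize();
      }
    }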
- while (thread_pool->GetTaskCount(self) == 0) { - WorkStealingTask* steal_from_task = nullptr; - - { - MutexLock mu(self, thread_pool->work_steal_lock_); - // Try finding a task to steal from. - steal_from_task = thread_pool->FindTaskToStealFrom(); - if (steal_from_task != nullptr) { - CHECK_NE(stealing_task, steal_from_task) - << "Attempting to steal from completed self task"; - steal_from_task->ref_count_++; - } else { - break; - } - } - - if (steal_from_task != nullptr) { - // Task which completed earlier is going to steal some work. - stealing_task->StealFrom(self, steal_from_task); - - { - // We are done stealing from the task, lets decrement its reference count. - MutexLock mu(self, thread_pool->work_steal_lock_); - finalize = !--steal_from_task->ref_count_; - } - - if (finalize) { - steal_from_task->Finalize(); - } - } - } - - { - MutexLock mu(self, thread_pool->work_steal_lock_); - // If nobody is still referencing task_ we can finalize it. - finalize = !--stealing_task->ref_count_; - } - - if (finalize) { - stealing_task->Finalize(); - } - } -} - -WorkStealingWorker::~WorkStealingWorker() {} - -WorkStealingThreadPool::WorkStealingThreadPool(const char* name, size_t num_threads) - : ThreadPool(name, 0), - work_steal_lock_("work stealing lock"), - steal_index_(0) { - while (GetThreadCount() < num_threads) { - const std::string worker_name = StringPrintf("Work stealing worker %zu", GetThreadCount()); - threads_.push_back(new WorkStealingWorker(this, worker_name, - ThreadPoolWorker::kDefaultStackSize)); - } -} - -WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom() { - const size_t thread_count = GetThreadCount(); - for (size_t i = 0; i < thread_count; ++i) { - // TODO: Use CAS instead of lock. - ++steal_index_; - if (steal_index_ >= thread_count) { - steal_index_-= thread_count; - } - - WorkStealingWorker* worker = down_cast<WorkStealingWorker*>(threads_[steal_index_]); - WorkStealingTask* task = worker->task_; - if (task) { - // Not null, we can probably steal from this worker. - return task; - } - } - // Couldn't find something to steal. - return nullptr; -} - -WorkStealingThreadPool::~WorkStealingThreadPool() {} - } // namespace art diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h index 0557708fd9..a2338d6fcc 100644 --- a/runtime/thread_pool.h +++ b/runtime/thread_pool.h @@ -61,7 +61,7 @@ class ThreadPoolWorker { protected: ThreadPoolWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size); - static void* Callback(void* arg) LOCKS_EXCLUDED(Locks::mutator_lock_); + static void* Callback(void* arg) REQUIRES(!Locks::mutator_lock_); virtual void Run(); ThreadPool* const thread_pool_; @@ -82,22 +82,22 @@ class ThreadPool { } // Broadcast to the workers and tell them to empty out the work queue. - void StartWorkers(Thread* self); + void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_); // Do not allow workers to grab any new tasks. - void StopWorkers(Thread* self); + void StopWorkers(Thread* self) REQUIRES(!task_queue_lock_); // Add a new task, the first available started worker will process it. Does not delete the task // after running it, it is the caller's responsibility. - void AddTask(Thread* self, Task* task); + void AddTask(Thread* self, Task* task) REQUIRES(!task_queue_lock_); - explicit ThreadPool(const char* name, size_t num_threads); + ThreadPool(const char* name, size_t num_threads); virtual ~ThreadPool(); // Wait for all tasks currently on queue to get completed. 
- void Wait(Thread* self, bool do_work, bool may_hold_locks); + void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_); - size_t GetTaskCount(Thread* self); + size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_); // Returns the total time workers have waited for tasks. uint64_t GetWaitTime() const { @@ -106,18 +106,18 @@ class ThreadPool { // Provides a way to bound the maximum number of worker threads; threads must be less than the // thread count of the thread pool. - void SetMaxActiveWorkers(size_t threads); + void SetMaxActiveWorkers(size_t threads) REQUIRES(!task_queue_lock_); protected: // Get a task to run; blocks if there are no tasks left. - virtual Task* GetTask(Thread* self); + virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_); // Try to get a task, returning null if there is none available. - Task* TryGetTask(Thread* self); - Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_); + Task* TryGetTask(Thread* self) REQUIRES(!task_queue_lock_); + Task* TryGetTaskLocked() REQUIRES(task_queue_lock_); // Are we shutting down? - bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) { + bool IsShuttingDown() const REQUIRES(task_queue_lock_) { return shutting_down_; } @@ -144,58 +144,6 @@ class ThreadPool { DISALLOW_COPY_AND_ASSIGN(ThreadPool); }; -class WorkStealingTask : public Task { - public: - WorkStealingTask() : ref_count_(0) {} - - size_t GetRefCount() const { - return ref_count_; - } - - virtual void StealFrom(Thread* self, WorkStealingTask* source) = 0; - - private: - // How many people are referencing this task. - size_t ref_count_; - - friend class WorkStealingWorker; -}; - -class WorkStealingWorker : public ThreadPoolWorker { - public: - virtual ~WorkStealingWorker(); - - bool IsRunningTask() const { - return task_ != nullptr; - } - - protected: - WorkStealingTask* task_; - - WorkStealingWorker(ThreadPool* thread_pool, const std::string& name, size_t stack_size); - virtual void Run(); - - private: - friend class WorkStealingThreadPool; - DISALLOW_COPY_AND_ASSIGN(WorkStealingWorker); -}; - -class WorkStealingThreadPool : public ThreadPool { - public: - explicit WorkStealingThreadPool(const char* name, size_t num_threads); - virtual ~WorkStealingThreadPool(); - - private: - Mutex work_steal_lock_; - // Which thread we are stealing from (round robin).
- size_t steal_index_; - - // Find a task to steal from - WorkStealingTask* FindTaskToStealFrom() EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_); - - friend class WorkStealingWorker; -}; - } // namespace art #endif // ART_RUNTIME_THREAD_POOL_H_ diff --git a/runtime/thread_state.h b/runtime/thread_state.h index c7ea7f4381..a11d213ea3 100644 --- a/runtime/thread_state.h +++ b/runtime/thread_state.h @@ -43,6 +43,7 @@ enum ThreadState { kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects + kWaitingWeakGcRootRead, // WAITING TS_WAIT waiting on the GC to read a weak root kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code kNative, // RUNNABLE TS_RUNNING running in a JNI native method kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger diff --git a/runtime/trace.cc b/runtime/trace.cc index 487baedba4..439343068c 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -57,7 +57,7 @@ class BuildStackTraceVisitor : public StackVisitor { : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), method_trace_(Trace::AllocStackTrace()) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); // Ignore runtime frames (in particular callee save). if (!m->IsRuntimeMethod()) { @@ -218,7 +218,7 @@ static void Append8LE(uint8_t* buf, uint64_t val) { *buf++ = static_cast<uint8_t>(val >> 56); } -static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) { BuildStackTraceVisitor build_trace_visitor(thread); build_trace_visitor.WalkStack(); std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace(); @@ -636,7 +636,7 @@ void Trace::DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source static void GetVisitedMethodsFromBitSets( const std::map<const DexFile*, DexIndexBitSet*>& seen_methods, - std::set<ArtMethod*>* visited_methods) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + std::set<ArtMethod*>* visited_methods) SHARED_REQUIRES(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (auto& e : seen_methods) { DexIndexBitSet* bit_set = e.second; @@ -749,7 +749,7 @@ void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object, void Trace::FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(thread, this_object, method, dex_pc, field); // We're not recorded to listen to this kind of event, so complain. LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc; @@ -758,7 +758,7 @@ void Trace::FieldRead(Thread* thread, mirror::Object* this_object, void Trace::FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(thread, this_object, method, dex_pc, field, field_value); // We're not recorded to listen to this kind of event, so complain. 
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc; @@ -793,14 +793,14 @@ void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_U } void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { UNUSED(thread, exception_object); LOG(ERROR) << "Unexpected exception caught event in tracing"; } void Trace::BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t /*dex_pc_offset*/) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { LOG(ERROR) << "Unexpected backward branch event in tracing" << PrettyMethod(method); } diff --git a/runtime/trace.h b/runtime/trace.h index 69e6acc899..04be3ddeab 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -114,28 +114,20 @@ class Trace FINAL : public instrumentation::InstrumentationListener { static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags, TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::thread_suspend_count_lock_, - Locks::trace_lock_); - static void Pause() LOCKS_EXCLUDED(Locks::trace_lock_, Locks::thread_list_lock_); - static void Resume() LOCKS_EXCLUDED(Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::trace_lock_); + static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_); + static void Resume() REQUIRES(!Locks::trace_lock_); // Stop tracing. This will finish the trace and write it to file/send it via DDMS. static void Stop() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); // Abort tracing. This will just stop tracing and *not* write/send the collected data. static void Abort() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); static void Shutdown() - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); - static TracingMode GetMethodTracingMode() LOCKS_EXCLUDED(Locks::trace_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_); + static TracingMode GetMethodTracingMode() REQUIRES(!Locks::trace_lock_); bool UseWallClock(); bool UseThreadCpuClock(); @@ -143,33 +135,37 @@ class Trace FINAL : public instrumentation::InstrumentationListener { uint32_t GetClockOverheadNanoSeconds(); void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_); // InstrumentationListener implementation. 
void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, const JValue& return_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void MethodUnwind(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t new_dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_) + OVERRIDE; void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; void FieldWritten(Thread* thread, mirror::Object* this_object, ArtMethod* method, uint32_t dex_pc, ArtField* field, const JValue& field_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; void BackwardBranch(Thread* thread, ArtMethod* method, int32_t dex_pc_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE; + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE; // Reuse an old stack trace if it exists, otherwise allocate a new one. static std::vector<ArtMethod*>* AllocStackTrace(); // Clear and store an old stack trace for later use. @@ -177,57 +173,61 @@ class Trace FINAL : public instrumentation::InstrumentationListener { // Save id and name of a thread before it exits. static void StoreExitingThreadInfo(Thread* thread); - static TraceOutputMode GetOutputMode() LOCKS_EXCLUDED(Locks::trace_lock_); - static TraceMode GetMode() LOCKS_EXCLUDED(Locks::trace_lock_); - static size_t GetBufferSize() LOCKS_EXCLUDED(Locks::trace_lock_); + static TraceOutputMode GetOutputMode() REQUIRES(!Locks::trace_lock_); + static TraceMode GetMode() REQUIRES(!Locks::trace_lock_); + static size_t GetBufferSize() REQUIRES(!Locks::trace_lock_); private: Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags, TraceOutputMode output_mode, TraceMode trace_mode); // The sampling interval in microseconds is passed as an argument. 
- static void* RunSamplingThread(void* arg) LOCKS_EXCLUDED(Locks::trace_lock_); + static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_); static void StopTracing(bool finish_tracing, bool flush_file) - LOCKS_EXCLUDED(Locks::mutator_lock_, - Locks::thread_list_lock_, - Locks::trace_lock_); - void FinishTracing() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_) + // There is an annoying issue with static functions that create a new object and then call + // into that object: the analysis cannot tell that we do not currently hold the lock, so the + // negative annotations report false positives. TODO: Figure out how to annotate this. + NO_THREAD_SAFETY_ANALYSIS; + void FinishTracing() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_); void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff); void LogMethodTraceEvent(Thread* thread, ArtMethod* method, instrumentation::Instrumentation::InstrumentationEvent event, uint32_t thread_clock_diff, uint32_t wall_clock_diff) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_); // Methods to output traced methods and threads. - void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods); + void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods) + REQUIRES(!*unique_methods_lock_); void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void DumpThreadList(std::ostream& os) LOCKS_EXCLUDED(Locks::thread_list_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_); + void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_); // Methods to register seen entities in streaming mode. The methods return true if the entity // is newly discovered. bool RegisterMethod(ArtMethod* method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(streaming_lock_); bool RegisterThread(Thread* thread) - EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); + REQUIRES(streaming_lock_); // Copy a temporary buffer to the main buffer. Used for streaming. Exposed here for lock // annotation.
void WriteToBuf(const uint8_t* src, size_t src_size) - EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_); + REQUIRES(streaming_lock_); - uint32_t EncodeTraceMethod(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_); + uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!*unique_methods_lock_); uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) - LOCKS_EXCLUDED(unique_methods_lock_); - ArtMethod* DecodeTraceMethod(uint32_t tmid) LOCKS_EXCLUDED(unique_methods_lock_); - std::string GetMethodLine(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!*unique_methods_lock_); + ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_); + std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_); // Singleton instance of the Trace or null when no method tracing is active. static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_); diff --git a/runtime/transaction.h b/runtime/transaction.h index 030478c7ad..8ff0614574 100644 --- a/runtime/transaction.h +++ b/runtime/transaction.h @@ -46,63 +46,63 @@ class Transaction FINAL { ~Transaction(); void Abort(const std::string& abort_message) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void ThrowAbortError(Thread* self, const std::string* abort_message) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsAborted() LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + bool IsAborted() REQUIRES(!log_lock_); // Record object field changes. void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset, mirror::Object* value, bool is_volatile) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(!log_lock_); // Record array change. void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); // Record intern string table changes. 
void RecordStrongStringInsertion(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void RecordWeakStringInsertion(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void RecordStrongStringRemoval(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void RecordWeakStringRemoval(mirror::String* s) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); // Abort transaction by undoing all recorded changes. void Rollback() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(log_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!log_lock_); void VisitRoots(RootVisitor* visitor) - LOCKS_EXCLUDED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(!log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); private: class ObjectLog : public ValueObject { @@ -115,8 +115,8 @@ class Transaction FINAL { void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile); void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile); - void Undo(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Undo(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); size_t Size() const { return field_values_.size(); @@ -141,7 +141,7 @@ class Transaction FINAL { void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile); void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset, - const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const FieldValue& field_value) SHARED_REQUIRES(Locks::mutator_lock_); // Maps field's offset to its value. std::map<uint32_t, FieldValue> field_values_; @@ -151,7 +151,7 @@ class Transaction FINAL { public: void LogValue(size_t index, uint64_t value); - void Undo(mirror::Array* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Undo(mirror::Array* obj) SHARED_REQUIRES(Locks::mutator_lock_); size_t Size() const { return array_values_.size(); @@ -159,7 +159,7 @@ class Transaction FINAL { private: void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index, - uint64_t value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint64_t value) SHARED_REQUIRES(Locks::mutator_lock_); // Maps index to value. // TODO use JValue instead ? 
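The bulk of this hunk, like the headers earlier, is one mechanical translation: EXCLUSIVE_LOCKS_REQUIRED(x) and SHARED_LOCKS_REQUIRED(x) become REQUIRES(x) and SHARED_REQUIRES(x), while LOCKS_EXCLUDED(x) becomes the negative capability REQUIRES(!x), whose must-not-hold obligation propagates to callers instead of only firing when the analysis already sees the lock held (the !*lock spelling in trace.h is the same thing for a lock reached through a pointer). A compact illustration with generic macros (clang++ -Wthread-safety -Wthread-safety-negative):

    // Generic macro spellings for illustration, not ART's definitions.
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define ACQUIRE(...)  __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...)  __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE(this) {}
      void Unlock() RELEASE(this) {}
    };

    Mutex log_lock;

    void AppendLog() REQUIRES(log_lock) {}  // caller must hold log_lock

    void Abort() REQUIRES(!log_lock) {      // caller must NOT hold it: we lock it
      log_lock.Lock();
      AppendLog();
      log_lock.Unlock();
    }

    void Caller() REQUIRES(!log_lock) {  // the negative obligation propagates
      Abort();  // OK: provably not held here
      // log_lock.Lock(); Abort();  // would warn: Abort() requires '!log_lock'
    }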
@@ -182,9 +182,9 @@ class Transaction FINAL { } void Undo(InternTable* intern_table) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_); - void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::intern_table_lock_); + void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_); private: mirror::String* str_; @@ -193,31 +193,31 @@ class Transaction FINAL { }; void LogInternedString(const InternStringLog& log) - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - LOCKS_EXCLUDED(log_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(!log_lock_); void UndoObjectModifications() - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void UndoArrayModifications() - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void UndoInternStringTableModifications() - EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(Locks::intern_table_lock_) + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitObjectLogs(RootVisitor* visitor) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitArrayLogs(RootVisitor* visitor) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); void VisitStringLogs(RootVisitor* visitor) - EXCLUSIVE_LOCKS_REQUIRED(log_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + REQUIRES(log_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); - const std::string& GetAbortMessage() LOCKS_EXCLUDED(log_lock_); + const std::string& GetAbortMessage() REQUIRES(!log_lock_); Mutex log_lock_ ACQUIRED_AFTER(Locks::intern_table_lock_); std::map<mirror::Object*, ObjectLog> object_logs_ GUARDED_BY(log_lock_); diff --git a/runtime/utf.h b/runtime/utf.h index 7f05248c29..1193d29c7d 100644 --- a/runtime/utf.h +++ b/runtime/utf.h @@ -77,7 +77,7 @@ void ConvertUtf16ToModifiedUtf8(char* utf8_out, const uint16_t* utf16_in, size_t * The java.lang.String hashCode() algorithm. */ int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset, size_t char_count) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); int32_t ComputeUtf16Hash(const uint16_t* chars, size_t char_count); // Compute a hash code of a modified UTF-8 string. 
Not the standard java hash since it returns a diff --git a/runtime/utils.cc b/runtime/utils.cc index 4923342e8e..8aa1189a95 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -30,6 +30,7 @@ #include "base/stl_util.h" #include "base/unix_file/fd_file.h" #include "dex_file-inl.h" +#include "dex_instruction.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" @@ -1094,7 +1095,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix, ArtMethod* current_method, void* ucontext_ptr) { #if __linux__ // b/18119146 - if (RUNNING_ON_VALGRIND != 0) { + if (RUNNING_ON_MEMORY_TOOL != 0) { return; } @@ -1130,9 +1131,13 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix, os << prefix << StringPrintf("#%02zu pc ", it->num); bool try_addr2line = false; if (!BacktraceMap::IsValid(it->map)) { - os << StringPrintf("%08" PRIxPTR " ???", it->pc); + os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???" + : "%08" PRIxPTR " ???", + it->pc); } else { - os << StringPrintf("%08" PRIxPTR " ", BacktraceMap::GetRelativePc(it->map, it->pc)); + os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " " + : "%08" PRIxPTR " ", + BacktraceMap::GetRelativePc(it->map, it->pc)); os << it->map.name; os << " ("; if (!it->func_name.empty()) { @@ -1448,4 +1453,375 @@ std::string PrettyDescriptor(Primitive::Type type) { return PrettyDescriptor(Primitive::Descriptor(type)); } +static void DumpMethodCFGImpl(const DexFile* dex_file, + uint32_t dex_method_idx, + const DexFile::CodeItem* code_item, + std::ostream& os) { + os << "digraph {\n"; + os << " # /* " << PrettyMethod(dex_method_idx, *dex_file, true) << " */\n"; + + std::set<uint32_t> dex_pc_is_branch_target; + { + // Go and populate. + const Instruction* inst = Instruction::At(code_item->insns_); + for (uint32_t dex_pc = 0; + dex_pc < code_item->insns_size_in_code_units_; + dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) { + if (inst->IsBranch()) { + dex_pc_is_branch_target.insert(dex_pc + inst->GetTargetOffset()); + } else if (inst->IsSwitch()) { + const uint16_t* insns = code_item->insns_ + dex_pc; + int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16); + const uint16_t* switch_insns = insns + switch_offset; + uint32_t switch_count = switch_insns[1]; + int32_t targets_offset; + if ((*insns & 0xff) == Instruction::PACKED_SWITCH) { + /* 0=sig, 1=count, 2/3=firstKey */ + targets_offset = 4; + } else { + /* 0=sig, 1=count, 2..count*2 = keys */ + targets_offset = 2 + 2 * switch_count; + } + for (uint32_t targ = 0; targ < switch_count; targ++) { + int32_t offset = + static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) | + static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16); + dex_pc_is_branch_target.insert(dex_pc + offset); + } + } + } + } + + // Create nodes for "basic blocks." + std::map<uint32_t, uint32_t> dex_pc_to_node_id; // This only has entries for block starts. + std::map<uint32_t, uint32_t> dex_pc_to_incl_id; // This has entries for all dex pcs. 
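The node-building pass that follows applies the classic leader rule for basic blocks: a block starts at the method entry, at any dex pc that some branch or switch targets, and immediately after an instruction that ends local control flow; the second map then resolves an arbitrary dex pc to its enclosing node when the edge pass needs a port to attach to. The rule as a predicate (a sketch with illustrative names):

    #include <cstdint>
    #include <set>

    // True when `dex_pc` must begin a new basic block (a "leader").
    bool IsBlockLeader(uint32_t dex_pc,
                       const std::set<uint32_t>& branch_targets,
                       bool prev_ends_block) {
      return dex_pc == 0 ||                        // method entry
             branch_targets.count(dex_pc) != 0 ||  // a branch/switch lands here
             prev_ends_block;                      // after return/throw/goto/switch
    }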
+ + { + const Instruction* inst = Instruction::At(code_item->insns_); + bool first_in_block = true; + bool force_new_block = false; + for (uint32_t dex_pc = 0; + dex_pc < code_item->insns_size_in_code_units_; + dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) { + if (dex_pc == 0 || + (dex_pc_is_branch_target.find(dex_pc) != dex_pc_is_branch_target.end()) || + force_new_block) { + uint32_t id = dex_pc_to_node_id.size(); + if (id > 0) { + // End last node. + os << "}\"];\n"; + } + // Start next node. + os << " node" << id << " [shape=record,label=\"{"; + dex_pc_to_node_id.insert(std::make_pair(dex_pc, id)); + first_in_block = true; + force_new_block = false; + } + + // Register instruction. + dex_pc_to_incl_id.insert(std::make_pair(dex_pc, dex_pc_to_node_id.size() - 1)); + + // Print instruction. + if (!first_in_block) { + os << " | "; + } else { + first_in_block = false; + } + + // Dump the instruction. Need to escape '"', '<', '>', '{' and '}'. + os << "<" << "p" << dex_pc << ">"; + os << " 0x" << std::hex << dex_pc << std::dec << ": "; + std::string inst_str = inst->DumpString(dex_file); + size_t cur_start = 0; // It's OK to start at zero, instruction dumps don't start with chars + // we need to escape. + while (cur_start != std::string::npos) { + size_t next_escape = inst_str.find_first_of("\"{}<>", cur_start + 1); + if (next_escape == std::string::npos) { + os << inst_str.substr(cur_start, inst_str.size() - cur_start); + break; + } else { + os << inst_str.substr(cur_start, next_escape - cur_start); + // Escape all necessary characters. + while (next_escape < inst_str.size()) { + char c = inst_str.at(next_escape); + if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') { + os << '\\' << c; + } else { + break; + } + next_escape++; + } + if (next_escape >= inst_str.size()) { + next_escape = std::string::npos; + } + cur_start = next_escape; + } + } + + // Force a new block for some fall-throughs and some instructions that terminate the "local" + // control flow. + force_new_block = inst->IsSwitch() || inst->IsBasicBlockEnd(); + } + // Close last node. + if (dex_pc_to_node_id.size() > 0) { + os << "}\"];\n"; + } + } + + // Create edges between them. + { + std::ostringstream regular_edges; + std::ostringstream taken_edges; + std::ostringstream exception_edges; + + // Common set of exception edges. + std::set<uint32_t> exception_targets; + + // These blocks (given by the first dex pc) need exception per dex-pc handling in a second + // pass. In the first pass we try and see whether we can use a common set of edges. + std::set<uint32_t> blocks_with_detailed_exceptions; + + { + uint32_t last_node_id = std::numeric_limits<uint32_t>::max(); + uint32_t old_dex_pc = 0; + uint32_t block_start_dex_pc = std::numeric_limits<uint32_t>::max(); + const Instruction* inst = Instruction::At(code_item->insns_); + for (uint32_t dex_pc = 0; + dex_pc < code_item->insns_size_in_code_units_; + old_dex_pc = dex_pc, dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) { + { + auto it = dex_pc_to_node_id.find(dex_pc); + if (it != dex_pc_to_node_id.end()) { + if (!exception_targets.empty()) { + // It seems the last block had common exception handlers. Add the exception edges now. 
+ uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second; + for (uint32_t handler_pc : exception_targets) { + auto node_id_it = dex_pc_to_incl_id.find(handler_pc); + if (node_id_it != dex_pc_to_incl_id.end()) { + exception_edges << " node" << node_id + << " -> node" << node_id_it->second << ":p" << handler_pc + << ";\n"; + } + } + exception_targets.clear(); + } + + block_start_dex_pc = dex_pc; + + // Seems to be a fall-through, connect to last_node_id. May be spurious edges for things + // like switch data. + uint32_t old_last = last_node_id; + last_node_id = it->second; + if (old_last != std::numeric_limits<uint32_t>::max()) { + regular_edges << " node" << old_last << ":p" << old_dex_pc + << " -> node" << last_node_id << ":p" << dex_pc + << ";\n"; + } + } + + // Look at the exceptions of the first entry. + CatchHandlerIterator catch_it(*code_item, dex_pc); + for (; catch_it.HasNext(); catch_it.Next()) { + exception_targets.insert(catch_it.GetHandlerAddress()); + } + } + + // Handle instruction. + + // Branch: something with at most two targets. + if (inst->IsBranch()) { + const int32_t offset = inst->GetTargetOffset(); + const bool conditional = !inst->IsUnconditional(); + + auto target_it = dex_pc_to_node_id.find(dex_pc + offset); + if (target_it != dex_pc_to_node_id.end()) { + taken_edges << " node" << last_node_id << ":p" << dex_pc + << " -> node" << target_it->second << ":p" << (dex_pc + offset) + << ";\n"; + } + if (!conditional) { + // No fall-through. + last_node_id = std::numeric_limits<uint32_t>::max(); + } + } else if (inst->IsSwitch()) { + // TODO: Iterate through all switch targets. + const uint16_t* insns = code_item->insns_ + dex_pc; + /* make sure the start of the switch is in range */ + int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16); + /* offset to switch table is a relative branch-style offset */ + const uint16_t* switch_insns = insns + switch_offset; + uint32_t switch_count = switch_insns[1]; + int32_t targets_offset; + if ((*insns & 0xff) == Instruction::PACKED_SWITCH) { + /* 0=sig, 1=count, 2/3=firstKey */ + targets_offset = 4; + } else { + /* 0=sig, 1=count, 2..count*2 = keys */ + targets_offset = 2 + 2 * switch_count; + } + /* make sure the end of the switch is in range */ + /* verify each switch target */ + for (uint32_t targ = 0; targ < switch_count; targ++) { + int32_t offset = + static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) | + static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16); + int32_t abs_offset = dex_pc + offset; + auto target_it = dex_pc_to_node_id.find(abs_offset); + if (target_it != dex_pc_to_node_id.end()) { + // TODO: value label. + taken_edges << " node" << last_node_id << ":p" << dex_pc + << " -> node" << target_it->second << ":p" << (abs_offset) + << ";\n"; + } + } + } + + // Exception edges. If this is not the first instruction in the block + if (block_start_dex_pc != dex_pc) { + std::set<uint32_t> current_handler_pcs; + CatchHandlerIterator catch_it(*code_item, dex_pc); + for (; catch_it.HasNext(); catch_it.Next()) { + current_handler_pcs.insert(catch_it.GetHandlerAddress()); + } + if (current_handler_pcs != exception_targets) { + exception_targets.clear(); // Clear so we don't do something at the end. + blocks_with_detailed_exceptions.insert(block_start_dex_pc); + } + } + + if (inst->IsReturn() || + (inst->Opcode() == Instruction::THROW) || + (inst->IsBranch() && inst->IsUnconditional())) { + // No fall-through. 
+ last_node_id = std::numeric_limits<uint32_t>::max(); + } + } + // Finish up the last block, if it had common exceptions. + if (!exception_targets.empty()) { + // It seems the last block had common exception handlers. Add the exception edges now. + uint32_t node_id = dex_pc_to_node_id.find(block_start_dex_pc)->second; + for (uint32_t handler_pc : exception_targets) { + auto node_id_it = dex_pc_to_incl_id.find(handler_pc); + if (node_id_it != dex_pc_to_incl_id.end()) { + exception_edges << " node" << node_id + << " -> node" << node_id_it->second << ":p" << handler_pc + << ";\n"; + } + } + exception_targets.clear(); + } + } + + // Second pass for detailed exception blocks. + // TODO + // Exception edges. If this is not the first instruction in the block + for (uint32_t dex_pc : blocks_with_detailed_exceptions) { + const Instruction* inst = Instruction::At(&code_item->insns_[dex_pc]); + uint32_t this_node_id = dex_pc_to_incl_id.find(dex_pc)->second; + while (true) { + CatchHandlerIterator catch_it(*code_item, dex_pc); + if (catch_it.HasNext()) { + std::set<uint32_t> handled_targets; + for (; catch_it.HasNext(); catch_it.Next()) { + uint32_t handler_pc = catch_it.GetHandlerAddress(); + auto it = handled_targets.find(handler_pc); + if (it == handled_targets.end()) { + auto node_id_it = dex_pc_to_incl_id.find(handler_pc); + if (node_id_it != dex_pc_to_incl_id.end()) { + exception_edges << " node" << this_node_id << ":p" << dex_pc + << " -> node" << node_id_it->second << ":p" << handler_pc + << ";\n"; + } + + // Mark as done. + handled_targets.insert(handler_pc); + } + } + } + if (inst->IsBasicBlockEnd()) { + break; + } + + // Loop update. Have a break-out if the next instruction is a branch target and thus in + // another block. + dex_pc += inst->SizeInCodeUnits(); + if (dex_pc >= code_item->insns_size_in_code_units_) { + break; + } + if (dex_pc_to_node_id.find(dex_pc) != dex_pc_to_node_id.end()) { + break; + } + inst = inst->Next(); + } + } + + // Write out the sub-graphs to make edges styled. + os << "\n"; + os << " subgraph regular_edges {\n"; + os << " edge [color=\"#000000\",weight=.3,len=3];\n\n"; + os << " " << regular_edges.str() << "\n"; + os << " }\n\n"; + + os << " subgraph taken_edges {\n"; + os << " edge [color=\"#00FF00\",weight=.3,len=3];\n\n"; + os << " " << taken_edges.str() << "\n"; + os << " }\n\n"; + + os << " subgraph exception_edges {\n"; + os << " edge [color=\"#FF0000\",weight=.3,len=3];\n\n"; + os << " " << exception_edges.str() << "\n"; + os << " }\n\n"; + } + + os << "}\n"; +} + +void DumpMethodCFG(ArtMethod* method, std::ostream& os) { + const DexFile* dex_file = method->GetDexFile(); + const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); + + DumpMethodCFGImpl(dex_file, method->GetDexMethodIndex(), code_item, os); +} + +void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) { + // This is painful, we need to find the code item. That means finding the class, and then + // iterating the table. 
+ if (dex_method_idx >= dex_file->NumMethodIds()) { + os << "Could not find method-idx."; + return; + } + const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx); + + const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_); + if (class_def == nullptr) { + os << "Could not find class-def."; + return; + } + + const uint8_t* class_data = dex_file->GetClassData(*class_def); + if (class_data == nullptr) { + os << "No class data."; + return; + } + + ClassDataItemIterator it(*dex_file, class_data); + // Skip fields + while (it.HasNextStaticField() || it.HasNextInstanceField()) { + it.Next(); + } + + // Find method, and dump it. + while (it.HasNextDirectMethod() || it.HasNextVirtualMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == dex_method_idx) { + DumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os); + return; + } + it.Next(); + } + + // Otherwise complain. + os << "Something went wrong, didn't find the method in the class data."; +} + } // namespace art diff --git a/runtime/utils.h b/runtime/utils.h index 1ef98e70d5..d1be51aff7 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -111,22 +111,22 @@ bool EndsWith(const std::string& s, const char* suffix); // "[[I" would be "int[][]", "[Ljava/lang/String;" would be // "java.lang.String[]", and so forth. std::string PrettyDescriptor(mirror::String* descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyDescriptor(const char* descriptor); std::string PrettyDescriptor(mirror::Class* klass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyDescriptor(Primitive::Type type); // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or // "int a.b.C.f" (depending on the value of 'with_type'). std::string PrettyField(ArtField* f, bool with_type = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true); // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or // "a.b.C.m(II)V" (depending on the value of 'with_signature'). std::string PrettyMethod(ArtMethod* m, bool with_signature = true) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true); // Returns a human-readable form of the name of the *class* of the given object. @@ -134,7 +134,7 @@ std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with // be "java.lang.String". Given an array of int, the output would be "int[]". // Given String.class, the output would be "java.lang.Class<java.lang.String>". std::string PrettyTypeOf(mirror::Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a human-readable form of the type at an index in the specified dex file. // Example outputs: char[], java.lang.String. @@ -143,11 +143,11 @@ std::string PrettyType(uint32_t type_idx, const DexFile& dex_file); // Returns a human-readable form of the name of the given class. // Given String.class, the output would be "java.lang.Class<java.lang.String>". 
std::string PrettyClass(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a human-readable form of the name of the given class with its class loader. std::string PrettyClassAndClassLoader(mirror::Class* c) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns a human-readable version of the Java part of the access flags, e.g., "private static " // (note the trailing whitespace). @@ -182,10 +182,10 @@ bool IsValidMemberName(const char* s); // Returns the JNI native function name for the non-overloaded method 'm'. std::string JniShortName(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the JNI native function name for the overloaded method 'm'. std::string JniLongName(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool ReadFileToString(const std::string& file_name, std::string* result); bool PrintFileToLog(const std::string& file_name, LogSeverity level); @@ -324,6 +324,9 @@ static inline constexpr bool ValidPointerSize(size_t pointer_size) { return pointer_size == 4 || pointer_size == 8; } +void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); +void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os); + } // namespace art #endif // ART_RUNTIME_UTILS_H_ diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc index 66e38b1c13..f00edffab8 100644 --- a/runtime/utils_test.cc +++ b/runtime/utils_test.cc @@ -26,7 +26,7 @@ #include "scoped_thread_state_change.h" #include "handle_scope-inl.h" -#include <valgrind.h> +#include "base/memory_tool.h" namespace art { @@ -358,7 +358,7 @@ TEST_F(UtilsTest, ExecSuccess) { command.push_back("/usr/bin/id"); } std::string error_msg; - if (RUNNING_ON_VALGRIND == 0) { + if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) { // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks. EXPECT_TRUE(Exec(command, &error_msg)); } @@ -372,7 +372,7 @@ TEST_F(UtilsTest, ExecError) { std::vector<std::string> command; command.push_back("bogus"); std::string error_msg; - if (RUNNING_ON_VALGRIND == 0) { + if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) { // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks. EXPECT_FALSE(Exec(command, &error_msg)); EXPECT_NE(0U, error_msg.size()); diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 8a8b455603..1828b91e2a 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -53,6 +53,9 @@ static constexpr bool kTimeVerifyMethod = !kIsDebugBuild; static constexpr bool gDebugVerify = false; // TODO: Add a constant to method_verifier to turn on verbose logging? +// On VLOG(verifier), should we dump the whole state when we run into a hard failure? 
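+// (Illustrative note: VLOG(verifier) is typically enabled via the -verbose:verifier runtime
+// option. With it on, a hard failure is followed by a full Dump() of the instruction flags
+// and register lines; see the handling in MethodVerifier::Fail below.)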
+static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
+
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
                                 uint32_t insns_size, uint16_t registers_size,
                                 MethodVerifier* verifier) {
@@ -107,7 +110,7 @@ ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool cond
}

static void SafelyMarkAllRegistersAsConflicts(MethodVerifier* verifier, RegisterLine* reg_line) {
-  if (verifier->IsConstructor()) {
+  if (verifier->IsInstanceConstructor()) {
    // Before we mark all regs as conflicts, check that we don't have an uninitialized this.
    reg_line->CheckConstructorReturn(verifier);
  }
@@ -329,14 +332,21 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t
  } else {
    // Bad method data.
    CHECK_NE(verifier.failures_.size(), 0U);
-    CHECK(verifier.have_pending_hard_failure_);
-    verifier.DumpFailures(LOG(INFO) << "Verification error in "
-                                    << PrettyMethod(method_idx, *dex_file) << "\n");
+
+    if (UNLIKELY(verifier.have_pending_experimental_failure_)) {
+      // Failed due to being forced into the interpreter. This is OK because
+      // we just want to skip verification.
+      result = kSoftFailure;
+    } else {
+      CHECK(verifier.have_pending_hard_failure_);
+      verifier.DumpFailures(LOG(INFO) << "Verification error in "
+                                      << PrettyMethod(method_idx, *dex_file) << "\n");
+      result = kHardFailure;
+    }
    if (gDebugVerify) {
      std::cout << "\n" << verifier.info_messages_.str();
      verifier.Dump(std::cout);
    }
-    result = kHardFailure;
  }
  if (kTimeVerifyMethod) {
    uint64_t duration_ns = NanoTime() - start_ns;
@@ -349,27 +359,29 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t
  return result;
}

-MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t dex_method_idx,
-                                                    const DexFile* dex_file,
-                                                    Handle<mirror::DexCache> dex_cache,
-                                                    Handle<mirror::ClassLoader> class_loader,
-                                                    const DexFile::ClassDef* class_def,
-                                                    const DexFile::CodeItem* code_item,
-                                                    ArtMethod* method,
-                                                    uint32_t method_access_flags) {
+MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self,
+                                                    VariableIndentationOutputStream* vios,
+                                                    uint32_t dex_method_idx,
+                                                    const DexFile* dex_file,
+                                                    Handle<mirror::DexCache> dex_cache,
+                                                    Handle<mirror::ClassLoader> class_loader,
+                                                    const DexFile::ClassDef* class_def,
+                                                    const DexFile::CodeItem* code_item,
+                                                    ArtMethod* method,
+                                                    uint32_t method_access_flags) {
  MethodVerifier* verifier = new MethodVerifier(self, dex_file, dex_cache, class_loader, class_def,
                                                code_item, dex_method_idx, method,
                                                method_access_flags, true, true, true, true);
  verifier->Verify();
-  verifier->DumpFailures(os);
-  os << verifier->info_messages_.str();
+  verifier->DumpFailures(vios->Stream());
+  vios->Stream() << verifier->info_messages_.str();
  // Only dump and return if there are no hard failures. Otherwise the verifier may not be fully
  // initialized and querying any info is dangerous/can abort.
  if (verifier->have_pending_hard_failure_) {
    delete verifier;
    return nullptr;
  } else {
-    verifier->Dump(os);
+    verifier->Dump(vios);
    return verifier;
  }
}
@@ -400,6 +412,7 @@ MethodVerifier::MethodVerifier(Thread* self,
      monitor_enter_dex_pcs_(nullptr),
      have_pending_hard_failure_(false),
      have_pending_runtime_throw_failure_(false),
+      have_pending_experimental_failure_(false),
      have_any_pending_runtime_throw_failure_(false),
      new_instance_count_(0),
      monitor_enter_count_(0),
@@ -628,6 +641,12 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
        Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
      }
      have_pending_hard_failure_ = true;
+      if (VLOG_IS_ON(verifier) && kDumpRegLinesOnHardFailureIfVLOG) {
+        ScopedObjectAccess soa(Thread::Current());
+        std::ostringstream oss;
+        Dump(oss);
+        LOG(ERROR) << oss.str();
+      }
      break;
    }
  }
@@ -811,6 +830,17 @@ bool MethodVerifier::VerifyInstructions() {
}

bool MethodVerifier::VerifyInstruction(const Instruction* inst, uint32_t code_offset) {
+  if (UNLIKELY(inst->IsExperimental())) {
+    // Experimental instructions don't yet have verifier support. While it is possible to use
+    // them by themselves, when we try to use stable instructions with a virtual register that
+    // was created by an experimental instruction, the data flow analysis will fail.
+    Fail(VERIFY_ERROR_FORCE_INTERPRETER)
+        << "experimental instruction is not supported by verifier; skipping verification";
+    have_pending_experimental_failure_ = true;
+    return false;
+  }
+
  bool result = true;
  switch (inst->GetVerifyTypeArgumentA()) {
    case Instruction::kVerifyRegA:
@@ -1013,8 +1043,8 @@ bool MethodVerifier::CheckArrayData(uint32_t cur_offset) {
  DCHECK_LT(cur_offset, insn_count);
  /* make sure the start of the array data table is in range */
-  array_data_offset = insns[1] | (((int32_t) insns[2]) << 16);
-  if ((int32_t) cur_offset + array_data_offset < 0 ||
+  array_data_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+  if (static_cast<int32_t>(cur_offset) + array_data_offset < 0 ||
      cur_offset + array_data_offset + 2 >= insn_count) {
    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
                                      << ", data offset " << array_data_offset
@@ -1023,12 +1053,21 @@ bool MethodVerifier::CheckArrayData(uint32_t cur_offset) {
  }
  /* offset to array data table is a relative branch-style offset */
  array_data = insns + array_data_offset;
-  /* make sure the table is 32-bit aligned */
-  if ((reinterpret_cast<uintptr_t>(array_data) & 0x03) != 0) {
+  // Make sure the table is at an even dex pc, that is, 32-bit aligned.
+  if (!IsAligned<4>(array_data)) {
    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned array data table: at " << cur_offset
                                      << ", data offset " << array_data_offset;
    return false;
  }
+  // Make sure the array-data is marked as an opcode. This ensures that it was reached when
+  // traversing the code item linearly. It approximates the spec-mandated check of the
+  // padding value.
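+  // Example of what this catches: if the padding in front of the table is malformed, the
+  // referenced offset lands in the middle of an instruction seen during the linear decode,
+  // the IsOpcode flag there is unset, and the table is rejected below.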
+  if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
+                                      << ", data offset " << array_data_offset
+                                      << " not correctly visited, probably bad padding.";
+    return false;
+  }
+
  uint32_t value_width = array_data[1];
  uint32_t value_count = *reinterpret_cast<const uint32_t*>(&array_data[2]);
  uint32_t table_size = 4 + (value_width * value_count + 1) / 2;
@@ -1117,8 +1156,9 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
  DCHECK_LT(cur_offset, insn_count);
  const uint16_t* insns = code_item_->insns_ + cur_offset;
  /* make sure the start of the switch is in range */
-  int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16;
-  if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 > insn_count) {
+  int32_t switch_offset = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
+  if (static_cast<int32_t>(cur_offset) + switch_offset < 0 ||
+      cur_offset + switch_offset + 2 > insn_count) {
    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
                                      << ", switch offset " << switch_offset
                                      << ", count " << insn_count;
@@ -1126,12 +1166,21 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) {
  }
  /* offset to switch table is a relative branch-style offset */
  const uint16_t* switch_insns = insns + switch_offset;
-  /* make sure the table is 32-bit aligned */
-  if ((reinterpret_cast<uintptr_t>(switch_insns) & 0x03) != 0) {
+  // Make sure the table is at an even dex pc, that is, 32-bit aligned.
+  if (!IsAligned<4>(switch_insns)) {
    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unaligned switch table: at " << cur_offset
                                      << ", switch offset " << switch_offset;
    return false;
  }
+  // Make sure the switch data is marked as an opcode. This ensures that it was reached when
+  // traversing the code item linearly. It approximates the spec-mandated check of the
+  // padding value.
+ if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset + << ", switch offset " << switch_offset + << " not correctly visited, probably bad padding."; + return false; + } + uint32_t switch_count = switch_insns[1]; int32_t keys_offset, targets_offset; uint16_t expected_signature; @@ -1165,8 +1214,9 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) { if (keys_offset > 0 && switch_count > 1) { int32_t last_key = switch_insns[keys_offset] | (switch_insns[keys_offset + 1] << 16); for (uint32_t targ = 1; targ < switch_count; targ++) { - int32_t key = (int32_t) switch_insns[keys_offset + targ * 2] | - (int32_t) (switch_insns[keys_offset + targ * 2 + 1] << 16); + int32_t key = + static_cast<int32_t>(switch_insns[keys_offset + targ * 2]) | + static_cast<int32_t>(switch_insns[keys_offset + targ * 2 + 1] << 16); if (key <= last_key) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid packed switch: last key=" << last_key << ", this=" << key; @@ -1177,11 +1227,11 @@ bool MethodVerifier::CheckSwitchTargets(uint32_t cur_offset) { } /* verify each switch target */ for (uint32_t targ = 0; targ < switch_count; targ++) { - int32_t offset = (int32_t) switch_insns[targets_offset + targ * 2] | - (int32_t) (switch_insns[targets_offset + targ * 2 + 1] << 16); + int32_t offset = static_cast<int32_t>(switch_insns[targets_offset + targ * 2]) | + static_cast<int32_t>(switch_insns[targets_offset + targ * 2 + 1] << 16); int32_t abs_offset = cur_offset + offset; if (abs_offset < 0 || - abs_offset >= (int32_t) insn_count || + abs_offset >= static_cast<int32_t>(insn_count) || !insn_flags_[abs_offset].IsOpcode()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset << " (-> " << reinterpret_cast<void*>(abs_offset) << ") at " @@ -1262,33 +1312,36 @@ std::ostream& MethodVerifier::DumpFailures(std::ostream& os) { } void MethodVerifier::Dump(std::ostream& os) { + VariableIndentationOutputStream vios(&os); + Dump(&vios); +} + +void MethodVerifier::Dump(VariableIndentationOutputStream* vios) { if (code_item_ == nullptr) { - os << "Native method\n"; + vios->Stream() << "Native method\n"; return; } { - os << "Register Types:\n"; - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent_os(&indent_filter); - reg_types_.Dump(indent_os); - } - os << "Dumping instructions and register lines:\n"; - Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count); - std::ostream indent_os(&indent_filter); + vios->Stream() << "Register Types:\n"; + ScopedIndentation indent1(vios); + reg_types_.Dump(vios->Stream()); + } + vios->Stream() << "Dumping instructions and register lines:\n"; + ScopedIndentation indent1(vios); const Instruction* inst = Instruction::At(code_item_->insns_); for (size_t dex_pc = 0; dex_pc < code_item_->insns_size_in_code_units_; - dex_pc += inst->SizeInCodeUnits()) { + dex_pc += inst->SizeInCodeUnits(), inst = inst->Next()) { RegisterLine* reg_line = reg_table_.GetLine(dex_pc); if (reg_line != nullptr) { - indent_os << reg_line->Dump(this) << "\n"; + vios->Stream() << reg_line->Dump(this) << "\n"; } - indent_os << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " "; + vios->Stream() + << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " "; const bool kDumpHexOfInstruction = false; if (kDumpHexOfInstruction) { - indent_os << inst->DumpHex(5) << " "; + vios->Stream() << inst->DumpHex(5) << " "; } - indent_os << 
inst->DumpString(dex_file_) << "\n"; - inst = inst->Next(); + vios->Stream() << inst->DumpString(dex_file_) << "\n"; } } @@ -1330,9 +1383,15 @@ bool MethodVerifier::SetTypesFromSignature() { // argument as uninitialized. This restricts field access until the superclass constructor is // called. const RegType& declaring_class = GetDeclaringClass(); - if (IsConstructor() && !declaring_class.IsJavaLangObject()) { - reg_line->SetRegisterType(this, arg_start + cur_arg, - reg_types_.UninitializedThisArgument(declaring_class)); + if (IsConstructor()) { + if (declaring_class.IsJavaLangObject()) { + // "this" is implicitly initialized. + reg_line->SetThisInitialized(); + reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class); + } else { + reg_line->SetRegisterType(this, arg_start + cur_arg, + reg_types_.UninitializedThisArgument(declaring_class)); + } } else { reg_line->SetRegisterType(this, arg_start + cur_arg, declaring_class); } @@ -1655,16 +1714,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { std::unique_ptr<RegisterLine> branch_line; std::unique_ptr<RegisterLine> fallthrough_line; - /* - * If we are in a constructor, and we currently have an UninitializedThis type - * in a register somewhere, we need to make sure it isn't overwritten. - */ - bool track_uninitialized_this = false; - size_t uninitialized_this_loc = 0; - if (IsConstructor()) { - track_uninitialized_this = work_line_->GetUninitializedThisLoc(this, &uninitialized_this_loc); - } - switch (inst->Opcode()) { case Instruction::NOP: /* @@ -1742,14 +1791,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { break; } case Instruction::RETURN_VOID: - if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) { + if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) { if (!GetMethodReturnType().IsConflict()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected"; } } break; case Instruction::RETURN: - if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) { + if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) { /* check the method signature */ const RegType& return_type = GetMethodReturnType(); if (!return_type.IsCategory1Types()) { @@ -1774,7 +1823,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } break; case Instruction::RETURN_WIDE: - if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) { + if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) { /* check the method signature */ const RegType& return_type = GetMethodReturnType(); if (!return_type.IsCategory2Types()) { @@ -1790,7 +1839,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } break; case Instruction::RETURN_OBJECT: - if (!IsConstructor() || work_line_->CheckConstructorReturn(this)) { + if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) { const RegType& return_type = GetMethodReturnType(); if (!return_type.IsReferenceTypes()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected"; @@ -2100,7 +2149,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } else { // Now verify if the element width in the table matches the element width declared in // the array - const uint16_t* array_data = insns + (insns[1] | (((int32_t) insns[2]) << 16)); + const uint16_t* array_data = + insns + (insns[1] | (static_cast<int32_t>(insns[2]) << 16)); if (array_data[0] != Instruction::kArrayDataSignature) { 
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid magic for array-data"; } else { @@ -2831,6 +2881,13 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } } } + // Handle this like a RETURN_VOID now. Code is duplicated to separate standard from + // quickened opcodes (otherwise this could be a fall-through). + if (!IsConstructor()) { + if (!GetMethodReturnType().IsConflict()) { + Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void not expected"; + } + } break; // Note: the following instructions encode offsets derived from class linking. // As such they use Class*/Field*/AbstractMethod* as these offsets only have @@ -2922,6 +2979,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // If the code would've normally hard-failed, then the interpreter will throw the // appropriate verification errors at runtime. Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement box-lambda verification + + // Partial verification. Sets the resulting type to always be an object, which + // is good enough for some other verification to occur without hard-failing. + const uint32_t vreg_target_object = inst->VRegA_22x(); // box-lambda vA, vB + const RegType& reg_type = reg_types_.JavaLangObject(need_precise_constants_); + work_line_->SetRegisterType(this, vreg_target_object, reg_type); break; } @@ -2947,20 +3010,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { */ } // end - switch (dec_insn.opcode) - /* - * If we are in a constructor, and we had an UninitializedThis type - * in a register somewhere, we need to make sure it wasn't overwritten. - */ - if (track_uninitialized_this) { - bool was_invoke_direct = (inst->Opcode() == Instruction::INVOKE_DIRECT || - inst->Opcode() == Instruction::INVOKE_DIRECT_RANGE); - if (work_line_->WasUninitializedThisOverwritten(this, uninitialized_this_loc, - was_invoke_direct)) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) - << "Constructor failed to initialize this object"; - } - } - if (have_pending_hard_failure_) { if (Runtime::Current()->IsAotCompiler()) { /* When AOT compiling, check that the last failure is a hard failure */ @@ -3039,7 +3088,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { * just need to walk through and tag the targets. 
   */
  if ((opcode_flags & Instruction::kSwitch) != 0) {
-    int offset_to_switch = insns[1] | (((int32_t) insns[2]) << 16);
+    int offset_to_switch = insns[1] | (static_cast<int32_t>(insns[2]) << 16);
    const uint16_t* switch_insns = insns + offset_to_switch;
    int switch_count = switch_insns[1];
    int offset_to_targets, targ;
@@ -3060,7 +3109,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
      /* offsets are 32-bit, and only partly endian-swapped */
      offset = switch_insns[offset_to_targets + targ * 2] |
-               (((int32_t) switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
+               (static_cast<int32_t>(switch_insns[offset_to_targets + targ * 2 + 1]) << 16);
      abs_offset = work_insn_idx_ + offset;
      DCHECK_LT(abs_offset, code_item_->insns_size_in_code_units_);
      if (!CheckNotMoveExceptionOrMoveResult(code_item_->insns_, abs_offset)) {
@@ -3541,7 +3590,7 @@ class MethodParamListDescriptorIterator {
    ++pos_;
  }

-  const char* GetDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  const char* GetDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
    return res_method_->GetTypeDescriptorFromTypeIdx(params_->GetTypeItem(pos_).type_idx_);
  }

@@ -3900,7 +3949,24 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
  if (array_type.IsZero()) {
    // Null array type; this code path will fail at runtime.
    // Still check that the given value matches the instruction's type.
-    work_line_->VerifyRegisterType(this, inst->VRegA_23x(), insn_type);
+    // Note: this is, as usual, complicated by the fact that the instruction isn't fully
+    // typed and fits multiple register types (e.g., an aput may store an int or a float).
+    const RegType* modified_reg_type = &insn_type;
+    if ((modified_reg_type == &reg_types_.Integer()) ||
+        (modified_reg_type == &reg_types_.LongLo())) {
+      // May be integer or float | long or double. Overwrite insn_type accordingly.
+      const RegType& value_type = work_line_->GetRegisterType(this, inst->VRegA_23x());
+      if (modified_reg_type == &reg_types_.Integer()) {
+        if (&value_type == &reg_types_.Float()) {
+          modified_reg_type = &value_type;
+        }
+      } else {
+        if (&value_type == &reg_types_.DoubleLo()) {
+          modified_reg_type = &value_type;
+        }
+      }
+    }
+    work_line_->VerifyRegisterType(this, inst->VRegA_23x(), *modified_reg_type);
  } else if (!array_type.IsArrayTypes()) {
    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
  } else {
@@ -4115,7 +4181,9 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType&
                                        << " to be compatible with type '" << insn_type
                                        << "' but found type '" << *field_type
                                        << "' in get-object";
-      work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+      if (error != VERIFY_ERROR_BAD_CLASS_HARD) {
+        work_line_->SetRegisterType(this, vregA, reg_types_.Conflict());
+      }
      return;
    }
  }
@@ -4320,6 +4388,10 @@ bool MethodVerifier::UpdateRegisters(uint32_t next_insn, RegisterLine* merge_lin
    const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
    Instruction::Code opcode = ret_inst->Opcode();
    if (opcode == Instruction::RETURN_VOID || opcode == Instruction::RETURN_VOID_NO_BARRIER) {
+      // Explicitly copy the this-initialized flag from the merge-line, as we didn't copy its
+      // state. Must be done before SafelyMarkAllRegistersAsConflicts as that will do the
+      // super-constructor-call checking.
+ target_line->CopyThisInitialized(*merge_line); SafelyMarkAllRegistersAsConflicts(this, target_line); } else { target_line->CopyFromLine(merge_line); diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index 25506947a0..21f8543b38 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -32,6 +32,7 @@ namespace art { class Instruction; struct ReferenceMap2Visitor; class Thread; +class VariableIndentationOutputStream; namespace verifier { @@ -149,25 +150,27 @@ class MethodVerifier { /* Verify a class. Returns "kNoFailure" on success. */ static FailureKind VerifyClass(Thread* self, mirror::Class* klass, bool allow_soft_failures, std::string* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static FailureKind VerifyClass(Thread* self, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, bool allow_soft_failures, std::string* error) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static MethodVerifier* VerifyMethodAndDump(Thread* self, std::ostream& os, uint32_t method_idx, + static MethodVerifier* VerifyMethodAndDump(Thread* self, + VariableIndentationOutputStream* vios, + uint32_t method_idx, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def, const DexFile::CodeItem* code_item, ArtMethod* method, uint32_t method_access_flags) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static FailureKind VerifyMethod(ArtMethod* method, bool allow_soft_failures, - std::string* error) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string* error) SHARED_REQUIRES(Locks::mutator_lock_); uint8_t EncodePcToReferenceMapData() const; @@ -190,28 +193,29 @@ class MethodVerifier { // Dump the state of the verifier, namely each instruction, what flags are set on it, register // information - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); + void Dump(VariableIndentationOutputStream* vios) SHARED_REQUIRES(Locks::mutator_lock_); // Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding // to the locks held at 'dex_pc' in method 'm'. static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc, std::vector<uint32_t>* monitor_enter_dex_pcs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the accessed field corresponding to the quick instruction's field // offset at 'dex_pc' in method 'm'. static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the invoked method corresponding to the quick instruction's vtable // index at 'dex_pc' in method 'm'. 
static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static void Init() SHARED_REQUIRES(Locks::mutator_lock_); static void Shutdown(); bool CanLoadClasses() const { @@ -224,7 +228,7 @@ class MethodVerifier { ArtMethod* method, uint32_t access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool allow_thread_suspension) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : MethodVerifier(self, dex_file, dex_cache, class_loader, class_def, code_item, method_idx, method, access_flags, can_load_classes, allow_soft_failures, need_precise_constants, false, allow_thread_suspension) {} @@ -233,22 +237,22 @@ class MethodVerifier { // Run verification on the method. Returns true if verification completes and false if the input // has an irrecoverable corruption. - bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool Verify() SHARED_REQUIRES(Locks::mutator_lock_); // Describe VRegs at the given dex pc. std::vector<int32_t> DescribeVRegs(uint32_t dex_pc); static void VisitStaticRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, const RootInfo& roots) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Accessors used by the compiler via CompilerCallback const DexFile::CodeItem* CodeItem() const; RegisterLine* GetRegLine(uint32_t dex_pc); const InstructionFlags& GetInstructionFlags(size_t index) const; - mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_); + mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_); MethodReference GetMethodReference() const; uint32_t GetAccessFlags() const; bool HasCheckCasts() const; @@ -259,15 +263,15 @@ class MethodVerifier { } const RegType& ResolveCheckedClass(uint32_t class_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the method of a quick invoke or null if it cannot be found. ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line, bool is_range, bool allow_failure) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Returns the access field of a quick field access (iget/iput-quick) or null // if it cannot be found. ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Is the method being verified a constructor? 
bool IsConstructor() const { @@ -279,6 +283,10 @@ class MethodVerifier { return (method_access_flags_ & kAccStatic) != 0; } + bool IsInstanceConstructor() const { + return IsConstructor() && !IsStatic(); + } + SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() { return string_init_pc_reg_map_; } @@ -291,7 +299,7 @@ class MethodVerifier { ArtMethod* method, uint32_t access_flags, bool can_load_classes, bool allow_soft_failures, bool need_precise_constants, bool verify_to_dump, bool allow_thread_suspension) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Adds the given string to the beginning of the last failure message. void PrependToLastFailMessage(std::string); @@ -317,18 +325,18 @@ class MethodVerifier { const DexFile::CodeItem* code_item, ArtMethod* method, uint32_t method_access_flags, bool allow_soft_failures, bool need_precise_constants) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); - void FindLocksAtDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FindLocksAtDexPc() SHARED_REQUIRES(Locks::mutator_lock_); ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Compute the width of the instruction at each address in the instruction stream, and store it in @@ -356,7 +364,7 @@ class MethodVerifier { * Returns "false" if something in the exception table looks fishy, but we're expecting the * exception table to be somewhat sane. */ - bool ScanTryCatchBlocks() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool ScanTryCatchBlocks() SHARED_REQUIRES(Locks::mutator_lock_); /* * Perform static verification on all instructions in a method. @@ -462,11 +470,11 @@ class MethodVerifier { bool* selfOkay); /* Perform detailed code-flow analysis on a single method. */ - bool VerifyCodeFlow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool VerifyCodeFlow() SHARED_REQUIRES(Locks::mutator_lock_); // Set the register types for the first instruction in the method based on the method signature. // This has the side-effect of validating the signature. - bool SetTypesFromSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool SetTypesFromSignature() SHARED_REQUIRES(Locks::mutator_lock_); /* * Perform code flow on a method. @@ -514,7 +522,7 @@ class MethodVerifier { * reordering by specifying that you can't execute the new-instance instruction if a register * contains an uninitialized instance created by that same instruction. */ - bool CodeFlowVerifyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool CodeFlowVerifyMethod() SHARED_REQUIRES(Locks::mutator_lock_); /* * Perform verification for a single instruction. @@ -526,33 +534,33 @@ class MethodVerifier { * addresses. Does not set or clear any other flags in "insn_flags_". 
*/ bool CodeFlowVerifyInstruction(uint32_t* start_guess) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of a new array instruction void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Helper to perform verification on puts of primitive type. void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type, - const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const uint32_t vregA) SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of an aget instruction. The destination register's type will be set to // be that of component type of the array unless the array type is unknown, in which case a // bottom type inferred from the type of instruction is used. is_primitive is false for an // aget-object. void VerifyAGet(const Instruction* inst, const RegType& insn_type, - bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of an aput instruction. void VerifyAPut(const Instruction* inst, const RegType& insn_type, - bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_); // Lookup instance field and fail for resolution violations ArtField* GetInstanceField(const RegType& obj_type, int field_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Lookup static field and fail for resolution violations - ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ArtField* GetStaticField(int field_idx) SHARED_REQUIRES(Locks::mutator_lock_); // Perform verification of an iget/sget/iput/sput instruction. enum class FieldAccessType { // private @@ -562,16 +570,16 @@ class MethodVerifier { template <FieldAccessType kAccType> void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive, bool is_static) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <FieldAccessType kAccType> void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Resolves a class based on an index and performs access checks to ensure the referrer can // access the resolved class. const RegType& ResolveClassAndCheckAccess(uint32_t class_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler @@ -579,7 +587,7 @@ class MethodVerifier { * exception handler can be found or if the Join of exception types fails. */ const RegType& GetCaughtExceptionType() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Resolves a method based on an index and performs access checks to ensure @@ -587,7 +595,7 @@ class MethodVerifier { * Does not throw exceptions. */ ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify the arguments to a method. 
We're executing in "method", making @@ -614,22 +622,22 @@ class MethodVerifier { ArtMethod* VerifyInvocationArgs(const Instruction* inst, MethodType method_type, bool is_range, bool is_super) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Similar checks to the above, but on the proto. Will be used when the method cannot be // resolved. void VerifyInvocationArgsUnresolvedMethod(const Instruction* inst, MethodType method_type, bool is_range) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); template <class T> ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst, MethodType method_type, bool is_range, ArtMethod* res_method) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify that the target instruction is not "move-exception". It's important that the only way @@ -661,18 +669,18 @@ class MethodVerifier { * Returns "false" if an error is encountered. */ bool UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line, bool update_merge_line) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Return the register type for the method. - const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& GetMethodReturnType() SHARED_REQUIRES(Locks::mutator_lock_); // Get a type representing the declaring class of the method. - const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const RegType& GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_); InstructionFlags* CurrentInsnFlags(); const RegType& DetermineCat1Constant(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Try to create a register type from the given class. In case a precise type is requested, but // the class is not instantiable, a soft error (of type NO_CLASS) will be enqueued and a @@ -680,7 +688,7 @@ class MethodVerifier { // Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is // actually touched. const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // The thread we're verifying on. Thread* const self_; @@ -732,6 +740,8 @@ class MethodVerifier { // instructions that would hard fail the verification. // Note: this flag is reset after processing each instruction. bool have_pending_runtime_throw_failure_; + // Is there a pending experimental failure? + bool have_pending_experimental_failure_; // A version of the above that is not reset and thus captures if there were *any* throw failures. 
bool have_any_pending_runtime_throw_failure_; diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc index 3994536cca..2ab6b4aaab 100644 --- a/runtime/verifier/method_verifier_test.cc +++ b/runtime/verifier/method_verifier_test.cc @@ -30,7 +30,7 @@ namespace verifier { class MethodVerifierTest : public CommonRuntimeTest { protected: void VerifyClass(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { ASSERT_TRUE(descriptor != nullptr); Thread* self = Thread::Current(); mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str()); @@ -42,7 +42,7 @@ class MethodVerifierTest : public CommonRuntimeTest { } void VerifyDexFile(const DexFile& dex) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { // Verify all the classes defined in this file for (size_t i = 0; i < dex.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex.GetClassDef(i); diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index c8aa4fd2c1..b86a4c8d25 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -16,6 +16,7 @@ #include "reg_type-inl.h" +#include "base/bit_vector-inl.h" #include "base/casts.h" #include "class_linker-inl.h" #include "dex_file-inl.h" @@ -46,19 +47,19 @@ const DoubleHiType* DoubleHiType::instance_ = nullptr; const IntegerType* IntegerType::instance_ = nullptr; PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) { CHECK(klass != nullptr); CHECK(!descriptor.empty()); } Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : PrimitiveType(klass, descriptor, cache_id) { } Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : PrimitiveType(klass, descriptor, cache_id) { } @@ -280,7 +281,7 @@ void BooleanType::Destroy() { } } -std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +std::string UndefinedType::Dump() const SHARED_REQUIRES(Locks::mutator_lock_) { return "Undefined"; } @@ -302,18 +303,24 @@ void UndefinedType::Destroy() { PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) : RegType(klass, descriptor, cache_id) { - DCHECK(klass->IsInstantiable()); + // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError + // would be thrown at runtime, but we need to continue verification and *not* create a + // hard failure or abort. 
} std::string UnresolvedMergedType::Dump() const { std::stringstream result; - std::set<uint16_t> types = GetMergedTypes(); - result << "UnresolvedMergedReferences("; - auto it = types.begin(); - result << reg_type_cache_->GetFromId(*it).Dump(); - for (++it; it != types.end(); ++it) { - result << ", "; - result << reg_type_cache_->GetFromId(*it).Dump(); + result << "UnresolvedMergedReferences(" << GetResolvedPart().Dump() << " | "; + const BitVector& types = GetUnresolvedTypes(); + + bool first = true; + for (uint32_t idx : types.Indexes()) { + if (!first) { + result << ", "; + } else { + first = false; + } + result << reg_type_cache_->GetFromId(idx).Dump(); } result << ")"; return result.str(); @@ -490,32 +497,6 @@ bool UnresolvedType::IsNonZeroReferenceTypes() const { return true; } -std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const { - std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes(); - const RegType& left = reg_type_cache_->GetFromId(refs.first); - const RegType& right = reg_type_cache_->GetFromId(refs.second); - - std::set<uint16_t> types; - if (left.IsUnresolvedMergedReference()) { - types = down_cast<const UnresolvedMergedType*>(&left)->GetMergedTypes(); - } else { - types.insert(refs.first); - } - if (right.IsUnresolvedMergedReference()) { - std::set<uint16_t> right_types = - down_cast<const UnresolvedMergedType*>(&right)->GetMergedTypes(); - types.insert(right_types.begin(), right_types.end()); - } else { - types.insert(refs.second); - } - if (kIsDebugBuild) { - for (const auto& type : types) { - CHECK(!reg_type_cache_->GetFromId(type).IsUnresolvedMergedReference()); - } - } - return types; -} - const RegType& RegType::GetSuperClass(RegTypeCache* cache) const { if (!IsUnresolvedTypes()) { mirror::Class* super_klass = GetClass()->GetSuperClass(); @@ -538,7 +519,7 @@ const RegType& RegType::GetSuperClass(RegTypeCache* cache) const { } } -bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool RegType::IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { // Primitive arrays will always resolve DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '['); @@ -551,11 +532,11 @@ bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lo } } -bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool RegType::IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_) { return IsReference() && GetClass()->IsObjectClass(); } -bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +bool RegType::IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) { if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) { return descriptor_[0] == '['; } else if (HasClass()) { @@ -583,16 +564,20 @@ static const RegType& SelectNonConstant(const RegType& a, const RegType& b) { const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const { DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller - // Perform pointer equality tests for conflict to avoid virtual method dispatch. + // Perform pointer equality tests for undefined and conflict to avoid virtual method dispatch. 
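+  // Summary of the special cases handled below (a reading aid derived from this function,
+  // not a behavior change):
+  //   Undefined     MERGE *                 =>  Undefined
+  //   Conflict      MERGE *                 =>  Conflict
+  //   0 (null)      MERGE ref               =>  ref
+  //   Uninitialized MERGE anything-unequal  =>  Conflict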
+  const UndefinedType& undefined = reg_types->Undefined();
  const ConflictType& conflict = reg_types->Conflict();
-  if (this == &conflict) {
-    DCHECK(IsConflict());
-    return *this;  // Conflict MERGE * => Conflict
-  } else if (&incoming_type == &conflict) {
-    DCHECK(incoming_type.IsConflict());
-    return incoming_type;  // * MERGE Conflict => Conflict
-  } else if (IsUndefined() || incoming_type.IsUndefined()) {
-    return conflict;  // Unknown MERGE * => Conflict
+  DCHECK_EQ(this == &undefined, IsUndefined());
+  DCHECK_EQ(&incoming_type == &undefined, incoming_type.IsUndefined());
+  DCHECK_EQ(this == &conflict, IsConflict());
+  DCHECK_EQ(&incoming_type == &conflict, incoming_type.IsConflict());
+  if (this == &undefined || &incoming_type == &undefined) {
+    // There is a difference between undefined and conflict. Conflicts may be copied around, but
+    // not used. Undefined registers must not be copied. So any merge with undefined should return
+    // undefined.
+    return undefined;
+  } else if (this == &conflict || &incoming_type == &conflict) {
+    return conflict;  // (Conflict MERGE *) or (* MERGE Conflict) => Conflict
  } else if (IsConstant() && incoming_type.IsConstant()) {
    const ConstantType& type1 = *down_cast<const ConstantType*>(this);
    const ConstantType& type2 = *down_cast<const ConstantType*>(&incoming_type);
@@ -686,6 +671,11 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
  } else if (IsReferenceTypes() && incoming_type.IsReferenceTypes()) {
    if (IsZero() || incoming_type.IsZero()) {
      return SelectNonConstant(*this, incoming_type);  // 0 MERGE ref => ref
+    } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes()) {
+      // Something that is uninitialized hasn't had its constructor called. Uninitialized types
+      // are special. They may only ever be merged with themselves (must be taken care of by the
+      // caller of Merge(), see the DCHECK on entry). So mark any other merge as conflicting here.
+      return conflict;
    } else if (IsJavaLangObject() || incoming_type.IsJavaLangObject()) {
      return reg_types->JavaLangObject(false);  // Object MERGE ref => Object
    } else if (IsUnresolvedTypes() || incoming_type.IsUnresolvedTypes()) {
@@ -694,11 +684,6 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
      // type that reflects our lack of knowledge and that allows the rest of the unresolved
      // mechanics to continue.
      return reg_types->FromUnresolvedMerge(*this, incoming_type);
-    } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes()) {
-      // Something that is uninitialized hasn't had its constructor called. Mark any merge
-      // of this type with something that is initialized as conflicting. The cases of a merge
-      // with itself, 0 or Object are handled above.
-      return conflict;
    } else {  // Two reference types, compute Join
      mirror::Class* c1 = GetClass();
      mirror::Class* c2 = incoming_type.GetClass();
@@ -797,12 +782,24 @@ void UnresolvedUninitializedRefType::CheckInvariants() const {
  CHECK(klass_.IsNull()) << *this;
}

+UnresolvedMergedType::UnresolvedMergedType(const RegType& resolved,
+                                           const BitVector& unresolved,
+                                           const RegTypeCache* reg_type_cache,
+                                           uint16_t cache_id)
+    : UnresolvedType("", cache_id),
+      reg_type_cache_(reg_type_cache),
+      resolved_part_(resolved),
+      unresolved_types_(unresolved, false, unresolved.GetAllocator()) {
+  if (kIsDebugBuild) {
+    CheckInvariants();
+  }
+}
void UnresolvedMergedType::CheckInvariants() const {
  // Unresolved merged types: merged types should be defined.
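+  // (Representation note: one resolved component plus a bit vector of unresolved type cache
+  // ids, cf. the constructor above. E.g., merging a hypothetical unresolved LFoo; into
+  // java.lang.Object keeps Object as resolved_part_ and sets the bit for LFoo;'s cache id
+  // in unresolved_types_; LFoo; is a made-up example class.)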
CHECK(descriptor_.empty()) << *this; CHECK(klass_.IsNull()) << *this; - CHECK_NE(merged_types_.first, 0U) << *this; - CHECK_NE(merged_types_.second, 0U) << *this; + CHECK(resolved_part_.IsReferenceTypes()); + CHECK(!resolved_part_.IsUnresolvedTypes()); } void UnresolvedReferenceType::CheckInvariants() const { diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index d08c937a64..2834a9a54a 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -22,6 +22,7 @@ #include <set> #include <string> +#include "base/bit_vector.h" #include "base/macros.h" #include "base/mutex.h" #include "gc_root.h" @@ -112,7 +113,7 @@ class RegType { } // The high half that corresponds to this low half const RegType& HighHalf(RegTypeCache* cache) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsConstantBoolean() const; virtual bool IsConstantChar() const { return false; } @@ -165,20 +166,20 @@ class RegType { return result; } virtual bool HasClassVirtual() const { return false; } - bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + bool IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_); + bool IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_); + bool IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_); Primitive::Type GetPrimitiveType() const; bool IsJavaLangObjectArray() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_); const std::string& GetDescriptor() const { DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass())); return descriptor_; } - mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!IsUnresolvedReference()); DCHECK(!klass_.IsNull()) << Dump(); DCHECK(HasClass()); @@ -186,25 +187,25 @@ class RegType { } uint16_t GetId() const { return cache_id_; } const RegType& GetSuperClass(RegTypeCache* cache) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); virtual std::string Dump() const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0; + SHARED_REQUIRES(Locks::mutator_lock_) = 0; // Can this type access other? bool CanAccess(const RegType& other) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this type access a member with the given properties? bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this type be assigned by src? // Note: Object and interface types may always be assigned to one another, see // comment on // ClassJoin. bool IsAssignableFrom(const RegType& src) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this array type potentially be assigned by src. // This function is necessary as array types are valid even if their components types are not, @@ -215,13 +216,13 @@ class RegType { // (both are reference types). 
bool CanAssignArray(const RegType& src, RegTypeCache& reg_types, Handle<mirror::ClassLoader> class_loader, bool* soft_error) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't // allow assignment to // an interface from an Object. bool IsStrictlyAssignableFrom(const RegType& src) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Are these RegTypes the same? bool Equals(const RegType& other) const { return GetId() == other.GetId(); } @@ -229,7 +230,15 @@ class RegType { // Compute the merge of this register from one edge (path) with incoming_type // from another. const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + // Same as above, but also handles the case where incoming_type == this. + const RegType& SafeMerge(const RegType& incoming_type, RegTypeCache* reg_types) const + SHARED_REQUIRES(Locks::mutator_lock_) { + if (Equals(incoming_type)) { + return *this; + } + return Merge(incoming_type, reg_types); + } /* * A basic Join operation on classes. For a pair of types S and T the Join, @@ -258,23 +267,23 @@ class RegType { * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy */ static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); virtual ~RegType() {} void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); protected: RegType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) { if (kIsDebugBuild) { CheckInvariants(); } } - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); const std::string descriptor_; mutable GcRoot<mirror::Class> @@ -285,7 +294,7 @@ class RegType { private: static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(RegType); }; @@ -295,7 +304,7 @@ class ConflictType FINAL : public RegType { public: bool IsConflict() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); // Get the singleton Conflict instance. static const ConflictType* GetInstance() PURE; @@ -304,14 +313,14 @@ class ConflictType FINAL : public RegType { static const ConflictType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Destroy the singleton instance. 
static void Destroy(); private: ConflictType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) {} static const ConflictType* instance_; @@ -324,7 +333,7 @@ class UndefinedType FINAL : public RegType { public: bool IsUndefined() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); // Get the singleton Undefined instance. static const UndefinedType* GetInstance() PURE; @@ -333,14 +342,14 @@ class UndefinedType FINAL : public RegType { static const UndefinedType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Destroy the singleton instance. static void Destroy(); private: UndefinedType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) {} static const UndefinedType* instance_; @@ -349,7 +358,7 @@ class UndefinedType FINAL : public RegType { class PrimitiveType : public RegType { public: PrimitiveType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_); bool HasClassVirtual() const OVERRIDE { return true; } }; @@ -357,23 +366,23 @@ class PrimitiveType : public RegType { class Cat1Type : public PrimitiveType { public: Cat1Type(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_); }; class IntegerType : public Cat1Type { public: bool IsInteger() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const IntegerType* GetInstance() PURE; static void Destroy(); private: IntegerType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const IntegerType* instance_; }; @@ -381,17 +390,17 @@ class IntegerType : public Cat1Type { class BooleanType FINAL : public Cat1Type { public: bool IsBoolean() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const BooleanType* GetInstance() PURE; static void Destroy(); private: BooleanType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, 
descriptor, cache_id) {} static const BooleanType* instance_; @@ -400,17 +409,17 @@ class BooleanType FINAL : public Cat1Type { class ByteType FINAL : public Cat1Type { public: bool IsByte() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const ByteType* GetInstance() PURE; static void Destroy(); private: ByteType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const ByteType* instance_; }; @@ -418,17 +427,17 @@ class ByteType FINAL : public Cat1Type { class ShortType FINAL : public Cat1Type { public: bool IsShort() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const ShortType* GetInstance() PURE; static void Destroy(); private: ShortType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const ShortType* instance_; }; @@ -436,17 +445,17 @@ class ShortType FINAL : public Cat1Type { class CharType FINAL : public Cat1Type { public: bool IsChar() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const CharType* GetInstance() PURE; static void Destroy(); private: CharType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const CharType* instance_; }; @@ -454,17 +463,17 @@ class CharType FINAL : public Cat1Type { class FloatType FINAL : public Cat1Type { public: bool IsFloat() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); static const FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const FloatType* GetInstance() PURE; static void Destroy(); private: FloatType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat1Type(klass, descriptor, cache_id) {} static const FloatType* instance_; }; @@ -472,86 +481,86 @@ class FloatType FINAL : public Cat1Type { 
class Cat2Type : public PrimitiveType { public: Cat2Type(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_); }; class LongLoType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); bool IsLongLo() const OVERRIDE { return true; } bool IsLong() const OVERRIDE { return true; } static const LongLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const LongLoType* GetInstance() PURE; static void Destroy(); private: LongLoType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const LongLoType* instance_; }; class LongHiType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); bool IsLongHi() const OVERRIDE { return true; } static const LongHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const LongHiType* GetInstance() PURE; static void Destroy(); private: LongHiType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const LongHiType* instance_; }; class DoubleLoType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); bool IsDoubleLo() const OVERRIDE { return true; } bool IsDouble() const OVERRIDE { return true; } static const DoubleLoType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const DoubleLoType* GetInstance() PURE; static void Destroy(); private: DoubleLoType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const DoubleLoType* instance_; }; class DoubleHiType FINAL : public Cat2Type { public: - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); virtual bool IsDoubleHi() const OVERRIDE { return true; } static const DoubleHiType* CreateInstance(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static const DoubleHiType* GetInstance() PURE; static void Destroy(); private: DoubleHiType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : Cat2Type(klass, descriptor, cache_id) {} static const 
DoubleHiType* instance_; }; class ConstantType : public RegType { public: - ConstantType(uint32_t constant, uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + ConstantType(uint32_t constant, uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(nullptr, "", cache_id), constant_(constant) { } @@ -609,58 +618,58 @@ class ConstantType : public RegType { class PreciseConstType FINAL : public ConstantType { public: PreciseConstType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsPreciseConstant() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class PreciseConstLoType FINAL : public ConstantType { public: PreciseConstLoType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsPreciseConstantLo() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class PreciseConstHiType FINAL : public ConstantType { public: PreciseConstHiType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsPreciseConstantHi() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class ImpreciseConstType FINAL : public ConstantType { public: ImpreciseConstType(uint32_t constat, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constat, cache_id) { } bool IsImpreciseConstant() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class ImpreciseConstLoType FINAL : public ConstantType { public: ImpreciseConstLoType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsImpreciseConstantLo() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; class ImpreciseConstHiType FINAL : public ConstantType { public: ImpreciseConstHiType(uint32_t constant, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : ConstantType(constant, cache_id) {} bool IsImpreciseConstantHi() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // Common parent of all uninitialized types. 
Uninitialized types are created by @@ -690,14 +699,14 @@ class UninitializedReferenceType FINAL : public UninitializedType { UninitializedReferenceType(mirror::Class* klass, const std::string& descriptor, uint32_t allocation_pc, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(klass, descriptor, allocation_pc, cache_id) {} bool IsUninitializedReference() const OVERRIDE { return true; } bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // Similar to UnresolvedReferenceType but not yet having been passed to a @@ -706,7 +715,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { public: UnresolvedUninitializedRefType(const std::string& descriptor, uint32_t allocation_pc, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -717,10 +726,10 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; // Similar to UninitializedReferenceType but special case for the this argument @@ -730,7 +739,7 @@ class UninitializedThisReferenceType FINAL : public UninitializedType { UninitializedThisReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(klass, descriptor, 0, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -741,17 +750,17 @@ class UninitializedThisReferenceType FINAL : public UninitializedType { bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { public: UnresolvedUninitializedThisRefType(const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UninitializedType(nullptr, descriptor, 0, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -762,10 +771,10 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; // A type of register holding a reference to an Object of type GetClass or a @@ -773,7 +782,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { class ReferenceType FINAL 
: public RegType { public: ReferenceType(mirror::Class* klass, const std::string& descriptor, - uint16_t cache_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_) : RegType(klass, descriptor, cache_id) {} bool IsReference() const OVERRIDE { return true; } @@ -782,7 +791,7 @@ class ReferenceType FINAL : public RegType { bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // A type of register holding a reference to an Object of type GetClass and only @@ -792,7 +801,7 @@ class PreciseReferenceType FINAL : public RegType { public: PreciseReferenceType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool IsPreciseReference() const OVERRIDE { return true; } @@ -800,14 +809,14 @@ class PreciseReferenceType FINAL : public RegType { bool HasClassVirtual() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); }; // Common parent of unresolved types. class UnresolvedType : public RegType { public: UnresolvedType(const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : RegType(nullptr, descriptor, cache_id) {} bool IsNonZeroReferenceTypes() const OVERRIDE; @@ -819,7 +828,7 @@ class UnresolvedType : public RegType { class UnresolvedReferenceType FINAL : public UnresolvedType { public: UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) { if (kIsDebugBuild) { CheckInvariants(); @@ -830,10 +839,10 @@ class UnresolvedReferenceType FINAL : public UnresolvedType { bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); }; // Type representing the super-class of an unresolved type. @@ -841,7 +850,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType { public: UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : UnresolvedType("", cache_id), unresolved_child_id_(child_id), reg_type_cache_(reg_type_cache) { @@ -859,55 +868,57 @@ class UnresolvedSuperClass FINAL : public UnresolvedType { return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF); } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); const uint16_t unresolved_child_id_; const RegTypeCache* const reg_type_cache_; }; -// A merge of two unresolved types. If the types were resolved this may be -// Conflict or another -// known ReferenceType. 
+// A merge of unresolved (and resolved) types. If the types were resolved this may be +// Conflict or another known ReferenceType. class UnresolvedMergedType FINAL : public UnresolvedType { public: - UnresolvedMergedType(uint16_t left_id, uint16_t right_id, + // Note: the constructor will copy the unresolved BitVector, not use it directly. + UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved, const RegTypeCache* reg_type_cache, uint16_t cache_id) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : UnresolvedType("", cache_id), - reg_type_cache_(reg_type_cache), - merged_types_(left_id, right_id) { - if (kIsDebugBuild) { - CheckInvariants(); - } - } + SHARED_REQUIRES(Locks::mutator_lock_); - // The top of a tree of merged types. - std::pair<uint16_t, uint16_t> GetTopMergedTypes() const { - DCHECK(IsUnresolvedMergedReference()); - return merged_types_; + // The resolved part. See description below. + const RegType& GetResolvedPart() const { + return resolved_part_; + } + // The unresolved part. + const BitVector& GetUnresolvedTypes() const { + return unresolved_types_; } - - // The complete set of merged types. - std::set<uint16_t> GetMergedTypes() const; bool IsUnresolvedMergedReference() const OVERRIDE { return true; } bool IsUnresolvedTypes() const OVERRIDE { return true; } - std::string Dump() const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_); private: - void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_); const RegTypeCache* const reg_type_cache_; - const std::pair<uint16_t, uint16_t> merged_types_; + + // The original implementation of merged types was a binary tree. Collection of the flattened + // types ("leaves") can be expensive, so we store the expanded list now, as two components: + // 1) A resolved component. We use Zero when there is no resolved component, as that will be + // an identity merge. + // 2) A bitvector of the unresolved reference types. A bitvector was chosen with the assumption + // that there should not be too many types in flight in practice. (We also bias the index + // against the index of Zero, which is one of the later default entries in any cache.) + const RegType& resolved_part_; + const BitVector unresolved_types_; }; std::ostream& operator<<(std::ostream& os, const RegType& rhs) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); } // namespace verifier } // namespace art diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index b371d7e391..e14306c0ae 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -31,7 +31,7 @@ uint16_t RegTypeCache::primitive_count_ = 0; const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1]; static bool MatchingPrecisionForClass(const RegType* entry, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_) { if (entry->IsPreciseReference() == precise) { // We were or weren't looking for a precise reference and we found what we need. return true; @@ -317,39 +317,62 @@ void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() { } const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) { - std::set<uint16_t> types; + BitVector types(1, // Allocate at least a word. + true, // Is expandable. 
+ Allocator::GetMallocAllocator()); // TODO: Arenas in the verifier. + const RegType* left_resolved; if (left.IsUnresolvedMergedReference()) { - RegType& non_const(const_cast<RegType&>(left)); - types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes(); + const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left); + types.Copy(&left_merge->GetUnresolvedTypes()); + left_resolved = &left_merge->GetResolvedPart(); + } else if (left.IsUnresolvedTypes()) { + types.SetBit(left.GetId()); + left_resolved = &Zero(); } else { - types.insert(left.GetId()); + left_resolved = &left; } + + const RegType* right_resolved; if (right.IsUnresolvedMergedReference()) { - RegType& non_const(const_cast<RegType&>(right)); - std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes(); - types.insert(right_types.begin(), right_types.end()); + const UnresolvedMergedType* right_merge = down_cast<const UnresolvedMergedType*>(&right); + types.Union(&right_merge->GetUnresolvedTypes()); + right_resolved = &right_merge->GetResolvedPart(); + } else if (right.IsUnresolvedTypes()) { + types.SetBit(right.GetId()); + right_resolved = &Zero(); } else { - types.insert(right.GetId()); + right_resolved = &right; + } + + // Merge the resolved parts. Left and right might be equal, so use SafeMerge. + const RegType& resolved_parts_merged = left_resolved->SafeMerge(*right_resolved, this); + // If we get a conflict here, the merge result is a conflict, not an unresolved merge type. + if (resolved_parts_merged.IsConflict()) { + return Conflict(); } + // Check if entry already exists. for (size_t i = primitive_count_; i < entries_.size(); i++) { const RegType* cur_entry = entries_[i]; if (cur_entry->IsUnresolvedMergedReference()) { - std::set<uint16_t> cur_entry_types = - (down_cast<const UnresolvedMergedType*>(cur_entry))->GetMergedTypes(); - if (cur_entry_types == types) { + const UnresolvedMergedType* cmp_type = down_cast<const UnresolvedMergedType*>(cur_entry); + const RegType& resolved_part = cmp_type->GetResolvedPart(); + const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes(); + // Use SameBitsSet. "types" is expandable to allow merging in the components, but the + // BitVector in the final RegType will be made non-expandable. + if (&resolved_part == &resolved_parts_merged && + types.SameBitsSet(&unresolved_part)) { return *cur_entry; } } } + // Create entry. - RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size()); + RegType* entry = new UnresolvedMergedType(resolved_parts_merged, + types, + this, + entries_.size()); AddEntry(entry); - if (kIsDebugBuild) { - UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry); - std::set<uint16_t> check_types = tmp_entry->GetMergedTypes(); - CHECK(check_types == types); - } return *entry; } @@ -427,9 +450,18 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) { } } entry = new ReferenceType(klass, "", entries_.size()); - } else if (klass->IsInstantiable()) { + } else if (!klass->IsPrimitive()) { // We're uninitialized because of allocation, look or create a precise type as allocations // may only create objects of that type. + // Note: we do not check whether the given klass is actually instantiable (besides being + // primitive), that is, we allow interfaces and abstract classes here. 
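FromUnresolvedMerge above flattens both inputs into two components: a resolved part, merged via SafeMerge with Zero() as the identity element, and a bit vector holding the union of the unresolved type ids; if the resolved sub-merge yields Conflict, the whole merge is Conflict. A compact sketch of that bookkeeping, with a toy MergedType and std::bitset standing in for the real RegType and BitVector (ids and bounds are illustrative):

#include <bitset>
#include <cassert>
#include <cstdint>

constexpr uint16_t kZeroId = 0;   // Hypothetical cache id for the Zero/null type.
constexpr size_t kMaxIds = 1024;  // Toy bound; the real BitVector is expandable.

struct MergedType {
  uint16_t resolved = kZeroId;      // kZeroId when there is no resolved component.
  std::bitset<kMaxIds> unresolved;  // Ids of the unresolved leaves.
};

// Union the unresolved parts and carry the pre-merged resolved part. The real
// code computes resolved_merge with SafeMerge first and returns Conflict
// outright if that sub-merge conflicts.
MergedType MergeUnresolved(const MergedType& left, const MergedType& right,
                           uint16_t resolved_merge) {
  MergedType result;
  result.resolved = resolved_merge;
  result.unresolved = left.unresolved | right.unresolved;
  return result;
}

int main() {
  MergedType a, b;
  a.unresolved.set(17);  // e.g. an unresolved java.lang.DoesNotExist
  b.unresolved.set(23);  // e.g. an unresolved java.lang.DoesNotExistEither
  MergedType m = MergeUnresolved(a, b, kZeroId);
  assert(m.resolved == kZeroId && m.unresolved.test(17) && m.unresolved.test(23));
  return 0;
}

Cache deduplication then compares both components, which is what the SameBitsSet() check in the lookup loop above does in place of the old std::set comparison.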
The reasoning is + // twofold: + // 1) The "new-instance" instruction to generate the uninitialized type will already + // queue an instantiation error. This is a soft error that must be thrown at runtime, + // and could potentially change if the class is resolved differently at runtime. + // 2) Checking whether the klass is instantiable and using conflict may produce a hard + // error when the value is used, which leads to a VerifyError, which is not the + // correct semantics. for (size_t i = primitive_count_; i < entries_.size(); i++) { const RegType* cur_entry = entries_[i]; if (cur_entry->IsPreciseReference() && cur_entry->GetClass() == klass) { diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h index 4b3105c3da..8319de6b28 100644 --- a/runtime/verifier/reg_type_cache.h +++ b/runtime/verifier/reg_type_cache.h @@ -42,7 +42,7 @@ class RegTypeCache { public: explicit RegTypeCache(bool can_load_classes); ~RegTypeCache(); - static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + static void Init() SHARED_REQUIRES(Locks::mutator_lock_) { if (!RegTypeCache::primitive_initialized_) { CHECK_EQ(RegTypeCache::primitive_count_, 0); CreatePrimitiveAndSmallConstantTypes(); @@ -53,110 +53,110 @@ class RegTypeCache { static void ShutDown(); const art::verifier::RegType& GetFromId(uint16_t id) const; const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat1Const(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat2ConstLo(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat2ConstHi(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromUnresolvedSuperClass(const RegType& child) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SHARED_REQUIRES(Locks::mutator_lock_); + const ConstantType& Zero() SHARED_REQUIRES(Locks::mutator_lock_) { return FromCat1Const(0, true); } - const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const ConstantType& One() SHARED_REQUIRES(Locks::mutator_lock_) { return FromCat1Const(1, true); } size_t GetCacheSize() { return entries_.size(); } - const BooleanType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const BooleanType& Boolean() SHARED_REQUIRES(Locks::mutator_lock_) { return *BooleanType::GetInstance(); } - const ByteType& Byte() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const ByteType& Byte() SHARED_REQUIRES(Locks::mutator_lock_) { return *ByteType::GetInstance(); } - const CharType& Char() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const CharType& Char() SHARED_REQUIRES(Locks::mutator_lock_) { 
return *CharType::GetInstance(); } - const ShortType& Short() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const ShortType& Short() SHARED_REQUIRES(Locks::mutator_lock_) { return *ShortType::GetInstance(); } - const IntegerType& Integer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const IntegerType& Integer() SHARED_REQUIRES(Locks::mutator_lock_) { return *IntegerType::GetInstance(); } - const FloatType& Float() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const FloatType& Float() SHARED_REQUIRES(Locks::mutator_lock_) { return *FloatType::GetInstance(); } - const LongLoType& LongLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const LongLoType& LongLo() SHARED_REQUIRES(Locks::mutator_lock_) { return *LongLoType::GetInstance(); } - const LongHiType& LongHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const LongHiType& LongHi() SHARED_REQUIRES(Locks::mutator_lock_) { return *LongHiType::GetInstance(); } - const DoubleLoType& DoubleLo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DoubleLoType& DoubleLo() SHARED_REQUIRES(Locks::mutator_lock_) { return *DoubleLoType::GetInstance(); } - const DoubleHiType& DoubleHi() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DoubleHiType& DoubleHi() SHARED_REQUIRES(Locks::mutator_lock_) { return *DoubleHiType::GetInstance(); } - const UndefinedType& Undefined() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const UndefinedType& Undefined() SHARED_REQUIRES(Locks::mutator_lock_) { return *UndefinedType::GetInstance(); } const ConflictType& Conflict() { return *ConflictType::GetInstance(); } - const PreciseReferenceType& JavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const PreciseReferenceType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& JavaLangThrowable(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const PreciseReferenceType& JavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_); + const PreciseReferenceType& JavaLangString() SHARED_REQUIRES(Locks::mutator_lock_); + const RegType& JavaLangThrowable(bool precise) SHARED_REQUIRES(Locks::mutator_lock_); + const RegType& JavaLangObject(bool precise) SHARED_REQUIRES(Locks::mutator_lock_); const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Create an uninitialized 'this' argument for the given type. 
const UninitializedType& UninitializedThisArgument(const RegType& type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const RegType& FromUninitialized(const RegType& uninit_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& ByteConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& CharConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& ShortConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& IntConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& PosByteConstant() SHARED_REQUIRES(Locks::mutator_lock_); + const ImpreciseConstType& PosShortConstant() SHARED_REQUIRES(Locks::mutator_lock_); const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_); const RegType& RegTypeFromPrimitiveType(Primitive::Type) const; void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static void VisitStaticRoots(RootVisitor* visitor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); private: - void FillPrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void FillPrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_); mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void AddEntry(RegType* new_entry); template <class Type> static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void CreatePrimitiveAndSmallConstantTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); + static void CreatePrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_); // A quick look up for popular small constants. 
static constexpr int32_t kMinSmallConstant = -1; diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index 2fecc8b25f..971b1f5d0f 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -18,6 +18,7 @@ #include <set> +#include "base/bit_vector.h" #include "base/casts.h" #include "common_runtime_test.h" #include "reg_type_cache-inl.h" @@ -421,7 +422,7 @@ TEST_F(RegTypeReferenceTest, Dump) { EXPECT_EQ(expected, resolved_unintialiesd.Dump()); expected = "Unresolved And Uninitialized Reference: java.lang.DoesNotExist Allocation PC: 12"; EXPECT_EQ(expected, unresolved_unintialized.Dump()); - expected = "UnresolvedMergedReferences(Unresolved Reference: java.lang.DoesNotExist, Unresolved Reference: java.lang.DoesNotExistEither)"; + expected = "UnresolvedMergedReferences(Zero/null | Unresolved Reference: java.lang.DoesNotExist, Unresolved Reference: java.lang.DoesNotExistEither)"; EXPECT_EQ(expected, unresolved_merged.Dump()); } @@ -477,9 +478,10 @@ TEST_F(RegTypeReferenceTest, Merging) { EXPECT_TRUE(merged.IsUnresolvedMergedReference()); RegType& merged_nonconst = const_cast<RegType&>(merged); - std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes(); - EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin())); - EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin()))); + const BitVector& unresolved_parts = + down_cast<UnresolvedMergedType*>(&merged_nonconst)->GetUnresolvedTypes(); + EXPECT_TRUE(unresolved_parts.IsBitSet(ref_type_0.GetId())); + EXPECT_TRUE(unresolved_parts.IsBitSet(ref_type_1.GetId())); } TEST_F(RegTypeTest, MergingFloat) { diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h index 244deedaf4..9cd2bdffaa 100644 --- a/runtime/verifier/register_line-inl.h +++ b/runtime/verifier/register_line-inl.h @@ -38,10 +38,9 @@ inline bool RegisterLine::SetRegisterType(MethodVerifier* verifier, uint32_t vds verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '" << new_type << "'"; return false; - } else if (new_type.IsConflict()) { // should only be set during a merge - verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Set register to unknown type " << new_type; - return false; } else { + // Note: previously we failed when asked to set a conflict. However, conflicts are OK as long + // as they are not accessed, and our backends can handle this nowadays. line_[vdst] = new_type.GetId(); } // Clear the monitor entry bits for this register. @@ -93,8 +92,9 @@ inline void RegisterLine::CopyRegister1(MethodVerifier* verifier, uint32_t vdst, if (!SetRegisterType(verifier, vdst, type)) { return; } - if ((cat == kTypeCategory1nr && !type.IsCategory1Types()) || - (cat == kTypeCategoryRef && !type.IsReferenceTypes())) { + if (!type.IsConflict() && // Allow conflicts to be copied around. 
+ ((cat == kTypeCategory1nr && !type.IsCategory1Types()) || + (cat == kTypeCategoryRef && !type.IsReferenceTypes()))) { verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy1 v" << vdst << "<-v" << vsrc << " type=" << type << " cat=" << static_cast<int>(cat); } else if (cat == kTypeCategoryRef) { diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc index 2838681f4f..f286a453b1 100644 --- a/runtime/verifier/register_line.cc +++ b/runtime/verifier/register_line.cc @@ -18,66 +18,30 @@ #include "base/stringprintf.h" #include "dex_instruction-inl.h" -#include "method_verifier.h" +#include "method_verifier-inl.h" #include "register_line-inl.h" #include "reg_type-inl.h" namespace art { namespace verifier { -bool RegisterLine::WasUninitializedThisOverwritten(MethodVerifier* verifier, - size_t this_loc, - bool was_invoke_direct) const { - DCHECK(verifier->IsConstructor()); - - // Is the UnintializedThis type still there? - if (GetRegisterType(verifier, this_loc).IsUninitializedThisReference() || - GetRegisterType(verifier, this_loc).IsUnresolvedAndUninitializedThisReference()) { - return false; - } - - // If there is an initialized reference here now, did we just perform an invoke-direct? Note that - // this is the correct approach for dex bytecode: results of invoke-direct are stored in the - // result register. Overwriting "this_loc" can only be done by a constructor call. - if (GetRegisterType(verifier, this_loc).IsReferenceTypes() && was_invoke_direct) { - return false; - // Otherwise we could have just copied a different initialized reference to this location. - } - - // The UnintializedThis in the register is gone, so check to see if it's somewhere else now. - for (size_t i = 0; i < num_regs_; i++) { - if (GetRegisterType(verifier, i).IsUninitializedThisReference() || - GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) { - // We found it somewhere else... - return false; - } - } - - // The UninitializedThis is gone from the original register, and now we can't find it. - return true; -} - -bool RegisterLine::GetUninitializedThisLoc(MethodVerifier* verifier, size_t* vreg) const { - for (size_t i = 0; i < num_regs_; i++) { - if (GetRegisterType(verifier, i).IsUninitializedThisReference() || - GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) { - *vreg = i; - return true; - } - } - return false; -} - bool RegisterLine::CheckConstructorReturn(MethodVerifier* verifier) const { - for (size_t i = 0; i < num_regs_; i++) { - if (GetRegisterType(verifier, i).IsUninitializedThisReference() || - GetRegisterType(verifier, i).IsUnresolvedAndUninitializedThisReference()) { - verifier->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) - << "Constructor returning without calling superclass constructor"; - return false; + if (kIsDebugBuild && this_initialized_) { + // Ensure that there is no UninitializedThisReference type anymore if this_initialized_ is true. 
+ for (size_t i = 0; i < num_regs_; i++) { + const RegType& type = GetRegisterType(verifier, i); + CHECK(!type.IsUninitializedThisReference() && + !type.IsUnresolvedAndUninitializedThisReference()) + << i << ": " << type.IsUninitializedThisReference() << " in " + << PrettyMethod(verifier->GetMethodReference().dex_method_index, + *verifier->GetMethodReference().dex_file); } } - return true; + if (!this_initialized_) { + verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) + << "Constructor returning without calling superclass constructor"; + } + return this_initialized_; } const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst, @@ -148,6 +112,11 @@ void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType } } } + // Is this initializing "this"? + if (uninit_type.IsUninitializedThisReference() || + uninit_type.IsUnresolvedAndUninitializedThisReference()) { + this_initialized_ = true; + } DCHECK_GT(changed, 0u); } @@ -432,6 +401,11 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* } } } + // Check whether "this" was initialized in both paths. + if (this_initialized_ && !incoming_line->this_initialized_) { + this_initialized_ = false; + changed = true; + } return changed; } diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h index 0de0d9ce0f..f61e51fb23 100644 --- a/runtime/verifier/register_line.h +++ b/runtime/verifier/register_line.h @@ -60,63 +60,64 @@ class RegisterLine { // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst". void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This // copies both halves of the register. void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Implement "move-result". Copy the category-1 value from the result register to another // register, and reset the result register. void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Implement "move-result-wide". Copy the category-2 value from the result register to another // register, and reset the result register. void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Set the invisible result register to unknown - void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_REQUIRES(Locks::mutator_lock_); // Set the type of register N, verifying that the register is valid. If "newType" is the "Lo" // part of a 64-bit value, register N+1 will be set to "newType+1". // The register index was validated during the static pass, so we don't need to check it here. 
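Beyond the annotation renames, the register_line changes above replace per-register scanning with an explicit flag: this_initialized_ is set when the uninitialized "this" is constructed (MarkRefsAsInitialized), copied along with the line, cleared on merge when any incoming path lacks it, and checked when a constructor returns. A toy model of that lifecycle (hypothetical Line type, not ART's RegisterLine):

#include <cassert>

struct Line {
  bool this_initialized = false;

  // Mirrors MarkRefsAsInitialized noticing an UninitializedThisReference.
  void MarkThisInitialized() { this_initialized = true; }

  // Mirrors MergeRegisters: "this" stays initialized only if it was
  // initialized on every incoming path.
  bool MergeFrom(const Line& incoming) {
    if (this_initialized && !incoming.this_initialized) {
      this_initialized = false;
      return true;  // Changed: the verifier re-processes the block.
    }
    return false;
  }

  // Mirrors CheckConstructorReturn: now a hard failure when false.
  bool CheckConstructorReturn() const { return this_initialized; }
};

int main() {
  Line merged, other_path;
  merged.MarkThisInitialized();  // super() was called on this path...
  merged.MergeFrom(other_path);  // ...but not on the merged-in one.
  assert(!merged.CheckConstructorReturn());
  return 0;
}

Note also the related relaxation in register_line-inl.h above: a Conflict may now be stored and copied between registers, and only an actual use of the conflicting value fails verification.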
ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1, const RegType& new_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* Set the type of the "result" register. */ void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Get the type of register vsrc. const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const; ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1, const RegType& check_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CopyFromLine(const RegisterLine* src) { DCHECK_EQ(num_regs_, src->num_regs_); memcpy(&line_, &src->line_, num_regs_ * sizeof(uint16_t)); monitors_ = src->monitors_; reg_to_lock_depths_ = src->reg_to_lock_depths_; + this_initialized_ = src->this_initialized_; } - std::string Dump(MethodVerifier* verifier) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_); void FillWithGarbage() { memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t)); @@ -131,7 +132,7 @@ class RegisterLine { * the new ones at the same time). */ void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Update all registers holding "uninit_type" to instead hold the corresponding initialized @@ -140,7 +141,7 @@ class RegisterLine { */ void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type, uint32_t this_reg, uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Update all registers to be Conflict except vsrc. @@ -149,6 +150,14 @@ class RegisterLine { void MarkAllRegistersAsConflictsExcept(MethodVerifier* verifier, uint32_t vsrc); void MarkAllRegistersAsConflictsExceptWide(MethodVerifier* verifier, uint32_t vsrc); + void SetThisInitialized() { + this_initialized_ = true; + } + + void CopyThisInitialized(const RegisterLine& src) { + this_initialized_ = src.this_initialized_; + } + /* * Check constraints on constructor return. Specifically, make sure that the "this" argument got * initialized. @@ -158,18 +167,6 @@ class RegisterLine { */ bool CheckConstructorReturn(MethodVerifier* verifier) const; - /* - * Check if an UninitializedThis at the specified location has been overwritten before - * being correctly initialized. - */ - bool WasUninitializedThisOverwritten(MethodVerifier* verifier, size_t this_loc, - bool was_invoke_direct) const; - - /* - * Get the first location of an UninitializedThis type, or return kInvalidVreg if there are none. - */ - bool GetUninitializedThisLoc(MethodVerifier* verifier, size_t* vreg) const; - // Compare two register lines. 
Returns 0 if they match. // Using this for a sort is unwise, since the value can change based on machine endianness. int CompareLine(const RegisterLine* line2) const { @@ -194,7 +191,7 @@ class RegisterLine { */ const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst, bool is_range, bool allow_failure = false) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for a simple two-register instruction (e.g. "neg-int"). @@ -202,22 +199,22 @@ class RegisterLine { */ void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type1, const RegType& src_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for a simple three-register instruction (e.g. "add-int"). @@ -227,18 +224,18 @@ class RegisterLine { void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type1_1, const RegType& src_type1_2, const RegType& src_type2_1, const RegType& src_type2_2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst, const RegType& long_lo_type, const RegType& long_hi_type, const RegType& int_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for a binary "2addr" operation. "src_type1"/"src_type2" @@ -248,18 +245,18 @@ class RegisterLine { const RegType& dst_type, const RegType& src_type1, const RegType& src_type2, bool check_boolean_op) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type1, const RegType& dst_type2, const RegType& src_type1_1, const RegType& src_type1_2, const RegType& src_type2_1, const RegType& src_type2_2) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst, const RegType& long_lo_type, const RegType& long_hi_type, const RegType& int_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); /* * Verify types for A two-register instruction with a literal constant (e.g. "add-int/lit8"). 
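The check_boolean_op parameter threaded through the CheckBinaryOp family above covers a subtlety of integral operations: for and/or/xor-style ops, the result may remain boolean when every source operand is boolean. The method bodies are not part of this diff, so the following is a sketch of that rule under that assumption, not ART's actual implementation:

#include <cassert>

enum class Ty { kBoolean, kInteger };

// If the op admits boolean results and all sources are boolean, keep the
// result boolean; otherwise fall back to the declared destination type.
Ty ResultType(Ty dst, Ty src1, Ty src2, bool check_boolean_op) {
  if (check_boolean_op && src1 == Ty::kBoolean && src2 == Ty::kBoolean) {
    return Ty::kBoolean;
  }
  return dst;
}

int main() {
  assert(ResultType(Ty::kInteger, Ty::kBoolean, Ty::kBoolean, true) == Ty::kBoolean);
  assert(ResultType(Ty::kInteger, Ty::kBoolean, Ty::kInteger, true) == Ty::kInteger);
  assert(ResultType(Ty::kInteger, Ty::kBoolean, Ty::kBoolean, false) == Ty::kInteger);
  return 0;
}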
@@ -270,15 +267,15 @@ class RegisterLine { void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type, const RegType& src_type, bool check_boolean_op, bool is_lit16) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx. void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Stack of currently held monitors and where they were locked size_t MonitorStackDepth() const { @@ -290,7 +287,7 @@ class RegisterLine { bool VerifyMonitorStackEmpty(MethodVerifier* verifier) const; bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); size_t GetMaxNonZeroReferenceReg(MethodVerifier* verifier, size_t max_ref_reg) const; @@ -354,7 +351,7 @@ class RegisterLine { } RegisterLine(size_t num_regs, MethodVerifier* verifier) - : num_regs_(num_regs) { + : num_regs_(num_regs), this_initialized_(false) { memset(&line_, 0, num_regs_ * sizeof(uint16_t)); SetResultTypeToUnknown(verifier); } @@ -372,6 +369,9 @@ class RegisterLine { // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5. AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_; + // Whether "this" initialization (a constructor supercall) has happened. + bool this_initialized_; + // An array of RegType Ids associated with each dex register. uint16_t line_[0]; diff --git a/runtime/verifier/verify_mode.h b/runtime/verifier/verify_mode.h new file mode 100644 index 0000000000..bea43787c1 --- /dev/null +++ b/runtime/verifier/verify_mode.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_VERIFIER_VERIFY_MODE_H_ +#define ART_RUNTIME_VERIFIER_VERIFY_MODE_H_ + +#include <stdint.h> + +namespace art { +namespace verifier { + +// The mode that the verifier should run as. +enum class VerifyMode : int8_t { + kNone, // Everything is assumed verified. + kEnable, // Standard verification, try pre-verifying at compile-time. + kSoftFail, // Force a soft fail, punting to the interpreter with access checks. 
+}; + +} // namespace verifier +} // namespace art + +#endif // ART_RUNTIME_VERIFIER_VERIFY_MODE_H_ diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h index 66b9abece7..6dd8168cbf 100644 --- a/runtime/well_known_classes.h +++ b/runtime/well_known_classes.h @@ -38,7 +38,7 @@ struct WellKnownClasses { static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init); static mirror::Class* ToClass(jclass global_jclass) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); static jclass com_android_dex_Dex; static jclass dalvik_system_DexFile; diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc index 1391d147a7..c984b17c2c 100644 --- a/sigchainlib/sigchain.cc +++ b/sigchainlib/sigchain.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include <android/log.h> #else #include <stdarg.h> @@ -103,7 +103,7 @@ static void log(const char* format, ...) { va_list ap; va_start(ap, format); vsnprintf(buf, sizeof(buf), format, ap); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf); #else std::cout << buf << "\n"; @@ -337,14 +337,16 @@ extern "C" void SetSpecialSignalHandlerFn(int signal, SpecialSignalHandlerFn fn) // In case the chain isn't claimed, claim it for ourself so we can ensure the managed handler // goes first. if (!user_sigactions[signal].IsClaimed()) { - struct sigaction tmp; - tmp.sa_sigaction = sigchainlib_managed_handler_sigaction; - sigemptyset(&tmp.sa_mask); - tmp.sa_flags = SA_SIGINFO | SA_ONSTACK; + struct sigaction act, old_act; + act.sa_sigaction = sigchainlib_managed_handler_sigaction; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO | SA_ONSTACK; #if !defined(__APPLE__) && !defined(__mips__) - tmp.sa_restorer = nullptr; + act.sa_restorer = nullptr; #endif - user_sigactions[signal].Claim(tmp); + if (sigaction(signal, &act, &old_act) != -1) { + user_sigactions[signal].Claim(old_act); + } } } diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc index 8495a5417f..dfe0c6f981 100644 --- a/sigchainlib/sigchain_dummy.cc +++ b/sigchainlib/sigchain_dummy.cc @@ -17,7 +17,7 @@ #include <stdio.h> #include <stdlib.h> -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ #include <android/log.h> #else #include <stdarg.h> @@ -38,7 +38,7 @@ static void log(const char* format, ...) { va_list ap; va_start(ap, format); vsnprintf(buf, sizeof(buf), format, ap); -#ifdef HAVE_ANDROID_OS +#ifdef __ANDROID__ __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf); #else std::cout << buf << "\n"; diff --git a/test/003-omnibus-opcodes/build b/test/003-omnibus-opcodes/build index f909fb2219..faa298337c 100644 --- a/test/003-omnibus-opcodes/build +++ b/test/003-omnibus-opcodes/build @@ -22,5 +22,10 @@ ${JAVAC} -d classes `find src -name '*.java'` rm classes/UnresClass.class ${JAVAC} -d classes `find src2 -name '*.java'` -${DX} -JXmx256m --debug --dex --output=classes.dex classes +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + ${JACK} --import classes.jack --output-dex . 
+else + ${DX} -JXmx256m --debug --dex --output=classes.dex classes + fi zip $TEST_NAME.jar classes.dex diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc index ca256ec034..db0dd32771 100644 --- a/test/004-JniTest/jni_test.cc +++ b/test/004-JniTest/jni_test.cc @@ -626,3 +626,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testNewStringObject(JNIEnv* env, jcl assert(strcmp(test_array, chars6) == 0); env->ReleaseStringUTFChars(s6, chars6); } + +extern "C" JNIEXPORT jlong JNICALL Java_Main_testGetMethodID(JNIEnv* env, jclass, jclass c) { + return reinterpret_cast<jlong>(env->GetMethodID(c, "a", "()V")); +} diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java index ac204175bf..810dda0e71 100644 --- a/test/004-JniTest/src/Main.java +++ b/test/004-JniTest/src/Main.java @@ -14,7 +14,9 @@ * limitations under the License. */ +import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; +import java.lang.reflect.Proxy; public class Main { public static void main(String[] args) { @@ -35,6 +37,7 @@ public class Main { testCallNonvirtual(); testNewStringObject(); testRemoveLocalObject(); + testProxyGetMethodID(); } private static native void testFindClassOnAttachedNativeThread(); @@ -194,6 +197,31 @@ public class Main { private static native void testCallNonvirtual(); private static native void testNewStringObject(); + + private interface SimpleInterface { + void a(); + } + + private static class DummyInvocationHandler implements InvocationHandler { + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + return null; + } + } + + private static void testProxyGetMethodID() { + InvocationHandler handler = new DummyInvocationHandler(); + SimpleInterface proxy = + (SimpleInterface) Proxy.newProxyInstance(SimpleInterface.class.getClassLoader(), + new Class[] {SimpleInterface.class}, handler); + if (testGetMethodID(SimpleInterface.class) == 0) { + throw new AssertionError(); + } + if (testGetMethodID(proxy.getClass()) == 0) { + throw new AssertionError(); + } + } + + private static native long testGetMethodID(Class<?> c); } class JniCallNonvirtualTest { diff --git a/test/004-ReferenceMap/build b/test/004-ReferenceMap/build new file mode 100644 index 0000000000..08987b556c --- /dev/null +++ b/test/004-ReferenceMap/build @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Stop if something fails. 
+set -e + +# The test relies on DEX file produced by javac+dx so keep building with them for now +# (see b/19467889) +mkdir classes +${JAVAC} -d classes `find src -name '*.java'` +${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \ + --dump-width=1000 ${DX_FLAGS} classes +zip $TEST_NAME.jar classes.dex diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc index e626e48be9..767e1de68f 100644 --- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc @@ -29,10 +29,10 @@ namespace art { } while (false); struct ReferenceMap2Visitor : public CheckReferenceMapVisitor { - explicit ReferenceMap2Visitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit ReferenceMap2Visitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : CheckReferenceMapVisitor(thread) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (CheckReferenceMapVisitor::VisitFrame()) { return true; } diff --git a/test/004-StackWalk/build b/test/004-StackWalk/build new file mode 100644 index 0000000000..08987b556c --- /dev/null +++ b/test/004-StackWalk/build @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Stop if something fails. +set -e + +# The test relies on DEX file produced by javac+dx so keep building with them for now +# (see b/19467889) +mkdir classes +${JAVAC} -d classes `find src -name '*.java'` +${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \ + --dump-width=1000 ${DX_FLAGS} classes +zip $TEST_NAME.jar classes.dex diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc index 6b155149a4..3a5854ba96 100644 --- a/test/004-StackWalk/stack_walk_jni.cc +++ b/test/004-StackWalk/stack_walk_jni.cc @@ -29,10 +29,10 @@ static int gJava_StackWalk_refmap_calls = 0; class TestReferenceMapVisitor : public CheckReferenceMapVisitor { public: - explicit TestReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + explicit TestReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) : CheckReferenceMapVisitor(thread) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { if (CheckReferenceMapVisitor::VisitFrame()) { return true; } diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src/Main.java index 6e7d5b6658..d5b389f628 100644 --- a/test/004-ThreadStress/src/Main.java +++ b/test/004-ThreadStress/src/Main.java @@ -32,6 +32,7 @@ import java.util.Set; // // ThreadStress command line parameters: // -n X ............ number of threads +// -d X ............ number of daemon threads // -o X ............ number of overall operations // -t X ............ number of operations per thread // --dumpmap ....... 
print the frequency map @@ -301,6 +302,7 @@ public class Main implements Runnable { public static void parseAndRun(String[] args) throws Exception { int numberOfThreads = -1; + int numberOfDaemons = -1; int totalOperations = -1; int operationsPerThread = -1; Object lock = new Object(); @@ -312,6 +314,9 @@ public class Main implements Runnable { if (args[i].equals("-n")) { i++; numberOfThreads = Integer.parseInt(args[i]); + } else if (args[i].equals("-d")) { + i++; + numberOfDaemons = Integer.parseInt(args[i]); } else if (args[i].equals("-o")) { i++; totalOperations = Integer.parseInt(args[i]); @@ -338,6 +343,10 @@ public class Main implements Runnable { numberOfThreads = 5; } + if (numberOfDaemons == -1) { + numberOfDaemons = 3; + } + if (totalOperations == -1) { totalOperations = 1000; } @@ -355,14 +364,16 @@ public class Main implements Runnable { System.out.println(frequencyMap); } - runTest(numberOfThreads, operationsPerThread, lock, frequencyMap); + runTest(numberOfThreads, numberOfDaemons, operationsPerThread, lock, frequencyMap); } - public static void runTest(final int numberOfThreads, final int operationsPerThread, - final Object lock, Map<Operation, Double> frequencyMap) - throws Exception { - // Each thread is going to do operationsPerThread - // operations. The distribution of operations is determined by + public static void runTest(final int numberOfThreads, final int numberOfDaemons, + final int operationsPerThread, final Object lock, + Map<Operation, Double> frequencyMap) throws Exception { + // Each normal thread is going to do operationsPerThread + // operations. Each daemon thread will loop over all + // the operations and will not stop. + // The distribution of operations is determined by // the Operation.frequency values. We fill out an Operation[] // for each thread with the operations it is to perform. The // Operation[] is shuffled so that there is more random @@ -371,7 +382,9 @@ public class Main implements Runnable { // Fill in the Operation[] array for each thread by laying // down references to operation according to their desired // frequency. - final Main[] threadStresses = new Main[numberOfThreads]; + // The first numberOfThreads elements are normal threads, the last + // numberOfDaemons elements are daemon threads. + final Main[] threadStresses = new Main[numberOfThreads + numberOfDaemons]; for (int t = 0; t < threadStresses.length; t++) { Operation[] operations = new Operation[operationsPerThread]; int o = 0; @@ -388,9 +401,10 @@ public class Main implements Runnable { } } } - // Randomize the oepration order + // Randomize the operation order Collections.shuffle(Arrays.asList(operations)); - threadStresses[t] = new Main(lock, t, operations); + threadStresses[t] = t < numberOfThreads ? new Main(lock, t, operations) : + new Daemon(lock, t, operations); } // Enable to dump operation counts per thread to make sure its @@ -434,9 +448,14 @@ public class Main implements Runnable { thread.join(); } catch (InterruptedException e) { } - System.out.println("Thread exited for " + id + " with " - + (operationsPerThread - threadStress.nextOperation) - + " operations remaining."); + try { + System.out.println("Thread exited for " + id + " with " + + (operationsPerThread - threadStress.nextOperation) + + " operations remaining."); + } catch (OutOfMemoryError e) { + // Ignore OOME since we need to print "Finishing worker" for the test + // to pass. 
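                // (The message above is built with string concatenation, which
                // allocates; under the memory pressure this test creates, even
                // the log line itself can throw OutOfMemoryError.)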
+ } } System.out.println("Finishing worker"); } @@ -459,6 +478,14 @@ public class Main implements Runnable { notifier.start(); } + // Create and start the daemon threads. + for (int r = 0; r < numberOfDaemons; r++) { + Main daemon = threadStresses[numberOfThreads + r]; + Thread t = new Thread(daemon, "Daemon thread " + daemon.id); + t.setDaemon(true); + t.start(); + } + for (int r = 0; r < runners.length; r++) { runners[r].start(); } @@ -467,9 +494,9 @@ } } - private final Operation[] operations; + protected final Operation[] operations; private final Object lock; - private final int id; + protected final int id; private int nextOperation; @@ -503,4 +530,36 @@ } } + private static class Daemon extends Main { + private Daemon(Object lock, int id, Operation[] operations) { + super(lock, id, operations); + } + + public void run() { + try { + if (DEBUG) { + System.out.println("Starting ThreadStress Daemon " + id); + } + int i = 0; + while (true) { + Operation operation = operations[i]; + if (DEBUG) { + System.out.println("ThreadStress Daemon " + id + + " operation " + i + + " is " + operation); + } + operation.perform(); + i = (i + 1) % operations.length; + } + } catch (OutOfMemoryError e) { + // Catch OutOfMemoryErrors since these can cause the test to fail if they print + // the stack trace after "Finishing worker". + } finally { + if (DEBUG) { + System.out.println("Finishing ThreadStress Daemon for " + id); + } + } + } + } + } diff --git a/test/005-annotations/build b/test/005-annotations/build index 24740554e0..3f00a1a3cd 100644 --- a/test/005-annotations/build +++ b/test/005-annotations/build @@ -25,4 +25,12 @@ ${JAVAC} -d classes `find src -name '*.java'` # ...but not at run time. rm 'classes/android/test/anno/MissingAnnotation.class' rm 'classes/android/test/anno/ClassWithInnerAnnotationClass$MissingInnerAnnotationClass.class' -${DX} -JXmx256m --debug --dex --output=$TEST_NAME.jar classes + +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + ${JACK} --import classes.jack --output-dex . +else + ${DX} -JXmx256m --debug --dex --output=classes.dex classes +fi + +zip $TEST_NAME.jar classes.dex diff --git a/test/011-array-copy/src/Main.java b/test/011-array-copy/src/Main.java index 505d8b09ce..96e1dbf21a 100644 --- a/test/011-array-copy/src/Main.java +++ b/test/011-array-copy/src/Main.java @@ -23,6 +23,7 @@ public class Main { public static void main(String args[]) { testObjectCopy(); testOverlappingMoves(); + testFloatAndDouble(); } public static void testObjectCopy() { @@ -143,4 +144,13 @@ /* copy forward, mixed alignment, trivial length */ makeCopies(0, 5, 1); } + + private static void testFloatAndDouble() { + // Float & double copies have the same implementation as int & long. However, there are + // protective DCHECKs in the code (there is nothing unifying like ByteSizedArray or + // ShortSizedArray). Just test that we don't fail those checks.
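    // (float shares the 4-byte primitive copy path with int, and double shares
    // the 8-byte path with long, so copying both widths exercises the DCHECKs
    // mentioned above.)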
+ final int len = 10; + System.arraycopy(new float[len], 0, new float[len], 0, len); + System.arraycopy(new double[len], 0, new double[len], 0, len); + } } diff --git a/test/022-interface/build b/test/022-interface/build index c86b1dcd37..3f8915c27e 100644 --- a/test/022-interface/build +++ b/test/022-interface/build @@ -19,5 +19,11 @@ set -e # Use classes that are compiled with ecj that exposes an invokeinterface # issue when interfaces override methods in Object -${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + ${JACK} --import classes.jack --output-dex . +else + ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes +fi + zip $TEST_NAME.jar classes.dex diff --git a/test/023-many-interfaces/build b/test/023-many-interfaces/build index ad42a2d857..3bb6747c17 100644 --- a/test/023-many-interfaces/build +++ b/test/023-many-interfaces/build @@ -21,8 +21,14 @@ set -e gcc -Wall -Werror -o iface-gen iface-gen.c ./iface-gen -mkdir classes -${JAVAC} -d classes src/*.java +if [ ${USE_JACK} = "true" ]; then + # Use the default Jack commands + ./default-build +else + mkdir classes + ${JAVAC} -d classes src/*.java -${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes -zip $TEST_NAME.jar classes.dex + # dx needs more memory for that test so do not pass Xmx option here. + ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes + zip $TEST_NAME.jar classes.dex +fi diff --git a/test/036-finalizer/src/Main.java b/test/036-finalizer/src/Main.java index 8c7c27d79c..0de56f9a7c 100644 --- a/test/036-finalizer/src/Main.java +++ b/test/036-finalizer/src/Main.java @@ -68,14 +68,17 @@ public class Main { return s[0]; } - public static void main(String[] args) { - WeakReference<FinalizerTest> wimp = makeRef(); + private static void printWeakReference(WeakReference<FinalizerTest> wimp) { + // Reference ft so we are sure the WeakReference cannot be cleared. FinalizerTest keepLive = wimp.get(); - System.out.println("wimp: " + wimpString(wimp)); + } + + public static void main(String[] args) { + WeakReference<FinalizerTest> wimp = makeRef(); + printWeakReference(wimp); /* this will try to collect and finalize ft */ - keepLive = null; System.out.println("gc"); Runtime.getRuntime().gc(); diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt index fa053fb92d..d657d44e61 100644 --- a/test/046-reflect/expected.txt +++ b/test/046-reflect/expected.txt @@ -24,7 +24,7 @@ Method name is myMethod SuperTarget constructor ()V Target constructor ()V Before, float is 3.1415925 -myMethod: hi there 3.1415925 Q ! +myMethod: hi there 3.1415925 ✔ ! 
Result of invoke: 7 Calling no-arg void-return method myNoargMethod ()V diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java index 0d8e576086..0c90109c69 100644 --- a/test/046-reflect/src/Main.java +++ b/test/046-reflect/src/Main.java @@ -147,7 +147,7 @@ public class Main { Object[] argList = new Object[] { new String[] { "hi there" }, new Float(3.1415926f), - new Character('Q') + new Character('\u2714') }; System.out.println("Before, float is " + ((Float)argList[1]).floatValue()); diff --git a/test/051-thread/thread_test.cc b/test/051-thread/thread_test.cc index 2b8e675cc6..4215207c97 100644 --- a/test/051-thread/thread_test.cc +++ b/test/051-thread/thread_test.cc @@ -28,7 +28,7 @@ extern "C" JNIEXPORT jint JNICALL Java_Main_getNativePriority(JNIEnv* env, extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportsThreadPriorities( JNIEnv* env ATTRIBUTE_UNUSED, jclass clazz ATTRIBUTE_UNUSED) { -#if defined(HAVE_ANDROID_OS) +#if defined(__ANDROID__) return JNI_TRUE; #else return JNI_FALSE; diff --git a/test/056-const-string-jumbo/build b/test/056-const-string-jumbo/build index ef286d140e..ae42519b8f 100644 --- a/test/056-const-string-jumbo/build +++ b/test/056-const-string-jumbo/build @@ -39,8 +39,13 @@ function writeFile(name, start, end) { printf("}\n") > fileName; }' -mkdir classes -${JAVAC} -d classes src/*.java +if [ ${USE_JACK} = "true" ]; then + ${JACK} --output-dex . src +else + mkdir classes + ${JAVAC} -d classes src/*.java + + ${DX} -JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes +fi -${DX} -JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes zip $TEST_NAME.jar classes.dex diff --git a/test/074-gc-thrash/src/Main.java b/test/074-gc-thrash/src/Main.java index 238e73a6b2..f947d0b3dc 100644 --- a/test/074-gc-thrash/src/Main.java +++ b/test/074-gc-thrash/src/Main.java @@ -218,17 +218,7 @@ class Deep extends Thread { return; } - /* - * Check the results of the last trip through. Everything in - * "weak" should be matched in "strong", and the two should be - * equivalent (object-wise, not just string-equality-wise). - */ - for (int i = 0; i < MAX_DEPTH; i++) { - if (strong[i] != weak[i].get()) { - System.err.println("Deep: " + i + " strong=" + strong[i] + - ", weak=" + weak[i].get()); - } - } + checkStringReferences(); /* * Wipe "strong", do a GC, see if "weak" got collected. @@ -248,6 +238,26 @@ class Deep extends Thread { System.out.println("Deep: iters=" + iter / MAX_DEPTH); } + + /** + * Check the results of the last trip through. Everything in + * "weak" should be matched in "strong", and the two should be + * equivalent (object-wise, not just string-equality-wise). + * + * We do that check in a separate method to avoid retaining these + * String references in local DEX registers. In interpreter mode, + * they would retain these references until the end of the method + * or until they are updated to another value. + */ + private static void checkStringReferences() { + for (int i = 0; i < MAX_DEPTH; i++) { + if (strong[i] != weak[i].get()) { + System.err.println("Deep: " + i + " strong=" + strong[i] + + ", weak=" + weak[i].get()); + } + } + } + /** * Recursively dive down, setting one or more local variables. 
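 * (These locals are the point of the test: in interpreter mode a DEX register
 * keeps its String reachable until it is overwritten or the method exits,
 * which is also why checkStringReferences() above had to be split out.)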
* diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java index 4dfa73cbaf..bd606a6d4e 100644 --- a/test/082-inline-execute/src/Main.java +++ b/test/082-inline-execute/src/Main.java @@ -45,6 +45,8 @@ public class Main { test_Long_reverseBytes(); test_Integer_reverse(); test_Long_reverse(); + test_Integer_numberOfLeadingZeros(); + test_Long_numberOfLeadingZeros(); test_StrictMath_abs_I(); test_StrictMath_abs_J(); test_StrictMath_min_I(); @@ -1000,6 +1002,86 @@ public class Main { Assert.assertEquals(Long.reverse(0x8765432187654321L), 0x84c2a6e184c2a6e1L); Assert.assertEquals(Long.reverse(Long.MAX_VALUE), 0xfffffffffffffffeL); Assert.assertEquals(Long.reverse(Long.MIN_VALUE), 1L); + + Assert.assertEquals(test_Long_reverse_b22324327(0xaaaaaaaaaaaaaaaaL, 0x5555555555555555L), + 157472205507277347L); + } + + // A bit more complicated than the above. Use local variables to stress register allocation. + private static long test_Long_reverse_b22324327(long l1, long l2) { + // A couple of local integers. Use them in a loop, so they get promoted. + int i1 = 0, i2 = 1, i3 = 2, i4 = 3, i5 = 4, i6 = 5, i7 = 6, i8 = 7; + for (int k = 0; k < 10; k++) { + i1 += 1; + i2 += 2; + i3 += 3; + i4 += 4; + i5 += 5; + i6 += 6; + i7 += 7; + i8 += 8; + } + + // Do the Long.reverse() calls, save the results. + long r1 = Long.reverse(l1); + long r2 = Long.reverse(l2); + + // Some more looping with the ints. + for (int k = 0; k < 10; k++) { + i1 += 1; + i2 += 2; + i3 += 3; + i4 += 4; + i5 += 5; + i6 += 6; + i7 += 7; + i8 += 8; + } + + // Include everything in the result, so things are kept live. Try to be a little bit clever to + // avoid things being folded somewhere. + return (r1 / i1) + (r2 / i2) + i3 + i4 + i5 + i6 + i7 + i8; + } + + public static boolean doThrow = false; + + public static int $noinline$return_int_zero() { + if (doThrow) { + throw new Error(); + } + return 0; + } + + public static void test_Integer_numberOfLeadingZeros() { + Assert.assertEquals(Integer.numberOfLeadingZeros(0), Integer.SIZE); + Assert.assertEquals(Integer.numberOfLeadingZeros(1), Integer.SIZE - 1); + Assert.assertEquals(Integer.numberOfLeadingZeros(1 << (Integer.SIZE-1)), 0); + Assert.assertEquals(Integer.numberOfLeadingZeros($noinline$return_int_zero()), Integer.SIZE); + for (int i = 0; i < Integer.SIZE; i++) { + Assert.assertEquals(Integer.numberOfLeadingZeros(1 << i), Integer.SIZE - 1 - i); + Assert.assertEquals(Integer.numberOfLeadingZeros((1 << i) | 1), Integer.SIZE - 1 - i); + Assert.assertEquals(Integer.numberOfLeadingZeros(0xFFFFFFFF >>> i), i); + } + } + + public static long $noinline$return_long_zero() { + if (doThrow) { + throw new Error(); + } + return 0; + } + + public static void test_Long_numberOfLeadingZeros() { + Assert.assertEquals(Long.numberOfLeadingZeros(0L), Long.SIZE); + Assert.assertEquals(Long.numberOfLeadingZeros(1L), Long.SIZE - 1); + Assert.assertEquals(Long.numberOfLeadingZeros(1L << ((Long.SIZE/2)-1)), Long.SIZE/2); + Assert.assertEquals(Long.numberOfLeadingZeros(1L << (Long.SIZE-1)), 0); + Assert.assertEquals(Long.numberOfLeadingZeros($noinline$return_long_zero()), Long.SIZE); + for (int i = 0; i < Long.SIZE; i++) { + Assert.assertEquals(Long.numberOfLeadingZeros(1L << i), Long.SIZE - 1 - i); + Assert.assertEquals(Long.numberOfLeadingZeros((1L << i) | 1L), Long.SIZE - 1 - i); + Assert.assertEquals(Long.numberOfLeadingZeros(0xFFFFFFFFFFFFFFFFL >>> i), i); + } } static Object runtime; diff --git a/test/085-old-style-inner-class/build 
b/test/085-old-style-inner-class/build index 963d6b3928..6f50a76863 100644 --- a/test/085-old-style-inner-class/build +++ b/test/085-old-style-inner-class/build @@ -22,7 +22,12 @@ set -e mkdir classes ${JAVAC} -source 1.4 -target 1.4 -d classes `find src -name '*.java'` -# Suppress stderr to keep the inner class warnings out of the expected output. -${DX} --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes 2>/dev/null +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + ${JACK} --import classes.jack --output-dex . +else + # Suppress stderr to keep the inner class warnings out of the expected output. + ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes 2>/dev/null +fi zip $TEST_NAME.jar classes.dex diff --git a/test/089-many-methods/build b/test/089-many-methods/build index 7ede759495..ff77c60f64 100644 --- a/test/089-many-methods/build +++ b/test/089-many-methods/build @@ -43,7 +43,8 @@ function writeFileMethod(name) { printf("}\n") > fileName; }' +# The test relies on the error message produced by dx, not jack, so keep building with dx for now +# (b/19467889). mkdir classes ${JAVAC} -d classes `find src -name '*.java'` ${DX} -JXmx1024m --dex --no-optimize classes - diff --git a/test/097-duplicate-method/build b/test/097-duplicate-method/build index 657677986e..a8558739de 100644 --- a/test/097-duplicate-method/build +++ b/test/097-duplicate-method/build @@ -18,8 +18,19 @@ set -e mkdir classes -${JAVAC} -d classes src/*.java -${JASMIN} -d classes src/*.j -${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes +if [ ${USE_JACK} = "true" ]; then + ${JACK} --output-jack src.jack src + + ${JASMIN} -d classes src/*.j + ${JILL} classes --output jasmin.jack + + # We set jack.import.type.policy=keep-first to consider class definitions from jasmin first. + ${JACK} --import jasmin.jack --import src.jack -D jack.import.type.policy=keep-first --output-dex . 
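  # (Listing jasmin.jack first matters: with keep-first, the first imported
  # definition of a duplicated type is the one that survives.)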
+else + ${JAVAC} -d classes src/*.java + ${JASMIN} -d classes src/*.j + + ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes +fi zip $TEST_NAME.jar classes.dex diff --git a/test/099-vmdebug/expected.txt b/test/099-vmdebug/expected.txt index 579f98fe7c..b8d72f66f8 100644 --- a/test/099-vmdebug/expected.txt +++ b/test/099-vmdebug/expected.txt @@ -17,3 +17,9 @@ Test tracing with bogus (< 1024 && != 0) filesize Got expected exception Test sampling with bogus (<= 0) interval Got expected exception +Instances of ClassA 2 +Instances of ClassB 1 +Instances of null 0 +Instances of ClassA assignable 3 +Array counts [2, 1, 0] +Array counts assignable [3, 1, 0] diff --git a/test/099-vmdebug/src/Main.java b/test/099-vmdebug/src/Main.java index add2ff6fb6..1be5765155 100644 --- a/test/099-vmdebug/src/Main.java +++ b/test/099-vmdebug/src/Main.java @@ -17,6 +17,8 @@ import java.io.File; import java.io.IOException; import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.ArrayList; import java.util.Map; public class Main { @@ -30,7 +32,9 @@ public class Main { return; } testMethodTracing(); + testCountInstances(); testRuntimeStat(); + testRuntimeStats(); } private static File createTempFile() throws Exception { @@ -220,12 +224,39 @@ public class Main { checkHistogram(blocking_gc_count_rate_histogram); } + static class ClassA { } + static class ClassB { } + static class ClassC extends ClassA { } + + private static void testCountInstances() throws Exception { + ArrayList<Object> l = new ArrayList<Object>(); + l.add(new ClassA()); + l.add(new ClassB()); + l.add(new ClassA()); + l.add(new ClassC()); + Runtime.getRuntime().gc(); + System.out.println("Instances of ClassA " + + VMDebug.countInstancesofClass(ClassA.class, false)); + System.out.println("Instances of ClassB " + + VMDebug.countInstancesofClass(ClassB.class, false)); + System.out.println("Instances of null " + VMDebug.countInstancesofClass(null, false)); + System.out.println("Instances of ClassA assignable " + + VMDebug.countInstancesofClass(ClassA.class, true)); + Class[] classes = new Class[]{ClassA.class, ClassB.class, null}; + long[] counts = VMDebug.countInstancesofClasses(classes, false); + System.out.println("Array counts " + Arrays.toString(counts)); + counts = VMDebug.countInstancesofClasses(classes, true); + System.out.println("Array counts assignable " + Arrays.toString(counts)); + } + private static class VMDebug { private static final Method startMethodTracingMethod; private static final Method stopMethodTracingMethod; private static final Method getMethodTracingModeMethod; private static final Method getRuntimeStatMethod; private static final Method getRuntimeStatsMethod; + private static final Method countInstancesOfClassMethod; + private static final Method countInstancesOfClassesMethod; static { try { Class c = Class.forName("dalvik.system.VMDebug"); @@ -235,6 +266,10 @@ public class Main { getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode"); getRuntimeStatMethod = c.getDeclaredMethod("getRuntimeStat", String.class); getRuntimeStatsMethod = c.getDeclaredMethod("getRuntimeStats"); + countInstancesOfClassMethod = c.getDeclaredMethod("countInstancesOfClass", + Class.class, Boolean.TYPE); + countInstancesOfClassesMethod = c.getDeclaredMethod("countInstancesOfClasses", + Class[].class, Boolean.TYPE); } catch (Exception e) { throw new RuntimeException(e); } @@ -257,5 +292,13 @@ public class Main { public static Map<String, String> getRuntimeStats() throws Exception { return 
(Map<String, String>) getRuntimeStatsMethod.invoke(null); } + public static long countInstancesofClass(Class c, boolean assignable) throws Exception { + return (long) countInstancesOfClassMethod.invoke(null, new Object[]{c, assignable}); + } + public static long[] countInstancesofClasses(Class[] classes, boolean assignable) + throws Exception { + return (long[]) countInstancesOfClassesMethod.invoke( + null, new Object[]{classes, assignable}); + } } } diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt index 7db61a1023..c932761c3b 100644 --- a/test/100-reflect2/expected.txt +++ b/test/100-reflect2/expected.txt @@ -1,6 +1,6 @@ true 8 -x +✔ 3.141592653589793 3.14 32 diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java index 72e14b15f3..bf3a574c99 100644 --- a/test/100-reflect2/src/Main.java +++ b/test/100-reflect2/src/Main.java @@ -20,7 +20,7 @@ import java.util.*; class Main { private static boolean z = true; private static byte b = 8; - private static char c = 'x'; + private static char c = '\u2714'; private static double d = Math.PI; private static float f = 3.14f; private static int i = 32; @@ -144,7 +144,7 @@ class Main { /* private static boolean z = true; private static byte b = 8; - private static char c = 'x'; + private static char c = '\u2714'; private static double d = Math.PI; private static float f = 3.14f; private static int i = 32; @@ -263,7 +263,7 @@ class Main { show(ctor.newInstance((Object[]) null)); ctor = String.class.getConstructor(char[].class, int.class, int.class); - show(ctor.newInstance(new char[] { 'x', 'y', 'z', '!' }, 1, 2)); + show(ctor.newInstance(new char[] { '\u2714', 'y', 'z', '!' }, 1, 2)); } private static void testPackagePrivateConstructor() { diff --git a/test/107-int-math2/src/Main.java b/test/107-int-math2/src/Main.java index 6a6227cee5..0c91d4438d 100644 --- a/test/107-int-math2/src/Main.java +++ b/test/107-int-math2/src/Main.java @@ -412,7 +412,7 @@ class Main extends IntMathBase { */ static int lit8Test(int x) { - int[] results = new int[8]; + int[] results = new int[9]; /* try to generate op-int/lit8" instructions */ results[0] = x + 10; @@ -423,6 +423,7 @@ class Main extends IntMathBase { results[5] = x & 10; results[6] = x | -10; results[7] = x ^ -10; + results[8] = x * -256; int minInt = -2147483648; int result = minInt / -1; if (result != minInt) {return 1; } @@ -434,6 +435,7 @@ class Main extends IntMathBase { if (results[5] != 8) {return 7; } if (results[6] != -1) {return 8; } if (results[7] != 55563) {return 9; } + if (results[8] != 14222080) {return 10; } return 0; } diff --git a/test/109-suspend-check/src/Main.java b/test/109-suspend-check/src/Main.java index 8046d751ed..3c3353b4db 100644 --- a/test/109-suspend-check/src/Main.java +++ b/test/109-suspend-check/src/Main.java @@ -32,6 +32,8 @@ public class Main { new InfiniteWhileLoopWithSpecialPutOrNop(new SpecialMethods2()), new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods1()), new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods2()), + new InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(new SpecialMethods1()), + new InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(new SpecialMethods2()), }; doWhileLoopWithLong.start(); for (SimpleLoopThread loop : simpleLoops) { @@ -135,6 +137,21 @@ class InfiniteWhileLoopWithSpecialConstOrIGet extends SimpleLoopThread { } } +class InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch extends SimpleLoopThread { + private SpecialMethodInterface smi; + public 
InfiniteWhileLoopWithSpecialConstOrIGetInTryCatch(SpecialMethodInterface smi) { + this.smi = smi; + } + public void run() { + try { + long i = 0L; + while (keepGoing) { + i += smi.ConstOrIGet(); + } + } catch (Throwable ignored) { } + } +} + class InfiniteWhileLoopWithIntrinsic extends SimpleLoopThread { private String[] strings = { "a", "b", "c", "d" }; private int sum = 0; diff --git a/test/111-unresolvable-exception/build b/test/111-unresolvable-exception/build index c21a9eff83..e772fb812f 100644 --- a/test/111-unresolvable-exception/build +++ b/test/111-unresolvable-exception/build @@ -21,5 +21,10 @@ mkdir classes ${JAVAC} -d classes `find src -name '*.java'` rm classes/TestException.class -${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + ${JACK} --import classes.jack --output-dex . +else + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes +fi zip $TEST_NAME.jar classes.dex diff --git a/test/113-multidex/build b/test/113-multidex/build index ec8706ea02..8ef5c0eb0f 100644 --- a/test/113-multidex/build +++ b/test/113-multidex/build @@ -17,16 +17,32 @@ # Stop if something fails. set -e -mkdir classes - # All except Main +mkdir classes ${JAVAC} -d classes `find src -name '*.java'` rm classes/Main.class -${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes # Only Main -${JAVAC} -d classes `find src -name '*.java'` -rm classes/Second.class classes/FillerA.class classes/FillerB.class classes/Inf*.class -${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes +mkdir classes2 +${JAVAC} -d classes2 `find src -name '*.java'` +rm classes2/Second.class classes2/FillerA.class classes2/FillerB.class classes2/Inf*.class + +if [ ${USE_JACK} = "true" ]; then + # Create .jack files from classes generated with javac. + ${JILL} classes --output classes.jack + ${JILL} classes2 --output classes2.jack + + # Create DEX files from .jack files. + ${JACK} --import classes.jack --output-dex . + mv classes.dex classes-1.dex + ${JACK} --import classes2.jack --output-dex . + mv classes.dex classes2.dex + mv classes-1.dex classes.dex +else + # All except Main + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes + # Only Main + ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2 +fi zip $TEST_NAME.jar classes.dex classes2.dex diff --git a/test/114-ParallelGC/src/Main.java b/test/114-ParallelGC/src/Main.java index 46029cf26a..159dd5c926 100644 --- a/test/114-ParallelGC/src/Main.java +++ b/test/114-ParallelGC/src/Main.java @@ -53,20 +53,21 @@ public class Main implements Runnable { } // Allocate objects to definitely run GC before quitting. - ArrayList<Object> l = new ArrayList<Object>(); - try { - for (int i = 0; i < 100000; i++) { - l.add(new ArrayList<Object>(i)); - } - } catch (OutOfMemoryError oom) { - } - // Make the (outer) ArrayList unreachable. Note it may still - // be reachable under an interpreter or a compiler without a - // liveness analysis. 
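    // (Moving the allocation into allocateObjectsToRunGc() below is the more
    // robust fix: once the helper returns, its frame is gone and no DEX
    // register can keep the list reachable.)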
- l = null; + allocateObjectsToRunGc(); + new ArrayList<Object>(50); } + private static void allocateObjectsToRunGc() { + ArrayList<Object> l = new ArrayList<Object>(); + try { + for (int i = 0; i < 100000; i++) { + l.add(new ArrayList<Object>(i)); + } + } catch (OutOfMemoryError oom) { + } + } + private Main(CyclicBarrier startBarrier) { this.startBarrier = startBarrier; } diff --git a/test/115-native-bridge/expected.txt b/test/115-native-bridge/expected.txt index 464d2c887e..372ecd0484 100644 --- a/test/115-native-bridge/expected.txt +++ b/test/115-native-bridge/expected.txt @@ -61,3 +61,4 @@ Getting trampoline for Java_Main_testNewStringObject with shorty V. trampoline_Java_Main_testNewStringObject called! Getting trampoline for Java_Main_testSignal with shorty I. NB signal handler with signal 11. +NB signal handler with signal 4. diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc index c8141a7fb8..04326b3028 100644 --- a/test/115-native-bridge/nativebridge.cc +++ b/test/115-native-bridge/nativebridge.cc @@ -200,15 +200,23 @@ static jint trampoline_Java_Main_testSignal(JNIEnv*, jclass) { #if !defined(__APPLE__) && !defined(__mips__) tmp.sa_restorer = nullptr; #endif - sigaction(SIGSEGV, &tmp, nullptr); -#if defined(__arm__) || defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) - // On supported architectures we cause a real SEGV. + // Test segv + sigaction(SIGSEGV, &tmp, nullptr); +#if defined(__arm__) || defined(__i386__) || defined(__aarch64__) *go_away_compiler = 'a'; +#elif defined(__x86_64__) + // Cause a SEGV using an instruction known to be 3 bytes long + asm volatile("movl $0, %%eax;" "movb $1, (%%eax);" : : : "%eax"); #else // On other architectures we simulate SEGV. kill(getpid(), SIGSEGV); #endif + + // Test sigill + sigaction(SIGILL, &tmp, nullptr); + kill(getpid(), SIGILL); + return 1234; } @@ -385,27 +393,29 @@ extern "C" bool nb_is_compatible(uint32_t bridge_version ATTRIBUTE_UNUSED) { // 004-SignalTest. static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) { printf("NB signal handler with signal %d.\n", sig); + if (sig == SIGSEGV) { #if defined(__arm__) - struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); - struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); - sc->arm_pc += 2; // Skip instruction causing segv. + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + sc->arm_pc += 2; // Skip instruction causing segv & sigill. #elif defined(__aarch64__) - struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); - struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); - sc->pc += 4; // Skip instruction causing segv. + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + sc->pc += 4; // Skip instruction causing segv & sigill. #elif defined(__i386__) || defined(__x86_64__) - struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); - uc->CTX_EIP += 3; + struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); + uc->CTX_EIP += 3; #else - UNUSED(context); + UNUSED(context); #endif + } // We handled this... return true; } static ::android::NativeBridgeSignalHandlerFn native_bridge_get_signal_handler(int signal) { - // Only test segfault handler. 
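  // (The updated test claims both signals: SIGSEGV exercises chaining for a
  // signal the runtime has already claimed, SIGILL for one it has not.)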
- if (signal == SIGSEGV) { + // Test segv for already claimed signal, and sigill for not claimed signal + if ((signal == SIGSEGV) || (signal == SIGILL)) { return &nb_signalhandler; } return nullptr; diff --git a/test/121-modifiers/build b/test/121-modifiers/build index d73be86f95..85b69e92a6 100644 --- a/test/121-modifiers/build +++ b/test/121-modifiers/build @@ -30,5 +30,11 @@ set -e # mv NonInf.out classes/NonInf.class # mv Main.class A.class A\$B.class A\$C.class classes/ -${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + # Workaround b/19561685: disable sanity checks to produce a DEX file with invalid modifiers. + ${JACK} --sanity-checks off --import classes.jack --output-dex . +else + ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes +fi zip $TEST_NAME.jar classes.dex diff --git a/test/124-missing-classes/build b/test/124-missing-classes/build index 62e57c86da..b92ecf9382 100644 --- a/test/124-missing-classes/build +++ b/test/124-missing-classes/build @@ -25,4 +25,11 @@ ${JAVAC} -d classes `find src -name '*.java'` # ...but not at run time. rm 'classes/MissingClass.class' rm 'classes/Main$MissingInnerClass.class' -${DX} -JXmx256m --debug --dex --output=$TEST_NAME.jar classes + +if [ ${USE_JACK} = "true" ]; then + ${JILL} classes --output classes.jack + ${JACK} --import classes.jack --output-dex . +else + ${DX} -JXmx256m --debug --dex --output=classes.dex classes +fi +zip $TEST_NAME.jar classes.dex diff --git a/test/126-miranda-multidex/build b/test/126-miranda-multidex/build index 4c30f3f721..b7f2118d2f 100644 --- a/test/126-miranda-multidex/build +++ b/test/126-miranda-multidex/build @@ -17,16 +17,32 @@ # Stop if something fails. set -e +# All except MirandaInterface mkdir classes - -# All except Main ${JAVAC} -d classes `find src -name '*.java'` rm classes/MirandaInterface.class -${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes -# Only Main -${JAVAC} -d classes `find src -name '*.java'` -rm classes/Main.class classes/MirandaAbstract.class classes/MirandaClass*.class classes/MirandaInterface2*.class -${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes +# Only MirandaInterface +mkdir classes2 +${JAVAC} -d classes2 `find src -name '*.java'` +rm classes2/Main.class classes2/MirandaAbstract.class classes2/MirandaClass*.class classes2/MirandaInterface2*.class + +if [ ${USE_JACK} = "true" ]; then + # Create .jack files from classes generated with javac. + ${JILL} classes --output classes.jack + ${JILL} classes2 --output classes2.jack + + # Create DEX files from .jack files. + ${JACK} --import classes.jack --output-dex . + mv classes.dex classes-1.dex + ${JACK} --import classes2.jack --output-dex . 
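  # (Each --output-dex invocation writes classes.dex, hence the rename dance
  # below: stash the first DEX as classes-1.dex, emit the second, restore.)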
+ mv classes.dex classes2.dex + mv classes-1.dex classes.dex +else + # All except Main + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes + # Only Main + ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2 +fi zip $TEST_NAME.jar classes.dex classes2.dex diff --git a/test/127-secondarydex/build b/test/127-secondarydex/build index 712774f7ef..0d9f4d6291 100755 --- a/test/127-secondarydex/build +++ b/test/127-secondarydex/build @@ -23,9 +23,21 @@ ${JAVAC} -d classes `find src -name '*.java'` mkdir classes-ex mv classes/Super.class classes-ex -if [ ${NEED_DEX} = "true" ]; then - ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes +if [ ${USE_JACK} = "true" ]; then + # Create .jack files from classes generated with javac. + ${JILL} classes --output classes.jack + ${JILL} classes-ex --output classes-ex.jack + + # Create DEX files from .jack files. + ${JACK} --import classes.jack --output-dex . zip $TEST_NAME.jar classes.dex - ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex + ${JACK} --import classes-ex.jack --output-dex . zip ${TEST_NAME}-ex.jar classes.dex +else + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes + zip $TEST_NAME.jar classes.dex + ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex + zip ${TEST_NAME}-ex.jar classes.dex + fi fi diff --git a/test/131-structural-change/build b/test/131-structural-change/build index 7ddc81d9b8..ff0da2098d 100755 --- a/test/131-structural-change/build +++ b/test/131-structural-change/build @@ -17,15 +17,23 @@ # Stop if something fails. set -e -mkdir classes -${JAVAC} -d classes `find src -name '*.java'` - -mkdir classes-ex -${JAVAC} -d classes-ex `find src-ex -name '*.java'` - -if [ ${NEED_DEX} = "true" ]; then - ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes +if [ ${USE_JACK} = "true" ]; then + ${JACK} --output-dex . src zip $TEST_NAME.jar classes.dex - ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex + + ${JACK} --output-dex . src-ex zip ${TEST_NAME}-ex.jar classes.dex +else + mkdir classes + ${JAVAC} -d classes `find src -name '*.java'` + + mkdir classes-ex + ${JAVAC} -d classes-ex `find src-ex -name '*.java'` + + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes + zip $TEST_NAME.jar classes.dex + ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex + zip ${TEST_NAME}-ex.jar classes.dex + fi fi diff --git a/test/140-dce-regression/expected.txt b/test/140-dce-regression/expected.txt new file mode 100644 index 0000000000..863339fb8c --- /dev/null +++ b/test/140-dce-regression/expected.txt @@ -0,0 +1 @@ +Passed diff --git a/test/140-dce-regression/info.txt b/test/140-dce-regression/info.txt new file mode 100644 index 0000000000..de6ad34d49 --- /dev/null +++ b/test/140-dce-regression/info.txt @@ -0,0 +1 @@ +Regression test for quick dead code elimination. 
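The hazard this test pins down, in isolation: arr.length on a null reference must throw even when the loaded value is immediately dead. A minimal standalone sketch of that invariant (not the test itself, which follows below):

public class DceSketch {
  public static void main(String[] args) {
    int[] arr = null;
    try {
      // The value of arr.length is dead (overwritten before use), but its
      // implicit null check is a side effect dead code elimination must keep.
      int len = arr.length;
      len = 5;
      System.out.println("BUG: null check eliminated, len = " + len);
    } catch (NullPointerException expected) {
      System.out.println("Passed");
    }
  }
}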
diff --git a/test/140-dce-regression/src/Main.java b/test/140-dce-regression/src/Main.java new file mode 100644 index 0000000000..f255029803 --- /dev/null +++ b/test/140-dce-regression/src/Main.java @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + public static void testArrayLength() { + int[] arr = null; + int len = 0; + try { + len = arr.length; + len = 5; + } catch (NullPointerException npe) { + System.out.println("Passed"); + } + } + + public static void main(String[] args) { + testArrayLength(); + } +} diff --git a/test/140-field-packing/expected.txt b/test/140-field-packing/expected.txt new file mode 100644 index 0000000000..2b0a2ce905 --- /dev/null +++ b/test/140-field-packing/expected.txt @@ -0,0 +1,2 @@ +running test... +test completed. diff --git a/test/140-field-packing/info.txt b/test/140-field-packing/info.txt new file mode 100644 index 0000000000..a28bd0463e --- /dev/null +++ b/test/140-field-packing/info.txt @@ -0,0 +1 @@ +Test field packing for classes with various arrangements of fields. diff --git a/test/140-field-packing/src/GapOrder.java b/test/140-field-packing/src/GapOrder.java new file mode 100644 index 0000000000..09d09b8e0b --- /dev/null +++ b/test/140-field-packing/src/GapOrder.java @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Regression test for 22460222, the sub class. +// The field gaps order was wrong. If there were two gaps of different sizes, +// and the larger one was needed, it wouldn't be found. + +import java.lang.reflect.Field; +import java.lang.reflect.Method; + +class GapOrder extends GapOrderBase { + // The base class is 9 bytes. The entire class should be packed as: + // + // 00: oooo oooo + // 08: b-ss rrrr + // 16: rrrr iiii + // 24: dddd dddd + // + // The problem was, the packer wasn't finding the gap where iiii should go, + // because the gap where ss goes was given priority. 
Instead it packed as: + // 00: oooo oooo + // 08: b--- rrrr + // 16: rrrr ---- + // 24: dddd dddd + // 32: iiii ss + public Object r1; + public Object r2; + public double d; + public int i; + public short s; + + static private void CheckField(String fieldName, int expected) { + Field field = null; + try { + field = GapOrder.class.getField(fieldName); + } catch (ReflectiveOperationException e) { + System.out.println(fieldName + " not found in GapOrder."); + return; + } + + int actual = -1; + try { + Method getOffset = Field.class.getMethod("getOffset"); + actual = (Integer)getOffset.invoke(field); + } catch (ReflectiveOperationException e) { + System.out.println("Unable to get field offset for " + fieldName + ":" + e); + return; + } + + if (actual != expected) { + System.out.println( + String.format("GapOrder.%s has offset %d, but expected %d", + fieldName, actual, expected)); + } + } + + static public void Check() { + CheckField("r1", 12); + CheckField("r2", 16); + CheckField("d", 24); + CheckField("i", 20); + CheckField("s", 10); + } +} + diff --git a/test/140-field-packing/src/GapOrderBase.java b/test/140-field-packing/src/GapOrderBase.java new file mode 100644 index 0000000000..4a0b378c57 --- /dev/null +++ b/test/140-field-packing/src/GapOrderBase.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Regression test for 22460222, the base class. +// The field gaps order was wrong. If there were two gaps of different sizes, +// and the larger one was needed, it wouldn't be found. + +// This class has a size of 9 bytes: 8 for object plus 1 for the field 'b'. +class GapOrderBase { + public byte b; +} diff --git a/test/140-field-packing/src/Main.java b/test/140-field-packing/src/Main.java new file mode 100644 index 0000000000..2810b32a82 --- /dev/null +++ b/test/140-field-packing/src/Main.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + public static void main(String[] args) { + System.out.println("running test..."); + GapOrder.Check(); + System.out.println("test completed."); + } +} diff --git a/test/303-verification-stress/build b/test/303-verification-stress/build index 789d38e4cb..5ff73eccf7 100644 --- a/test/303-verification-stress/build +++ b/test/303-verification-stress/build @@ -21,8 +21,14 @@ set -e gcc -Wall -Werror -o classes-gen classes-gen.c ./classes-gen -mkdir classes -${JAVAC} -d classes src/*.java +if [ ${USE_JACK} = "true" ]; then + # Use the default Jack commands + ./default-build +else + mkdir classes + ${JAVAC} -d classes src/*.java -${DX} --debug --dex --output=classes.dex classes -zip $TEST_NAME.jar classes.dex + # dx needs more memory for that test so do not pass Xmx option here. + ${DX} --debug --dex --output=classes.dex classes + zip $TEST_NAME.jar classes.dex +fi diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java index a1e62b3b39..f2e451854b 100644 --- a/test/401-optimizing-compiler/src/Main.java +++ b/test/401-optimizing-compiler/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/402-optimizing-control-flow/src/Main.java b/test/402-optimizing-control-flow/src/Main.java index c9c24dd568..4c93d266e8 100644 --- a/test/402-optimizing-control-flow/src/Main.java +++ b/test/402-optimizing-control-flow/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/403-optimizing-long/src/Main.java b/test/403-optimizing-long/src/Main.java index 21af4e14aa..5927d1c325 100644 --- a/test/403-optimizing-long/src/Main.java +++ b/test/403-optimizing-long/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/404-optimizing-allocator/src/Main.java b/test/404-optimizing-allocator/src/Main.java index 7b31820470..1ff5475e45 100644 --- a/test/404-optimizing-allocator/src/Main.java +++ b/test/404-optimizing-allocator/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$reg$ is a marker for the optimizing compiler to ensure +// Note that $opt$reg$ is a marker for the optimizing compiler to test // it does use its register allocator. public class Main { diff --git a/test/405-optimizing-long-allocator/src/Main.java b/test/405-optimizing-long-allocator/src/Main.java index 9fd840b543..a0e0bb5355 100644 --- a/test/405-optimizing-long-allocator/src/Main.java +++ b/test/405-optimizing-long-allocator/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it compiles these methods. 
public class Main { diff --git a/test/411-optimizing-arith/expected.txt b/test/411-optimizing-arith-mul/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/411-optimizing-arith/expected.txt +++ b/test/411-optimizing-arith-mul/expected.txt diff --git a/test/411-optimizing-arith/info.txt b/test/411-optimizing-arith-mul/info.txt index 10155512f0..10155512f0 100644 --- a/test/411-optimizing-arith/info.txt +++ b/test/411-optimizing-arith-mul/info.txt diff --git a/test/411-optimizing-arith/src/Main.java b/test/411-optimizing-arith-mul/src/Main.java index 3a5d7c05c9..60e418e1e5 100644 --- a/test/411-optimizing-arith/src/Main.java +++ b/test/411-optimizing-arith-mul/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/412-new-array/src/Main.java b/test/412-new-array/src/Main.java index e4669b8a96..b9c2a053e0 100644 --- a/test/412-new-array/src/Main.java +++ b/test/412-new-array/src/Main.java @@ -17,7 +17,7 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main extends TestCase { diff --git a/test/414-optimizing-arith-sub/src/Main.java b/test/414-optimizing-arith-sub/src/Main.java index 30e84368d0..b4531cdfd4 100644 --- a/test/414-optimizing-arith-sub/src/Main.java +++ b/test/414-optimizing-arith-sub/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/415-optimizing-arith-neg/src/Main.java b/test/415-optimizing-arith-neg/src/Main.java index bd8a1583d5..c53b639d40 100644 --- a/test/415-optimizing-arith-neg/src/Main.java +++ b/test/415-optimizing-arith-neg/src/Main.java @@ -14,8 +14,9 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure -// it does compile the method. +// Note that $opt$ is a marker for the optimizing compiler to test +// it does compile the method, and that $noinline$ is a marker to +// test that it does not inline it. 
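// (In these tests the $noinline$ methods additionally guard their bodies with
// 'if (doThrow) throw new Error();' -- doThrow is a mutable static field the
// compiler cannot prove false, so the extra throwing path is there to defeat
// inlining, per the comments in the methods below.)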
public class Main { public static void assertEquals(int expected, int result) { @@ -68,23 +69,23 @@ public class Main { public static void main(String[] args) { negInt(); - $opt$InplaceNegOneInt(1); + $opt$noinline$InplaceNegOneInt(1); negLong(); - $opt$InplaceNegOneLong(1L); + $opt$noinline$InplaceNegOneLong(1L); negFloat(); negDouble(); } private static void negInt() { - assertEquals(-1, $opt$NegInt(1)); - assertEquals(1, $opt$NegInt(-1)); - assertEquals(0, $opt$NegInt(0)); - assertEquals(51, $opt$NegInt(-51)); - assertEquals(-51, $opt$NegInt(51)); - assertEquals(2147483647, $opt$NegInt(-2147483647)); // -(2^31 - 1) - assertEquals(-2147483647, $opt$NegInt(2147483647)); // 2^31 - 1 + assertEquals(-1, $opt$noinline$NegInt(1)); + assertEquals(1, $opt$noinline$NegInt(-1)); + assertEquals(0, $opt$noinline$NegInt(0)); + assertEquals(51, $opt$noinline$NegInt(-51)); + assertEquals(-51, $opt$noinline$NegInt(51)); + assertEquals(2147483647, $opt$noinline$NegInt(-2147483647)); // -(2^31 - 1) + assertEquals(-2147483647, $opt$noinline$NegInt(2147483647)); // 2^31 - 1 // From the Java 7 SE Edition specification: // http://docs.oracle.com/javase/specs/jls/se7/html/jls-15.html#jls-15.15.4 // @@ -95,101 +96,128 @@ public class Main { // int or long results in that same maximum negative number. // Overflow occurs in this case, but no exception is thrown. // For all integer values x, -x equals (~x)+1.'' - assertEquals(-2147483648, $opt$NegInt(-2147483648)); // -(2^31) - } - - private static void $opt$InplaceNegOneInt(int a) { - a = -a; - assertEquals(-1, a); + assertEquals(-2147483648, $opt$noinline$NegInt(-2147483648)); // -(2^31) } private static void negLong() { - assertEquals(-1L, $opt$NegLong(1L)); - assertEquals(1L, $opt$NegLong(-1L)); - assertEquals(0L, $opt$NegLong(0L)); - assertEquals(51L, $opt$NegLong(-51L)); - assertEquals(-51L, $opt$NegLong(51L)); - - assertEquals(2147483647L, $opt$NegLong(-2147483647L)); // -(2^31 - 1) - assertEquals(-2147483647L, $opt$NegLong(2147483647L)); // (2^31 - 1) - assertEquals(2147483648L, $opt$NegLong(-2147483648L)); // -(2^31) - assertEquals(-2147483648L, $opt$NegLong(2147483648L)); // 2^31 - - assertEquals(9223372036854775807L, $opt$NegLong(-9223372036854775807L)); // -(2^63 - 1) - assertEquals(-9223372036854775807L, $opt$NegLong(9223372036854775807L)); // 2^63 - 1 + assertEquals(-1L, $opt$noinline$NegLong(1L)); + assertEquals(1L, $opt$noinline$NegLong(-1L)); + assertEquals(0L, $opt$noinline$NegLong(0L)); + assertEquals(51L, $opt$noinline$NegLong(-51L)); + assertEquals(-51L, $opt$noinline$NegLong(51L)); + + assertEquals(2147483647L, $opt$noinline$NegLong(-2147483647L)); // -(2^31 - 1) + assertEquals(-2147483647L, $opt$noinline$NegLong(2147483647L)); // (2^31 - 1) + assertEquals(2147483648L, $opt$noinline$NegLong(-2147483648L)); // -(2^31) + assertEquals(-2147483648L, $opt$noinline$NegLong(2147483648L)); // 2^31 + + assertEquals(9223372036854775807L, $opt$noinline$NegLong(-9223372036854775807L)); // -(2^63 - 1) + assertEquals(-9223372036854775807L, $opt$noinline$NegLong(9223372036854775807L)); // 2^63 - 1 // See remark regarding the negation of the maximum negative // (long) value in negInt(). 
- assertEquals(-9223372036854775808L, $opt$NegLong(-9223372036854775808L)); // -(2^63) + assertEquals(-9223372036854775808L, $opt$noinline$NegLong(-9223372036854775808L)); // -(2^63) + } + + private static void negFloat() { + assertEquals("-0.0", $opt$noinline$NegFloat(0F)); + assertEquals("0.0", $opt$noinline$NegFloat(-0F)); + assertEquals(-1F, $opt$noinline$NegFloat(1F)); + assertEquals(1F, $opt$noinline$NegFloat(-1F)); + assertEquals(51F, $opt$noinline$NegFloat(-51F)); + assertEquals(-51F, $opt$noinline$NegFloat(51F)); + + assertEquals(-0.1F, $opt$noinline$NegFloat(0.1F)); + assertEquals(0.1F, $opt$noinline$NegFloat(-0.1F)); + assertEquals(343597.38362F, $opt$noinline$NegFloat(-343597.38362F)); + assertEquals(-343597.38362F, $opt$noinline$NegFloat(343597.38362F)); + + assertEquals(-Float.MIN_NORMAL, $opt$noinline$NegFloat(Float.MIN_NORMAL)); + assertEquals(Float.MIN_NORMAL, $opt$noinline$NegFloat(-Float.MIN_NORMAL)); + assertEquals(-Float.MIN_VALUE, $opt$noinline$NegFloat(Float.MIN_VALUE)); + assertEquals(Float.MIN_VALUE, $opt$noinline$NegFloat(-Float.MIN_VALUE)); + assertEquals(-Float.MAX_VALUE, $opt$noinline$NegFloat(Float.MAX_VALUE)); + assertEquals(Float.MAX_VALUE, $opt$noinline$NegFloat(-Float.MAX_VALUE)); + + assertEquals(Float.NEGATIVE_INFINITY, $opt$noinline$NegFloat(Float.POSITIVE_INFINITY)); + assertEquals(Float.POSITIVE_INFINITY, $opt$noinline$NegFloat(Float.NEGATIVE_INFINITY)); + assertIsNaN($opt$noinline$NegFloat(Float.NaN)); } - private static void $opt$InplaceNegOneLong(long a) { + private static void negDouble() { + assertEquals("-0.0", $opt$noinline$NegDouble(0D)); + assertEquals("0.0", $opt$noinline$NegDouble(-0D)); + assertEquals(-1D, $opt$noinline$NegDouble(1D)); + assertEquals(1D, $opt$noinline$NegDouble(-1D)); + assertEquals(51D, $opt$noinline$NegDouble(-51D)); + assertEquals(-51D, $opt$noinline$NegDouble(51D)); + + assertEquals(-0.1D, $opt$noinline$NegDouble(0.1D)); + assertEquals(0.1D, $opt$noinline$NegDouble(-0.1D)); + assertEquals(343597.38362D, $opt$noinline$NegDouble(-343597.38362D)); + assertEquals(-343597.38362D, $opt$noinline$NegDouble(343597.38362D)); + + assertEquals(-Double.MIN_NORMAL, $opt$noinline$NegDouble(Double.MIN_NORMAL)); + assertEquals(Double.MIN_NORMAL, $opt$noinline$NegDouble(-Double.MIN_NORMAL)); + assertEquals(-Double.MIN_VALUE, $opt$noinline$NegDouble(Double.MIN_VALUE)); + assertEquals(Double.MIN_VALUE, $opt$noinline$NegDouble(-Double.MIN_VALUE)); + assertEquals(-Double.MAX_VALUE, $opt$noinline$NegDouble(Double.MAX_VALUE)); + assertEquals(Double.MAX_VALUE, $opt$noinline$NegDouble(-Double.MAX_VALUE)); + + assertEquals(Double.NEGATIVE_INFINITY, $opt$noinline$NegDouble(Double.POSITIVE_INFINITY)); + assertEquals(Double.POSITIVE_INFINITY, $opt$noinline$NegDouble(Double.NEGATIVE_INFINITY)); + assertIsNaN($opt$noinline$NegDouble(Double.NaN)); + } + + + static boolean doThrow = false; + + private static void $opt$noinline$InplaceNegOneInt(int a) { + if (doThrow) { + // Try defeating inlining. 
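    // (doThrow is never set to true, so the throw below is unreachable at
    // run time; its mere presence is what is expected to keep the optimizing
    // compiler from inlining the method.)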
+ throw new Error(); + } a = -a; - assertEquals(-1L, a); + assertEquals(-1, a); } - private static void negFloat() { - assertEquals("-0.0", $opt$NegFloat(0F)); - assertEquals("0.0", $opt$NegFloat(-0F)); - assertEquals(-1F, $opt$NegFloat(1F)); - assertEquals(1F, $opt$NegFloat(-1F)); - assertEquals(51F, $opt$NegFloat(-51F)); - assertEquals(-51F, $opt$NegFloat(51F)); - - assertEquals(-0.1F, $opt$NegFloat(0.1F)); - assertEquals(0.1F, $opt$NegFloat(-0.1F)); - assertEquals(343597.38362F, $opt$NegFloat(-343597.38362F)); - assertEquals(-343597.38362F, $opt$NegFloat(343597.38362F)); - - assertEquals(-Float.MIN_NORMAL, $opt$NegFloat(Float.MIN_NORMAL)); - assertEquals(Float.MIN_NORMAL, $opt$NegFloat(-Float.MIN_NORMAL)); - assertEquals(-Float.MIN_VALUE, $opt$NegFloat(Float.MIN_VALUE)); - assertEquals(Float.MIN_VALUE, $opt$NegFloat(-Float.MIN_VALUE)); - assertEquals(-Float.MAX_VALUE, $opt$NegFloat(Float.MAX_VALUE)); - assertEquals(Float.MAX_VALUE, $opt$NegFloat(-Float.MAX_VALUE)); - - assertEquals(Float.NEGATIVE_INFINITY, $opt$NegFloat(Float.POSITIVE_INFINITY)); - assertEquals(Float.POSITIVE_INFINITY, $opt$NegFloat(Float.NEGATIVE_INFINITY)); - assertIsNaN($opt$NegFloat(Float.NaN)); + private static void $opt$noinline$InplaceNegOneLong(long a) { + if (doThrow) { + // Try defeating inlining. + throw new Error(); + } + a = -a; + assertEquals(-1L, a); } - private static void negDouble() { - assertEquals("-0.0", $opt$NegDouble(0D)); - assertEquals("0.0", $opt$NegDouble(-0D)); - assertEquals(-1D, $opt$NegDouble(1D)); - assertEquals(1D, $opt$NegDouble(-1D)); - assertEquals(51D, $opt$NegDouble(-51D)); - assertEquals(-51D, $opt$NegDouble(51D)); - - assertEquals(-0.1D, $opt$NegDouble(0.1D)); - assertEquals(0.1D, $opt$NegDouble(-0.1D)); - assertEquals(343597.38362D, $opt$NegDouble(-343597.38362D)); - assertEquals(-343597.38362D, $opt$NegDouble(343597.38362D)); - - assertEquals(-Double.MIN_NORMAL, $opt$NegDouble(Double.MIN_NORMAL)); - assertEquals(Double.MIN_NORMAL, $opt$NegDouble(-Double.MIN_NORMAL)); - assertEquals(-Double.MIN_VALUE, $opt$NegDouble(Double.MIN_VALUE)); - assertEquals(Double.MIN_VALUE, $opt$NegDouble(-Double.MIN_VALUE)); - assertEquals(-Double.MAX_VALUE, $opt$NegDouble(Double.MAX_VALUE)); - assertEquals(Double.MAX_VALUE, $opt$NegDouble(-Double.MAX_VALUE)); - - assertEquals(Double.NEGATIVE_INFINITY, $opt$NegDouble(Double.POSITIVE_INFINITY)); - assertEquals(Double.POSITIVE_INFINITY, $opt$NegDouble(Double.NEGATIVE_INFINITY)); - assertIsNaN($opt$NegDouble(Double.NaN)); - } - - static int $opt$NegInt(int a){ + private static int $opt$noinline$NegInt(int a){ + if (doThrow) { + // Try defeating inlining. + throw new Error(); + } return -a; } - static long $opt$NegLong(long a){ + private static long $opt$noinline$NegLong(long a){ + if (doThrow) { + // Try defeating inlining. + throw new Error(); + } return -a; } - static float $opt$NegFloat(float a){ + private static float $opt$noinline$NegFloat(float a){ + if (doThrow) { + // Try defeating inlining. + throw new Error(); + } return -a; } - static double $opt$NegDouble(double a){ + private static double $opt$noinline$NegDouble(double a){ + if (doThrow) { + // Try defeating inlining. + throw new Error(); + } return -a; } } diff --git a/test/417-optimizing-arith-div/src/Main.java b/test/417-optimizing-arith-div/src/Main.java index 909ceb43d6..68e89b3eb2 100644 --- a/test/417-optimizing-arith-div/src/Main.java +++ b/test/417-optimizing-arith-div/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/421-large-frame/src/Main.java b/test/421-large-frame/src/Main.java index 81896abbd8..6717ba0661 100644 --- a/test/421-large-frame/src/Main.java +++ b/test/421-large-frame/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/422-type-conversion/src/Main.java b/test/422-type-conversion/src/Main.java index 9f8f417cff..146f309c81 100644 --- a/test/422-type-conversion/src/Main.java +++ b/test/422-type-conversion/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/427-bitwise/src/Main.java b/test/427-bitwise/src/Main.java index e9840669dd..aa69554a4f 100644 --- a/test/427-bitwise/src/Main.java +++ b/test/427-bitwise/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java index 3899d7fb26..c108a900e2 100644 --- a/test/441-checker-inliner/src/Main.java +++ b/test/441-checker-inliner/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2014 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ public class Main { @@ -157,6 +157,31 @@ public class Main { return x; } + /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (before) + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect + /// CHECK-DAG: Return [<<Result>>] + + /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt + /// CHECK-DAG: Return [<<Result>>] + + private static int returnAbs(int i) { + return Math.abs(i); + } + + /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (before) + /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1 + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect + /// CHECK-DAG: Return [<<Result>>] + + /// CHECK-START: int Main.InlinedIntrinsicsAreStillIntrinsic() inliner (after) + /// CHECK-DAG: <<ConstMinus1:i\d+>> IntConstant -1 + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt + /// CHECK-DAG: Return [<<Result>>] + + public static int InlinedIntrinsicsAreStillIntrinsic() { + return returnAbs(-1); + } private static void returnVoid() { return; @@ -238,5 +263,13 @@ public class Main { if (InlineWithControlFlow(false) != 2) { throw new Error(); } + + if (InlinedIntrinsicsAreStillIntrinsic() != 1) { + throw new Error(); + } + + if (returnAbs(-1) != 1) { + throw new Error(); + } } } diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java index b7863be6ce..59e7282ac7 100644 --- a/test/442-checker-constant-folding/src/Main.java +++ b/test/442-checker-constant-folding/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2014 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { @@ -46,9 +46,9 @@ public class Main { } } + /** - * Tiny three-register program exercising int constant folding - * on negation. + * Exercise constant folding on negation. 
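 * With x = 42, the Neg node folds to the constant -42 and disappears from
 * the graph entirely, as the CHECK-NOT assertion below verifies.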
*/ /// CHECK-START: int Main.IntNegation() constant_folding (before) @@ -60,6 +60,9 @@ public class Main { /// CHECK-DAG: <<ConstN42:i\d+>> IntConstant -42 /// CHECK-DAG: Return [<<ConstN42>>] + /// CHECK-START: int Main.IntNegation() constant_folding (after) + /// CHECK-NOT: Neg + public static int IntNegation() { int x, y; x = 42; @@ -67,9 +70,28 @@ public class Main { return y; } + /// CHECK-START: long Main.LongNegation() constant_folding (before) + /// CHECK-DAG: <<Const42:j\d+>> LongConstant 42 + /// CHECK-DAG: <<Neg:j\d+>> Neg [<<Const42>>] + /// CHECK-DAG: Return [<<Neg>>] + + /// CHECK-START: long Main.LongNegation() constant_folding (after) + /// CHECK-DAG: <<ConstN42:j\d+>> LongConstant -42 + /// CHECK-DAG: Return [<<ConstN42>>] + + /// CHECK-START: long Main.LongNegation() constant_folding (after) + /// CHECK-NOT: Neg + + public static long LongNegation() { + long x, y; + x = 42L; + y = -x; + return y; + } + + /** - * Tiny three-register program exercising int constant folding - * on addition. + * Exercise constant folding on addition. */ /// CHECK-START: int Main.IntAddition1() constant_folding (before) @@ -82,6 +104,9 @@ public class Main { /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 /// CHECK-DAG: Return [<<Const3>>] + /// CHECK-START: int Main.IntAddition1() constant_folding (after) + /// CHECK-NOT: Add + public static int IntAddition1() { int a, b, c; a = 1; @@ -90,11 +115,6 @@ public class Main { return c; } - /** - * Small three-register program exercising int constant folding - * on addition. - */ - /// CHECK-START: int Main.IntAddition2() constant_folding (before) /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 @@ -109,6 +129,9 @@ public class Main { /// CHECK-DAG: <<Const14:i\d+>> IntConstant 14 /// CHECK-DAG: Return [<<Const14>>] + /// CHECK-START: int Main.IntAddition2() constant_folding (after) + /// CHECK-NOT: Add + public static int IntAddition2() { int a, b, c; a = 1; @@ -121,9 +144,30 @@ public class Main { return c; } + /// CHECK-START: long Main.LongAddition() constant_folding (before) + /// CHECK-DAG: <<Const1:j\d+>> LongConstant 1 + /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 + /// CHECK-DAG: <<Add:j\d+>> Add [<<Const1>>,<<Const2>>] + /// CHECK-DAG: Return [<<Add>>] + + /// CHECK-START: long Main.LongAddition() constant_folding (after) + /// CHECK-DAG: <<Const3:j\d+>> LongConstant 3 + /// CHECK-DAG: Return [<<Const3>>] + + /// CHECK-START: long Main.LongAddition() constant_folding (after) + /// CHECK-NOT: Add + + public static long LongAddition() { + long a, b, c; + a = 1L; + b = 2L; + c = a + b; + return c; + } + + /** - * Tiny three-register program exercising int constant folding - * on subtraction. + * Exercise constant folding on subtraction. 
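 * Both widths fold the same way: 6 - 2 is replaced by the constant 4 and
 * the Sub instruction is removed.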
*/ /// CHECK-START: int Main.IntSubtraction() constant_folding (before) @@ -136,6 +180,9 @@ public class Main { /// CHECK-DAG: <<Const4:i\d+>> IntConstant 4 /// CHECK-DAG: Return [<<Const4>>] + /// CHECK-START: int Main.IntSubtraction() constant_folding (after) + /// CHECK-NOT: Sub + public static int IntSubtraction() { int a, b, c; a = 6; @@ -144,54 +191,446 @@ public class Main { return c; } + /// CHECK-START: long Main.LongSubtraction() constant_folding (before) + /// CHECK-DAG: <<Const6:j\d+>> LongConstant 6 + /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 + /// CHECK-DAG: <<Sub:j\d+>> Sub [<<Const6>>,<<Const2>>] + /// CHECK-DAG: Return [<<Sub>>] + + /// CHECK-START: long Main.LongSubtraction() constant_folding (after) + /// CHECK-DAG: <<Const4:j\d+>> LongConstant 4 + /// CHECK-DAG: Return [<<Const4>>] + + /// CHECK-START: long Main.LongSubtraction() constant_folding (after) + /// CHECK-NOT: Sub + + public static long LongSubtraction() { + long a, b, c; + a = 6L; + b = 2L; + c = a - b; + return c; + } + + /** - * Tiny three-register program exercising long constant folding - * on addition. + * Exercise constant folding on multiplication. */ - /// CHECK-START: long Main.LongAddition() constant_folding (before) - /// CHECK-DAG: <<Const1:j\d+>> LongConstant 1 - /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 - /// CHECK-DAG: <<Add:j\d+>> Add [<<Const1>>,<<Const2>>] - /// CHECK-DAG: Return [<<Add>>] + /// CHECK-START: int Main.IntMultiplication() constant_folding (before) + /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7 + /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 + /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Const7>>,<<Const3>>] + /// CHECK-DAG: Return [<<Mul>>] - /// CHECK-START: long Main.LongAddition() constant_folding (after) + /// CHECK-START: int Main.IntMultiplication() constant_folding (after) + /// CHECK-DAG: <<Const21:i\d+>> IntConstant 21 + /// CHECK-DAG: Return [<<Const21>>] + + /// CHECK-START: int Main.IntMultiplication() constant_folding (after) + /// CHECK-NOT: Mul + + public static int IntMultiplication() { + int a, b, c; + a = 7; + b = 3; + c = a * b; + return c; + } + + /// CHECK-START: long Main.LongMultiplication() constant_folding (before) + /// CHECK-DAG: <<Const7:j\d+>> LongConstant 7 /// CHECK-DAG: <<Const3:j\d+>> LongConstant 3 - /// CHECK-DAG: Return [<<Const3>>] + /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const7>>,<<Const3>>] + /// CHECK-DAG: Return [<<Mul>>] - public static long LongAddition() { + /// CHECK-START: long Main.LongMultiplication() constant_folding (after) + /// CHECK-DAG: <<Const21:j\d+>> LongConstant 21 + /// CHECK-DAG: Return [<<Const21>>] + + /// CHECK-START: long Main.LongMultiplication() constant_folding (after) + /// CHECK-NOT: Mul + + public static long LongMultiplication() { long a, b, c; - a = 1L; - b = 2L; - c = a + b; + a = 7L; + b = 3L; + c = a * b; return c; } + /** - * Tiny three-register program exercising long constant folding - * on subtraction. + * Exercise constant folding on division. 
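 * Java integer division truncates toward zero, so 8 / 3 below folds to the
 * constant 2; and because the divisor is a non-zero constant, the guarding
 * DivZeroCheck is folded away together with the Div (the CHECK-NOT lines
 * assert both are gone).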
*/ - /// CHECK-START: long Main.LongSubtraction() constant_folding (before) - /// CHECK-DAG: <<Const6:j\d+>> LongConstant 6 - /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 - /// CHECK-DAG: <<Sub:j\d+>> Sub [<<Const6>>,<<Const2>>] - /// CHECK-DAG: Return [<<Sub>>] + /// CHECK-START: int Main.IntDivision() constant_folding (before) + /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 + /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 + /// CHECK-DAG: <<Div0Chk:i\d+>> DivZeroCheck [<<Const3>>] + /// CHECK-DAG: <<Div:i\d+>> Div [<<Const8>>,<<Div0Chk>>] + /// CHECK-DAG: Return [<<Div>>] - /// CHECK-START: long Main.LongSubtraction() constant_folding (after) - /// CHECK-DAG: <<Const4:j\d+>> LongConstant 4 - /// CHECK-DAG: Return [<<Const4>>] + /// CHECK-START: int Main.IntDivision() constant_folding (after) + /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 + /// CHECK-DAG: Return [<<Const2>>] - public static long LongSubtraction() { + /// CHECK-START: int Main.IntDivision() constant_folding (after) + /// CHECK-NOT: DivZeroCheck + /// CHECK-NOT: Div + + public static int IntDivision() { + int a, b, c; + a = 8; + b = 3; + c = a / b; + return c; + } + + /// CHECK-START: long Main.LongDivision() constant_folding (before) + /// CHECK-DAG: <<Const8:j\d+>> LongConstant 8 + /// CHECK-DAG: <<Const3:j\d+>> LongConstant 3 + /// CHECK-DAG: <<Div0Chk:j\d+>> DivZeroCheck [<<Const3>>] + /// CHECK-DAG: <<Div:j\d+>> Div [<<Const8>>,<<Div0Chk>>] + /// CHECK-DAG: Return [<<Div>>] + + /// CHECK-START: long Main.LongDivision() constant_folding (after) + /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 + /// CHECK-DAG: Return [<<Const2>>] + + /// CHECK-START: long Main.LongDivision() constant_folding (after) + /// CHECK-NOT: DivZeroCheck + /// CHECK-NOT: Div + + public static long LongDivision() { long a, b, c; - a = 6L; - b = 2L; - c = a - b; + a = 8L; + b = 3L; + c = a / b; return c; } + + /** + * Exercise constant folding on remainder. + */ + + /// CHECK-START: int Main.IntRemainder() constant_folding (before) + /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 + /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 + /// CHECK-DAG: <<Div0Chk:i\d+>> DivZeroCheck [<<Const3>>] + /// CHECK-DAG: <<Rem:i\d+>> Rem [<<Const8>>,<<Div0Chk>>] + /// CHECK-DAG: Return [<<Rem>>] + + /// CHECK-START: int Main.IntRemainder() constant_folding (after) + /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 + /// CHECK-DAG: Return [<<Const2>>] + + /// CHECK-START: int Main.IntRemainder() constant_folding (after) + /// CHECK-NOT: DivZeroCheck + /// CHECK-NOT: Rem + + public static int IntRemainder() { + int a, b, c; + a = 8; + b = 3; + c = a % b; + return c; + } + + /// CHECK-START: long Main.LongRemainder() constant_folding (before) + /// CHECK-DAG: <<Const8:j\d+>> LongConstant 8 + /// CHECK-DAG: <<Const3:j\d+>> LongConstant 3 + /// CHECK-DAG: <<Div0Chk:j\d+>> DivZeroCheck [<<Const3>>] + /// CHECK-DAG: <<Rem:j\d+>> Rem [<<Const8>>,<<Div0Chk>>] + /// CHECK-DAG: Return [<<Rem>>] + + /// CHECK-START: long Main.LongRemainder() constant_folding (after) + /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 + /// CHECK-DAG: Return [<<Const2>>] + + /// CHECK-START: long Main.LongRemainder() constant_folding (after) + /// CHECK-NOT: DivZeroCheck + /// CHECK-NOT: Rem + + public static long LongRemainder() { + long a, b, c; + a = 8L; + b = 3L; + c = a % b; + return c; + } + + + /** + * Exercise constant folding on left shift. 
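 * Note the mixed-width case: an int shifted by a long distance first narrows
 * the distance with a TypeConversion, and only the low five bits of the
 * distance matter for an int shift (six for a long shift, per JLS 15.19).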
+ */ + + /// CHECK-START: int Main.ShlIntLong() constant_folding (before) + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2 + /// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>] + /// CHECK-DAG: <<Shl:i\d+>> Shl [<<Const1>>,<<TypeConv>>] + /// CHECK-DAG: Return [<<Shl>>] + + /// CHECK-START: int Main.ShlIntLong() constant_folding (after) + /// CHECK-DAG: <<Const4:i\d+>> IntConstant 4 + /// CHECK-DAG: Return [<<Const4>>] + + /// CHECK-START: int Main.ShlIntLong() constant_folding (after) + /// CHECK-NOT: Shl + + public static int ShlIntLong() { + int lhs = 1; + long rhs = 2; + return lhs << rhs; + } + + /// CHECK-START: long Main.ShlLongInt() constant_folding (before) + /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3 + /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 + /// CHECK-DAG: <<Shl:j\d+>> Shl [<<Const3L>>,<<Const2>>] + /// CHECK-DAG: Return [<<Shl>>] + + /// CHECK-START: long Main.ShlLongInt() constant_folding (after) + /// CHECK-DAG: <<Const12L:j\d+>> LongConstant 12 + /// CHECK-DAG: Return [<<Const12L>>] + + /// CHECK-START: long Main.ShlLongInt() constant_folding (after) + /// CHECK-NOT: Shl + + public static long ShlLongInt() { + long lhs = 3; + int rhs = 2; + return lhs << rhs; + } + + + /** + * Exercise constant folding on right shift. + */ + + /// CHECK-START: int Main.ShrIntLong() constant_folding (before) + /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7 + /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2 + /// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>] + /// CHECK-DAG: <<Shr:i\d+>> Shr [<<Const7>>,<<TypeConv>>] + /// CHECK-DAG: Return [<<Shr>>] + + /// CHECK-START: int Main.ShrIntLong() constant_folding (after) + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK-DAG: Return [<<Const1>>] + + /// CHECK-START: int Main.ShrIntLong() constant_folding (after) + /// CHECK-NOT: Shr + + public static int ShrIntLong() { + int lhs = 7; + long rhs = 2; + return lhs >> rhs; + } + + /// CHECK-START: long Main.ShrLongInt() constant_folding (before) + /// CHECK-DAG: <<Const9L:j\d+>> LongConstant 9 + /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 + /// CHECK-DAG: <<Shr:j\d+>> Shr [<<Const9L>>,<<Const2>>] + /// CHECK-DAG: Return [<<Shr>>] + + /// CHECK-START: long Main.ShrLongInt() constant_folding (after) + /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2 + /// CHECK-DAG: Return [<<Const2L>>] + + /// CHECK-START: long Main.ShrLongInt() constant_folding (after) + /// CHECK-NOT: Shr + + public static long ShrLongInt() { + long lhs = 9; + int rhs = 2; + return lhs >> rhs; + } + + + /** + * Exercise constant folding on unsigned right shift. 
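 * Worked example for the int case: -7 is 0xFFFFFFF9, and 0xFFFFFFF9 >>> 2
 * is 0x3FFFFFFE, i.e. 1073741822; likewise -9L >>> 2 yields
 * 4611686018427387901L.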
+ */ + + /// CHECK-START: int Main.UShrIntLong() constant_folding (before) + /// CHECK-DAG: <<ConstM7:i\d+>> IntConstant -7 + /// CHECK-DAG: <<Const2L:j\d+>> LongConstant 2 + /// CHECK-DAG: <<TypeConv:i\d+>> TypeConversion [<<Const2L>>] + /// CHECK-DAG: <<UShr:i\d+>> UShr [<<ConstM7>>,<<TypeConv>>] + /// CHECK-DAG: Return [<<UShr>>] + + /// CHECK-START: int Main.UShrIntLong() constant_folding (after) + /// CHECK-DAG: <<ConstRes:i\d+>> IntConstant 1073741822 + /// CHECK-DAG: Return [<<ConstRes>>] + + /// CHECK-START: int Main.UShrIntLong() constant_folding (after) + /// CHECK-NOT: UShr + + public static int UShrIntLong() { + int lhs = -7; + long rhs = 2; + return lhs >>> rhs; + } + + /// CHECK-START: long Main.UShrLongInt() constant_folding (before) + /// CHECK-DAG: <<ConstM9L:j\d+>> LongConstant -9 + /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 + /// CHECK-DAG: <<UShr:j\d+>> UShr [<<ConstM9L>>,<<Const2>>] + /// CHECK-DAG: Return [<<UShr>>] + + /// CHECK-START: long Main.UShrLongInt() constant_folding (after) + /// CHECK-DAG: <<ConstRes:j\d+>> LongConstant 4611686018427387901 + /// CHECK-DAG: Return [<<ConstRes>>] + + /// CHECK-START: long Main.UShrLongInt() constant_folding (after) + /// CHECK-NOT: UShr + + public static long UShrLongInt() { + long lhs = -9; + int rhs = 2; + return lhs >>> rhs; + } + + + /** + * Exercise constant folding on logical and. + */ + + /// CHECK-START: long Main.AndIntLong() constant_folding (before) + /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10 + /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3 + /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>] + /// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const3L>>] + /// CHECK-DAG: Return [<<And>>] + + /// CHECK-START: long Main.AndIntLong() constant_folding (after) + /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 + /// CHECK-DAG: Return [<<Const2>>] + + /// CHECK-START: long Main.AndIntLong() constant_folding (after) + /// CHECK-NOT: And + + public static long AndIntLong() { + int lhs = 10; + long rhs = 3; + return lhs & rhs; + } + + /// CHECK-START: long Main.AndLongInt() constant_folding (before) + /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10 + /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 + /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>] + /// CHECK-DAG: <<And:j\d+>> And [<<Const10L>>,<<TypeConv>>] + /// CHECK-DAG: Return [<<And>>] + + /// CHECK-START: long Main.AndLongInt() constant_folding (after) + /// CHECK-DAG: <<Const2:j\d+>> LongConstant 2 + /// CHECK-DAG: Return [<<Const2>>] + + /// CHECK-START: long Main.AndLongInt() constant_folding (after) + /// CHECK-NOT: And + + public static long AndLongInt() { + long lhs = 10; + int rhs = 3; + return lhs & rhs; + } + + + /** + * Exercise constant folding on logical or. 
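 * In binary: 10 | 3 is 0b1010 | 0b0011 = 0b1011 = 11; the int operand is
 * first widened to long by a TypeConversion, which folds away as well.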
+ */ + + /// CHECK-START: long Main.OrIntLong() constant_folding (before) + /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10 + /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3 + /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>] + /// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const3L>>] + /// CHECK-DAG: Return [<<Or>>] + + /// CHECK-START: long Main.OrIntLong() constant_folding (after) + /// CHECK-DAG: <<Const11:j\d+>> LongConstant 11 + /// CHECK-DAG: Return [<<Const11>>] + + /// CHECK-START: long Main.OrIntLong() constant_folding (after) + /// CHECK-NOT: Or + + public static long OrIntLong() { + int lhs = 10; + long rhs = 3; + return lhs | rhs; + } + + /// CHECK-START: long Main.OrLongInt() constant_folding (before) + /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10 + /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 + /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>] + /// CHECK-DAG: <<Or:j\d+>> Or [<<Const10L>>,<<TypeConv>>] + /// CHECK-DAG: Return [<<Or>>] + + /// CHECK-START: long Main.OrLongInt() constant_folding (after) + /// CHECK-DAG: <<Const11:j\d+>> LongConstant 11 + /// CHECK-DAG: Return [<<Const11>>] + + /// CHECK-START: long Main.OrLongInt() constant_folding (after) + /// CHECK-NOT: Or + + public static long OrLongInt() { + long lhs = 10; + int rhs = 3; + return lhs | rhs; + } + + /** - * Three-register program with a constant (static) condition. + * Exercise constant folding on logical exclusive or. + */ + + /// CHECK-START: long Main.XorIntLong() constant_folding (before) + /// CHECK-DAG: <<Const10:i\d+>> IntConstant 10 + /// CHECK-DAG: <<Const3L:j\d+>> LongConstant 3 + /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const10>>] + /// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const3L>>] + /// CHECK-DAG: Return [<<Xor>>] + + /// CHECK-START: long Main.XorIntLong() constant_folding (after) + /// CHECK-DAG: <<Const9:j\d+>> LongConstant 9 + /// CHECK-DAG: Return [<<Const9>>] + + /// CHECK-START: long Main.XorIntLong() constant_folding (after) + /// CHECK-NOT: Xor + + public static long XorIntLong() { + int lhs = 10; + long rhs = 3; + return lhs ^ rhs; + } + + /// CHECK-START: long Main.XorLongInt() constant_folding (before) + /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10 + /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 + /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>] + /// CHECK-DAG: <<Xor:j\d+>> Xor [<<Const10L>>,<<TypeConv>>] + /// CHECK-DAG: Return [<<Xor>>] + + /// CHECK-START: long Main.XorLongInt() constant_folding (after) + /// CHECK-DAG: <<Const9:j\d+>> LongConstant 9 + /// CHECK-DAG: Return [<<Const9>>] + + /// CHECK-START: long Main.XorLongInt() constant_folding (after) + /// CHECK-NOT: Xor + + public static long XorLongInt() { + long lhs = 10; + int rhs = 3; + return lhs ^ rhs; + } + + + /** + * Exercise constant folding on constant (static) condition. */ /// CHECK-START: int Main.StaticCondition() constant_folding (before) @@ -204,6 +643,9 @@ public class Main { /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 /// CHECK-DAG: If [<<Const1>>] + /// CHECK-START: int Main.StaticCondition() constant_folding (after) + /// CHECK-NOT: GreaterThanOrEqual + public static int StaticCondition() { int a, b, c; a = 7; @@ -215,9 +657,10 @@ public class Main { return c; } + /** - * Four-variable program with jumps leading to the creation of many - * blocks. + * Exercise constant folding on a program with condition + * (i.e. jumps) leading to the creation of many blocks. 
* * The intent of this test is to ensure that all constant expressions * are actually evaluated at compile-time, thanks to the reverse @@ -238,6 +681,10 @@ public class Main { /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const7>>,<<Const3>>] /// CHECK-DAG: Return [<<Phi>>] + /// CHECK-START: int Main.JumpsAndConditionals(boolean) constant_folding (after) + /// CHECK-NOT: Add + /// CHECK-NOT: Sub + public static int JumpsAndConditionals(boolean cond) { int a, b, c; a = 5; @@ -249,6 +696,7 @@ public class Main { return c; } + /** * Test optimizations of arithmetic identities yielding a constant result. */ @@ -262,9 +710,11 @@ public class Main { /// CHECK-START: int Main.And0(int) constant_folding (after) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-NOT: And /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: int Main.And0(int) constant_folding (after) + /// CHECK-NOT: And + public static int And0(int arg) { return arg & 0; } @@ -278,9 +728,11 @@ public class Main { /// CHECK-START: long Main.Mul0(long) constant_folding (after) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 - /// CHECK-NOT: Mul /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: long Main.Mul0(long) constant_folding (after) + /// CHECK-NOT: Mul + public static long Mul0(long arg) { return arg * 0; } @@ -293,9 +745,11 @@ public class Main { /// CHECK-START: int Main.OrAllOnes(int) constant_folding (after) /// CHECK-DAG: <<ConstF:i\d+>> IntConstant -1 - /// CHECK-NOT: Or /// CHECK-DAG: Return [<<ConstF>>] + /// CHECK-START: int Main.OrAllOnes(int) constant_folding (after) + /// CHECK-NOT: Or + public static int OrAllOnes(int arg) { return arg | -1; } @@ -309,9 +763,11 @@ public class Main { /// CHECK-START: long Main.Rem0(long) constant_folding (after) /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 - /// CHECK-NOT: Rem /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: long Main.Rem0(long) constant_folding (after) + /// CHECK-NOT: Rem + public static long Rem0(long arg) { return 0 % arg; } @@ -324,9 +780,11 @@ public class Main { /// CHECK-START: int Main.Rem1(int) constant_folding (after) /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-NOT: Rem /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: int Main.Rem1(int) constant_folding (after) + /// CHECK-NOT: Rem + public static int Rem1(int arg) { return arg % 1; } @@ -340,9 +798,11 @@ public class Main { /// CHECK-START: long Main.RemN1(long) constant_folding (after) /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 - /// CHECK-NOT: Rem /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: long Main.RemN1(long) constant_folding (after) + /// CHECK-NOT: Rem + public static long RemN1(long arg) { return arg % -1; } @@ -356,13 +816,34 @@ public class Main { /// CHECK-START: int Main.Shl0(int) constant_folding (after) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-NOT: Shl /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: int Main.Shl0(int) constant_folding (after) + /// CHECK-NOT: Shl + public static int Shl0(int arg) { return 0 << arg; } + /// CHECK-START: long Main.ShlLong0WithInt(int) constant_folding (before) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const0L:j\d+>> LongConstant 0 + /// CHECK-DAG: <<Shl:j\d+>> Shl [<<Const0L>>,<<Arg>>] + /// CHECK-DAG: Return [<<Shl>>] + + /// CHECK-START: long Main.ShlLong0WithInt(int) constant_folding (after) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// 
CHECK-DAG: <<Const0L:j\d+>> LongConstant 0 + /// CHECK-DAG: Return [<<Const0L>>] + + /// CHECK-START: long Main.ShlLong0WithInt(int) constant_folding (after) + /// CHECK-NOT: Shl + + public static long ShlLong0WithInt(int arg) { + long long_zero = 0; + return long_zero << arg; + } + /// CHECK-START: long Main.Shr0(int) constant_folding (before) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 @@ -372,9 +853,11 @@ public class Main { /// CHECK-START: long Main.Shr0(int) constant_folding (after) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 - /// CHECK-NOT: Shr /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: long Main.Shr0(int) constant_folding (after) + /// CHECK-NOT: Shr + public static long Shr0(int arg) { return (long)0 >> arg; } @@ -387,9 +870,11 @@ public class Main { /// CHECK-START: long Main.SubSameLong(long) constant_folding (after) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 - /// CHECK-NOT: Sub /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: long Main.SubSameLong(long) constant_folding (after) + /// CHECK-NOT: Sub + public static long SubSameLong(long arg) { return arg - arg; } @@ -403,9 +888,11 @@ public class Main { /// CHECK-START: int Main.UShr0(int) constant_folding (after) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-NOT: UShr /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: int Main.UShr0(int) constant_folding (after) + /// CHECK-NOT: UShr + public static int UShr0(int arg) { return 0 >>> arg; } @@ -418,9 +905,11 @@ public class Main { /// CHECK-START: int Main.XorSameInt(int) constant_folding (after) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-NOT: Xor /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: int Main.XorSameInt(int) constant_folding (after) + /// CHECK-NOT: Xor + public static int XorSameInt(int arg) { return arg ^ arg; } @@ -473,6 +962,11 @@ public class Main { return arg < Double.NaN; } + + /** + * Exercise constant folding on type conversions. 
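 * The non-trivial cases are the floating-point narrowings: per JLS 5.1.3,
 * an out-of-range value such as 1.0e34f saturates to Integer.MAX_VALUE and
 * NaN converts to 0, so folding must reproduce exactly those results.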
+ */ + /// CHECK-START: int Main.ReturnInt33() constant_folding (before) /// CHECK-DAG: <<Const33:j\d+>> LongConstant 33 /// CHECK-DAG: <<Convert:i\d+>> TypeConversion [<<Const33>>] @@ -482,6 +976,9 @@ public class Main { /// CHECK-DAG: <<Const33:i\d+>> IntConstant 33 /// CHECK-DAG: Return [<<Const33>>] + /// CHECK-START: int Main.ReturnInt33() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static int ReturnInt33() { long imm = 33L; return (int) imm; @@ -496,6 +993,9 @@ public class Main { /// CHECK-DAG: <<ConstMax:i\d+>> IntConstant 2147483647 /// CHECK-DAG: Return [<<ConstMax>>] + /// CHECK-START: int Main.ReturnIntMax() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static int ReturnIntMax() { float imm = 1.0e34f; return (int) imm; @@ -510,6 +1010,9 @@ public class Main { /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: int Main.ReturnInt0() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static int ReturnInt0() { double imm = Double.NaN; return (int) imm; @@ -524,6 +1027,9 @@ public class Main { /// CHECK-DAG: <<Const33:j\d+>> LongConstant 33 /// CHECK-DAG: Return [<<Const33>>] + /// CHECK-START: long Main.ReturnLong33() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static long ReturnLong33() { int imm = 33; return (long) imm; @@ -538,6 +1044,9 @@ public class Main { /// CHECK-DAG: <<Const34:j\d+>> LongConstant 34 /// CHECK-DAG: Return [<<Const34>>] + /// CHECK-START: long Main.ReturnLong34() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static long ReturnLong34() { float imm = 34.0f; return (long) imm; @@ -552,6 +1061,9 @@ public class Main { /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 /// CHECK-DAG: Return [<<Const0>>] + /// CHECK-START: long Main.ReturnLong0() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static long ReturnLong0() { double imm = -Double.NaN; return (long) imm; @@ -566,6 +1078,9 @@ public class Main { /// CHECK-DAG: <<Const33:f\d+>> FloatConstant 33 /// CHECK-DAG: Return [<<Const33>>] + /// CHECK-START: float Main.ReturnFloat33() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static float ReturnFloat33() { int imm = 33; return (float) imm; @@ -580,6 +1095,9 @@ public class Main { /// CHECK-DAG: <<Const34:f\d+>> FloatConstant 34 /// CHECK-DAG: Return [<<Const34>>] + /// CHECK-START: float Main.ReturnFloat34() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static float ReturnFloat34() { long imm = 34L; return (float) imm; @@ -594,6 +1112,9 @@ public class Main { /// CHECK-DAG: <<Const:f\d+>> FloatConstant 99.25 /// CHECK-DAG: Return [<<Const>>] + /// CHECK-START: float Main.ReturnFloat99P25() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static float ReturnFloat99P25() { double imm = 99.25; return (float) imm; @@ -622,6 +1143,9 @@ public class Main { /// CHECK-DAG: <<Const34:d\d+>> DoubleConstant 34 /// CHECK-DAG: Return [<<Const34>>] + /// CHECK-START: double Main.ReturnDouble34() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static double ReturnDouble34() { long imm = 34L; return (double) imm; @@ -636,46 +1160,90 @@ public class Main { /// CHECK-DAG: <<Const:d\d+>> DoubleConstant 99.25 /// CHECK-DAG: Return [<<Const>>] + /// CHECK-START: double Main.ReturnDouble99P25() constant_folding (after) + /// CHECK-NOT: TypeConversion + public static double ReturnDouble99P25() { float imm = 99.25f; return (double) imm; } + public 
static void main(String[] args) { - assertIntEquals(IntNegation(), -42); - assertIntEquals(IntAddition1(), 3); - assertIntEquals(IntAddition2(), 14); - assertIntEquals(IntSubtraction(), 4); - assertLongEquals(LongAddition(), 3L); - assertLongEquals(LongSubtraction(), 4L); - assertIntEquals(StaticCondition(), 5); - assertIntEquals(JumpsAndConditionals(true), 7); - assertIntEquals(JumpsAndConditionals(false), 3); + assertIntEquals(-42, IntNegation()); + assertLongEquals(-42L, LongNegation()); + + assertIntEquals(3, IntAddition1()); + assertIntEquals(14, IntAddition2()); + assertLongEquals(3L, LongAddition()); + + assertIntEquals(4, IntSubtraction()); + assertLongEquals(4L, LongSubtraction()); + + assertIntEquals(21, IntMultiplication()); + assertLongEquals(21L, LongMultiplication()); + + assertIntEquals(2, IntDivision()); + assertLongEquals(2L, LongDivision()); + + assertIntEquals(2, IntRemainder()); + assertLongEquals(2L, LongRemainder()); + + assertIntEquals(4, ShlIntLong()); + assertLongEquals(12L, ShlLongInt()); + + assertIntEquals(1, ShrIntLong()); + assertLongEquals(2L, ShrLongInt()); + + assertIntEquals(1073741822, UShrIntLong()); + assertLongEquals(4611686018427387901L, UShrLongInt()); + + assertLongEquals(2, AndIntLong()); + assertLongEquals(2, AndLongInt()); + + assertLongEquals(11, OrIntLong()); + assertLongEquals(11, OrLongInt()); + + assertLongEquals(9, XorIntLong()); + assertLongEquals(9, XorLongInt()); + + assertIntEquals(5, StaticCondition()); + + assertIntEquals(7, JumpsAndConditionals(true)); + assertIntEquals(3, JumpsAndConditionals(false)); + int arbitrary = 123456; // Value chosen arbitrarily. - assertIntEquals(And0(arbitrary), 0); - assertLongEquals(Mul0(arbitrary), 0); - assertIntEquals(OrAllOnes(arbitrary), -1); - assertLongEquals(Rem0(arbitrary), 0); - assertIntEquals(Rem1(arbitrary), 0); - assertLongEquals(RemN1(arbitrary), 0); - assertIntEquals(Shl0(arbitrary), 0); - assertLongEquals(Shr0(arbitrary), 0); - assertLongEquals(SubSameLong(arbitrary), 0); - assertIntEquals(UShr0(arbitrary), 0); - assertIntEquals(XorSameInt(arbitrary), 0); + + assertIntEquals(0, And0(arbitrary)); + assertLongEquals(0, Mul0(arbitrary)); + assertIntEquals(-1, OrAllOnes(arbitrary)); + assertLongEquals(0, Rem0(arbitrary)); + assertIntEquals(0, Rem1(arbitrary)); + assertLongEquals(0, RemN1(arbitrary)); + assertIntEquals(0, Shl0(arbitrary)); + assertLongEquals(0, ShlLong0WithInt(arbitrary)); + assertLongEquals(0, Shr0(arbitrary)); + assertLongEquals(0, SubSameLong(arbitrary)); + assertIntEquals(0, UShr0(arbitrary)); + assertIntEquals(0, XorSameInt(arbitrary)); + assertFalse(CmpFloatGreaterThanNaN(arbitrary)); assertFalse(CmpDoubleLessThanNaN(arbitrary)); - assertIntEquals(ReturnInt33(), 33); - assertIntEquals(ReturnIntMax(), 2147483647); - assertIntEquals(ReturnInt0(), 0); - assertLongEquals(ReturnLong33(), 33); - assertLongEquals(ReturnLong34(), 34); - assertLongEquals(ReturnLong0(), 0); - assertFloatEquals(ReturnFloat33(), 33); - assertFloatEquals(ReturnFloat34(), 34); - assertFloatEquals(ReturnFloat99P25(), 99.25f); - assertDoubleEquals(ReturnDouble33(), 33); - assertDoubleEquals(ReturnDouble34(), 34); - assertDoubleEquals(ReturnDouble99P25(), 99.25); + + assertIntEquals(33, ReturnInt33()); + assertIntEquals(2147483647, ReturnIntMax()); + assertIntEquals(0, ReturnInt0()); + + assertLongEquals(33, ReturnLong33()); + assertLongEquals(34, ReturnLong34()); + assertLongEquals(0, ReturnLong0()); + + assertFloatEquals(33, ReturnFloat33()); + assertFloatEquals(34, ReturnFloat34()); + 
assertFloatEquals(99.25f, ReturnFloat99P25()); + + assertDoubleEquals(33, ReturnDouble33()); + assertDoubleEquals(34, ReturnDouble34()); + assertDoubleEquals(99.25, ReturnDouble99P25()); } } diff --git a/test/445-checker-licm/src/Main.java b/test/445-checker-licm/src/Main.java index 42f9a11092..6ee8a4d66f 100644 --- a/test/445-checker-licm/src/Main.java +++ b/test/445-checker-licm/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/446-checker-inliner2/src/Main.java b/test/446-checker-inliner2/src/Main.java index de00a09256..e8168af4e1 100644 --- a/test/446-checker-inliner2/src/Main.java +++ b/test/446-checker-inliner2/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2014 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ public class Main { diff --git a/test/447-checker-inliner3/src/Main.java b/test/447-checker-inliner3/src/Main.java index e3fdffdd46..0b980d0143 100644 --- a/test/447-checker-inliner3/src/Main.java +++ b/test/447-checker-inliner3/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2014 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/449-checker-bce/expected.txt b/test/449-checker-bce/expected.txt index e69de29bb2..e114c50371 100644 --- a/test/449-checker-bce/expected.txt +++ b/test/449-checker-bce/expected.txt @@ -0,0 +1 @@ +java.lang.ArrayIndexOutOfBoundsException: length=5; index=82 diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java index ed6fc1ee2b..a746664160 100644 --- a/test/449-checker-bce/src/Main.java +++ b/test/449-checker-bce/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ public class Main { @@ -1101,6 +1101,28 @@ public class Main { } + public void testExceptionMessage() { + short[] B1 = new short[5]; + int[] B2 = new int[5]; + Exception err = null; + try { + testExceptionMessage1(B1, B2, null, -1, 6); + } catch (Exception e) { + err = e; + } + System.out.println(err); + } + + void testExceptionMessage1(short[] a1, int[] a2, long a3[], int start, int finish) { + int j = finish + 77; + // Bug: 22665511 + // A deoptimization will be triggered here right before the loop. Need to make + // sure the value of j is preserved for the interpreter. + for (int i = start; i <= finish; i++) { + a2[j - 1] = a1[i + 1]; + } + } + // Make sure this method is compiled with optimizing. /// CHECK-START: void Main.main(java.lang.String[]) register (after) /// CHECK: ParallelMove @@ -1141,6 +1163,7 @@ public class Main { }; testUnknownBounds(); + new Main().testExceptionMessage(); } } diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java index 9070627f1c..251a53e456 100644 --- a/test/450-checker-types/src/Main.java +++ b/test/450-checker-types/src/Main.java @@ -1,19 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ interface Interface { void $noinline$f(); @@ -52,6 +51,15 @@ class SubclassB extends Super { } } +class Generic<A> { + private A a = null; + public A get() { + return a; + } +} + +final class Final {} + public class Main { /// CHECK-START: void Main.testSimpleRemove() instruction_simplifier_after_types (before) @@ -395,6 +403,104 @@ public class Main { ((SubclassA)a[0]).$noinline$g(); } + private Generic<SubclassC> genericC = new Generic<SubclassC>(); + private Generic<Final> genericFinal = new Generic<Final>(); + + private SubclassC get() { + return genericC.get(); + } + + private Final getFinal() { + return genericFinal.get(); + } + + /// CHECK-START: SubclassC Main.inlineGenerics() reference_type_propagation (after) + /// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:SubclassC exact:false + /// CHECK-NEXT: Return [<<Invoke>>] + + /// CHECK-START: SubclassC Main.inlineGenerics() reference_type_propagation_after_inlining (after) + /// CHECK: <<BoundType:l\d+>> BoundType klass:SubclassC exact:false + /// CHECK: Return [<<BoundType>>] + private SubclassC inlineGenerics() { + SubclassC c = get(); + return c; + } + + /// CHECK-START: Final Main.inlineGenericsFinal() reference_type_propagation (after) + /// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:Final exact:true + /// CHECK-NEXT: Return [<<Invoke>>] + + /// CHECK-START: Final Main.inlineGenericsFinal() reference_type_propagation_after_inlining (after) + /// CHECK: <<BoundType:l\d+>> BoundType klass:Final exact:true + /// CHECK: Return [<<BoundType>>] + private Final inlineGenericsFinal() { + Final f = getFinal(); + return f; + } + + /// CHECK-START: void Main.boundOnlyOnceIfNotNull(java.lang.Object) reference_type_propagation_after_inlining (after) + /// CHECK: BoundType + /// CHECK-NOT: BoundType + private void boundOnlyOnceIfNotNull(Object o) { + if (o != null) { + o.toString(); + } + } + + /// CHECK-START: void Main.boundOnlyOnceIfInstanceOf(java.lang.Object) reference_type_propagation_after_inlining (after) + /// CHECK: BoundType + /// CHECK-NOT: BoundType + private void boundOnlyOnceIfInstanceOf(Object o) { + if (o instanceof Main) { + o.toString(); + } + } + + /// CHECK-START: Final Main.boundOnlyOnceCheckCast(Generic) reference_type_propagation_after_inlining (after) + /// CHECK: BoundType + /// CHECK-NOT: BoundType + private Final boundOnlyOnceCheckCast(Generic<Final> o) { + Final f = o.get(); + return f; + } + + private Super getSuper() { + return new SubclassA(); + } + + /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) reference_type_propagation (after) + /// CHECK: <<Phi:l\d+>> Phi klass:Super + /// CHECK: NullCheck [<<Phi>>] klass:Super + + /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) reference_type_propagation_after_inlining (after) + /// CHECK: <<Phi:l\d+>> Phi klass:SubclassA + /// CHECK: NullCheck [<<Phi>>] klass:SubclassA + private void updateNodesInTheSameBlockAsPhi(boolean cond) { + Super s = getSuper(); + if (cond) { + s = new SubclassA(); + } + s.$noinline$f(); + } + + /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) reference_type_propagation_after_inlining (after) + /// CHECK: <<This:l\d+>> ParameterValue + /// CHECK: <<Param:l\d+>> ParameterValue + /// CHECK: <<Clazz:l\d+>> LoadClass + /// CHECK: CheckCast [<<Param>>,<<Clazz>>] + /// CHECK: BoundType [<<Param>>] can_be_null:true + + /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) instruction_simplifier_after_types (after) + /// CHECK: 
<<This:l\d+>> ParameterValue + /// CHECK: <<Param:l\d+>> ParameterValue + /// CHECK: <<Clazz:l\d+>> LoadClass + /// CHECK: CheckCast [<<Param>>,<<Clazz>>] + /// CHECK: <<Bound:l\d+>> BoundType [<<Param>>] + /// CHECK: NullCheck [<<Bound>>] + public String checkcastPreserveNullCheck(Object a) { + return ((SubclassA)a).toString(); + } + public static void main(String[] args) { } } diff --git a/test/451-regression-add-float/src/Main.java b/test/451-regression-add-float/src/Main.java index 0d4bf065ea..093c132abe 100644 --- a/test/451-regression-add-float/src/Main.java +++ b/test/451-regression-add-float/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/454-get-vreg/build b/test/454-get-vreg/build new file mode 100644 index 0000000000..08987b556c --- /dev/null +++ b/test/454-get-vreg/build @@ -0,0 +1,26 @@ +#!/bin/bash +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Stop if something fails. 
+set -e + +# The test relies on a DEX file produced by javac+dx, so keep building with them for now +# (see b/19467889) +mkdir classes +${JAVAC} -d classes `find src -name '*.java'` +${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \ + --dump-width=1000 ${DX_FLAGS} classes +zip $TEST_NAME.jar classes.dex diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc index 33bdc200db..9facfdb076 100644 --- a/test/454-get-vreg/get_vreg_jni.cc +++ b/test/454-get-vreg/get_vreg_jni.cc @@ -28,12 +28,12 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context, mirror::Object* this_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_value_(this_value), found_method_index_(0) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/455-set-vreg/set_vreg_jni.cc b/test/455-set-vreg/set_vreg_jni.cc index 754118935c..21149f67e8 100644 --- a/test/455-set-vreg/set_vreg_jni.cc +++ b/test/455-set-vreg/set_vreg_jni.cc @@ -28,11 +28,11 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context, mirror::Object* this_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_value_(this_value) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc index 96f0e52995..c21168b81e 100644 --- a/test/457-regs/regs_jni.cc +++ b/test/457-regs/regs_jni.cc @@ -28,10 +28,10 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java index aa4dda17b6..a14200e7ce 100644 --- a/test/458-checker-instruction-simplification/src/Main.java +++ b/test/458-checker-instruction-simplification/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License.
-*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc index 23fe43d906..8108c97f77 100644 --- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc +++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc @@ -28,12 +28,12 @@ namespace { class TestVisitor : public StackVisitor { public: TestVisitor(Thread* thread, Context* context, mirror::Object* this_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames), this_value_(this_value), found_method_index_(0) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java b/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java index cee8e0fbe7..171ade875c 100644 --- a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java +++ b/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ public class OtherDex { public static void emptyMethod() { diff --git a/test/462-checker-inlining-across-dex-files/src/Main.java b/test/462-checker-inlining-across-dex-files/src/Main.java index 64979ca7ab..1fe49a8046 100644 --- a/test/462-checker-inlining-across-dex-files/src/Main.java +++ b/test/462-checker-inlining-across-dex-files/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // Add a class that will be the first entry in the dex cache, to // avoid having the OtherDex and Main classes share the same cache index. diff --git a/test/463-checker-boolean-simplifier/src/Main.java b/test/463-checker-boolean-simplifier/src/Main.java index 0b75930146..61510d80e2 100644 --- a/test/463-checker-boolean-simplifier/src/Main.java +++ b/test/463-checker-boolean-simplifier/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ public class Main { @@ -119,9 +119,6 @@ public class Main { /// CHECK-DAG: <<Cond:z\d+>> LessThan [<<ParamX>>,<<ParamY>>] /// CHECK-DAG: Return [<<Cond>>] - /// CHECK-START: boolean Main.LessThan(int, int) boolean_simplifier (after) - /// CHECK-NOT: GreaterThanOrEqual - public static boolean LessThan(int x, int y) { return (x < y) ? true : false; } diff --git a/test/464-checker-inline-sharpen-calls/src/Main.java b/test/464-checker-inline-sharpen-calls/src/Main.java index 876496fdc4..6dce96c9ca 100644 --- a/test/464-checker-inline-sharpen-calls/src/Main.java +++ b/test/464-checker-inline-sharpen-calls/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public final class Main { diff --git a/test/465-checker-clinit-gvn/src/Main.java b/test/465-checker-clinit-gvn/src/Main.java index 704e9fe123..9c77acc4d1 100644 --- a/test/465-checker-clinit-gvn/src/Main.java +++ b/test/465-checker-clinit-gvn/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ class OtherClass { static { diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc index c4f415b3f9..9b32fc397b 100644 --- a/test/466-get-live-vreg/get_live_vreg_jni.cc +++ b/test/466-get-live-vreg/get_live_vreg_jni.cc @@ -27,10 +27,10 @@ namespace { class TestVisitor : public StackVisitor { public: - TestVisitor(Thread* thread, Context* context) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + TestVisitor(Thread* thread, Context* context) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {} - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { ArtMethod* m = GetMethod(); std::string m_name(m->GetName()); diff --git a/test/473-checker-inliner-constants/src/Main.java b/test/473-checker-inliner-constants/src/Main.java index 85f6565503..8638514919 100644 --- a/test/473-checker-inliner-constants/src/Main.java +++ b/test/473-checker-inliner-constants/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/474-checker-boolean-input/src/Main.java b/test/474-checker-boolean-input/src/Main.java index 86d0f7c916..a2b219dd6d 100644 --- a/test/474-checker-boolean-input/src/Main.java +++ b/test/474-checker-boolean-input/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/474-fp-sub-neg/expected.txt b/test/474-fp-sub-neg/expected.txt index e6ffe0d430..1c15abba3d 100644 --- a/test/474-fp-sub-neg/expected.txt +++ b/test/474-fp-sub-neg/expected.txt @@ -1,2 +1,6 @@ -0.0 +0.0 +0.0 -0.0 +0.0 +0.0 diff --git a/test/474-fp-sub-neg/info.txt b/test/474-fp-sub-neg/info.txt index eced93fef5..82effdb45e 100644 --- a/test/474-fp-sub-neg/info.txt +++ b/test/474-fp-sub-neg/info.txt @@ -1,5 +1,11 @@ Regression check for optimizing simplify instruction pass. + A pair (sub, neg) should not be transformed to (sub) for fp calculation because we can lose the sign of zero for the following expression: - ( A - B ) != B - A ; if B == A + +Addition or subtraction with fp zero should not be eliminated +because: + -0.0 + 0.0 = 0.0 + -0.0 - -0.0 = 0.0 diff --git a/test/474-fp-sub-neg/src/Main.java b/test/474-fp-sub-neg/src/Main.java index e6bce6793f..c190e8e40b 100644 --- a/test/474-fp-sub-neg/src/Main.java +++ b/test/474-fp-sub-neg/src/Main.java @@ -24,6 +24,8 @@ public class Main { } System.out.println(f); + System.out.println(f + 0f); + System.out.println(f - (-0f)); } public static void doubleTest() { @@ -35,6 +37,8 @@ public class Main { } System.out.println(d); + System.out.println(d + 0f); + System.out.println(d - (-0f)); } public static void main(String[] args) { diff --git a/test/475-regression-inliner-ids/src/Main.java b/test/475-regression-inliner-ids/src/Main.java index bf22062bcd..423c3b5c92 100644 --- a/test/475-regression-inliner-ids/src/Main.java +++ b/test/475-regression-inliner-ids/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ import java.lang.reflect.Method; diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java index e709ba0902..41bec057ee 100644 --- a/test/476-checker-ctor-memory-barrier/src/Main.java +++ b/test/476-checker-ctor-memory-barrier/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // TODO: Add more tests after we can inline functions with calls. diff --git a/test/477-checker-bound-type/src/Main.java b/test/477-checker-bound-type/src/Main.java index fe52e83664..c873702408 100644 --- a/test/477-checker-bound-type/src/Main.java +++ b/test/477-checker-bound-type/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/477-long-to-float-conversion-precision/src/Main.java b/test/477-long-to-float-conversion-precision/src/Main.java index cd9703943d..568bc04d6c 100644 --- a/test/477-long-to-float-conversion-precision/src/Main.java +++ b/test/477-long-to-float-conversion-precision/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test that // it does compile the method. public class Main { diff --git a/test/478-checker-inliner-nested-loop/src/Main.java b/test/478-checker-inliner-nested-loop/src/Main.java index aa023491a5..86c119f3d0 100644 --- a/test/478-checker-inliner-nested-loop/src/Main.java +++ b/test/478-checker-inliner-nested-loop/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/479-regression-implicit-null-check/src/Main.java b/test/479-regression-implicit-null-check/src/Main.java index 6b6f2e4d2a..005ba7fbc2 100644 --- a/test/479-regression-implicit-null-check/src/Main.java +++ b/test/479-regression-implicit-null-check/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License.
+ */ public class Main { diff --git a/test/480-checker-dead-blocks/src/Main.java b/test/480-checker-dead-blocks/src/Main.java index 4cc16344a4..5adafaf10d 100644 --- a/test/480-checker-dead-blocks/src/Main.java +++ b/test/480-checker-dead-blocks/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/481-regression-phi-cond/src/Main.java b/test/481-regression-phi-cond/src/Main.java index bad9669048..54982f7ec4 100644 --- a/test/481-regression-phi-cond/src/Main.java +++ b/test/481-regression-phi-cond/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java index a4280de749..2cfb04d652 100644 --- a/test/482-checker-loop-back-edge-use/src/Main.java +++ b/test/482-checker-loop-back-edge-use/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { @@ -36,11 +36,11 @@ public class Main { } /// CHECK-START: void Main.loop3(boolean) liveness (after) - /// CHECK: ParameterValue liveness:4 ranges:{[4,64)} uses:[60,64] - /// CHECK: Goto liveness:62 + /// CHECK: ParameterValue liveness:4 ranges:{[4,60)} uses:[56,60] + /// CHECK: Goto liveness:58 - /// CHECK-START: void Main.loop3(boolean) liveness (after) - /// CHECK-NOT: Goto liveness:56 + // CHECK-START: void Main.loop3(boolean) liveness (after) + // CHECK-NOT: Goto liveness:50 public static void loop3(boolean incoming) { // 'incoming' only needs a use at the outer loop's back edge. while (System.currentTimeMillis() != 42) { @@ -49,11 +49,11 @@ public class Main { } } - /// CHECK-START: void Main.loop4(boolean) liveness (after) - /// CHECK: ParameterValue liveness:4 ranges:{[4,24)} uses:[24] + // CHECK-START: void Main.loop4(boolean) liveness (after) + // CHECK: ParameterValue liveness:4 ranges:{[4,22)} uses:[22] - /// CHECK-START: void Main.loop4(boolean) liveness (after) - /// CHECK-NOT: Goto liveness:22 + // CHECK-START: void Main.loop4(boolean) liveness (after) + // CHECK-NOT: Goto liveness:18 public static void loop4(boolean incoming) { // 'incoming' has no loop use, so should not have back edge uses. System.out.println(incoming); diff --git a/test/484-checker-register-hints/src/Main.java b/test/484-checker-register-hints/src/Main.java index 3715ca2b14..6e68f7c91e 100644 --- a/test/484-checker-register-hints/src/Main.java +++ b/test/484-checker-register-hints/src/Main.java @@ -16,6 +16,14 @@ public class Main { + static class Foo { + int field0; + int field1; + int field2; + int field3; + int field4; + }; + /// CHECK-START: void Main.test1(boolean, int, int, int, int, int) register (after) /// CHECK: name "B0" /// CHECK-NOT: ParallelMove @@ -25,7 +33,7 @@ public class Main { /// CHECK-NOT: ParallelMove /// CHECK: name "B3" /// CHECK-NOT: end_block - /// CHECK: ArraySet + /// CHECK: InstanceFieldSet // We could check here that there is a parallel move, but it's only valid // for some architectures (for example x86), as other architectures may // not do move at all. @@ -36,19 +44,19 @@ public class Main { int e = live1; int f = live2; int g = live3; + int j = live0; if (z) { } else { // Create enough live instructions to force spilling on x86. 
int h = live4; int i = live5; - array[2] = e + i + h; - array[3] = f + i + h; - array[4] = g + i + h; - array[0] = h; - array[1] = i + h; - + foo.field2 = e + i + h; + foo.field3 = f + i + h; + foo.field4 = g + i + h; + foo.field0 = h; + foo.field1 = i + h; } - live1 = e + f + g; + live1 = e + f + g + j; } /// CHECK-START: void Main.test2(boolean, int, int, int, int, int) register (after) @@ -60,7 +68,7 @@ public class Main { /// CHECK-NOT: ParallelMove /// CHECK: name "B3" /// CHECK-NOT: end_block - /// CHECK: ArraySet + /// CHECK: InstanceFieldSet // We could check here that there is a parallel move, but it's only valid // for some architectures (for example x86), as other architectures may // not do move at all. @@ -71,18 +79,19 @@ public class Main { int e = live1; int f = live2; int g = live3; + int j = live0; if (z) { if (y) { int h = live4; int i = live5; - array[2] = e + i + h; - array[3] = f + i + h; - array[4] = g + i + h; - array[0] = h; - array[1] = i + h; + foo.field2 = e + i + h; + foo.field3 = f + i + h; + foo.field4 = g + i + h; + foo.field0 = h; + foo.field1 = i + h; } } - live1 = e + f + g; + live1 = e + f + g + j; } /// CHECK-START: void Main.test3(boolean, int, int, int, int, int) register (after) @@ -94,7 +103,7 @@ public class Main { /// CHECK-NOT: ParallelMove /// CHECK: name "B6" /// CHECK-NOT: end_block - /// CHECK: ArraySet + /// CHECK: InstanceFieldSet // We could check here that there is a parallel move, but it's only valid // for some architectures (for example x86), as other architectures may // not do move at all. @@ -107,6 +116,7 @@ public class Main { int e = live1; int f = live2; int g = live3; + int j = live0; if (z) { live1 = e; } else { @@ -115,24 +125,25 @@ public class Main { } else { int h = live4; int i = live5; - array[2] = e + i + h; - array[3] = f + i + h; - array[4] = g + i + h; - array[0] = h; - array[1] = i + h; + foo.field2 = e + i + h; + foo.field3 = f + i + h; + foo.field4 = g + i + h; + foo.field0 = h; + foo.field1 = i + h; } } - live1 = e + f + g; + live1 = e + f + g + j; } public static void main(String[] args) { } static boolean y; + static int live0; static int live1; static int live2; static int live3; static int live4; static int live5; - static int[] array; + static Foo foo; } diff --git a/test/491-current-method/src/Main.java b/test/491-current-method/src/Main.java index 87ef05218d..51a41a6cc7 100644 --- a/test/491-current-method/src/Main.java +++ b/test/491-current-method/src/Main.java @@ -16,7 +16,7 @@ class Main { - // The code below is written in a way that will crash + // The code below is written in a way that would crash // the generated code at the time of submission of this test. // Therefore, changes to the register allocator may // affect the reproducibility of the crash. @@ -25,8 +25,8 @@ class Main { // to put the ART current method. c = c / 42; // We use the empty string for forcing the slow path. - // The slow path for charAt when it is intrinsified, will - // move the parameter to ECX, and therefore overwrite the ART + // The slow path for charAt, when it is intrinsified, will + // move the parameter to ECX and therefore overwrite the ART // current method. 
"".charAt(c); diff --git a/test/494-checker-instanceof-tests/src/Main.java b/test/494-checker-instanceof-tests/src/Main.java index bff9c72ded..2eac6c92a5 100644 --- a/test/494-checker-instanceof-tests/src/Main.java +++ b/test/494-checker-instanceof-tests/src/Main.java @@ -129,6 +129,26 @@ public class Main { return $inline$interfaceTypeTest(finalUnrelatedField); } + // Check that we remove the LoadClass instruction from the graph. + /// CHECK-START: boolean Main.knownTestWithLoadedClass() register (after) + /// CHECK-NOT: LoadClass + public static boolean knownTestWithLoadedClass() { + return new String() instanceof String; + } + + // Check that we do not remove the LoadClass instruction from the graph. + /// CHECK-START: boolean Main.knownTestWithUnloadedClass() register (after) + /// CHECK: <<Const:i\d+>> IntConstant 0 + /// CHECK: LoadClass + /// CHECK: Return [<<Const>>] + public static boolean knownTestWithUnloadedClass() { + return $inline$returnMain() instanceof String; + } + + public static Object $inline$returnMain() { + return new Main(); + } + public static void expect(boolean expected, boolean actual) { if (expected != actual) { throw new Error("Unexpected result"); diff --git a/test/495-checker-checkcast-tests/src/Main.java b/test/495-checker-checkcast-tests/src/Main.java index aa6d5a75f7..4b2bf09d3f 100644 --- a/test/495-checker-checkcast-tests/src/Main.java +++ b/test/495-checker-checkcast-tests/src/Main.java @@ -112,6 +112,33 @@ public class Main { return $inline$interfaceTypeTest(finalUnrelatedField); } + /// CHECK-START: java.lang.String Main.knownTestWithLoadedClass() register (after) + /// CHECK-NOT: LoadClass + public static String knownTestWithLoadedClass() { + return (String)$inline$getString(); + } + + /// CHECK-START: Itf Main.knownTestWithUnloadedClass() register (after) + /// CHECK: LoadClass + public static Itf knownTestWithUnloadedClass() { + return (Itf)$inline$getString(); + } + + public static Object $inline$getString() { + return new String(); + } + + public static Object $inline$getMain() { + return new Main(); + } + + /// CHECK-START: void Main.nonNullBoundType() register (after) + /// CHECK-NOT: NullCheck + public static void nonNullBoundType() { + Main main = (Main)$inline$getMain(); + main.getClass(); + } + public static void main(String[] args) { classTypeTestNull(); try { diff --git a/test/508-checker-disassembly/src/Main.java b/test/508-checker-disassembly/src/Main.java index 29c9374aed..0805731267 100644 --- a/test/508-checker-disassembly/src/Main.java +++ b/test/508-checker-disassembly/src/Main.java @@ -1,18 +1,18 @@ /* -* Copyright (C) 2015 The Android Open Source Project -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ public class Main { // A very simple check that disassembly information has been added to the diff --git a/test/510-checker-try-catch/smali/Builder.smali b/test/510-checker-try-catch/smali/Builder.smali index 95708a2c81..2274ba4d43 100644 --- a/test/510-checker-try-catch/smali/Builder.smali +++ b/test/510-checker-try-catch/smali/Builder.smali @@ -630,6 +630,172 @@ goto :return .end method +## CHECK-START: int Builder.testSwitchTryEnter(int, int, int, int) builder (after) + +## CHECK: name "B0" +## CHECK: successors "<<BPSwitch0:B\d+>>" + +## CHECK: name "<<BPSwitch0>>" +## CHECK: predecessors "B0" +## CHECK: successors "<<BEnterTry2:B\d+>>" "<<BPSwitch1:B\d+>>" +## CHECK: If + +## CHECK: name "<<BPSwitch1>>" +## CHECK: predecessors "<<BPSwitch0>>" +## CHECK: successors "<<BOutside:B\d+>>" "<<BEnterTry1:B\d+>>" +## CHECK: If + +## CHECK: name "<<BTry1:B\d+>>" +## CHECK: predecessors "<<BEnterTry1>>" +## CHECK: successors "<<BTry2:B\d+>>" +## CHECK: Div + +## CHECK: name "<<BTry2>>" +## CHECK: predecessors "<<BEnterTry2>>" "<<BTry1>>" +## CHECK: successors "<<BExitTry:B\d+>>" +## CHECK: Div + +## CHECK: name "<<BOutside>>" +## CHECK: predecessors "<<BPSwitch1>>" "<<BExitTry>>" +## CHECK: successors "<<BCatchReturn:B\d+>>" +## CHECK: Div + +## CHECK: name "<<BCatchReturn>>" +## CHECK: predecessors "<<BOutside>>" "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry>>" +## CHECK: flags "catch_block" +## CHECK: Return + +## CHECK: name "<<BEnterTry1>>" +## CHECK: predecessors "<<BPSwitch1>>" +## CHECK: successors "<<BTry1>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:entry + +## CHECK: name "<<BEnterTry2>>" +## CHECK: predecessors "<<BPSwitch0>>" +## CHECK: successors "<<BTry2>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:entry + +## CHECK: name "<<BExitTry>>" +## CHECK: predecessors "<<BTry2>>" +## CHECK: successors "<<BOutside>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:exit + +.method public static testSwitchTryEnter(IIII)I + .registers 4 + + packed-switch p0, :pswitch_data + + :try_start + div-int/2addr p0, p1 + + :pswitch1 + div-int/2addr p0, p2 + goto :pswitch2 + + :pswitch_data + .packed-switch 0x0 + :pswitch1 + :pswitch2 + .end packed-switch + :try_end + .catchall {:try_start .. 
:try_end} :catch_all + + :pswitch2 + div-int/2addr p0, p3 + + :catch_all + return p0 +.end method + +## CHECK-START: int Builder.testSwitchTryExit(int, int, int, int) builder (after) + +## CHECK: name "B0" +## CHECK: successors "<<BEnterTry1:B\d+>>" + +## CHECK: name "<<BPSwitch0:B\d+>>" +## CHECK: predecessors "<<BEnterTry1>>" +## CHECK: successors "<<BTry2:B\d+>>" "<<BExitTry1:B\d+>>" +## CHECK: If + +## CHECK: name "<<BPSwitch1:B\d+>>" +## CHECK: predecessors "<<BExitTry1>>" +## CHECK: successors "<<BOutside:B\d+>>" "<<BEnterTry2:B\d+>>" +## CHECK: If + +## CHECK: name "<<BTry1:B\d+>>" +## CHECK: predecessors "<<BEnterTry2>>" +## CHECK: successors "<<BTry2>>" +## CHECK: Div + +## CHECK: name "<<BTry2>>" +## CHECK: predecessors "<<BPSwitch0>>" +## CHECK: successors "<<BExitTry2:B\d+>>" +## CHECK: Div + +## CHECK: name "<<BOutside>>" +## CHECK: predecessors "<<BPSwitch1>>" "<<BExitTry2>>" +## CHECK: successors "<<BCatchReturn:B\d+>>" +## CHECK: Div + +## CHECK: name "<<BCatchReturn>>" +## CHECK: predecessors "<<BOutside>>" "<<BEnterTry1>>" "<<BExitTry1>>" "<<BEnterTry2>>" "<<BExitTry2>>" +## CHECK: flags "catch_block" +## CHECK: Return + +## CHECK: name "<<BEnterTry1>>" +## CHECK: predecessors "B0" +## CHECK: successors "<<BPSwitch0>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:entry + +## CHECK: name "<<BExitTry1>>" +## CHECK: predecessors "<<BPSwitch0>>" +## CHECK: successors "<<BPSwitch1>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:exit + +## CHECK: name "<<BEnterTry2>>" +## CHECK: predecessors "<<BPSwitch1>>" +## CHECK: successors "<<BTry1>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:entry + +## CHECK: name "<<BExitTry2>>" +## CHECK: predecessors "<<BTry2>>" +## CHECK: successors "<<BOutside>>" +## CHECK: xhandlers "<<BCatchReturn>>" +## CHECK: TryBoundary kind:exit + +.method public static testSwitchTryExit(IIII)I + .registers 4 + + :try_start + div-int/2addr p0, p1 + packed-switch p0, :pswitch_data + + div-int/2addr p0, p1 + + :pswitch1 + div-int/2addr p0, p2 + :try_end + .catchall {:try_start .. :try_end} :catch_all + + :pswitch2 + div-int/2addr p0, p3 + + :catch_all + return p0 + + :pswitch_data + .packed-switch 0x0 + :pswitch1 + :pswitch2 + .end packed-switch +.end method + # Test that a TryBoundary is inserted between a Throw instruction and the exit # block when covered by a try range. 
@@ -650,6 +816,10 @@ ## CHECK: flags "catch_block" ## CHECK: StoreLocal [v0,<<Minus1>>] +## CHECK: name "<<BExit>>" +## CHECK: predecessors "<<BExitTry>>" "<<BCatch>>" +## CHECK: Exit + ## CHECK: name "<<BEnterTry>>" ## CHECK: predecessors "B0" ## CHECK: successors "<<BTry>>" @@ -662,10 +832,6 @@ ## CHECK: xhandlers "<<BCatch>>" ## CHECK: TryBoundary kind:exit -## CHECK: name "<<BExit>>" -## CHECK: predecessors "<<BExitTry>>" "<<BCatch>>" -## CHECK: Exit - .method public static testThrow(Ljava/lang/Exception;)I .registers 2 @@ -693,6 +859,9 @@ ## CHECK: name "<<BReturn:B\d+>>" ## CHECK: predecessors "<<BExitTry>>" +## CHECK: successors "<<BExit:B\d+>>" + +## CHECK: name "<<BExit>>" ## CHECK: name "<<BTry:B\d+>>" ## CHECK: predecessors "<<BEnterTry>>" @@ -745,6 +914,14 @@ ## CHECK: name "<<BReturn:B\d+>>" ## CHECK: predecessors "<<BExitTry2>>" +## CHECK: name "{{B\d+}}" +## CHECK: Exit + +## CHECK: name "<<BTry2:B\d+>>" +## CHECK: predecessors "<<BEnterTry2>>" +## CHECK: successors "<<BExitTry2>>" +## CHECK: Div + ## CHECK: name "<<BEnterTry1>>" ## CHECK: predecessors "B0" ## CHECK: successors "<<BTry1>>" @@ -757,11 +934,6 @@ ## CHECK: xhandlers "<<BCatch>>" ## CHECK: TryBoundary kind:exit -## CHECK: name "<<BTry2:B\d+>>" -## CHECK: predecessors "<<BEnterTry2>>" -## CHECK: successors "<<BExitTry2>>" -## CHECK: Div - ## CHECK: name "<<BEnterTry2>>" ## CHECK: predecessors "<<BCatch>>" ## CHECK: successors "<<BTry2>>" @@ -797,48 +969,51 @@ ## CHECK: successors "<<BCatch1:B\d+>>" ## CHECK: name "<<BCatch1>>" -## CHECK: predecessors "B0" "<<BEnter2:B\d+>>" "<<BExit2:B\d+>>" -## CHECK: successors "<<BEnter1:B\d+>>" +## CHECK: predecessors "B0" "<<BEnterTry2:B\d+>>" "<<BExitTry2:B\d+>>" +## CHECK: successors "<<BEnterTry1:B\d+>>" ## CHECK: flags "catch_block" ## CHECK: name "<<BCatch2:B\d+>>" -## CHECK: predecessors "<<BExit1:B\d+>>" "<<BEnter1>>" "<<BExit1>>" -## CHECK: successors "<<BEnter2>>" +## CHECK: predecessors "<<BExitTry1:B\d+>>" "<<BEnterTry1>>" "<<BExitTry1>>" +## CHECK: successors "<<BEnterTry2>>" ## CHECK: flags "catch_block" ## CHECK: name "<<BReturn:B\d+>>" -## CHECK: predecessors "<<BExit2>>" +## CHECK: predecessors "<<BExitTry2>>" +## CHECK: successors "<<BExit:B\d+>>" ## CHECK: Return +## CHECK: name "<<BExit>>" + ## CHECK: name "<<BTry1:B\d+>>" -## CHECK: predecessors "<<BEnter1>>" -## CHECK: successors "<<BExit1>>" +## CHECK: predecessors "<<BEnterTry1>>" +## CHECK: successors "<<BExitTry1>>" ## CHECK: Div -## CHECK: name "<<BEnter1>>" +## CHECK: name "<<BTry2:B\d+>>" +## CHECK: predecessors "<<BEnterTry2>>" +## CHECK: successors "<<BExitTry2>>" +## CHECK: Div + +## CHECK: name "<<BEnterTry1>>" ## CHECK: predecessors "<<BCatch1>>" ## CHECK: successors "<<BTry1>>" ## CHECK: xhandlers "<<BCatch2>>" ## CHECK: TryBoundary kind:entry -## CHECK: name "<<BExit1>>" +## CHECK: name "<<BExitTry1>>" ## CHECK: predecessors "<<BTry1>>" ## CHECK: successors "<<BCatch2>>" ## CHECK: xhandlers "<<BCatch2>>" ## CHECK: TryBoundary kind:exit -## CHECK: name "<<BTry2:B\d+>>" -## CHECK: predecessors "<<BEnter2>>" -## CHECK: successors "<<BExit2>>" -## CHECK: Div - -## CHECK: name "<<BEnter2>>" +## CHECK: name "<<BEnterTry2>>" ## CHECK: predecessors "<<BCatch2>>" ## CHECK: successors "<<BTry2>>" ## CHECK: xhandlers "<<BCatch1>>" ## CHECK: TryBoundary kind:entry -## CHECK: name "<<BExit2>>" +## CHECK: name "<<BExitTry2>>" ## CHECK: predecessors "<<BTry2>>" ## CHECK: successors "<<BReturn>>" ## CHECK: xhandlers "<<BCatch1>>" @@ -972,3 +1147,29 @@ :try_end .catchall {:try_start .. 
:try_end} :catch_all .end method + +## CHECK-START: int Builder.testSynchronized(java.lang.Object) builder (after) +## CHECK: flags "catch_block" +## CHECK-NOT: end_block +## CHECK: MonitorOperation kind:exit + +.method public static testSynchronized(Ljava/lang/Object;)I + .registers 2 + + monitor-enter p0 + + :try_start_9 + invoke-virtual {p0}, Ljava/lang/Object;->hashCode()I + move-result v0 + + monitor-exit p0 + return v0 + + :catchall_11 + move-exception v0 + monitor-exit p0 + :try_end_15 + .catchall {:try_start_9 .. :try_end_15} :catchall_11 + + throw v0 +.end method diff --git a/test/510-checker-try-catch/smali/SsaBuilder.smali b/test/510-checker-try-catch/smali/SsaBuilder.smali new file mode 100644 index 0000000000..2ddcbced9c --- /dev/null +++ b/test/510-checker-try-catch/smali/SsaBuilder.smali @@ -0,0 +1,199 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LSsaBuilder; + +.super Ljava/lang/Object; + +# Tests that catch blocks with both normal and exceptional predecessors are +# split in two. + +## CHECK-START: int SsaBuilder.testSimplifyCatchBlock(int, int, int) ssa_builder (after) + +## CHECK: name "B0" +## CHECK-NEXT: from_bci +## CHECK-NEXT: to_bci +## CHECK-NEXT: predecessors +## CHECK-NEXT: successors "<<BExtracted:B\d+>>" + +## CHECK: name "<<BCatch:B\d+>>" +## CHECK-NEXT: from_bci +## CHECK-NEXT: to_bci +## CHECK-NEXT: predecessors +## CHECK-NEXT: successors "<<BExtracted>>" +## CHECK-NEXT: xhandlers +## CHECK-NEXT: flags "catch_block" +## CHECK-NOT: Add + +## CHECK: name "<<BExtracted>>" +## CHECK-NEXT: from_bci +## CHECK-NEXT: to_bci +## CHECK-NEXT: predecessors "B0" "<<BCatch>>" +## CHECK-NOT: flags "catch_block" +## CHECK: Add + +.method public static testSimplifyCatchBlock(III)I + .registers 4 + + :catch_all + add-int/2addr p0, p1 + + :try_start + div-int/2addr p0, p2 + :try_end + .catchall {:try_start .. :try_end} :catch_all + + return p0 +.end method + +# Should be rejected because :catch_all is a loop header. + +## CHECK-START: int SsaBuilder.testCatchLoopHeader(int, int, int) ssa_builder (after, bad_state) + +.method public static testCatchLoopHeader(III)I + .registers 4 + + :try_start_1 + div-int/2addr p0, p1 + return p0 + :try_end_1 + .catchall {:try_start_1 .. :try_end_1} :catch_all + + :catch_all + :try_start_2 + div-int/2addr p0, p2 + return p0 + :try_end_2 + .catchall {:try_start_2 .. :try_end_2} :catch_all + +.end method + +# Tests creation of catch Phis. 
+ +## CHECK-START: int SsaBuilder.testPhiCreation(int, int, int) ssa_builder (after) +## CHECK-DAG: <<P0:i\d+>> ParameterValue +## CHECK-DAG: <<P1:i\d+>> ParameterValue +## CHECK-DAG: <<P2:i\d+>> ParameterValue + +## CHECK-DAG: <<DZC1:i\d+>> DivZeroCheck [<<P1>>] +## CHECK-DAG: <<Div1:i\d+>> Div [<<P0>>,<<DZC1>>] +## CHECK-DAG: <<DZC2:i\d+>> DivZeroCheck [<<P1>>] +## CHECK-DAG: <<Div2:i\d+>> Div [<<Div1>>,<<DZC2>>] +## CHECK-DAG: <<DZC3:i\d+>> DivZeroCheck [<<P1>>] +## CHECK-DAG: <<Div3:i\d+>> Div [<<Div2>>,<<DZC3>>] + +## CHECK-DAG: <<Phi1:i\d+>> Phi [<<P0>>,<<P1>>,<<P2>>] reg:0 is_catch_phi:true +## CHECK-DAG: <<Phi2:i\d+>> Phi [<<Div3>>,<<Phi1>>] reg:0 is_catch_phi:false +## CHECK-DAG: Return [<<Phi2>>] + +.method public static testPhiCreation(III)I + .registers 4 + + :try_start + move v0, p0 + div-int/2addr p0, p1 + + move v0, p1 + div-int/2addr p0, p1 + + move v0, p2 + div-int/2addr p0, p1 + + move v0, p0 + :try_end + .catchall {:try_start .. :try_end} :catch_all + + :return + return v0 + + :catch_all + goto :return +.end method + +# Tests that phi elimination does not remove catch phis where the value does +# not dominate the phi. + +## CHECK-START: int SsaBuilder.testPhiElimination(int, int) ssa_builder (after) +## CHECK-DAG: <<P0:i\d+>> ParameterValue +## CHECK-DAG: <<P1:i\d+>> ParameterValue +## CHECK-DAG: <<Cst5:i\d+>> IntConstant 5 +## CHECK-DAG: <<Cst7:i\d+>> IntConstant 7 + +## CHECK-DAG: <<Add1:i\d+>> Add [<<Cst7>>,<<Cst7>>] +## CHECK-DAG: <<DZC:i\d+>> DivZeroCheck [<<P1>>] +## CHECK-DAG: <<Div:i\d+>> Div [<<P0>>,<<DZC>>] + +## CHECK-DAG: <<Phi1:i\d+>> Phi [<<Add1>>] reg:1 is_catch_phi:true +## CHECK-DAG: <<Add2:i\d+>> Add [<<Cst5>>,<<Phi1>>] + +## CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cst5>>,<<Add2>>] reg:0 is_catch_phi:false +## CHECK-DAG: Return [<<Phi2>>] + +.method public static testPhiElimination(II)I + .registers 4 + + :try_start + # The constant in entry block will dominate the vreg 0 catch phi. + const v0, 5 + + # Insert addition so that the value of vreg 1 does not dominate the phi. + const v1, 7 + add-int/2addr v1, v1 + + div-int/2addr p0, p1 + :try_end + .catchall {:try_start .. :try_end} :catch_all + + :return + return v0 + + :catch_all + add-int/2addr v0, v1 + goto :return +.end method + +# Tests that dead catch blocks are removed. + +## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) ssa_builder (before) +## CHECK: Mul + +## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) ssa_builder (after) +## CHECK-DAG: <<P0:i\d+>> ParameterValue +## CHECK-DAG: <<P1:i\d+>> ParameterValue +## CHECK-DAG: <<P2:i\d+>> ParameterValue +## CHECK-DAG: <<Add1:i\d+>> Add [<<P0>>,<<P1>>] +## CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<P2>>] +## CHECK-DAG: Return [<<Add2>>] + +## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) ssa_builder (after) +## CHECK-NOT: flags "catch_block" +## CHECK-NOT: Mul + +.method public static testDeadCatchBlock(III)I + .registers 4 + + :try_start + add-int/2addr p0, p1 + add-int/2addr p0, p2 + move v0, p0 + :try_end + .catchall {:try_start .. 
:try_end} :catch_all + + :return + return v0 + + :catch_all + mul-int/2addr v1, v1 + goto :return +.end method diff --git a/test/519-bound-load-class/src/Main.java b/test/519-bound-load-class/src/Main.java index 41bb951cfb..cddeb093f7 100644 --- a/test/519-bound-load-class/src/Main.java +++ b/test/519-bound-load-class/src/Main.java @@ -16,9 +16,24 @@ public class Main { public static void main(String[] args) { + testInstanceOf(); + try { + testNull(); + throw new Error("Expected ClassCastException"); + } catch (ClassCastException e) { /* ignore */ } + } + + public static void testInstanceOf() { Object o = Main.class; if (o instanceof Main) { System.out.println((Main)o); } } + + public static void testNull() { + Object o = Main.class; + if (o != null) { + System.out.println((Main)o); + } + } } diff --git a/test/522-checker-regression-monitor-exit/expected.txt b/test/522-checker-regression-monitor-exit/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/522-checker-regression-monitor-exit/expected.txt diff --git a/test/522-checker-regression-monitor-exit/info.txt b/test/522-checker-regression-monitor-exit/info.txt new file mode 100644 index 0000000000..7cfc963090 --- /dev/null +++ b/test/522-checker-regression-monitor-exit/info.txt @@ -0,0 +1,3 @@ +Regression test for removal of monitor-exit due to lack of specified side-effects. +The test invokes a synchronized version of Object.hashCode in multiple threads. +If monitor-exit is removed, the following threads will get stuck and time out.
\ No newline at end of file diff --git a/test/522-checker-regression-monitor-exit/smali/Test.smali b/test/522-checker-regression-monitor-exit/smali/Test.smali new file mode 100644 index 0000000000..c8e91984e0 --- /dev/null +++ b/test/522-checker-regression-monitor-exit/smali/Test.smali @@ -0,0 +1,40 @@ +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LTest; + +.super Ljava/lang/Object; + +## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination (before) +## CHECK: MonitorOperation [<<Param:l\d+>>] kind:enter +## CHECK: MonitorOperation [<<Param>>] kind:exit + +## CHECK-START: int Test.synchronizedHashCode(java.lang.Object) dead_code_elimination (after) +## CHECK: MonitorOperation [<<Param:l\d+>>] kind:enter +## CHECK: MonitorOperation [<<Param>>] kind:exit + +.method public static synchronizedHashCode(Ljava/lang/Object;)I + .registers 2 + + monitor-enter p0 + invoke-virtual {p0}, Ljava/lang/Object;->hashCode()I + move-result v0 + + # Must not get removed by DCE. + monitor-exit p0 + + return v0 + +.end method diff --git a/test/522-checker-regression-monitor-exit/src/Main.java b/test/522-checker-regression-monitor-exit/src/Main.java new file mode 100644 index 0000000000..c85ac966ad --- /dev/null +++ b/test/522-checker-regression-monitor-exit/src/Main.java @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Method; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.CancellationException; +import java.util.concurrent.TimeoutException; + +public class Main { + + // Workaround for b/18051191. 
+ class InnerClass {} + + private static class HashCodeQuery implements Callable<Integer> { + public HashCodeQuery(Object obj) { + m_obj = obj; + } + + public Integer call() { + Integer result; + try { + Class<?> c = Class.forName("Test"); + Method m = c.getMethod("synchronizedHashCode", new Class[] { Object.class }); + result = (Integer) m.invoke(null, m_obj); + } catch (Exception e) { + System.err.println("Hash code query exception"); + e.printStackTrace(); + result = -1; + } + return result; + } + + private Object m_obj; + private int m_index; + } + + public static void main(String args[]) throws Exception { + Object obj = new Object(); + int numThreads = 10; + + ExecutorService pool = Executors.newFixedThreadPool(numThreads); + + List<HashCodeQuery> queries = new ArrayList<HashCodeQuery>(numThreads); + for (int i = 0; i < numThreads; ++i) { + queries.add(new HashCodeQuery(obj)); + } + + try { + List<Future<Integer>> results = pool.invokeAll(queries, 5, TimeUnit.SECONDS); + + int hash = obj.hashCode(); + for (int i = 0; i < numThreads; ++i) { + int result = results.get(i).get(); + if (hash != result) { + throw new Error("Query #" + i + " wrong. Expected " + hash + ", got " + result); + } + } + pool.shutdown(); + } catch (CancellationException ex) { + System.err.println("Job timeout"); + System.exit(1); + } + } +} diff --git a/test/523-checker-can-throw-regression/expected.txt b/test/523-checker-can-throw-regression/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/523-checker-can-throw-regression/expected.txt diff --git a/test/523-checker-can-throw-regression/info.txt b/test/523-checker-can-throw-regression/info.txt new file mode 100644 index 0000000000..720dc85176 --- /dev/null +++ b/test/523-checker-can-throw-regression/info.txt @@ -0,0 +1,2 @@ +Regression test for the HGraphBuilder which would split a throwing catch block +but would not update information about which blocks throw.
\ No newline at end of file diff --git a/test/523-checker-can-throw-regression/smali/Test.smali b/test/523-checker-can-throw-regression/smali/Test.smali new file mode 100644 index 0000000000..87192ea123 --- /dev/null +++ b/test/523-checker-can-throw-regression/smali/Test.smali @@ -0,0 +1,53 @@ +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LTest; + +.super Ljava/lang/Object; + +## CHECK-START: int Test.testCase(int, int, int) builder (after) +## CHECK: TryBoundary kind:entry +## CHECK: TryBoundary kind:entry +## CHECK-NOT: TryBoundary kind:entry + +## CHECK-START: int Test.testCase(int, int, int) builder (after) +## CHECK: TryBoundary kind:exit +## CHECK: TryBoundary kind:exit +## CHECK-NOT: TryBoundary kind:exit + +.method public static testCase(III)I + .registers 4 + + :try_start_1 + div-int/2addr p0, p1 + return p0 + :try_end_1 + .catchall {:try_start_1 .. :try_end_1} :catchall + + :catchall + :try_start_2 + move-exception v0 + # Block would be split here but second part not marked as throwing. + div-int/2addr p0, p1 + if-eqz p2, :else + + div-int/2addr p0, p1 + :else + div-int/2addr p0, p2 + return p0 + :try_end_2 + .catchall {:try_start_2 .. :try_end_2} :catchall + +.end method diff --git a/test/523-checker-can-throw-regression/src/Main.java b/test/523-checker-can-throw-regression/src/Main.java new file mode 100644 index 0000000000..3ff48f3d4d --- /dev/null +++ b/test/523-checker-can-throw-regression/src/Main.java @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Method; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.CancellationException; +import java.util.concurrent.TimeoutException; + +public class Main { + + // Workaround for b/18051191. 
+ class InnerClass {} + + public static void main(String args[]) {} +} diff --git a/test/524-boolean-simplifier-regression/expected.txt b/test/524-boolean-simplifier-regression/expected.txt new file mode 100644 index 0000000000..863339fb8c --- /dev/null +++ b/test/524-boolean-simplifier-regression/expected.txt @@ -0,0 +1 @@ +Passed diff --git a/test/524-boolean-simplifier-regression/info.txt b/test/524-boolean-simplifier-regression/info.txt new file mode 100644 index 0000000000..b38d71ce73 --- /dev/null +++ b/test/524-boolean-simplifier-regression/info.txt @@ -0,0 +1 @@ +Regression test for optimizing boolean simplifier. diff --git a/test/524-boolean-simplifier-regression/src/Main.java b/test/524-boolean-simplifier-regression/src/Main.java new file mode 100644 index 0000000000..a8830bb30f --- /dev/null +++ b/test/524-boolean-simplifier-regression/src/Main.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + public static boolean test2() { + throw new NullPointerException(); + } + + public static boolean test1() { + System.out.println("Passed"); + try { + test2(); + } catch (NullPointerException npe) { + } + return true; + } + + public static void main(String[] args) { + boolean b=false; + + b = (test1() || (b = b)) & b; + } +} diff --git a/test/525-checker-arrays-and-fields/expected.txt b/test/525-checker-arrays-and-fields/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/525-checker-arrays-and-fields/expected.txt diff --git a/test/525-checker-arrays-and-fields/info.txt b/test/525-checker-arrays-and-fields/info.txt new file mode 100644 index 0000000000..3e16abf204 --- /dev/null +++ b/test/525-checker-arrays-and-fields/info.txt @@ -0,0 +1 @@ +Test on (in)variant static and instance field and array references in loops. diff --git a/test/525-checker-arrays-and-fields/src/Main.java b/test/525-checker-arrays-and-fields/src/Main.java new file mode 100644 index 0000000000..a635a5157f --- /dev/null +++ b/test/525-checker-arrays-and-fields/src/Main.java @@ -0,0 +1,1099 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// +// Test on (in)variant static and instance field and array references in loops. +// +public class Main { + + private static Object anObject = new Object(); + private static Object anotherObject = new Object(); + + // + // Static fields. 
+ // + + private static boolean sZ; + private static byte sB; + private static char sC; + private static short sS; + private static int sI; + private static long sJ; + private static float sF; + private static double sD; + private static Object sL; + + // + // Static arrays. + // + + private static boolean[] sArrZ; + private static byte[] sArrB; + private static char[] sArrC; + private static short[] sArrS; + private static int[] sArrI; + private static long[] sArrJ; + private static float[] sArrF; + private static double[] sArrD; + private static Object[] sArrL; + + // + // Instance fields. + // + + private boolean mZ; + private byte mB; + private char mC; + private short mS; + private int mI; + private long mJ; + private float mF; + private double mD; + private Object mL; + + // + // Instance arrays. + // + + private boolean[] mArrZ; + private byte[] mArrB; + private char[] mArrC; + private short[] mArrS; + private int[] mArrI; + private long[] mArrJ; + private float[] mArrF; + private double[] mArrD; + private Object[] mArrL; + + // + // Loops on static arrays with invariant static field references. + // The checker is used to ensure hoisting occurred. + // + + /// CHECK-START: void Main.SInvLoopZ() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopZ() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopZ() { + for (int i = 0; i < sArrZ.length; i++) { + sArrZ[i] = sZ; + } + } + + /// CHECK-START: void Main.SInvLoopB() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopB() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopB() { + for (int i = 0; i < sArrB.length; i++) { + sArrB[i] = sB; + } + } + + /// CHECK-START: void Main.SInvLoopC() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopC() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopC() { + for (int i = 0; i < sArrC.length; i++) { + sArrC[i] = sC; + } + } + + /// CHECK-START: void Main.SInvLoopS() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopS() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopS() { + for (int i = 0; i < sArrS.length; i++) { + sArrS[i] = sS; + } + } + + /// CHECK-START: void Main.SInvLoopI() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopI() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopI() { + for (int i = 0; i < sArrI.length; i++) { + sArrI[i] = sI; + } + } + + /// CHECK-START: void Main.SInvLoopJ() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopJ() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopJ() { + for (int i = 0; i < sArrJ.length; i++) { + sArrJ[i] = sJ; + } + } + + 
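+ // For intuition only (this comment is not one of the checker assertions): when
+ // hoisting succeeds, each loop above behaves as if the invariant field read had
+ // been moved in front of the loop, e.g.
+ //   long tmp = sJ;
+ //   for (int i = 0; i < sArrJ.length; i++) { sArrJ[i] = tmp; }
+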
/// CHECK-START: void Main.SInvLoopF() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopF() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopF() { + for (int i = 0; i < sArrF.length; i++) { + sArrF[i] = sF; + } + } + + /// CHECK-START: void Main.SInvLoopD() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopD() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopD() { + for (int i = 0; i < sArrD.length; i++) { + sArrD[i] = sD; + } + } + + /// CHECK-START: void Main.SInvLoopL() licm (before) + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + /// CHECK-DAG: StaticFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.SInvLoopL() licm (after) + /// CHECK-DAG: StaticFieldGet loop:none + /// CHECK-DAG: StaticFieldGet loop:none + + private static void SInvLoopL() { + for (int i = 0; i < sArrL.length; i++) { + sArrL[i] = sL; + } + } + + // + // Loops on static arrays with variant static field references. + // Incorrect hoisting is detected by incorrect outcome. + // + + private static void SVarLoopZ() { + for (int i = 0; i < sArrZ.length; i++) { + sArrZ[i] = sZ; + if (i == 10) + sZ = !sZ; + } + } + + private static void SVarLoopB() { + for (int i = 0; i < sArrB.length; i++) { + sArrB[i] = sB; + if (i == 10) + sB++; + } + } + + private static void SVarLoopC() { + for (int i = 0; i < sArrC.length; i++) { + sArrC[i] = sC; + if (i == 10) + sC++; + } + } + + private static void SVarLoopS() { + for (int i = 0; i < sArrS.length; i++) { + sArrS[i] = sS; + if (i == 10) + sS++; + } + } + + private static void SVarLoopI() { + for (int i = 0; i < sArrI.length; i++) { + sArrI[i] = sI; + if (i == 10) + sI++; + } + } + + private static void SVarLoopJ() { + for (int i = 0; i < sArrJ.length; i++) { + sArrJ[i] = sJ; + if (i == 10) + sJ++; + } + } + + private static void SVarLoopF() { + for (int i = 0; i < sArrF.length; i++) { + sArrF[i] = sF; + if (i == 10) + sF++; + } + } + + private static void SVarLoopD() { + for (int i = 0; i < sArrD.length; i++) { + sArrD[i] = sD; + if (i == 10) + sD++; + } + } + + private static void SVarLoopL() { + for (int i = 0; i < sArrL.length; i++) { + sArrL[i] = sL; + if (i == 10) + sL = anotherObject; + } + } + + // + // Loops on static arrays with a cross-over reference. + // Incorrect hoisting is detected by incorrect outcome. + // In addition, the checker is used to detect no hoisting. 
+ // + + /// CHECK-START: void Main.SCrossOverLoopZ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopZ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopZ() { + for (int i = 0; i < sArrZ.length; i++) { + sArrZ[i] = !sArrZ[20]; + } + } + + /// CHECK-START: void Main.SCrossOverLoopB() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopB() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopB() { + for (int i = 0; i < sArrB.length; i++) { + sArrB[i] = (byte)(sArrB[20] + 2); + } + } + + /// CHECK-START: void Main.SCrossOverLoopC() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopC() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopC() { + for (int i = 0; i < sArrC.length; i++) { + sArrC[i] = (char)(sArrC[20] + 2); + } + } + + /// CHECK-START: void Main.SCrossOverLoopS() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopS() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopS() { + for (int i = 0; i < sArrS.length; i++) { + sArrS[i] = (short)(sArrS[20] + 2); + } + } + + /// CHECK-START: void Main.SCrossOverLoopI() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopI() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopI() { + for (int i = 0; i < sArrI.length; i++) { + sArrI[i] = sArrI[20] + 2; + } + } + + /// CHECK-START: void Main.SCrossOverLoopJ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopJ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopJ() { + for (int i = 0; i < sArrJ.length; i++) { + sArrJ[i] = sArrJ[20] + 2; + } + } + + /// CHECK-START: void Main.SCrossOverLoopF() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopF() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopF() { + for (int i = 0; i < sArrF.length; i++) { + sArrF[i] = sArrF[20] + 2; + } + } + + /// CHECK-START: void Main.SCrossOverLoopD() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopD() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopD() { + for (int i = 0; i < sArrD.length; i++) { + sArrD[i] = sArrD[20] + 2; + } + } + + /// CHECK-START: void Main.SCrossOverLoopL() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.SCrossOverLoopL() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: 
ArraySet loop:{{B\d+}} + + private static void SCrossOverLoopL() { + for (int i = 0; i < sArrL.length; i++) { + sArrL[i] = (sArrL[20] == anObject) ? anotherObject : anObject; + } + } + + // + // Loops on instance arrays with invariant instance field references. + // The checker is used to ensure hoisting occurred. + // + + /// CHECK-START: void Main.InvLoopZ() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopZ() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopZ() { + for (int i = 0; i < mArrZ.length; i++) { + mArrZ[i] = mZ; + } + } + + /// CHECK-START: void Main.InvLoopB() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopB() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopB() { + for (int i = 0; i < mArrB.length; i++) { + mArrB[i] = mB; + } + } + + /// CHECK-START: void Main.InvLoopC() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopC() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopC() { + for (int i = 0; i < mArrC.length; i++) { + mArrC[i] = mC; + } + } + + /// CHECK-START: void Main.InvLoopS() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopS() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopS() { + for (int i = 0; i < mArrS.length; i++) { + mArrS[i] = mS; + } + } + + /// CHECK-START: void Main.InvLoopI() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopI() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopI() { + for (int i = 0; i < mArrI.length; i++) { + mArrI[i] = mI; + } + } + + /// CHECK-START: void Main.InvLoopJ() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopJ() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopJ() { + for (int i = 0; i < mArrJ.length; i++) { + mArrJ[i] = mJ; + } + } + + /// CHECK-START: void Main.InvLoopF() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopF() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopF() { + for (int i = 0; i < mArrF.length; i++) { + mArrF[i] = mF; + } + } + + /// CHECK-START: void Main.InvLoopD() licm (before) + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopD() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopD() { + for (int i = 0; i < mArrD.length; i++) { + mArrD[i] = mD; + } + } + + /// CHECK-START: void Main.InvLoopL() licm (before) + /// CHECK-DAG: InstanceFieldGet 
loop:{{B\d+}} + /// CHECK-DAG: InstanceFieldGet loop:{{B\d+}} + + /// CHECK-START: void Main.InvLoopL() licm (after) + /// CHECK-DAG: InstanceFieldGet loop:none + /// CHECK-DAG: InstanceFieldGet loop:none + + private void InvLoopL() { + for (int i = 0; i < mArrL.length; i++) { + mArrL[i] = mL; + } + } + + // + // Loops on instance arrays with variant instance field references. + // Incorrect hoisting is detected by incorrect outcome. + // + + private void VarLoopZ() { + for (int i = 0; i < mArrZ.length; i++) { + mArrZ[i] = mZ; + if (i == 10) + mZ = !mZ; + } + } + + private void VarLoopB() { + for (int i = 0; i < mArrB.length; i++) { + mArrB[i] = mB; + if (i == 10) + mB++; + } + } + + private void VarLoopC() { + for (int i = 0; i < mArrC.length; i++) { + mArrC[i] = mC; + if (i == 10) + mC++; + } + } + + private void VarLoopS() { + for (int i = 0; i < mArrS.length; i++) { + mArrS[i] = mS; + if (i == 10) + mS++; + } + } + + private void VarLoopI() { + for (int i = 0; i < mArrI.length; i++) { + mArrI[i] = mI; + if (i == 10) + mI++; + } + } + + private void VarLoopJ() { + for (int i = 0; i < mArrJ.length; i++) { + mArrJ[i] = mJ; + if (i == 10) + mJ++; + } + } + + private void VarLoopF() { + for (int i = 0; i < mArrF.length; i++) { + mArrF[i] = mF; + if (i == 10) + mF++; + } + } + + private void VarLoopD() { + for (int i = 0; i < mArrD.length; i++) { + mArrD[i] = mD; + if (i == 10) + mD++; + } + } + + private void VarLoopL() { + for (int i = 0; i < mArrL.length; i++) { + mArrL[i] = mL; + if (i == 10) + mL = anotherObject; + } + } + + // + // Loops on instance arrays with a cross-over reference. + // Incorrect hoisting is detected by incorrect outcome. + // In addition, the checker is used to detect no hoisting. + // + + /// CHECK-START: void Main.CrossOverLoopZ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopZ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopZ() { + for (int i = 0; i < mArrZ.length; i++) { + mArrZ[i] = !mArrZ[20]; + } + } + + /// CHECK-START: void Main.CrossOverLoopB() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopB() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopB() { + for (int i = 0; i < mArrB.length; i++) { + mArrB[i] = (byte)(mArrB[20] + 2); + } + } + + /// CHECK-START: void Main.CrossOverLoopC() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopC() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopC() { + for (int i = 0; i < mArrC.length; i++) { + mArrC[i] = (char)(mArrC[20] + 2); + } + } + + /// CHECK-START: void Main.CrossOverLoopS() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopS() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopS() { + for (int i = 0; i < mArrS.length; i++) { + mArrS[i] = (short)(mArrS[20] + 2); + } + } + + /// CHECK-START: void Main.CrossOverLoopI() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopI() licm 
(after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopI() { + for (int i = 0; i < mArrI.length; i++) { + mArrI[i] = mArrI[20] + 2; + } + } + + /// CHECK-START: void Main.CrossOverLoopJ() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopJ() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopJ() { + for (int i = 0; i < mArrJ.length; i++) { + mArrJ[i] = mArrJ[20] + 2; + } + } + + /// CHECK-START: void Main.CrossOverLoopF() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopF() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopF() { + for (int i = 0; i < mArrF.length; i++) { + mArrF[i] = mArrF[20] + 2; + } + } + + /// CHECK-START: void Main.CrossOverLoopD() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopD() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopD() { + for (int i = 0; i < mArrD.length; i++) { + mArrD[i] = mArrD[20] + 2; + } + } + + /// CHECK-START: void Main.CrossOverLoopL() licm (before) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + /// CHECK-START: void Main.CrossOverLoopL() licm (after) + /// CHECK-DAG: ArrayGet loop:{{B\d+}} + /// CHECK-DAG: ArraySet loop:{{B\d+}} + + private void CrossOverLoopL() { + for (int i = 0; i < mArrL.length; i++) { + mArrL[i] = (mArrL[20] == anObject) ? anotherObject : anObject; + } + } + + // + // Driver and testers. + // + + public static void main(String[] args) { + DoStaticTests(); + new Main().DoInstanceTests(); + } + + private static void DoStaticTests() { + // Type Z. + sZ = true; + sArrZ = new boolean[100]; + SInvLoopZ(); + for (int i = 0; i < sArrZ.length; i++) { + expectEquals(true, sArrZ[i]); + } + SVarLoopZ(); + for (int i = 0; i < sArrZ.length; i++) { + expectEquals(i <= 10, sArrZ[i]); + } + SCrossOverLoopZ(); + for (int i = 0; i < sArrZ.length; i++) { + expectEquals(i <= 20, sArrZ[i]); + } + // Type B. + sB = 1; + sArrB = new byte[100]; + SInvLoopB(); + for (int i = 0; i < sArrB.length; i++) { + expectEquals(1, sArrB[i]); + } + SVarLoopB(); + for (int i = 0; i < sArrB.length; i++) { + expectEquals(i <= 10 ? 1 : 2, sArrB[i]); + } + SCrossOverLoopB(); + for (int i = 0; i < sArrB.length; i++) { + expectEquals(i <= 20 ? 4 : 6, sArrB[i]); + } + // Type C. + sC = 2; + sArrC = new char[100]; + SInvLoopC(); + for (int i = 0; i < sArrC.length; i++) { + expectEquals(2, sArrC[i]); + } + SVarLoopC(); + for (int i = 0; i < sArrC.length; i++) { + expectEquals(i <= 10 ? 2 : 3, sArrC[i]); + } + SCrossOverLoopC(); + for (int i = 0; i < sArrC.length; i++) { + expectEquals(i <= 20 ? 5 : 7, sArrC[i]); + } + // Type S. + sS = 3; + sArrS = new short[100]; + SInvLoopS(); + for (int i = 0; i < sArrS.length; i++) { + expectEquals(3, sArrS[i]); + } + SVarLoopS(); + for (int i = 0; i < sArrS.length; i++) { + expectEquals(i <= 10 ? 3 : 4, sArrS[i]); + } + SCrossOverLoopS(); + for (int i = 0; i < sArrS.length; i++) { + expectEquals(i <= 20 ? 6 : 8, sArrS[i]); + } + // Type I. 
+ sI = 4; + sArrI = new int[100]; + SInvLoopI(); + for (int i = 0; i < sArrI.length; i++) { + expectEquals(4, sArrI[i]); + } + SVarLoopI(); + for (int i = 0; i < sArrI.length; i++) { + expectEquals(i <= 10 ? 4 : 5, sArrI[i]); + } + SCrossOverLoopI(); + for (int i = 0; i < sArrI.length; i++) { + expectEquals(i <= 20 ? 7 : 9, sArrI[i]); + } + // Type J. + sJ = 5; + sArrJ = new long[100]; + SInvLoopJ(); + for (int i = 0; i < sArrJ.length; i++) { + expectEquals(5, sArrJ[i]); + } + SVarLoopJ(); + for (int i = 0; i < sArrJ.length; i++) { + expectEquals(i <= 10 ? 5 : 6, sArrJ[i]); + } + SCrossOverLoopJ(); + for (int i = 0; i < sArrJ.length; i++) { + expectEquals(i <= 20 ? 8 : 10, sArrJ[i]); + } + // Type F. + sF = 6.0f; + sArrF = new float[100]; + SInvLoopF(); + for (int i = 0; i < sArrF.length; i++) { + expectEquals(6, sArrF[i]); + } + SVarLoopF(); + for (int i = 0; i < sArrF.length; i++) { + expectEquals(i <= 10 ? 6 : 7, sArrF[i]); + } + SCrossOverLoopF(); + for (int i = 0; i < sArrF.length; i++) { + expectEquals(i <= 20 ? 9 : 11, sArrF[i]); + } + // Type D. + sD = 7.0; + sArrD = new double[100]; + SInvLoopD(); + for (int i = 0; i < sArrD.length; i++) { + expectEquals(7.0, sArrD[i]); + } + SVarLoopD(); + for (int i = 0; i < sArrD.length; i++) { + expectEquals(i <= 10 ? 7 : 8, sArrD[i]); + } + SCrossOverLoopD(); + for (int i = 0; i < sArrD.length; i++) { + expectEquals(i <= 20 ? 10 : 12, sArrD[i]); + } + // Type L. + sL = anObject; + sArrL = new Object[100]; + SInvLoopL(); + for (int i = 0; i < sArrL.length; i++) { + expectEquals(anObject, sArrL[i]); + } + SVarLoopL(); + for (int i = 0; i < sArrL.length; i++) { + expectEquals(i <= 10 ? anObject : anotherObject, sArrL[i]); + } + SCrossOverLoopL(); + for (int i = 0; i < sArrL.length; i++) { + expectEquals(i <= 20 ? anObject : anotherObject, sArrL[i]); + } + } + + private void DoInstanceTests() { + // Type Z. + mZ = true; + mArrZ = new boolean[100]; + InvLoopZ(); + for (int i = 0; i < mArrZ.length; i++) { + expectEquals(true, mArrZ[i]); + } + VarLoopZ(); + for (int i = 0; i < mArrZ.length; i++) { + expectEquals(i <= 10, mArrZ[i]); + } + CrossOverLoopZ(); + for (int i = 0; i < mArrZ.length; i++) { + expectEquals(i <= 20, mArrZ[i]); + } + // Type B. + mB = 1; + mArrB = new byte[100]; + InvLoopB(); + for (int i = 0; i < mArrB.length; i++) { + expectEquals(1, mArrB[i]); + } + VarLoopB(); + for (int i = 0; i < mArrB.length; i++) { + expectEquals(i <= 10 ? 1 : 2, mArrB[i]); + } + CrossOverLoopB(); + for (int i = 0; i < mArrB.length; i++) { + expectEquals(i <= 20 ? 4 : 6, mArrB[i]); + } + // Type C. + mC = 2; + mArrC = new char[100]; + InvLoopC(); + for (int i = 0; i < mArrC.length; i++) { + expectEquals(2, mArrC[i]); + } + VarLoopC(); + for (int i = 0; i < mArrC.length; i++) { + expectEquals(i <= 10 ? 2 : 3, mArrC[i]); + } + CrossOverLoopC(); + for (int i = 0; i < mArrC.length; i++) { + expectEquals(i <= 20 ? 5 : 7, mArrC[i]); + } + // Type S. + mS = 3; + mArrS = new short[100]; + InvLoopS(); + for (int i = 0; i < mArrS.length; i++) { + expectEquals(3, mArrS[i]); + } + VarLoopS(); + for (int i = 0; i < mArrS.length; i++) { + expectEquals(i <= 10 ? 3 : 4, mArrS[i]); + } + CrossOverLoopS(); + for (int i = 0; i < mArrS.length; i++) { + expectEquals(i <= 20 ? 6 : 8, mArrS[i]); + } + // Type I. + mI = 4; + mArrI = new int[100]; + InvLoopI(); + for (int i = 0; i < mArrI.length; i++) { + expectEquals(4, mArrI[i]); + } + VarLoopI(); + for (int i = 0; i < mArrI.length; i++) { + expectEquals(i <= 10 ? 
4 : 5, mArrI[i]); + } + CrossOverLoopI(); + for (int i = 0; i < mArrI.length; i++) { + expectEquals(i <= 20 ? 7 : 9, mArrI[i]); + } + // Type J. + mJ = 5; + mArrJ = new long[100]; + InvLoopJ(); + for (int i = 0; i < mArrJ.length; i++) { + expectEquals(5, mArrJ[i]); + } + VarLoopJ(); + for (int i = 0; i < mArrJ.length; i++) { + expectEquals(i <= 10 ? 5 : 6, mArrJ[i]); + } + CrossOverLoopJ(); + for (int i = 0; i < mArrJ.length; i++) { + expectEquals(i <= 20 ? 8 : 10, mArrJ[i]); + } + // Type F. + mF = 6.0f; + mArrF = new float[100]; + InvLoopF(); + for (int i = 0; i < mArrF.length; i++) { + expectEquals(6, mArrF[i]); + } + VarLoopF(); + for (int i = 0; i < mArrF.length; i++) { + expectEquals(i <= 10 ? 6 : 7, mArrF[i]); + } + CrossOverLoopF(); + for (int i = 0; i < mArrF.length; i++) { + expectEquals(i <= 20 ? 9 : 11, mArrF[i]); + } + // Type D. + mD = 7.0; + mArrD = new double[100]; + InvLoopD(); + for (int i = 0; i < mArrD.length; i++) { + expectEquals(7.0, mArrD[i]); + } + VarLoopD(); + for (int i = 0; i < mArrD.length; i++) { + expectEquals(i <= 10 ? 7 : 8, mArrD[i]); + } + CrossOverLoopD(); + for (int i = 0; i < mArrD.length; i++) { + expectEquals(i <= 20 ? 10 : 12, mArrD[i]); + } + // Type L. + mL = anObject; + mArrL = new Object[100]; + InvLoopL(); + for (int i = 0; i < mArrL.length; i++) { + expectEquals(anObject, mArrL[i]); + } + VarLoopL(); + for (int i = 0; i < mArrL.length; i++) { + expectEquals(i <= 10 ? anObject : anotherObject, mArrL[i]); + } + CrossOverLoopL(); + for (int i = 0; i < mArrL.length; i++) { + expectEquals(i <= 20 ? anObject : anotherObject, mArrL[i]); + } + } + + private static void expectEquals(boolean expected, boolean result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(byte expected, byte result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(char expected, char result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(short expected, short result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(float expected, float result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(double expected, double result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(Object expected, Object result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } +} diff --git a/test/526-checker-caller-callee-regs/expected.txt b/test/526-checker-caller-callee-regs/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/526-checker-caller-callee-regs/expected.txt diff --git a/test/526-checker-caller-callee-regs/info.txt b/test/526-checker-caller-callee-regs/info.txt new file mode 100644 index 0000000000..0e0373ac95 --- 
/dev/null +++ b/test/526-checker-caller-callee-regs/info.txt @@ -0,0 +1 @@ +Test allocation of caller and callee saved registers. diff --git a/test/526-checker-caller-callee-regs/src/Main.java b/test/526-checker-caller-callee-regs/src/Main.java new file mode 100644 index 0000000000..a1f33014ef --- /dev/null +++ b/test/526-checker-caller-callee-regs/src/Main.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + public static void assertIntEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + static boolean doThrow = false; + + // This function always returns 1. + // We use 'throw' to prevent the function from being inlined. + public static int $opt$noinline$function_call(int arg) { + if (doThrow) throw new Error(); + return 1 % arg; + } + + // | registers available to | regexp + // | the register allocator | + // ------------------------------|------------------------|----------------- + // ARM64 callee-saved registers | [x20-x29] | x2[0-9] + // ARM callee-saved registers | [r5-r8,r10,r11] | r([5-8]|10|11) + + /** + * Check that a value live across a function call is allocated in a callee + * saved register. + */ + + /// CHECK-START-ARM: int Main.$opt$LiveInCall(int) register (after) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK: <<t1:i\d+>> Add [<<Arg>>,<<Const1>>] {{.*->r([5-8]|10|11)}} + /// CHECK: <<t2:i\d+>> InvokeStaticOrDirect + /// CHECK: Sub [<<t1>>,<<t2>>] + /// CHECK: Return + + /// CHECK-START-ARM64: int Main.$opt$LiveInCall(int) register (after) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 + /// CHECK: <<t1:i\d+>> Add [<<Arg>>,<<Const1>>] {{.*->x2[0-9]}} + /// CHECK: <<t2:i\d+>> InvokeStaticOrDirect + /// CHECK: Sub [<<t1>>,<<t2>>] + /// CHECK: Return + + // TODO: Add tests for other architectures. + + public static int $opt$LiveInCall(int arg) { + int t1 = arg + 1; + int t2 = $opt$noinline$function_call(arg); + return t1 - t2; + } + + public static void main(String[] args) { + int arg = 123; + assertIntEquals($opt$LiveInCall(arg), arg); + } +} diff --git a/test/526-long-regalloc/expected.txt b/test/526-long-regalloc/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/526-long-regalloc/expected.txt diff --git a/test/526-long-regalloc/info.txt b/test/526-long-regalloc/info.txt new file mode 100644 index 0000000000..a5ce1bc011 --- /dev/null +++ b/test/526-long-regalloc/info.txt @@ -0,0 +1,2 @@ +Regression test for optimizing that used to trip when allocating a register +pair under certain circumstances. 
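A note on the two 526 tests: both revolve around which values must survive a call. A value live across a call (like t1 in $opt$LiveInCall above) must end up in a callee-saved register or on the stack, while a value whose live range does not span any call may use a caller-saved register freely. A minimal hypothetical contrast to $opt$LiveInCall (illustrative only, not part of this patch):

    // No value spans the call here: t2 is produced by the call itself and t1 is
    // computed afterwards, so caller-saved registers suffice.
    public static int $opt$NotLiveInCall(int arg) {
      int t2 = $opt$noinline$function_call(arg);
      int t1 = arg + 1;
      return t1 - t2;
    }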
diff --git a/test/526-long-regalloc/src/Main.java b/test/526-long-regalloc/src/Main.java new file mode 100644 index 0000000000..e8b3096d06 --- /dev/null +++ b/test/526-long-regalloc/src/Main.java @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class Main { + public static void main(String[] args) { + foo(); + } + + public static void foo() { + int a = myField1; // esi + int b = myField2; // edi + $noinline$bar(); // forces a and b to be allocated to callee-save registers + int c = myField3; // ecx + int e = myField4; // ebx + int f = myField5; // edx + long d = a == 42 ? myLongField1 : 42L; // Will call AllocateBlockedReg -> edx/ebx + + // At this point, the register allocator used to be in a bogus state, where the low + // part of the interval was in the active set, but not the high part. + + long i = myLongField1; // Will call TrySplitNonPairOrUnalignedPairIntervalAt -> Failing DCHECK + + // Use esi and edi first so that d is not allocated to them. + myField2 = a; + myField3 = b; + + // The following sequence of instructions makes the AllocateBlockedReg call + // for allocating the d variable misbehave: allocation of the low interval would split + // both low and high interval at the fixed use; therefore the allocation of the high interval + // would not see the register use, and think the interval can just be spilled and not be + // put in the active set, even though it is holding a register. + myField1 = (int)d; // stack use + myLongField3 = (long) myField2; // edx fixed use + myLongField2 = d; // register use + + // Ensure the HInstructions mapping to i, c, e, and f have a live range. + myLongField1 = i; + myField4 = c; + myField5 = e; + myField6 = f; + } + + public static long $noinline$bar() { + if (doThrow) throw new Error(); + return 42; + } + + public static boolean doThrow = false; + + public static int myField1 = 0; + public static int myField2 = 0; + public static int myField3 = 0; + public static int myField4 = 0; + public static int myField5 = 0; + public static int myField6 = 0; + public static long myLongField1 = 0L; + public static long myLongField2 = 0L; + public static long myLongField3 = 0L; +} diff --git a/test/528-long-hint/expected.txt b/test/528-long-hint/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/528-long-hint/expected.txt diff --git a/test/528-long-hint/info.txt b/test/528-long-hint/info.txt new file mode 100644 index 0000000000..6a9cfaec36 --- /dev/null +++ b/test/528-long-hint/info.txt @@ -0,0 +1,2 @@ +Regression test for optimizing that used to crash on x86 when +allocating a wrong register pair.
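Background for the two register-pair regressions (526-long-regalloc above and 528-long-hint below): on 32-bit targets a Java long occupies two 32-bit registers, so the allocator tracks a low and a high interval that must be assigned together as an aligned pair. A rough Java illustration of the two halves involved (illustrative only, not part of the tests):

    // The two 32-bit halves that would occupy the low and high registers of the
    // allocated pair on a 32-bit target.
    static int lowHalf(long v)  { return (int) v; }
    static int highHalf(long v) { return (int) (v >>> 32); }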
diff --git a/test/528-long-hint/src/Main.java b/test/528-long-hint/src/Main.java new file mode 100644 index 0000000000..ca1a114a7a --- /dev/null +++ b/test/528-long-hint/src/Main.java @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import sun.misc.Unsafe; +import java.lang.reflect.Field; + +public class Main { + + long instanceField; + static long myLongField1; + static long myLongField2; + + public static void main(String[] args) throws Exception { + Unsafe unsafe = getUnsafe(); + Main f = new Main(); + long offset = unsafe.objectFieldOffset(Main.class.getDeclaredField("instanceField")); + getUnsafe(); // spill offset + long a = myLongField1; + // We used the hinted register for the low part of b, which is EBX, as requested + // by the intrinsic below. Allocating EBX for the low part would put ESP as the high + // part, and we did not check that ESP was blocked. + long b = myLongField2; + unsafe.compareAndSwapLong(f, offset, a, b); + } + + + private static Unsafe getUnsafe() throws Exception { + Field f = Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (Unsafe) f.get(null); + } +} diff --git a/test/701-easy-div-rem/build b/test/701-easy-div-rem/build index 1dc8452d91..666fe895b5 100644 --- a/test/701-easy-div-rem/build +++ b/test/701-easy-div-rem/build @@ -23,6 +23,10 @@ python ./genMain.py # Increase the file size limitation for classes.lst as the machine generated # source file contains a lot of methods and is quite large. -ulimit -S 4096 + +# Jack generates big temp files so only apply ulimit for dx. +if [ ${USE_JACK} = "false" ]; then + ulimit -S 4096 +fi ./default-build diff --git a/test/705-register-conflict/src/Main.java b/test/705-register-conflict/src/Main.java index 42c79fb275..9ae10ecba6 100644 --- a/test/705-register-conflict/src/Main.java +++ b/test/705-register-conflict/src/Main.java @@ -14,7 +14,7 @@ * limitations under the License. */ -// Note that $opt$ is a marker for the optimizing compiler to ensure +// Note that $opt$ is a marker for the optimizing compiler to test // it does compile the method. public class Main { diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt index 7059b6b10e..dd37cdbaf5 100644 --- a/test/800-smali/expected.txt +++ b/test/800-smali/expected.txt @@ -27,4 +27,18 @@ b/22045582 (wide) b/21886894 b/22080519 b/21645819 +b/22244733 +b/22331663 +b/22331663 (pass) +b/22331663 (fail) +b/22411633 (1) +b/22411633 (2) +b/22411633 (3) +b/22411633 (4) +b/22411633 (5) +b/22777307 +b/22881413 +b/20843113 +b/23201502 (float) +b/23201502 (double) Done!
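The smali cases listed in expected.txt above are driven reflectively by the 800-smali harness (the TestCase additions to test/800-smali/src/Main.java appear further below). Roughly, each case loads the compiled smali class by name and invokes its entry method, along these lines (a simplified sketch of the harness shape, not its exact code):

    Class<?> c = Class.forName("B22244733");
    Method m = c.getMethod("run", String.class);
    Object result = m.invoke(null, "abc");  // this case expects "abc" back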
diff --git a/test/800-smali/smali/b_20843113.smali b/test/800-smali/smali/b_20843113.smali new file mode 100644 index 0000000000..ab3dc4157b --- /dev/null +++ b/test/800-smali/smali/b_20843113.smali @@ -0,0 +1,34 @@ +.class public LB20843113; +.super Ljava/lang/Object; + + +.method public constructor <init>(I)V +.registers 2 + +:Label1 + # An instruction that may throw, so as to pass UninitializedThis to the handler + div-int v1, v1, v1 + + # Call the super-constructor + invoke-direct {v0}, Ljava/lang/Object;-><init>()V + + # Return normally. + return-void + +:Label2 + + +:Handler + move-exception v0 # Overwrite the (last) "this" register. This should be + # allowed as we will terminate abnormally below. + + throw v0 # Terminate abnormally + +.catchall {:Label1 .. :Label2} :Handler +.end method + +# Just a dummy. +.method public static run()V +.registers 1 + return-void +.end method diff --git a/test/800-smali/smali/b_22244733.smali b/test/800-smali/smali/b_22244733.smali new file mode 100644 index 0000000000..1b62ad9245 --- /dev/null +++ b/test/800-smali/smali/b_22244733.smali @@ -0,0 +1,7 @@ +.class public LB22244733; +.super Ljava/lang/Object; + +.method public static run(Ljava/lang/String;)Ljava/lang/String; +.registers 2 # One local and one parameter. + return-object p0 # Simple return, use the special-method path in Quick. +.end method diff --git a/test/800-smali/smali/b_22331663.smali b/test/800-smali/smali/b_22331663.smali new file mode 100644 index 0000000000..bae75c2924 --- /dev/null +++ b/test/800-smali/smali/b_22331663.smali @@ -0,0 +1,39 @@ +.class public LB22331663; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + if-eqz v5, :if_eqz_target + + # Construct a java.lang.Object completely, and throw a new exception. + new-instance v4, Ljava/lang/Object; + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + + new-instance v3, Ljava/lang/RuntimeException; + invoke-direct {v3}, Ljava/lang/RuntimeException;-><init>()V +:throw1_begin + throw v3 +:throw1_end + +:if_eqz_target + # Allocate a java.lang.Object (do not initialize), and throw a new exception. + new-instance v4, Ljava/lang/Object; + + new-instance v3, Ljava/lang/RuntimeException; + invoke-direct {v3}, Ljava/lang/RuntimeException;-><init>()V +:throw2_begin + throw v3 +:throw2_end + +:catch_entry + # Catch handler. Here we had to merge the uninitialized with the initialized reference, + # which creates a conflict. Copy the conflict, and then return. This should not make the + # verifier fail the method. + move-object v0, v4 + + return-void + +.catchall {:throw1_begin .. :throw1_end} :catch_entry +.catchall {:throw2_begin .. :throw2_end} :catch_entry +.end method diff --git a/test/800-smali/smali/b_22331663_fail.smali b/test/800-smali/smali/b_22331663_fail.smali new file mode 100644 index 0000000000..0c25e305ee --- /dev/null +++ b/test/800-smali/smali/b_22331663_fail.smali @@ -0,0 +1,20 @@ +.class public LB22331663Fail; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + if-eqz v5, :Label1 + + # Construct a java.lang.Object completely. This makes v4 of reference type. + new-instance v4, Ljava/lang/Object; + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + +:Label1 + # At this point, v4 is the merge of Undefined and ReferenceType. The verifier should + # reject any use of this, even a copy. Previously this was a conflict. Conflicts must + # be movable now, so ensure that we do not get a conflict (and then allow the move). 
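+ # (The harness in src/Main.java below registers this method with an expected
+ # VerifyError.)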
+ move-object v0, v4 + + return-void +.end method diff --git a/test/800-smali/smali/b_22331663_pass.smali b/test/800-smali/smali/b_22331663_pass.smali new file mode 100644 index 0000000000..1b54180da1 --- /dev/null +++ b/test/800-smali/smali/b_22331663_pass.smali @@ -0,0 +1,22 @@ +.class public LB22331663Pass; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + if-eqz v5, :Label1 + + # Construct a java.lang.Object completely. This makes v4 of reference type. + new-instance v4, Ljava/lang/Object; + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + +:Label1 + # At this point, v4 is the merge of Undefined and ReferenceType. The verifier should not + # reject this if it is unused. + + # Do an allocation here. This will force heap checking in gcstress mode. + new-instance v0, Ljava/lang/Object; + invoke-direct {v0}, Ljava/lang/Object;-><init>()V + + return-void +.end method diff --git a/test/800-smali/smali/b_22411633_1.smali b/test/800-smali/smali/b_22411633_1.smali new file mode 100644 index 0000000000..ffc82a86ae --- /dev/null +++ b/test/800-smali/smali/b_22411633_1.smali @@ -0,0 +1,35 @@ +.class public LB22411633_1; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + # Make v3 & v4 defined, just use null. + const v3, 0 + const v4, 0 + + # Allocate a java.lang.Object (do not initialize). + new-instance v4, Ljava/lang/Object; + + # Branch forward. + if-eqz v5, :LabelMerge + + # Just some random work. + add-int/lit16 v3, v3, 1 + + # Another branch forward. + if-nez v5, :LabelMerge + + # Some more random work, technically dead, but reachable. + add-int/lit16 v3, v3, 1 + +:LabelMerge + # v4 is still an uninitialized reference here. Initialize it. + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + + # And test whether it's initialized by calling hashCode. + invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I + + return-void + +.end method diff --git a/test/800-smali/smali/b_22411633_2.smali b/test/800-smali/smali/b_22411633_2.smali new file mode 100644 index 0000000000..9f27c4cb12 --- /dev/null +++ b/test/800-smali/smali/b_22411633_2.smali @@ -0,0 +1,45 @@ +.class public LB22411633_2; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + # Make v3 & v4 defined, just use null. + const v3, 0 + const v4, 0 + + # Allocate a java.lang.Object (do not initialize). + new-instance v4, Ljava/lang/Object; + + # Branch forward. + if-eqz v5, :LabelMerge + + # Create a non-precise object reference. We can do this by merging two objects together + # that only have Object as a common ancestor. + + # Allocate a java.lang.Object and initialize it. + new-instance v4, Ljava/lang/Object; + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + + if-nez v5, :LabelMergeObject + + new-instance v4, Ljava/lang/Integer; + invoke-direct {v4}, Ljava/lang/Integer;-><init>()V + +:LabelMergeObject + + # Dummy work to separate blocks. At this point, v4 is of type Reference<Object>. + add-int/lit16 v3, v3, 1 + +:LabelMerge + # Merge the uninitialized Object from line 12 with the reference to Object from 31. Older + # rules set any reference merged with Object to Object. This is wrong in the case of the + # other reference being an uninitialized reference, as we'd suddenly allow calling on it. + + # Test whether it's some initialized reference by calling hashCode. This should fail, as we + # merged initialized and uninitialized.
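+ # (This case is registered below with an expected VerifyError.)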
+ invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I + + return-void + +.end method diff --git a/test/800-smali/smali/b_22411633_3.smali b/test/800-smali/smali/b_22411633_3.smali new file mode 100644 index 0000000000..d1212f13dd --- /dev/null +++ b/test/800-smali/smali/b_22411633_3.smali @@ -0,0 +1,31 @@ +.class public LB22411633_3; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + # Make v3 & v4 defined, just use null. + const v3, 0 + const v4, 0 + + # Allocate a java.lang.Object (do not initialize). + new-instance v4, Ljava/lang/Object; + + # Branch forward. + if-eqz v5, :LabelMerge + + # Create an initialized Object. + new-instance v4, Ljava/lang/Object; + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + + # Just some random work. + add-int/lit16 v3, v3, 1 + +:LabelMerge + # At this point, an initialized and an uninitialized reference are merged. However, the + # merge is only from forward branches. If the conflict isn't used (as here), this should + # pass the verifier. + + return-void + +.end method diff --git a/test/800-smali/smali/b_22411633_4.smali b/test/800-smali/smali/b_22411633_4.smali new file mode 100644 index 0000000000..503ca99569 --- /dev/null +++ b/test/800-smali/smali/b_22411633_4.smali @@ -0,0 +1,25 @@ +.class public LB22411633_4; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + # Do not merge into the backward branch target. + goto :LabelEntry + +:LabelBwd + # At this point v4 is an uninitialized reference. This should fail to verify. + # Note: we make sure that it is an uninitialized reference and not a conflict in sister + # file b_22411633_bwdok.smali. + invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I + +:LabelEntry + # Allocate a java.lang.Object (do not initialize). + new-instance v4, Ljava/lang/Object; + + # Branch backward. + if-eqz v5, :LabelBwd + + return-void + +.end method diff --git a/test/800-smali/smali/b_22411633_5.smali b/test/800-smali/smali/b_22411633_5.smali new file mode 100644 index 0000000000..b7964f64a5 --- /dev/null +++ b/test/800-smali/smali/b_22411633_5.smali @@ -0,0 +1,28 @@ +.class public LB22411633_5; +.super Ljava/lang/Object; + + +.method public static run(Z)V +.registers 6 + # Do not merge into the backward branch target. + goto :LabelEntry + +:LabelBwd + # At this point v4 is an uninitialized reference. We should be able to initialize here + # and call a method afterwards. + invoke-direct {v4}, Ljava/lang/Object;-><init>()V + invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I + + # Make sure this is not an infinite loop. + const v5, 1 + +:LabelEntry + # Allocate a java.lang.Object (do not initialize). + new-instance v4, Ljava/lang/Object; + + # Branch backward. + if-eqz v5, :LabelBwd + + return-void + +.end method diff --git a/test/800-smali/smali/b_22777307.smali b/test/800-smali/smali/b_22777307.smali new file mode 100644 index 0000000000..6de3c703b5 --- /dev/null +++ b/test/800-smali/smali/b_22777307.smali @@ -0,0 +1,18 @@ +.class public LB22777307; +.super Ljava/lang/Object; + +# A static field. That way we can use the reference. +.field private static sTest:Ljava/lang/Object; + +.method public static run()V +.registers 2 + # This is a broken new-instance. It needs to throw at runtime, though. This test is here to + # ensure we won't produce a VerifyError. + # Cloneable was chosen because it's an already existing interface. 
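+ # At runtime this must surface as an InstantiationError; the TestCase below
+ # expects exactly that.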
+ new-instance v0, Ljava/lang/Cloneable; + invoke-direct {v0}, Ljava/lang/Cloneable;-><init>()V + sput-object v0, LB22777307;->sTest:Ljava/lang/Object; + + return-void + +.end method diff --git a/test/800-smali/smali/b_22881413.smali b/test/800-smali/smali/b_22881413.smali new file mode 100644 index 0000000000..29dd82a358 --- /dev/null +++ b/test/800-smali/smali/b_22881413.smali @@ -0,0 +1,295 @@ +.class public LB22881413; +.super Ljava/lang/Object; + +# A couple of fields to allow "loading" resolved and unresolved types. Use non-final classes to +# avoid automatically getting precise reference types. +.field private static res1:Ljava/lang/Number; +.field private static res2:Ljava/lang/ClassLoader; +.field private static res3:Ljava/lang/Package; +.field private static res4:Ljava/lang/RuntimeException; +.field private static res5:Ljava/lang/Exception; +.field private static res6:Ljava/util/ArrayList; +.field private static res7:Ljava/util/LinkedList; +.field private static res8:Ljava/lang/Thread; +.field private static res9:Ljava/lang/ThreadGroup; +.field private static res10:Ljava/lang/Runtime; + +.field private static unres1:La/b/c/d1; +.field private static unres2:La/b/c/d2; +.field private static unres3:La/b/c/d3; +.field private static unres4:La/b/c/d4; +.field private static unres5:La/b/c/d5; +.field private static unres6:La/b/c/d6; +.field private static unres7:La/b/c/d7; +.field private static unres8:La/b/c/d8; +.field private static unres9:La/b/c/d9; +.field private static unres10:La/b/c/d10; + +.field private static unresBase0:La/b/c/dBase0; +.field private static unresBase1:La/b/c/dBase1; +.field private static unresBase2:La/b/c/dBase2; +.field private static unresBase3:La/b/c/dBase3; +.field private static unresBase4:La/b/c/dBase4; +.field private static unresBase5:La/b/c/dBase5; +.field private static unresBase6:La/b/c/dBase6; +.field private static unresBase7:La/b/c/dBase7; +.field private static unresBase8:La/b/c/dBase8; + +# Empty, ignore this. We want to see if the other method can be verified in a reasonable amount of +# time. +.method public static run()V +.registers 2 + return-void +.end method + +.method public static foo(IZZ) V +.registers 11 + # v8 = int, v9 = boolean, v10 = boolean + + sget-object v0, LB22881413;->unresBase0:La/b/c/dBase0; + +# Test an UnresolvedUninitializedReference type. + new-instance v0, La/b/c/dBaseInit; + + const v1, 0 + const v2, 0 + +# We're trying to create something like this (with more loops to amplify things). +# +# v0 = Unresolved1 +# while (something) { +# +# [Repeatedly] +# if (cond) { +# v0 = ResolvedX; +# } else { +# v0 = UnresolvedX; +# } +# +# v0 = Unresolved2 +# }; +# +# Important points: +# 1) Use a while, so that the end of the loop is a goto. That way, the merging of outer-loop +# unresolved classes is postponed. +# 2) Put the else cases after all if cases. That way there are backward gotos that will lead +# to stabilization loops in the body. 
+# + +:Loop1 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop1End + +:Loop2 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop2End + +:Loop3 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop3End + +:Loop4 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop4End + +:Loop5 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop5End + +:Loop6 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop6End + +:Loop7 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop7End + +:Loop8 + + const v6, 0 + add-int/lit16 v8, v8, -1 + if-ge v8, v6, :Loop8End + +# Prototype: +# +# if-eqz v9, :ElseX +# sget-object v0, LB22881413;->res1:Ljava/lang/Number; +#:JoinX +# +# And somewhere at the end +# +#:ElseX +# sget-object v0, LB22881413;->unresX:La/b/c/dX; +# goto :JoinX +# +# + + if-eqz v10, :Join1 + if-eqz v9, :Else1 + sget-object v0, LB22881413;->res1:Ljava/lang/Number; +:Join1 + + + if-eqz v10, :Join2 + if-eqz v9, :Else2 + sget-object v0, LB22881413;->res2:Ljava/lang/ClassLoader; +:Join2 + + + if-eqz v10, :Join3 + if-eqz v9, :Else3 + sget-object v0, LB22881413;->res3:Ljava/lang/Package; +:Join3 + + + if-eqz v10, :Join4 + if-eqz v9, :Else4 + sget-object v0, LB22881413;->res4:Ljava/lang/RuntimeException; +:Join4 + + + if-eqz v10, :Join5 + if-eqz v9, :Else5 + sget-object v0, LB22881413;->res5:Ljava/lang/Exception; +:Join5 + + + if-eqz v10, :Join6 + if-eqz v9, :Else6 + sget-object v0, LB22881413;->res6:Ljava/util/ArrayList; +:Join6 + + + if-eqz v10, :Join7 + if-eqz v9, :Else7 + sget-object v0, LB22881413;->res7:Ljava/util/LinkedList; +:Join7 + + + if-eqz v10, :Join8 + if-eqz v9, :Else8 + sget-object v0, LB22881413;->res8:Ljava/lang/Thread; +:Join8 + + + if-eqz v10, :Join9 + if-eqz v9, :Else9 + sget-object v0, LB22881413;->res9:Ljava/lang/ThreadGroup; +:Join9 + + + if-eqz v10, :Join10 + if-eqz v9, :Else10 + sget-object v0, LB22881413;->res10:Ljava/lang/Runtime; +:Join10 + + + goto :InnerMostLoopEnd + +:Else1 + sget-object v0, LB22881413;->unres1:La/b/c/d1; + goto :Join1 + +:Else2 + sget-object v0, LB22881413;->unres2:La/b/c/d2; + goto :Join2 + +:Else3 + sget-object v0, LB22881413;->unres3:La/b/c/d3; + goto :Join3 + +:Else4 + sget-object v0, LB22881413;->unres4:La/b/c/d4; + goto :Join4 + +:Else5 + sget-object v0, LB22881413;->unres5:La/b/c/d5; + goto :Join5 + +:Else6 + sget-object v0, LB22881413;->unres6:La/b/c/d6; + goto :Join6 + +:Else7 + sget-object v0, LB22881413;->unres7:La/b/c/d7; + goto :Join7 + +:Else8 + sget-object v0, LB22881413;->unres8:La/b/c/d8; + goto :Join8 + +:Else9 + sget-object v0, LB22881413;->unres9:La/b/c/d9; + goto :Join9 + +:Else10 + sget-object v0, LB22881413;->unres10:La/b/c/d10; + goto :Join10 + +:InnerMostLoopEnd + + # Loop 8 end of body. + sget-object v0, LB22881413;->unresBase8:La/b/c/dBase8; + goto :Loop8 + +:Loop8End + + # Loop 7 end of body. + sget-object v0, LB22881413;->unresBase7:La/b/c/dBase7; + goto :Loop7 + +:Loop7End + + # Loop 6 end of body. 
+ sget-object v0, LB22881413;->unresBase6:La/b/c/dBase6; + goto :Loop6 + +:Loop6End + + # Loop 5 end of body + sget-object v0, LB22881413;->unresBase5:La/b/c/dBase5; + goto :Loop5 + +:Loop5End + + # Loop 4 end of body + sget-object v0, LB22881413;->unresBase4:La/b/c/dBase4; + goto :Loop4 + +:Loop4End + + # Loop 3 end of body + sget-object v0, LB22881413;->unresBase3:La/b/c/dBase3; + goto :Loop3 + +:Loop3End + + # Loop 2 end of body + sget-object v0, LB22881413;->unresBase2:La/b/c/dBase2; + goto :Loop2 + +:Loop2End + + # Loop 1 end of body + sget-object v0, LB22881413;->unresBase1:La/b/c/dBase1; + goto :Loop1 + +:Loop1End + + return-void + +.end method diff --git a/test/800-smali/smali/b_23201502.smali b/test/800-smali/smali/b_23201502.smali new file mode 100644 index 0000000000..d958938abf --- /dev/null +++ b/test/800-smali/smali/b_23201502.smali @@ -0,0 +1,23 @@ +.class public LB23201502; + +.super Ljava/lang/Object; + +.method public static runFloat()V + .registers 3 + const v0, 0 # Null array. + const v1, 0 # 0 index into array. + const v2, 0 # 0 value, will be turned into float. + int-to-float v2, v2 # Definitely make v2 float. + aput v2, v0, v1 # Put into null array. + return-void +.end method + +.method public static runDouble()V + .registers 4 + const v0, 0 # Null array. + const v1, 0 # 0 index into array. + const v2, 0 # 0 value, will be turned into double. + int-to-double v2, v2 # Definitely make v2+v3 double. + aput-wide v2, v0, v1 # Put into null array. + return-void +.end method diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java index 30c1b142b1..b481a1dbc4 100644 --- a/test/800-smali/src/Main.java +++ b/test/800-smali/src/Main.java @@ -101,6 +101,32 @@ public class Main { new NullPointerException(), null)); testCases.add(new TestCase("b/21645819", "B21645819", "run", new Object[] { null }, null, null)); + testCases.add(new TestCase("b/22244733", "B22244733", "run", new Object[] { "abc" }, + null, "abc")); + testCases.add(new TestCase("b/22331663", "B22331663", "run", new Object[] { false }, + null, null)); + testCases.add(new TestCase("b/22331663 (pass)", "B22331663Pass", "run", + new Object[] { false }, null, null)); + testCases.add(new TestCase("b/22331663 (fail)", "B22331663Fail", "run", + new Object[] { false }, new VerifyError(), null)); + testCases.add(new TestCase("b/22411633 (1)", "B22411633_1", "run", new Object[] { false }, + null, null)); + testCases.add(new TestCase("b/22411633 (2)", "B22411633_2", "run", new Object[] { false }, + new VerifyError(), null)); + testCases.add(new TestCase("b/22411633 (3)", "B22411633_3", "run", new Object[] { false }, + null, null)); + testCases.add(new TestCase("b/22411633 (4)", "B22411633_4", "run", new Object[] { false }, + new VerifyError(), null)); + testCases.add(new TestCase("b/22411633 (5)", "B22411633_5", "run", new Object[] { false }, + null, null)); + testCases.add(new TestCase("b/22777307", "B22777307", "run", null, new InstantiationError(), + null)); + testCases.add(new TestCase("b/22881413", "B22881413", "run", null, null, null)); + testCases.add(new TestCase("b/20843113", "B20843113", "run", null, null, null)); + testCases.add(new TestCase("b/23201502 (float)", "B23201502", "runFloat", null, + new NullPointerException(), null)); + testCases.add(new TestCase("b/23201502 (double)", "B23201502", "runDouble", null, + new NullPointerException(), null)); } public void runTests() {
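The b/23201502 methods above distill to the following Java behavior, which must survive even after the verifier has narrowed the stored value to float (or double): an aput into a null array has to raise NullPointerException at runtime. A minimal, self-contained sketch:

    public class B23201502Sketch {
        public static void main(String[] args) {
            float[] array = null;            // the "null array" of the smali test
            try {
                array[0] = 0.0f;             // aput into a null array
                System.out.println("unexpected: no exception");
            } catch (NullPointerException expected) {
                System.out.println("NPE as expected");
            }
        }
    }

The two new Main.java test cases register exactly this expectation, a NullPointerException and no return value, for both the float and the double variant.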
diff --git a/test/955-lambda-smali/expected.txt b/test/955-lambda-smali/expected.txt index 0a5b5fd37d..36370998f4 100644 --- a/test/955-lambda-smali/expected.txt +++ b/test/955-lambda-smali/expected.txt @@ -3,6 +3,16 @@ Hello world! (0-args, no closure) ABCD Hello world! (4-args, no closure) Caught NPE (BoxUnbox) Hello boxing world! (0-args, no closure) +(BoxUnbox) Boxing repeatedly yields referentially-equal objects (BoxUnbox) Caught NPE for unbox-lambda (BoxUnbox) Caught NPE for box-lambda (BoxUnbox) Caught ClassCastException for unbox-lambda +(MoveResult) testZ success +(MoveResult) testB success +(MoveResult) testS success +(MoveResult) testI success +(MoveResult) testC success +(MoveResult) testJ success +(MoveResult) testF success +(MoveResult) testD success +(MoveResult) testL success diff --git a/test/955-lambda-smali/smali/BoxUnbox.smali b/test/955-lambda-smali/smali/BoxUnbox.smali index 5e6673368c..108b5fafbc 100644 --- a/test/955-lambda-smali/smali/BoxUnbox.smali +++ b/test/955-lambda-smali/smali/BoxUnbox.smali @@ -23,15 +23,14 @@ .end method .method public static run()V -.registers 2 - # Trivial 0-arg hello world - create-lambda v0, LBoxUnbox;->doHelloWorld(Ljava/lang/reflect/ArtMethod;)V - # TODO: create-lambda should not write to both v0 and v1 - invoke-lambda v0, {} + .registers 0 + invoke-static {}, LBoxUnbox;->testBox()V + invoke-static {}, LBoxUnbox;->testBoxEquality()V invoke-static {}, LBoxUnbox;->testFailures()V invoke-static {}, LBoxUnbox;->testFailures2()V invoke-static {}, LBoxUnbox;->testFailures3()V + invoke-static {}, LBoxUnbox;->forceGC()V return-void .end method @@ -48,6 +47,47 @@ return-void .end method +# Test boxing and unboxing; the same lambda should be invoked as if there was no box. +.method private static testBox()V + .registers 3 + + create-lambda v0, LBoxUnbox;->doHelloWorld(Ljava/lang/reflect/ArtMethod;)V + box-lambda v2, v0 # v2 = box(v0) + unbox-lambda v0, v2, Ljava/lang/reflect/ArtMethod; # v0 = unbox(v2) + invoke-lambda v0, {} + + return-void +.end method + +# Test that boxing the same lambda twice yields the same object. +.method private static testBoxEquality()V + .registers 6 # 0 parameters, 6 locals + + create-lambda v0, LBoxUnbox;->doHelloWorld(Ljava/lang/reflect/ArtMethod;)V + box-lambda v2, v0 # v2 = box(v0) + box-lambda v3, v0 # v3 = box(v0) + + # The objects should be non-null, and they should have the same reference + if-eqz v2, :is_zero + if-ne v2, v3, :is_not_equal + + const-string v4, "(BoxUnbox) Boxing repeatedly yields referentially-equal objects" + goto :end + +:is_zero + const-string v4, "(BoxUnbox) Boxing repeatedly FAILED: boxing returned null" + goto :end + +:is_not_equal + const-string v4, "(BoxUnbox) Boxing repeatedly FAILED: objects were not same reference" + goto :end + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void +.end method + # Test exceptions are thrown as expected when opcodes are used incorrectly .method private static testFailures()V .registers 4 # 0 parameters, 4 locals @@ -116,3 +156,14 @@ .catch Ljava/lang/ClassCastException; {:start .. :end} :handler .end method + +
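The guarantee that testBoxEquality checks (boxing the same closure repeatedly yields one and the same object) is analogous to the caching contract of Java's boxed primitives. A rough, hypothetical Java illustration (lambda boxing itself has no Java source form):

    public class BoxCacheSketch {
        public static void main(String[] args) {
            Integer first = Integer.valueOf(42);
            Integer second = Integer.valueOf(42);
            // Integer.valueOf guarantees reference equality for values in
            // [-128, 127], much as the boxed-lambda table is meant to for
            // repeated box-lambda operations on the same closure.
            System.out.println(first == second);   // true
        }
    }

Because that table holds the boxed objects only weakly, the test ends by forcing a collection so that sweeping of the table is exercised too:

+# Force a GC. Used to ensure our weak reference table of boxed lambdas is getting swept.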
+.method private static forceGC()V + .registers 1 + invoke-static {}, Ljava/lang/Runtime;->getRuntime()Ljava/lang/Runtime; + move-result-object v0 + invoke-virtual {v0}, Ljava/lang/Runtime;->gc()V + + return-void +.end method diff --git a/test/955-lambda-smali/smali/Main.smali b/test/955-lambda-smali/smali/Main.smali index 92afd79ada..5d2aabb386 100644 --- a/test/955-lambda-smali/smali/Main.smali +++ b/test/955-lambda-smali/smali/Main.smali @@ -23,6 +23,7 @@ invoke-static {}, LSanityCheck;->run()I invoke-static {}, LTrivialHelloWorld;->run()V invoke-static {}, LBoxUnbox;->run()V + invoke-static {}, LMoveResult;->run()V # TODO: add tests when verification fails diff --git a/test/955-lambda-smali/smali/MoveResult.smali b/test/955-lambda-smali/smali/MoveResult.smali new file mode 100644 index 0000000000..1725da3044 --- /dev/null +++ b/test/955-lambda-smali/smali/MoveResult.smali @@ -0,0 +1,330 @@ +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +.class public LMoveResult; +.super Ljava/lang/Object; + +.method public constructor <init>()V +.registers 1 + invoke-direct {p0}, Ljava/lang/Object;-><init>()V + return-void +.end method + +.method public static run()V +.registers 8 + invoke-static {}, LMoveResult;->testZ()V + invoke-static {}, LMoveResult;->testB()V + invoke-static {}, LMoveResult;->testS()V + invoke-static {}, LMoveResult;->testI()V + invoke-static {}, LMoveResult;->testC()V + invoke-static {}, LMoveResult;->testJ()V + invoke-static {}, LMoveResult;->testF()V + invoke-static {}, LMoveResult;->testD()V + invoke-static {}, LMoveResult;->testL()V + + return-void +.end method + +# Test that booleans are returned correctly via move-result. +.method public static testZ()V + .registers 6 + + create-lambda v0, LMoveResult;->lambdaZ(Ljava/lang/reflect/ArtMethod;)Z + invoke-lambda v0, {} + move-result v2 + const v3, 1 + + if-ne v3, v2, :is_not_equal + const-string v4, "(MoveResult) testZ success" + goto :end + +:is_not_equal + const-string v4, "(MoveResult) testZ failed" + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testZ. Always returns "true". +.method public static lambdaZ(Ljava/lang/reflect/ArtMethod;)Z + .registers 3 + + const v0, 1 + return v0 + +.end method + +# Test that bytes are returned correctly via move-result. +.method public static testB()V + .registers 6 + + create-lambda v0, LMoveResult;->lambdaB(Ljava/lang/reflect/ArtMethod;)B + invoke-lambda v0, {} + move-result v2 + const v3, 15 + + if-ne v3, v2, :is_not_equal + const-string v4, "(MoveResult) testB success" + goto :end + +:is_not_equal + const-string v4, "(MoveResult) testB failed" + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testB. Always returns "15". 
+.method public static lambdaB(Ljava/lang/reflect/ArtMethod;)B + .registers 3 # 1 parameter, 2 locals + + const v0, 15 + return v0 + +.end method + +# Test that shorts are returned correctly via move-result. +.method public static testS()V + .registers 6 + + create-lambda v0, LMoveResult;->lambdaS(Ljava/lang/reflect/ArtMethod;)S + invoke-lambda v0, {} + move-result v2 + const/16 v3, 31000 + + if-ne v3, v2, :is_not_equal + const-string v4, "(MoveResult) testS success" + goto :end + +:is_not_equal + const-string v4, "(MoveResult) testS failed" + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testS. Always returns "31000". +.method public static lambdaS(Ljava/lang/reflect/ArtMethod;)S + .registers 3 + + const/16 v0, 31000 + return v0 + +.end method + +# Test that ints are returned correctly via move-result. +.method public static testI()V + .registers 6 + + create-lambda v0, LMoveResult;->lambdaI(Ljava/lang/reflect/ArtMethod;)I + invoke-lambda v0, {} + move-result v2 + const v3, 128000 + + if-ne v3, v2, :is_not_equal + const-string v4, "(MoveResult) testI success" + goto :end + +:is_not_equal + const-string v4, "(MoveResult) testI failed" + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testI. Always returns "128000". +.method public static lambdaI(Ljava/lang/reflect/ArtMethod;)I + .registers 3 + + const v0, 128000 + return v0 + +.end method + +# Test that chars are returned correctly via move-result. +.method public static testC()V + .registers 6 + + create-lambda v0, LMoveResult;->lambdaC(Ljava/lang/reflect/ArtMethod;)C + invoke-lambda v0, {} + move-result v2 + const v3, 65535 + + if-ne v3, v2, :is_not_equal + const-string v4, "(MoveResult) testC success" + goto :end + +:is_not_equal + const-string v4, "(MoveResult) testC failed" + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testC. Always returns "65535". +.method public static lambdaC(Ljava/lang/reflect/ArtMethod;)C + .registers 3 + + const v0, 65535 + return v0 + +.end method + +# Test that longs are returned correctly via move-result. +.method public static testJ()V + .registers 8 + + create-lambda v0, LMoveResult;->lambdaJ(Ljava/lang/reflect/ArtMethod;)J + invoke-lambda v0, {} + move-result v2 + const-wide v4, 0xdeadf00dc0ffee + + if-ne v4, v2, :is_not_equal + const-string v6, "(MoveResult) testJ success" + goto :end + +:is_not_equal + const-string v6, "(MoveResult) testJ failed" + +:end + sget-object v7, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v7, v6}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testJ. Always returns "0xdeadf00dc0ffee". +.method public static lambdaJ(Ljava/lang/reflect/ArtMethod;)J + .registers 4 + + const-wide v0, 0xdeadf00dc0ffee + return-wide v0 + +.end method + +
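Every MoveResult case follows the same shape: create-lambda, invoke-lambda, the width-appropriate move-result variant, then a comparison against the constant the lambda target returns. As plain Java the pattern reduces to this sketch (hypothetical names; invoke-lambda and move-result have no direct Java equivalent):

    public class MoveResultSketch {
        static byte lambdaB() { return 15; }     // stands in for the lambda target

        public static void main(String[] args) {
            byte got = lambdaB();                // invoke-lambda + move-result
            System.out.println(got == 15 ? "(MoveResult) testB success"
                                         : "(MoveResult) testB failed");
        }
    }

The double case (testD) uses move-result-wide for its 64-bit value, and testL uses move-result-object together with an interned string so that a reference comparison suffices.

+# Test that floats are returned correctly via move-result.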
+.method public static testF()V + .registers 6 + + create-lambda v0, LMoveResult;->lambdaF(Ljava/lang/reflect/ArtMethod;)F + invoke-lambda v0, {} + move-result v2 + const v3, infinityf + + if-ne v3, v2, :is_not_equal + const-string v4, "(MoveResult) testF success" + goto :end + +:is_not_equal + const-string v4, "(MoveResult) testF failed" + +:end + sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testF. Always returns "infinityf". +.method public static lambdaF(Ljava/lang/reflect/ArtMethod;)F + .registers 3 + + const v0, infinityf + return v0 + +.end method + +# Test that doubles are returned correctly via move-result. +.method public static testD()V + .registers 8 + + create-lambda v0, LMoveResult;->lambdaD(Ljava/lang/reflect/ArtMethod;)D + invoke-lambda v0, {} + move-result-wide v2 + const-wide v4, infinity + + if-ne v4, v2, :is_not_equal + const-string v6, "(MoveResult) testD success" + goto :end + +:is_not_equal + const-string v6, "(MoveResult) testD failed" + +:end + sget-object v7, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v7, v6}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testD. Always returns "infinity". +.method public static lambdaD(Ljava/lang/reflect/ArtMethod;)D + .registers 4 + + const-wide v0, infinity # 123.456789 + return-wide v0 + +.end method + + +# Test that objects are returned correctly via move-result. +.method public static testL()V + .registers 8 + + create-lambda v0, LMoveResult;->lambdaL(Ljava/lang/reflect/ArtMethod;)Ljava/lang/String; + invoke-lambda v0, {} + move-result-object v2 + const-string v4, "Interned string" + + # relies on string interning returning identical object references + if-ne v4, v2, :is_not_equal + const-string v6, "(MoveResult) testL success" + goto :end + +:is_not_equal + const-string v6, "(MoveResult) testL failed" + +:end + sget-object v7, Ljava/lang/System;->out:Ljava/io/PrintStream; + invoke-virtual {v7, v6}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void + +.end method + +# Lambda target for testL. Always returns "Interned string" (string). +.method public static lambdaL(Ljava/lang/reflect/ArtMethod;)Ljava/lang/String; + .registers 4 + + const-string v0, "Interned string" + return-object v0 + +.end method + + diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 3d97901e5f..4e6df6ca79 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -33,22 +33,50 @@ art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA TEST_ART_RUN_TEST_BUILD_RULES := # Dependencies for actually running a run-test. -TEST_ART_RUN_TEST_DEPENDENCIES := $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger +TEST_ART_RUN_TEST_DEPENDENCIES := \ + $(DX) \ + $(HOST_OUT_EXECUTABLES)/jasmin \ + $(HOST_OUT_EXECUTABLES)/smali \ + $(HOST_OUT_EXECUTABLES)/dexmerger + +ifeq ($(ANDROID_COMPILE_WITH_JACK),true) + TEST_ART_RUN_TEST_DEPENDENCIES += \ + $(JACK_JAR) \ + $(JACK_LAUNCHER_JAR) \ + $(JILL_JAR) +endif + +ifeq ($(ART_TEST_DEBUG_GC),true) + ART_TEST_WITH_STRACE := true +endif # Helper to create individual build targets for tests. Must be called with $(eval). 
# $(1): the test number define define-build-art-run-test dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch + run_test_options = --build-only + ifeq ($(ANDROID_COMPILE_WITH_JACK),true) + run_test_options += --build-with-jack + else + run_test_options += --build-with-javac-dx + endif +$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options) $$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@) $(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \ SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \ DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \ - $(LOCAL_PATH)/run-test --build-only --output-path $$(abspath $$(dir $$@)) $(1) + JACK=$(abspath $(JACK)) \ + JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \ + JACK_CLASSPATH=$(TARGET_JACK_CLASSPATH) \ + JACK_JAR=$(abspath $(JACK_JAR)) \ + JILL_JAR=$(abspath $(JILL_JAR)) \ + $(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1) $(hide) touch $$@ TEST_ART_RUN_TEST_BUILD_RULES += $$(dmart_target) dmart_target := + run_test_options := endef $(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$(test)))) @@ -84,6 +112,9 @@ COMPILER_TYPES := ifeq ($(ART_TEST_DEFAULT_COMPILER),true) COMPILER_TYPES += default endif +ifeq ($(ART_TEST_INTERPRETER_ACCESS_CHECKS),true) + COMPILER_TYPES += interp-ac +endif ifeq ($(ART_TEST_INTERPRETER),true) COMPILER_TYPES += interpreter endif @@ -170,7 +201,7 @@ endef # all-run-test-names # To generate a full list of tests: # $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \ # $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \ -# $(DEBUGGABLE_TYPES) $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES) +# $(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_RUN_TESTS),$(ALL_ADDRESS_SIZES)) # Converts a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE define name-to-var @@ -232,6 +263,28 @@ endif TEST_ART_BROKEN_NO_RELOCATE_TESTS := +# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682 +TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \ + 004-JniTest \ + 005-annotations \ + 044-proxy \ + 073-mismatched-field \ + 088-monitor-verification \ + 135-MirandaDispatch \ + 137-cfi \ + 412-new-array \ + 471-uninitialized-locals \ + 506-verify-aput \ + 800-smali + +ifneq (,$(filter interp-ac,$(COMPILER_TYPES))) + ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ + interp-ac,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS), $(ALL_ADDRESS_SIZES)) +endif + +TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := + # Tests that are broken with GC stress. # 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we # hope the second process got into the expected state. The slowness of gcstress makes this bad. @@ -410,37 +463,6 @@ endif TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := -# Known broken tests for the MIPS64 optimizing compiler backend in 64-bit mode.
b/21555893 -TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS := \ - 004-SignalTest \ - 018-stack-overflow \ - 107-int-math2 \ - 449-checker-bce - -ifeq ($(TARGET_ARCH),mips64) - ifneq (,$(filter optimizing,$(COMPILER_TYPES))) - ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \ - optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ - $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS),64) - endif -endif - -TEST_ART_BROKEN_OPTIMIZING_MIPS64_64BIT_RUN_TESTS := - -# Known broken tests for the MIPS64 optimizing compiler backend in 32-bit mode. b/21555893 -TEST_ART_BROKEN_OPTIMIZING_MIPS64_32BIT_RUN_TESTS := \ - 496-checker-inlining-and-class-loader - -ifeq ($(TARGET_ARCH),mips64) - ifneq (,$(filter optimizing,$(COMPILER_TYPES))) - ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \ - optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ - $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_MIPS64_32BIT_RUN_TESTS),32) - endif -endif - -TEST_ART_BROKEN_OPTIMIZING_MIPS64_32BIT_RUN_TESTS := - # Known broken tests for the optimizing compiler. TEST_ART_BROKEN_OPTIMIZING_RUN_TESTS := @@ -607,7 +629,8 @@ endif # Create a rule to build and run tests following the form: # test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}- -# {4: interpreter default optimizing jit}-{5: relocate nrelocate relocate-npatchoat}- +# {4: interpreter default optimizing jit interp-ac}- +# {5: relocate nrelocate relocate-npatchoat}- # {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}- # {9: no-image image picimage}-{10: pictest npictest}- # {11: ndebuggable debuggable}-{12: test name}{13: 32 or 64} @@ -616,6 +639,15 @@ define define-test-art-run-test prereq_rule := test_groups := uc_host_or_target := + jack_classpath := + ifeq ($(ANDROID_COMPILE_WITH_JACK),true) + run_test_options += --build-with-jack + else + run_test_options += --build-with-javac-dx + endif + ifeq ($(ART_TEST_WITH_STRACE),true) + run_test_options += --strace + endif ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true) run_test_options += --always-clean endif @@ -624,11 +656,13 @@ define define-test-art-run-test test_groups := ART_RUN_TEST_HOST_RULES run_test_options += --host prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) + jack_classpath := $(HOST_JACK_CLASSPATH) else ifeq ($(1),target) uc_host_or_target := TARGET test_groups := ART_RUN_TEST_TARGET_RULES prereq_rule := test-art-target-sync + jack_classpath := $(TARGET_JACK_CLASSPATH) else $$(error found $(1) expected $(TARGET_TYPES)) endif @@ -666,6 +700,9 @@ define define-test-art-run-test ifeq ($(4),interpreter) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES run_test_options += --interpreter + else ifeq ($(4),interp-ac) + test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_ACCESS_CHECKS_RULES + run_test_options += --interpreter --verify-soft-fail else ifeq ($(4),default) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES @@ -816,6 +853,19 @@ define define-test-art-run-test $$(error found $(13) expected $(ALL_ADDRESS_SIZES)) endif endif + # Override of host instruction-set-features. Required to test advanced x86 intrinsics. The + # conditionals aren't really correct; they will fail to do the right thing on a 32-bit only + # host.
However, this isn't common enough to worry here and make the conditions complicated. + ifneq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) + ifeq ($(13),64) + run_test_options += --instruction-set-features $(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) + endif + endif + ifneq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) + ifeq ($(13),32) + run_test_options += --instruction-set-features $($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) + endif + endif run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)-$(10)-$(11)-$(12)$(13) run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \ $$(run_test_options) @@ -823,12 +873,19 @@ define define-test-art-run-test run_test_options := --android-root $(ART_TEST_ANDROID_ROOT) $$(run_test_options) endif $$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options) +$$(run_test_rule_name): PRIVATE_JACK_CLASSPATH := $$(jack_classpath) .PHONY: $$(run_test_rule_name) -$$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule) +$$(run_test_rule_name): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule) $(hide) $$(call ART_TEST_SKIP,$$@) && \ - DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \ + DX=$(abspath $(DX)) \ + JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \ SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \ DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \ + JACK=$(abspath $(JACK)) \ + JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \ + JACK_CLASSPATH=$$(PRIVATE_JACK_CLASSPATH) \ + JACK_JAR=$(abspath $(JACK_JAR)) \ + JILL_JAR=$(abspath $(JILL_JAR)) \ art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \ && $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@) $$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \ @@ -843,6 +900,7 @@ $$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTAB run_test_options := run_test_rule_name := prereq_rule := + jack_classpath := endef # define-test-art-run-test $(foreach target, $(TARGET_TYPES), \ diff --git a/test/dexdump/bytecodes.dex b/test/dexdump/bytecodes.dex Binary files differnew file mode 100755 index 0000000000..91e11b8394 --- /dev/null +++ b/test/dexdump/bytecodes.dex diff --git a/test/dexdump/bytecodes.lst b/test/dexdump/bytecodes.lst new file mode 100644 index 0000000000..aeda7b4610 --- /dev/null +++ b/test/dexdump/bytecodes.lst @@ -0,0 +1,20 @@ +#bytecodes.dex +0x000009a0 8 com.google.android.test.BuildConfig <init> ()V BuildConfig.java 4 +0x000009b8 8 com.google.android.test.R$attr <init> ()V R.java 11 +0x000009d0 8 com.google.android.test.R$drawable <init> ()V R.java 13 +0x000009e8 8 com.google.android.test.R <init> ()V R.java 10 +0x00000a00 148 com.google.android.test.Test <clinit> ()V Test.java 7 +0x00000aa4 468 com.google.android.test.Test <init> ()V Test.java 43 +0x00000ca4 478 com.google.android.test.Test add (Ljava/lang/Object;)Ljava/lang/Object; Test.java 179 +0x00000ea0 236 com.google.android.test.Test adds (Ljava/lang/Object;)Ljava/lang/Object; Test.java 201 +0x00000f9c 342 com.google.android.test.Test copies ()V Test.java 216 +0x00001104 156 com.google.android.test.Test doit (I)V Test.java 98 +0x000011b0 146 com.google.android.test.Test geta ()Z Test.java 72 +0x00001254 38 com.google.android.test.Test p (I)V 
Test.java 120 +0x0000128c 636 com.google.android.test.Test params (BCSIJFDLjava/lang/Object;[I)J Test.java 232 +0x00001518 170 com.google.android.test.Test q (II)V Test.java 127 +0x000015d4 186 com.google.android.test.Test r (II)I Test.java 139 +0x000016a0 388 com.google.android.test.Test s (JJ)J Test.java 159 +0x00001834 96 com.google.android.test.Test seta ()V Test.java 60 +0x000018a4 14 com.google.android.test.Test onStart ()V Test.java 86 +0x000018c4 18 com.google.android.test.Test run ()V Test.java 92 diff --git a/test/dexdump/bytecodes.txt b/test/dexdump/bytecodes.txt new file mode 100755 index 0000000000..d14c47c886 --- /dev/null +++ b/test/dexdump/bytecodes.txt @@ -0,0 +1,1823 @@ +Processing 'bytecodes.dex'... +Opened 'bytecodes.dex', DEX version '035' +DEX file header: +magic : 'dex\n035\0' +checksum : 7d869259 +signature : 6fb7...9cc4 +file_size : 10288 +header_size : 112 +link_size : 0 +link_off : 0 (0x000000) +string_ids_size : 153 +string_ids_off : 112 (0x000070) +type_ids_size : 42 +type_ids_off : 724 (0x0002d4) +proto_ids_size : 12 +proto_ids_off : 892 (0x00037c) +field_ids_size : 40 +field_ids_off : 1036 (0x00040c) +method_ids_size : 28 +method_ids_off : 1356 (0x00054c) +class_defs_size : 7 +class_defs_off : 1580 (0x00062c) +data_size : 8464 +data_off : 1824 (0x000720) + +Class #0 header: +class_idx : 6 +access_flags : 9729 (0x2601) +superclass_idx : 20 +interfaces_off : 2116 (0x000844) +source_file_idx : 46 +annotations_off : 10256 (0x002810) +class_data_off : 2188 (0x00088c) +static_fields_size : 0 +instance_fields_size: 0 +direct_methods_size : 0 +virtual_methods_size: 1 + +Class #0 - + Class descriptor : 'Landroid/annotation/SuppressLint;' + Access flags : 0x2601 (PUBLIC INTERFACE ABSTRACT ANNOTATION) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 'Ljava/lang/annotation/Annotation;' + Static fields - + Instance fields - + Direct methods - + Virtual methods - + #0 : (in Landroid/annotation/SuppressLint;) + name : 'value' + type : '()[Ljava/lang/String;' + access : 0x0401 (PUBLIC ABSTRACT) + code : (none) + + source_file_idx : 46 (SuppressLint.java) + +Class #1 header: +class_idx : 7 +access_flags : 9729 (0x2601) +superclass_idx : 20 +interfaces_off : 2116 (0x000844) +source_file_idx : 48 +annotations_off : 10272 (0x002820) +class_data_off : 2196 (0x000894) +static_fields_size : 0 +instance_fields_size: 0 +direct_methods_size : 0 +virtual_methods_size: 1 + +Class #1 - + Class descriptor : 'Landroid/annotation/TargetApi;' + Access flags : 0x2601 (PUBLIC INTERFACE ABSTRACT ANNOTATION) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 'Ljava/lang/annotation/Annotation;' + Static fields - + Instance fields - + Direct methods - + Virtual methods - + #0 : (in Landroid/annotation/TargetApi;) + name : 'value' + type : '()I' + access : 0x0401 (PUBLIC ABSTRACT) + code : (none) + + source_file_idx : 48 (TargetApi.java) + +Class #2 header: +class_idx : 9 +access_flags : 17 (0x0011) +superclass_idx : 20 +interfaces_off : 0 (0x000000) +source_file_idx : 3 +annotations_off : 0 (0x000000) +class_data_off : 2204 (0x00089c) +static_fields_size : 1 +instance_fields_size: 0 +direct_methods_size : 1 +virtual_methods_size: 0 + +Class #2 - + Class descriptor : 'Lcom/google/android/test/BuildConfig;' + Access flags : 0x0011 (PUBLIC FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + Static fields - + #0 : (in Lcom/google/android/test/BuildConfig;) + name : 'DEBUG' + type : 'Z' + access : 0x0019 (PUBLIC STATIC FINAL) + Instance fields - + Direct methods - + #0 : (in 
Lcom/google/android/test/BuildConfig;) + name : '<init>' + type : '()V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 1 + ins : 1 + outs : 1 + insns size : 4 16-bit code units +000990: |[000990] com.google.android.test.BuildConfig.<init>:()V +0009a0: 7010 1900 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019 +0009a6: 0e00 |0003: return-void + catches : (none) + positions : + 0x0000 line=4 + locals : + 0x0000 - 0x0004 reg=0 this Lcom/google/android/test/BuildConfig; + + Virtual methods - + source_file_idx : 3 (BuildConfig.java) + +Class #3 header: +class_idx : 10 +access_flags : 17 (0x0011) +superclass_idx : 20 +interfaces_off : 0 (0x000000) +source_file_idx : 44 +annotations_off : 10184 (0x0027c8) +class_data_off : 2216 (0x0008a8) +static_fields_size : 0 +instance_fields_size: 0 +direct_methods_size : 1 +virtual_methods_size: 0 + +Class #3 - + Class descriptor : 'Lcom/google/android/test/R$attr;' + Access flags : 0x0011 (PUBLIC FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + Static fields - + Instance fields - + Direct methods - + #0 : (in Lcom/google/android/test/R$attr;) + name : '<init>' + type : '()V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 1 + ins : 1 + outs : 1 + insns size : 4 16-bit code units +0009a8: |[0009a8] com.google.android.test.R.attr.<init>:()V +0009b8: 7010 1900 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019 +0009be: 0e00 |0003: return-void + catches : (none) + positions : + 0x0000 line=11 + locals : + 0x0000 - 0x0004 reg=0 this Lcom/google/android/test/R$attr; + + Virtual methods - + source_file_idx : 44 (R.java) + +Class #4 header: +class_idx : 11 +access_flags : 17 (0x0011) +superclass_idx : 20 +interfaces_off : 0 (0x000000) +source_file_idx : 44 +annotations_off : 10200 (0x0027d8) +class_data_off : 2226 (0x0008b2) +static_fields_size : 1 +instance_fields_size: 0 +direct_methods_size : 1 +virtual_methods_size: 0 + +Class #4 - + Class descriptor : 'Lcom/google/android/test/R$drawable;' + Access flags : 0x0011 (PUBLIC FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + Static fields - + #0 : (in Lcom/google/android/test/R$drawable;) + name : 'icon' + type : 'I' + access : 0x0019 (PUBLIC STATIC FINAL) + Instance fields - + Direct methods - + #0 : (in Lcom/google/android/test/R$drawable;) + name : '<init>' + type : '()V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 1 + ins : 1 + outs : 1 + insns size : 4 16-bit code units +0009c0: |[0009c0] com.google.android.test.R.drawable.<init>:()V +0009d0: 7010 1900 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019 +0009d6: 0e00 |0003: return-void + catches : (none) + positions : + 0x0000 line=13 + locals : + 0x0000 - 0x0004 reg=0 this Lcom/google/android/test/R$drawable; + + Virtual methods - + source_file_idx : 44 (R.java) + +Class #5 header: +class_idx : 12 +access_flags : 17 (0x0011) +superclass_idx : 20 +interfaces_off : 0 (0x000000) +source_file_idx : 44 +annotations_off : 10216 (0x0027e8) +class_data_off : 2238 (0x0008be) +static_fields_size : 0 +instance_fields_size: 0 +direct_methods_size : 1 +virtual_methods_size: 0 + +Class #5 - + Class descriptor : 'Lcom/google/android/test/R;' + Access flags : 0x0011 (PUBLIC FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + Static fields - + Instance fields - + Direct methods - + #0 : (in Lcom/google/android/test/R;) + name : '<init>' + type : '()V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 1 + ins 
: 1 + outs : 1 + insns size : 4 16-bit code units +0009d8: |[0009d8] com.google.android.test.R.<init>:()V +0009e8: 7010 1900 0000 |0000: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@0019 +0009ee: 0e00 |0003: return-void + catches : (none) + positions : + 0x0000 line=10 + locals : + 0x0000 - 0x0004 reg=0 this Lcom/google/android/test/R; + + Virtual methods - + source_file_idx : 44 (R.java) + +Class #6 header: +class_idx : 13 +access_flags : 1 (0x0001) +superclass_idx : 8 +interfaces_off : 2100 (0x000834) +source_file_idx : 49 +annotations_off : 10232 (0x0027f8) +class_data_off : 2248 (0x0008c8) +static_fields_size : 10 +instance_fields_size: 20 +direct_methods_size : 13 +virtual_methods_size: 2 + +Class #6 - + Class descriptor : 'Lcom/google/android/test/Test;' + Access flags : 0x0001 (PUBLIC) + Superclass : 'Landroid/app/Activity;' + Interfaces - + #0 : 'Ljava/lang/Runnable;' + Static fields - + #0 : (in Lcom/google/android/test/Test;) + name : 'sArray' + type : '[I' + access : 0x000a (PRIVATE STATIC) + #1 : (in Lcom/google/android/test/Test;) + name : 'sB' + type : 'B' + access : 0x000a (PRIVATE STATIC) + #2 : (in Lcom/google/android/test/Test;) + name : 'sBool' + type : 'Z' + access : 0x000a (PRIVATE STATIC) + #3 : (in Lcom/google/android/test/Test;) + name : 'sC' + type : 'C' + access : 0x000a (PRIVATE STATIC) + #4 : (in Lcom/google/android/test/Test;) + name : 'sD' + type : 'D' + access : 0x000a (PRIVATE STATIC) + #5 : (in Lcom/google/android/test/Test;) + name : 'sF' + type : 'F' + access : 0x000a (PRIVATE STATIC) + #6 : (in Lcom/google/android/test/Test;) + name : 'sI' + type : 'I' + access : 0x000a (PRIVATE STATIC) + #7 : (in Lcom/google/android/test/Test;) + name : 'sL' + type : 'J' + access : 0x000a (PRIVATE STATIC) + #8 : (in Lcom/google/android/test/Test;) + name : 'sO' + type : 'Ljava/lang/Object;' + access : 0x000a (PRIVATE STATIC) + #9 : (in Lcom/google/android/test/Test;) + name : 'sS' + type : 'S' + access : 0x000a (PRIVATE STATIC) + Instance fields - + #0 : (in Lcom/google/android/test/Test;) + name : 'aBool' + type : '[Z' + access : 0x0002 (PRIVATE) + #1 : (in Lcom/google/android/test/Test;) + name : 'aByte' + type : '[B' + access : 0x0002 (PRIVATE) + #2 : (in Lcom/google/android/test/Test;) + name : 'aChar' + type : '[C' + access : 0x0002 (PRIVATE) + #3 : (in Lcom/google/android/test/Test;) + name : 'aDouble' + type : '[D' + access : 0x0002 (PRIVATE) + #4 : (in Lcom/google/android/test/Test;) + name : 'aFloat' + type : '[F' + access : 0x0002 (PRIVATE) + #5 : (in Lcom/google/android/test/Test;) + name : 'aInt' + type : '[I' + access : 0x0002 (PRIVATE) + #6 : (in Lcom/google/android/test/Test;) + name : 'aLong' + type : '[J' + access : 0x0002 (PRIVATE) + #7 : (in Lcom/google/android/test/Test;) + name : 'aObject' + type : '[Ljava/lang/Object;' + access : 0x0002 (PRIVATE) + #8 : (in Lcom/google/android/test/Test;) + name : 'aShort' + type : '[S' + access : 0x0002 (PRIVATE) + #9 : (in Lcom/google/android/test/Test;) + name : 'mArray' + type : '[I' + access : 0x0002 (PRIVATE) + #10 : (in Lcom/google/android/test/Test;) + name : 'mB' + type : 'B' + access : 0x0002 (PRIVATE) + #11 : (in Lcom/google/android/test/Test;) + name : 'mBool' + type : 'Z' + access : 0x0002 (PRIVATE) + #12 : (in Lcom/google/android/test/Test;) + name : 'mC' + type : 'C' + access : 0x0002 (PRIVATE) + #13 : (in Lcom/google/android/test/Test;) + name : 'mD' + type : 'D' + access : 0x0002 (PRIVATE) + #14 : (in Lcom/google/android/test/Test;) + name : 'mF' + type : 'F' + access : 0x0002 
(PRIVATE) + #15 : (in Lcom/google/android/test/Test;) + name : 'mI' + type : 'I' + access : 0x0002 (PRIVATE) + #16 : (in Lcom/google/android/test/Test;) + name : 'mL' + type : 'J' + access : 0x0002 (PRIVATE) + #17 : (in Lcom/google/android/test/Test;) + name : 'mO' + type : 'Ljava/lang/Object;' + access : 0x0002 (PRIVATE) + #18 : (in Lcom/google/android/test/Test;) + name : 'mRunner' + type : 'Ljava/lang/Runnable;' + access : 0x0002 (PRIVATE) + #19 : (in Lcom/google/android/test/Test;) + name : 'mS' + type : 'S' + access : 0x0002 (PRIVATE) + Direct methods - + #0 : (in Lcom/google/android/test/Test;) + name : '<clinit>' + type : '()V' + access : 0x10008 (STATIC CONSTRUCTOR) + code - + registers : 2 + ins : 0 + outs : 0 + insns size : 74 16-bit code units +0009f0: |[0009f0] com.google.android.test.Test.<clinit>:()V +000a00: 1200 |0000: const/4 v0, #int 0 // #0 +000a02: 6a00 1800 |0001: sput-boolean v0, Lcom/google/android/test/Test;.sBool:Z // field@0018 +000a06: 1300 1f00 |0003: const/16 v0, #int 31 // #1f +000a0a: 6b00 1700 |0005: sput-byte v0, Lcom/google/android/test/Test;.sB:B // field@0017 +000a0e: 1400 ffff 0000 |0007: const v0, #float 0.000000 // #0000ffff +000a14: 6c00 1900 |000a: sput-char v0, Lcom/google/android/test/Test;.sC:C // field@0019 +000a18: 1300 3412 |000c: const/16 v0, #int 4660 // #1234 +000a1c: 6d00 1f00 |000e: sput-short v0, Lcom/google/android/test/Test;.sS:S // field@001f +000a20: 1400 7856 3412 |0010: const v0, #float 0.000000 // #12345678 +000a26: 6700 1c00 |0013: sput v0, Lcom/google/android/test/Test;.sI:I // field@001c +000a2a: 1800 ffff cdab 7956 3412 |0015: const-wide v0, #double 0.000000 // #12345679abcdffff +000a34: 6800 1d00 |001a: sput-wide v0, Lcom/google/android/test/Test;.sL:J // field@001d +000a38: 1400 00e4 4046 |001c: const v0, #float 12345.000000 // #4640e400 +000a3e: 6700 1b00 |001f: sput v0, Lcom/google/android/test/Test;.sF:F // field@001b +000a42: 1800 0000 0000 801c c840 |0021: const-wide v0, #double 12345.000000 // #40c81c8000000000 +000a4c: 6800 1a00 |0026: sput-wide v0, Lcom/google/android/test/Test;.sD:D // field@001a +000a50: 1200 |0028: const/4 v0, #int 0 // #0 +000a52: 6900 1e00 |0029: sput-object v0, Lcom/google/android/test/Test;.sO:Ljava/lang/Object; // field@001e +000a56: 1300 0800 |002b: const/16 v0, #int 8 // #8 +000a5a: 2300 2400 |002d: new-array v0, v0, [I // type@0024 +000a5e: 2600 0700 0000 |002f: fill-array-data v0, 00000036 // +00000007 +000a64: 6900 1600 |0032: sput-object v0, Lcom/google/android/test/Test;.sArray:[I // field@0016 +000a68: 0e00 |0034: return-void +000a6a: 0000 |0035: nop // spacer +000a6c: 0003 0400 0800 0000 0100 0000 0200 ... 
|0036: array-data (20 units) + catches : (none) + positions : + 0x0000 line=7 + 0x0003 line=8 + 0x0007 line=9 + 0x000c line=10 + 0x0010 line=11 + 0x0015 line=12 + 0x001c line=13 + 0x0021 line=14 + 0x0028 line=15 + 0x002b line=16 + locals : + + #1 : (in Lcom/google/android/test/Test;) + name : '<init>' + type : '()V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 9 + ins : 1 + outs : 2 + insns size : 234 16-bit code units +000a94: |[000a94] com.google.android.test.Test.<init>:()V +000aa4: 1606 0000 |0000: const-wide/16 v6, #int 0 // #0 +000aa8: 1215 |0002: const/4 v5, #int 1 // #1 +000aaa: 1224 |0003: const/4 v4, #int 2 // #2 +000aac: 7010 0200 0800 |0004: invoke-direct {v8}, Landroid/app/Activity;.<init>:()V // method@0002 +000ab2: 1201 |0007: const/4 v1, #int 0 // #0 +000ab4: 5c81 0d00 |0008: iput-boolean v1, v8, Lcom/google/android/test/Test;.mBool:Z // field@000d +000ab8: 1301 1f00 |000a: const/16 v1, #int 31 // #1f +000abc: 5d81 0c00 |000c: iput-byte v1, v8, Lcom/google/android/test/Test;.mB:B // field@000c +000ac0: 1401 ffff 0000 |000e: const v1, #float 0.000000 // #0000ffff +000ac6: 5e81 0e00 |0011: iput-char v1, v8, Lcom/google/android/test/Test;.mC:C // field@000e +000aca: 1301 3412 |0013: const/16 v1, #int 4660 // #1234 +000ace: 5f81 1500 |0015: iput-short v1, v8, Lcom/google/android/test/Test;.mS:S // field@0015 +000ad2: 1401 7856 3412 |0017: const v1, #float 0.000000 // #12345678 +000ad8: 5981 1100 |001a: iput v1, v8, Lcom/google/android/test/Test;.mI:I // field@0011 +000adc: 1802 ffff cdab 7956 3412 |001c: const-wide v2, #double 0.000000 // #12345679abcdffff +000ae6: 5a82 1200 |0021: iput-wide v2, v8, Lcom/google/android/test/Test;.mL:J // field@0012 +000aea: 1401 00e4 4046 |0023: const v1, #float 12345.000000 // #4640e400 +000af0: 5981 1000 |0026: iput v1, v8, Lcom/google/android/test/Test;.mF:F // field@0010 +000af4: 1802 0000 0000 801c c840 |0028: const-wide v2, #double 12345.000000 // #40c81c8000000000 +000afe: 5a82 0f00 |002d: iput-wide v2, v8, Lcom/google/android/test/Test;.mD:D // field@000f +000b02: 1201 |002f: const/4 v1, #int 0 // #0 +000b04: 5b81 1300 |0030: iput-object v1, v8, Lcom/google/android/test/Test;.mO:Ljava/lang/Object; // field@0013 +000b08: 1241 |0032: const/4 v1, #int 4 // #4 +000b0a: 2311 2400 |0033: new-array v1, v1, [I // type@0024 +000b0e: 2601 7500 0000 |0035: fill-array-data v1, 000000aa // +00000075 +000b14: 5b81 0b00 |0038: iput-object v1, v8, Lcom/google/android/test/Test;.mArray:[I // field@000b +000b18: 2341 2900 |003a: new-array v1, v4, [Z // type@0029 +000b1c: 4e05 0105 |003c: aput-boolean v5, v1, v5 +000b20: 5b81 0200 |003e: iput-object v1, v8, Lcom/google/android/test/Test;.aBool:[Z // field@0002 +000b24: 2341 2000 |0040: new-array v1, v4, [B // type@0020 +000b28: 2601 7400 0000 |0042: fill-array-data v1, 000000b6 // +00000074 +000b2e: 5b81 0300 |0045: iput-object v1, v8, Lcom/google/android/test/Test;.aByte:[B // field@0003 +000b32: 2341 2100 |0047: new-array v1, v4, [C // type@0021 +000b36: 2601 7300 0000 |0049: fill-array-data v1, 000000bc // +00000073 +000b3c: 5b81 0400 |004c: iput-object v1, v8, Lcom/google/android/test/Test;.aChar:[C // field@0004 +000b40: 2341 2800 |004e: new-array v1, v4, [S // type@0028 +000b44: 5b81 0a00 |0050: iput-object v1, v8, Lcom/google/android/test/Test;.aShort:[S // field@000a +000b48: 2341 2400 |0052: new-array v1, v4, [I // type@0024 +000b4c: 2601 6e00 0000 |0054: fill-array-data v1, 000000c2 // +0000006e +000b52: 5b81 0700 |0057: iput-object v1, v8, 
Lcom/google/android/test/Test;.aInt:[I // field@0007 +000b56: 2341 2500 |0059: new-array v1, v4, [J // type@0025 +000b5a: 2601 6f00 0000 |005b: fill-array-data v1, 000000ca // +0000006f +000b60: 5b81 0800 |005e: iput-object v1, v8, Lcom/google/android/test/Test;.aLong:[J // field@0008 +000b64: 2341 2300 |0060: new-array v1, v4, [F // type@0023 +000b68: 2601 7400 0000 |0062: fill-array-data v1, 000000d6 // +00000074 +000b6e: 5b81 0600 |0065: iput-object v1, v8, Lcom/google/android/test/Test;.aFloat:[F // field@0006 +000b72: 2341 2200 |0067: new-array v1, v4, [D // type@0022 +000b76: 2601 7500 0000 |0069: fill-array-data v1, 000000de // +00000075 +000b7c: 5b81 0500 |006c: iput-object v1, v8, Lcom/google/android/test/Test;.aDouble:[D // field@0005 +000b80: 2341 2600 |006e: new-array v1, v4, [Ljava/lang/Object; // type@0026 +000b84: 2202 1400 |0070: new-instance v2, Ljava/lang/Object; // type@0014 +000b88: 7010 1900 0200 |0072: invoke-direct {v2}, Ljava/lang/Object;.<init>:()V // method@0019 +000b8e: 4d02 0105 |0075: aput-object v2, v1, v5 +000b92: 5b81 0900 |0077: iput-object v1, v8, Lcom/google/android/test/Test;.aObject:[Ljava/lang/Object; // field@0009 +000b96: 1231 |0079: const/4 v1, #int 3 // #3 +000b98: 7020 0d00 1800 |007a: invoke-direct {v8, v1}, Lcom/google/android/test/Test;.doit:(I)V // method@000d +000b9e: 5a86 1200 |007d: iput-wide v6, v8, Lcom/google/android/test/Test;.mL:J // field@0012 +000ba2: 7020 0a00 8800 |007f: invoke-direct {v8, v8}, Lcom/google/android/test/Test;.add:(Ljava/lang/Object;)Ljava/lang/Object; // method@000a +000ba8: 0c01 |0082: move-result-object v1 +000baa: 5b81 1300 |0083: iput-object v1, v8, Lcom/google/android/test/Test;.mO:Ljava/lang/Object; // field@0013 +000bae: 7110 0b00 0800 |0085: invoke-static {v8}, Lcom/google/android/test/Test;.adds:(Ljava/lang/Object;)Ljava/lang/Object; // method@000b +000bb4: 0c01 |0088: move-result-object v1 +000bb6: 6901 1e00 |0089: sput-object v1, Lcom/google/android/test/Test;.sO:Ljava/lang/Object; // field@001e +000bba: 7010 0c00 0800 |008b: invoke-direct {v8}, Lcom/google/android/test/Test;.copies:()V // method@000c +000bc0: 7010 1600 0800 |008e: invoke-direct {v8}, Lcom/google/android/test/Test;.seta:()V // method@0016 +000bc6: 7010 0e00 0800 |0091: invoke-direct {v8}, Lcom/google/android/test/Test;.geta:()Z // method@000e +000bcc: 0a01 |0094: move-result v1 +000bce: 3801 0900 |0095: if-eqz v1, 009e // +0009 +000bd2: 6201 2000 |0097: sget-object v1, Ljava/lang/System;.out:Ljava/io/PrintStream; // field@0020 +000bd6: 1a02 7600 |0099: const-string v2, "ok then" // string@0076 +000bda: 6e20 1700 2100 |009b: invoke-virtual {v1, v2}, Ljava/io/PrintStream;.println:(Ljava/lang/String;)V // method@0017 +000be0: 0e00 |009e: return-void +000be2: 0d00 |009f: move-exception v0 +000be4: 1251 |00a0: const/4 v1, #int 5 // #5 +000be6: 5981 1100 |00a1: iput v1, v8, Lcom/google/android/test/Test;.mI:I // field@0011 +000bea: 5a86 1200 |00a3: iput-wide v6, v8, Lcom/google/android/test/Test;.mL:J // field@0012 +000bee: 28da |00a5: goto 007f // -0026 +000bf0: 0d01 |00a6: move-exception v1 +000bf2: 5a86 1200 |00a7: iput-wide v6, v8, Lcom/google/android/test/Test;.mL:J // field@0012 +000bf6: 2701 |00a9: throw v1 +000bf8: 0003 0400 0400 0000 0100 0000 0200 ... |00aa: array-data (12 units) +000c10: 0003 0100 0200 0000 0102 |00b6: array-data (5 units) +000c1a: 0000 |00bb: nop // spacer +000c1c: 0003 0200 0200 0000 6100 6200 |00bc: array-data (6 units) +000c28: 0003 0400 0200 0000 0100 0000 0200 ... 
|00c2: array-data (8 units) +000c38: 0003 0800 0200 0000 0100 0000 0000 ... |00ca: array-data (12 units) +000c50: 0003 0400 0200 0000 0000 803f 0000 ... |00d6: array-data (8 units) +000c60: 0003 0800 0200 0000 0000 0000 0000 ... |00de: array-data (12 units) + catches : 2 + 0x007a - 0x007d + Ljava/lang/Exception; -> 0x009f + <any> -> 0x00a6 + 0x00a1 - 0x00a3 + <any> -> 0x00a6 + positions : + 0x0004 line=43 + 0x0007 line=18 + 0x000a line=19 + 0x000e line=20 + 0x0013 line=21 + 0x0017 line=22 + 0x001c line=23 + 0x0023 line=24 + 0x0028 line=25 + 0x002f line=26 + 0x0032 line=27 + 0x003a line=31 + 0x0040 line=32 + 0x0047 line=33 + 0x004e line=34 + 0x0052 line=35 + 0x0059 line=36 + 0x0060 line=37 + 0x0067 line=38 + 0x006e line=39 + 0x0079 line=45 + 0x007d line=49 + 0x007f line=51 + 0x0085 line=52 + 0x008b line=53 + 0x008e line=54 + 0x0091 line=55 + 0x0097 line=56 + 0x009e line=57 + 0x009f line=46 + 0x00a0 line=47 + 0x00a3 line=49 + 0x00a6 line=48 + 0x00a7 line=49 + 0x00a9 line=50 + 0x00aa line=27 + 0x00b6 line=32 + 0x00bb line=33 + 0x00c2 line=35 + 0x00ca line=36 + 0x00d6 line=37 + 0x00de line=38 + locals : + 0x00a0 - 0x00a6 reg=0 e Ljava/lang/Exception; + 0x0000 - 0x00ea reg=8 this Lcom/google/android/test/Test; + + #2 : (in Lcom/google/android/test/Test;) + name : 'add' + type : '(Ljava/lang/Object;)Ljava/lang/Object;' + access : 0x20002 (PRIVATE DECLARED_SYNCHRONIZED) + code - + registers : 13 + ins : 2 + outs : 0 + insns size : 239 16-bit code units +000c94: |[000c94] com.google.android.test.Test.add:(Ljava/lang/Object;)Ljava/lang/Object; +000ca4: 150a 8040 |0000: const/high16 v10, #int 1082130432 // #4080 +000ca8: 1908 1040 |0002: const-wide/high16 v8, #long 4616189618054758400 // #4010 +000cac: 1d0b |0004: monitor-enter v11 +000cae: 5bbc 1300 |0005: iput-object v12, v11, Lcom/google/android/test/Test;.mO:Ljava/lang/Object; // field@0013 +000cb2: 55b0 0d00 |0007: iget-boolean v0, v11, Lcom/google/android/test/Test;.mBool:Z // field@000d +000cb6: de00 0000 |0009: or-int/lit8 v0, v0, #int 0 // #00 +000cba: 5cb0 0d00 |000b: iput-boolean v0, v11, Lcom/google/android/test/Test;.mBool:Z // field@000d +000cbe: 56b0 0c00 |000d: iget-byte v0, v11, Lcom/google/android/test/Test;.mB:B // field@000c +000cc2: d800 001f |000f: add-int/lit8 v0, v0, #int 31 // #1f +000cc6: 8d00 |0011: int-to-byte v0, v0 +000cc8: 5db0 0c00 |0012: iput-byte v0, v11, Lcom/google/android/test/Test;.mB:B // field@000c +000ccc: 57b0 0e00 |0014: iget-char v0, v11, Lcom/google/android/test/Test;.mC:C // field@000e +000cd0: 1401 ffff 0000 |0016: const v1, #float 0.000000 // #0000ffff +000cd6: b010 |0019: add-int/2addr v0, v1 +000cd8: 8e00 |001a: int-to-char v0, v0 +000cda: 5eb0 0e00 |001b: iput-char v0, v11, Lcom/google/android/test/Test;.mC:C // field@000e +000cde: 58b0 1500 |001d: iget-short v0, v11, Lcom/google/android/test/Test;.mS:S // field@0015 +000ce2: d000 3412 |001f: add-int/lit16 v0, v0, #int 4660 // #1234 +000ce6: 8f00 |0021: int-to-short v0, v0 +000ce8: 5fb0 1500 |0022: iput-short v0, v11, Lcom/google/android/test/Test;.mS:S // field@0015 +000cec: 52b0 1100 |0024: iget v0, v11, Lcom/google/android/test/Test;.mI:I // field@0011 +000cf0: 1401 7856 3412 |0026: const v1, #float 0.000000 // #12345678 +000cf6: b010 |0029: add-int/2addr v0, v1 +000cf8: 59b0 1100 |002a: iput v0, v11, Lcom/google/android/test/Test;.mI:I // field@0011 +000cfc: 52b0 1100 |002c: iget v0, v11, Lcom/google/android/test/Test;.mI:I // field@0011 +000d00: 1501 f11f |002e: const/high16 v1, #int 535887872 // #1ff1 +000d04: b010 |0030: add-int/2addr v0, v1 
+000d06: 59b0 1100 |0031: iput v0, v11, Lcom/google/android/test/Test;.mI:I // field@0011 +000d0a: 53b0 1200 |0033: iget-wide v0, v11, Lcom/google/android/test/Test;.mL:J // field@0012 +000d0e: 1802 ffff cdab 7956 3412 |0035: const-wide v2, #double 0.000000 // #12345679abcdffff +000d18: bb20 |003a: add-long/2addr v0, v2 +000d1a: 5ab0 1200 |003b: iput-wide v0, v11, Lcom/google/android/test/Test;.mL:J // field@0012 +000d1e: 53b0 1200 |003d: iget-wide v0, v11, Lcom/google/android/test/Test;.mL:J // field@0012 +000d22: 1902 f11f |003f: const-wide/high16 v2, #long 2301620884563034112 // #1ff1 +000d26: bb20 |0041: add-long/2addr v0, v2 +000d28: 5ab0 1200 |0042: iput-wide v0, v11, Lcom/google/android/test/Test;.mL:J // field@0012 +000d2c: 52b0 1000 |0044: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000d30: 1401 00e4 4046 |0046: const v1, #float 12345.000000 // #4640e400 +000d36: 52b2 1000 |0049: iget v2, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000d3a: 1503 803f |004b: const/high16 v3, #int 1065353216 // #3f80 +000d3e: c732 |004d: sub-float/2addr v2, v3 +000d40: c621 |004e: add-float/2addr v1, v2 +000d42: 52b2 1000 |004f: iget v2, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000d46: c8a2 |0051: mul-float/2addr v2, v10 +000d48: 1503 c03f |0052: const/high16 v3, #int 1069547520 // #3fc0 +000d4c: c932 |0054: div-float/2addr v2, v3 +000d4e: c621 |0055: add-float/2addr v1, v2 +000d50: c610 |0056: add-float/2addr v0, v1 +000d52: 59b0 1000 |0057: iput v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000d56: 53b0 0f00 |0059: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000d5a: 1802 0000 0000 801c c840 |005b: const-wide v2, #double 12345.000000 // #40c81c8000000000 +000d64: 53b4 0f00 |0060: iget-wide v4, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000d68: 1906 f03f |0062: const-wide/high16 v6, #long 4607182418800017408 // #3ff0 +000d6c: cc64 |0064: sub-double/2addr v4, v6 +000d6e: cb42 |0065: add-double/2addr v2, v4 +000d70: 53b4 0f00 |0066: iget-wide v4, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000d74: cd84 |0068: mul-double/2addr v4, v8 +000d76: 1906 f83f |0069: const-wide/high16 v6, #long 4609434218613702656 // #3ff8 +000d7a: ce64 |006b: div-double/2addr v4, v6 +000d7c: cb42 |006c: add-double/2addr v2, v4 +000d7e: cb20 |006d: add-double/2addr v0, v2 +000d80: 5ab0 0f00 |006e: iput-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000d84: 52b0 1000 |0070: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000d88: 1201 |0072: const/4 v1, #int 0 // #0 +000d8a: 2d00 0001 |0073: cmpl-float v0, v0, v1 +000d8e: 3800 2900 |0075: if-eqz v0, 009e // +0029 +000d92: 52b0 1000 |0077: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000d96: 1401 9a99 993e |0079: const v1, #float 0.300000 // #3e99999a +000d9c: 2d00 0001 |007c: cmpl-float v0, v0, v1 +000da0: 3900 2000 |007e: if-nez v0, 009e // +0020 +000da4: 52b0 1000 |0080: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000da8: 2d00 000a |0082: cmpl-float v0, v0, v10 +000dac: 3c00 1a00 |0084: if-gtz v0, 009e // +001a +000db0: 52b0 1000 |0086: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000db4: 1501 c040 |0088: const/high16 v1, #int 1086324736 // #40c0 +000db8: 2e00 0001 |008a: cmpg-float v0, v0, v1 +000dbc: 3a00 1200 |008c: if-ltz v0, 009e // +0012 +000dc0: 52b0 1000 |008e: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000dc4: 1501 b0c1 |0090: const/high16 
v1, #int -1045430272 // #c1b0 +000dc8: 2e00 0001 |0092: cmpg-float v0, v0, v1 +000dcc: 3d00 0a00 |0094: if-lez v0, 009e // +000a +000dd0: 52b0 1000 |0096: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000dd4: 1501 b041 |0098: const/high16 v1, #int 1102053376 // #41b0 +000dd8: 2d00 0001 |009a: cmpl-float v0, v0, v1 +000ddc: 3a00 0700 |009c: if-ltz v0, 00a3 // +0007 +000de0: 53b0 0f00 |009e: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000de4: 8c00 |00a0: double-to-float v0, v0 +000de6: 59b0 1000 |00a1: iput v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000dea: 53b0 0f00 |00a3: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000dee: 1602 0000 |00a5: const-wide/16 v2, #int 0 // #0 +000df2: 2f00 0002 |00a7: cmpl-double v0, v0, v2 +000df6: 3800 2b00 |00a9: if-eqz v0, 00d4 // +002b +000dfa: 53b0 0f00 |00ab: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000dfe: 1802 3333 3333 3333 d33f |00ad: const-wide v2, #double 0.300000 // #3fd3333333333333 +000e08: 2f00 0002 |00b2: cmpl-double v0, v0, v2 +000e0c: 3900 2000 |00b4: if-nez v0, 00d4 // +0020 +000e10: 53b0 0f00 |00b6: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000e14: 2f00 0008 |00b8: cmpl-double v0, v0, v8 +000e18: 3c00 1a00 |00ba: if-gtz v0, 00d4 // +001a +000e1c: 53b0 0f00 |00bc: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000e20: 1902 1840 |00be: const-wide/high16 v2, #long 4618441417868443648 // #4018 +000e24: 3000 0002 |00c0: cmpg-double v0, v0, v2 +000e28: 3a00 1200 |00c2: if-ltz v0, 00d4 // +0012 +000e2c: 53b0 0f00 |00c4: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000e30: 1902 36c0 |00c6: const-wide/high16 v2, #long -4596486369685012480 // #c036 +000e34: 3000 0002 |00c8: cmpg-double v0, v0, v2 +000e38: 3d00 0a00 |00ca: if-lez v0, 00d4 // +000a +000e3c: 53b0 0f00 |00cc: iget-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000e40: 1902 3640 |00ce: const-wide/high16 v2, #long 4626885667169763328 // #4036 +000e44: 2f00 0002 |00d0: cmpl-double v0, v0, v2 +000e48: 3a00 1200 |00d2: if-ltz v0, 00e4 // +0012 +000e4c: 52b0 1000 |00d4: iget v0, v11, Lcom/google/android/test/Test;.mF:F // field@0010 +000e50: 8900 |00d6: float-to-double v0, v0 +000e52: 5ab0 0f00 |00d7: iput-wide v0, v11, Lcom/google/android/test/Test;.mD:D // field@000f +000e56: 6300 1800 |00d9: sget-boolean v0, Lcom/google/android/test/Test;.sBool:Z // field@0018 +000e5a: 3900 0f00 |00db: if-nez v0, 00ea // +000f +000e5e: 55b0 0d00 |00dd: iget-boolean v0, v11, Lcom/google/android/test/Test;.mBool:Z // field@000d +000e62: 3900 0b00 |00df: if-nez v0, 00ea // +000b +000e66: 1200 |00e1: const/4 v0, #int 0 // #0 +000e68: 5cb0 0d00 |00e2: iput-boolean v0, v11, Lcom/google/android/test/Test;.mBool:Z // field@000d +000e6c: 390c 0400 |00e4: if-nez v12, 00e8 // +0004 +000e70: 54bc 1300 |00e6: iget-object v12, v11, Lcom/google/android/test/Test;.mO:Ljava/lang/Object; // field@0013 +000e74: 1e0b |00e8: monitor-exit v11 +000e76: 110c |00e9: return-object v12 +000e78: 1210 |00ea: const/4 v0, #int 1 // #1 +000e7a: 28f7 |00eb: goto 00e2 // -0009 +000e7c: 0d00 |00ec: move-exception v0 +000e7e: 1e0b |00ed: monitor-exit v11 +000e80: 2700 |00ee: throw v0 + catches : 1 + 0x0005 - 0x00e8 + <any> -> 0x00ec + positions : + 0x0004 line=179 + 0x0007 line=180 + 0x000d line=181 + 0x0014 line=182 + 0x001d line=183 + 0x0024 line=184 + 0x002c line=185 + 0x0033 line=186 + 0x003d line=187 + 0x0044 line=188 + 0x0059 
line=189 + 0x0070 line=190 + 0x009e line=191 + 0x00a3 line=193 + 0x00d4 line=194 + 0x00d9 line=195 + 0x00e4 line=197 + 0x00ea line=195 + 0x00ec line=179 + locals : + 0x0000 - 0x00e8 reg=12 o Ljava/lang/Object; + 0x0000 - 0x00ef reg=11 this Lcom/google/android/test/Test; + 0x00ea - 0x00ef reg=12 o Ljava/lang/Object; + + #3 : (in Lcom/google/android/test/Test;) + name : 'adds' + type : '(Ljava/lang/Object;)Ljava/lang/Object;' + access : 0x000a (PRIVATE STATIC) + code - + registers : 9 + ins : 1 + outs : 0 + insns size : 118 16-bit code units +000e90: |[000e90] com.google.android.test.Test.adds:(Ljava/lang/Object;)Ljava/lang/Object; +000ea0: 6908 1e00 |0000: sput-object v8, Lcom/google/android/test/Test;.sO:Ljava/lang/Object; // field@001e +000ea4: 6300 1800 |0002: sget-boolean v0, Lcom/google/android/test/Test;.sBool:Z // field@0018 +000ea8: de00 0000 |0004: or-int/lit8 v0, v0, #int 0 // #00 +000eac: 6a00 1800 |0006: sput-boolean v0, Lcom/google/android/test/Test;.sBool:Z // field@0018 +000eb0: 6400 1700 |0008: sget-byte v0, Lcom/google/android/test/Test;.sB:B // field@0017 +000eb4: d800 001f |000a: add-int/lit8 v0, v0, #int 31 // #1f +000eb8: 8d00 |000c: int-to-byte v0, v0 +000eba: 6b00 1700 |000d: sput-byte v0, Lcom/google/android/test/Test;.sB:B // field@0017 +000ebe: 6500 1900 |000f: sget-char v0, Lcom/google/android/test/Test;.sC:C // field@0019 +000ec2: 1401 ffff 0000 |0011: const v1, #float 0.000000 // #0000ffff +000ec8: b010 |0014: add-int/2addr v0, v1 +000eca: 8e00 |0015: int-to-char v0, v0 +000ecc: 6c00 1900 |0016: sput-char v0, Lcom/google/android/test/Test;.sC:C // field@0019 +000ed0: 6600 1f00 |0018: sget-short v0, Lcom/google/android/test/Test;.sS:S // field@001f +000ed4: d000 3412 |001a: add-int/lit16 v0, v0, #int 4660 // #1234 +000ed8: 8f00 |001c: int-to-short v0, v0 +000eda: 6d00 1f00 |001d: sput-short v0, Lcom/google/android/test/Test;.sS:S // field@001f +000ede: 6000 1c00 |001f: sget v0, Lcom/google/android/test/Test;.sI:I // field@001c +000ee2: 1401 7856 3412 |0021: const v1, #float 0.000000 // #12345678 +000ee8: b010 |0024: add-int/2addr v0, v1 +000eea: 6700 1c00 |0025: sput v0, Lcom/google/android/test/Test;.sI:I // field@001c +000eee: 6000 1c00 |0027: sget v0, Lcom/google/android/test/Test;.sI:I // field@001c +000ef2: 1501 f11f |0029: const/high16 v1, #int 535887872 // #1ff1 +000ef6: b010 |002b: add-int/2addr v0, v1 +000ef8: 6700 1c00 |002c: sput v0, Lcom/google/android/test/Test;.sI:I // field@001c +000efc: 6100 1d00 |002e: sget-wide v0, Lcom/google/android/test/Test;.sL:J // field@001d +000f00: 1802 ffff cdab 7956 3412 |0030: const-wide v2, #double 0.000000 // #12345679abcdffff +000f0a: bb20 |0035: add-long/2addr v0, v2 +000f0c: 6800 1d00 |0036: sput-wide v0, Lcom/google/android/test/Test;.sL:J // field@001d +000f10: 6100 1d00 |0038: sget-wide v0, Lcom/google/android/test/Test;.sL:J // field@001d +000f14: 1902 f11f |003a: const-wide/high16 v2, #long 2301620884563034112 // #1ff1 +000f18: bb20 |003c: add-long/2addr v0, v2 +000f1a: 6800 1d00 |003d: sput-wide v0, Lcom/google/android/test/Test;.sL:J // field@001d +000f1e: 6000 1b00 |003f: sget v0, Lcom/google/android/test/Test;.sF:F // field@001b +000f22: 1401 00e4 4046 |0041: const v1, #float 12345.000000 // #4640e400 +000f28: 6002 1b00 |0044: sget v2, Lcom/google/android/test/Test;.sF:F // field@001b +000f2c: 7f22 |0046: neg-float v2, v2 +000f2e: 1503 803f |0047: const/high16 v3, #int 1065353216 // #3f80 +000f32: c732 |0049: sub-float/2addr v2, v3 +000f34: c621 |004a: add-float/2addr v1, v2 +000f36: 6002 1b00 |004b: 
sget v2, Lcom/google/android/test/Test;.sF:F // field@001b +000f3a: 1503 8040 |004d: const/high16 v3, #int 1082130432 // #4080 +000f3e: c832 |004f: mul-float/2addr v2, v3 +000f40: 1503 c03f |0050: const/high16 v3, #int 1069547520 // #3fc0 +000f44: c932 |0052: div-float/2addr v2, v3 +000f46: c621 |0053: add-float/2addr v1, v2 +000f48: c610 |0054: add-float/2addr v0, v1 +000f4a: 6700 1b00 |0055: sput v0, Lcom/google/android/test/Test;.sF:F // field@001b +000f4e: 6100 1a00 |0057: sget-wide v0, Lcom/google/android/test/Test;.sD:D // field@001a +000f52: 1802 0000 0000 801c c840 |0059: const-wide v2, #double 12345.000000 // #40c81c8000000000 +000f5c: 6104 1a00 |005e: sget-wide v4, Lcom/google/android/test/Test;.sD:D // field@001a +000f60: 8044 |0060: neg-double v4, v4 +000f62: 1906 f03f |0061: const-wide/high16 v6, #long 4607182418800017408 // #3ff0 +000f66: cc64 |0063: sub-double/2addr v4, v6 +000f68: cb42 |0064: add-double/2addr v2, v4 +000f6a: 6104 1a00 |0065: sget-wide v4, Lcom/google/android/test/Test;.sD:D // field@001a +000f6e: 1906 1040 |0067: const-wide/high16 v6, #long 4616189618054758400 // #4010 +000f72: cd64 |0069: mul-double/2addr v4, v6 +000f74: 1906 f83f |006a: const-wide/high16 v6, #long 4609434218613702656 // #3ff8 +000f78: ce64 |006c: div-double/2addr v4, v6 +000f7a: cb42 |006d: add-double/2addr v2, v4 +000f7c: cb20 |006e: add-double/2addr v0, v2 +000f7e: 6800 1a00 |006f: sput-wide v0, Lcom/google/android/test/Test;.sD:D // field@001a +000f82: 3908 0400 |0071: if-nez v8, 0075 // +0004 +000f86: 6208 1e00 |0073: sget-object v8, Lcom/google/android/test/Test;.sO:Ljava/lang/Object; // field@001e +000f8a: 1108 |0075: return-object v8 + catches : (none) + positions : + 0x0000 line=201 + 0x0002 line=202 + 0x0008 line=203 + 0x000f line=204 + 0x0018 line=205 + 0x001f line=206 + 0x0027 line=207 + 0x002e line=208 + 0x0038 line=209 + 0x003f line=210 + 0x0057 line=211 + 0x0071 line=212 + locals : + 0x0000 - 0x0075 reg=8 o Ljava/lang/Object; + + #4 : (in Lcom/google/android/test/Test;) + name : 'copies' + type : '()V' + access : 0x0002 (PRIVATE) + code - + registers : 19 + ins : 1 + outs : 12 + insns size : 171 16-bit code units +000f8c: |[000f8c] com.google.android.test.Test.copies:()V +000f9c: 0800 1200 |0000: move-object/from16 v0, v18 +000fa0: 5302 1200 |0002: iget-wide v2, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +000fa4: 7d22 |0004: neg-long v2, v2 +000fa6: 6104 1d00 |0005: sget-wide v4, Lcom/google/android/test/Test;.sL:J // field@001d +000faa: 6106 1d00 |0007: sget-wide v6, Lcom/google/android/test/Test;.sL:J // field@001d +000fae: bd64 |0009: mul-long/2addr v4, v6 +000fb0: 0800 1200 |000a: move-object/from16 v0, v18 +000fb4: 5306 1200 |000c: iget-wide v6, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +000fb8: be64 |000e: div-long/2addr v4, v6 +000fba: bc42 |000f: sub-long/2addr v2, v4 +000fbc: 0800 1200 |0010: move-object/from16 v0, v18 +000fc0: 5304 1200 |0012: iget-wide v4, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +000fc4: 1606 ffff |0014: const-wide/16 v6, #int -1 // #ffff +000fc8: c264 |0016: xor-long/2addr v4, v6 +000fca: bc42 |0017: sub-long/2addr v2, v4 +000fcc: 0800 1200 |0018: move-object/from16 v0, v18 +000fd0: 5304 1200 |001a: iget-wide v4, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +000fd4: 1606 0400 |001c: const-wide/16 v6, #int 4 // #4 +000fd8: bf64 |001e: rem-long/2addr v4, v6 +000fda: a210 0204 |001f: xor-long v16, v2, v4 +000fde: 0800 1200 |0021: move-object/from16 v0, v18 +000fe2: 5302 0f00 |0023: iget-wide v2, v0, 
Lcom/google/android/test/Test;.mD:D // field@000f +000fe6: 6004 1b00 |0025: sget v4, Lcom/google/android/test/Test;.sF:F // field@001b +000fea: 8944 |0027: float-to-double v4, v4 +000fec: cd42 |0028: mul-double/2addr v2, v4 +000fee: 0800 1200 |0029: move-object/from16 v0, v18 +000ff2: 5304 0f00 |002b: iget-wide v4, v0, Lcom/google/android/test/Test;.mD:D // field@000f +000ff6: ce42 |002d: div-double/2addr v2, v4 +000ff8: 6104 1a00 |002e: sget-wide v4, Lcom/google/android/test/Test;.sD:D // field@001a +000ffc: 0800 1200 |0030: move-object/from16 v0, v18 +001000: 5306 0f00 |0032: iget-wide v6, v0, Lcom/google/android/test/Test;.mD:D // field@000f +001004: cd64 |0034: mul-double/2addr v4, v6 +001006: ac0e 0204 |0035: sub-double v14, v2, v4 +00100a: 6302 1800 |0037: sget-boolean v2, Lcom/google/android/test/Test;.sBool:Z // field@0018 +00100e: 0800 1200 |0039: move-object/from16 v0, v18 +001012: 5c02 0d00 |003b: iput-boolean v2, v0, Lcom/google/android/test/Test;.mBool:Z // field@000d +001016: 6402 1700 |003d: sget-byte v2, Lcom/google/android/test/Test;.sB:B // field@0017 +00101a: 0800 1200 |003f: move-object/from16 v0, v18 +00101e: 5d02 0c00 |0041: iput-byte v2, v0, Lcom/google/android/test/Test;.mB:B // field@000c +001022: 6502 1900 |0043: sget-char v2, Lcom/google/android/test/Test;.sC:C // field@0019 +001026: 0800 1200 |0045: move-object/from16 v0, v18 +00102a: 5e02 0e00 |0047: iput-char v2, v0, Lcom/google/android/test/Test;.mC:C // field@000e +00102e: 6602 1f00 |0049: sget-short v2, Lcom/google/android/test/Test;.sS:S // field@001f +001032: 0800 1200 |004b: move-object/from16 v0, v18 +001036: 5f02 1500 |004d: iput-short v2, v0, Lcom/google/android/test/Test;.mS:S // field@0015 +00103a: 6002 1c00 |004f: sget v2, Lcom/google/android/test/Test;.sI:I // field@001c +00103e: 0800 1200 |0051: move-object/from16 v0, v18 +001042: 5203 1100 |0053: iget v3, v0, Lcom/google/android/test/Test;.mI:I // field@0011 +001046: b432 |0055: rem-int/2addr v2, v3 +001048: 0800 1200 |0056: move-object/from16 v0, v18 +00104c: 5902 1100 |0058: iput v2, v0, Lcom/google/android/test/Test;.mI:I // field@0011 +001050: 6102 1d00 |005a: sget-wide v2, Lcom/google/android/test/Test;.sL:J // field@001d +001054: 1604 ffff |005c: const-wide/16 v4, #int -1 // #ffff +001058: a204 0410 |005e: xor-long v4, v4, v16 +00105c: bb42 |0060: add-long/2addr v2, v4 +00105e: 0800 1200 |0061: move-object/from16 v0, v18 +001062: 5a02 1200 |0063: iput-wide v2, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +001066: 6002 1b00 |0065: sget v2, Lcom/google/android/test/Test;.sF:F // field@001b +00106a: 0800 1200 |0067: move-object/from16 v0, v18 +00106e: 5902 1000 |0069: iput v2, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +001072: 6102 1a00 |006b: sget-wide v2, Lcom/google/android/test/Test;.sD:D // field@001a +001076: cbe2 |006d: add-double/2addr v2, v14 +001078: 0800 1200 |006e: move-object/from16 v0, v18 +00107c: 5a02 0f00 |0070: iput-wide v2, v0, Lcom/google/android/test/Test;.mD:D // field@000f +001080: 6202 1e00 |0072: sget-object v2, Lcom/google/android/test/Test;.sO:Ljava/lang/Object; // field@001e +001084: 0800 1200 |0074: move-object/from16 v0, v18 +001088: 5b02 1300 |0076: iput-object v2, v0, Lcom/google/android/test/Test;.mO:Ljava/lang/Object; // field@0013 +00108c: 6202 1600 |0078: sget-object v2, Lcom/google/android/test/Test;.sArray:[I // field@0016 +001090: 0800 1200 |007a: move-object/from16 v0, v18 +001094: 5b02 0b00 |007c: iput-object v2, v0, Lcom/google/android/test/Test;.mArray:[I // field@000b +001098: 
0800 1200 |007e: move-object/from16 v0, v18 +00109c: 5603 0c00 |0080: iget-byte v3, v0, Lcom/google/android/test/Test;.mB:B // field@000c +0010a0: 0800 1200 |0082: move-object/from16 v0, v18 +0010a4: 5704 0e00 |0084: iget-char v4, v0, Lcom/google/android/test/Test;.mC:C // field@000e +0010a8: 0800 1200 |0086: move-object/from16 v0, v18 +0010ac: 5805 1500 |0088: iget-short v5, v0, Lcom/google/android/test/Test;.mS:S // field@0015 +0010b0: 0800 1200 |008a: move-object/from16 v0, v18 +0010b4: 5206 1100 |008c: iget v6, v0, Lcom/google/android/test/Test;.mI:I // field@0011 +0010b8: 0800 1200 |008e: move-object/from16 v0, v18 +0010bc: 5307 1200 |0090: iget-wide v7, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +0010c0: 0800 1200 |0092: move-object/from16 v0, v18 +0010c4: 5209 1000 |0094: iget v9, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +0010c8: 0800 1200 |0096: move-object/from16 v0, v18 +0010cc: 530a 0f00 |0098: iget-wide v10, v0, Lcom/google/android/test/Test;.mD:D // field@000f +0010d0: 0800 1200 |009a: move-object/from16 v0, v18 +0010d4: 540c 1300 |009c: iget-object v12, v0, Lcom/google/android/test/Test;.mO:Ljava/lang/Object; // field@0013 +0010d8: 0800 1200 |009e: move-object/from16 v0, v18 +0010dc: 540d 0b00 |00a0: iget-object v13, v0, Lcom/google/android/test/Test;.mArray:[I // field@000b +0010e0: 0802 1200 |00a2: move-object/from16 v2, v18 +0010e4: 760c 1100 0200 |00a4: invoke-direct/range {v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/test/Test;.params:(BCSIJFDLjava/lang/Object;[I)J // method@0011 +0010ea: 0b02 |00a7: move-result-wide v2 +0010ec: 6802 1d00 |00a8: sput-wide v2, Lcom/google/android/test/Test;.sL:J // field@001d +0010f0: 0e00 |00aa: return-void + catches : (none) + positions : + 0x0000 line=216 + 0x0021 line=217 + 0x0037 line=218 + 0x003d line=219 + 0x0043 line=220 + 0x0049 line=221 + 0x004f line=222 + 0x005a line=223 + 0x0065 line=224 + 0x006b line=225 + 0x0072 line=226 + 0x0078 line=227 + 0x007e line=228 + 0x00aa line=229 + locals : + 0x0037 - 0x00ab reg=14 d D + 0x0021 - 0x00ab reg=16 x J + 0x0000 - 0x00ab reg=18 this Lcom/google/android/test/Test; + + #5 : (in Lcom/google/android/test/Test;) + name : 'doit' + type : '(I)V' + access : 0x0002 (PRIVATE) + code - + registers : 3 + ins : 2 + outs : 3 + insns size : 78 16-bit code units +0010f4: |[0010f4] com.google.android.test.Test.doit:(I)V +001104: 3d02 0700 |0000: if-lez v2, 0007 // +0007 +001108: d800 02fd |0002: add-int/lit8 v0, v2, #int -3 // #fd +00110c: 7020 0d00 0100 |0004: invoke-direct {v1, v0}, Lcom/google/android/test/Test;.doit:(I)V // method@000d +001112: 2b02 3500 0000 |0007: packed-switch v2, 0000003c // +00000035 +001118: 2200 1300 |000a: new-instance v0, Ljava/lang/Exception; // type@0013 +00111c: 7010 1800 0000 |000c: invoke-direct {v0}, Ljava/lang/Exception;.<init>:()V // method@0018 +001122: 2700 |000f: throw v0 +001124: df00 02ff |0010: xor-int/lit8 v0, v2, #int -1 // #ff +001128: 7020 1000 0100 |0012: invoke-direct {v1, v0}, Lcom/google/android/test/Test;.p:(I)V // method@0010 +00112e: 5410 1400 |0015: iget-object v0, v1, Lcom/google/android/test/Test;.mRunner:Ljava/lang/Runnable; // field@0014 +001132: 3800 0700 |0017: if-eqz v0, 001e // +0007 +001136: 5410 1400 |0019: iget-object v0, v1, Lcom/google/android/test/Test;.mRunner:Ljava/lang/Runnable; // field@0014 +00113a: 7210 1b00 0000 |001b: invoke-interface {v0}, Ljava/lang/Runnable;.run:()V // method@001b +001140: 2c02 2600 0000 |001e: sparse-switch v2, 00000044 // +00000026 +001146: 0e00 |0021: 
return-void +001148: 3d02 0700 |0022: if-lez v2, 0029 // +0007 +00114c: 0120 |0024: move v0, v2 +00114e: 7030 1200 2100 |0025: invoke-direct {v1, v2, v0}, Lcom/google/android/test/Test;.q:(II)V // method@0012 +001154: 28ed |0028: goto 0015 // -0013 +001156: 7b20 |0029: neg-int v0, v2 +001158: 28fb |002a: goto 0025 // -0005 +00115a: 7020 1000 2100 |002b: invoke-direct {v1, v2}, Lcom/google/android/test/Test;.p:(I)V // method@0010 +001160: 5410 1400 |002e: iget-object v0, v1, Lcom/google/android/test/Test;.mRunner:Ljava/lang/Runnable; // field@0014 +001164: 3900 f1ff |0030: if-nez v0, 0021 // -000f +001168: 5b11 1400 |0032: iput-object v1, v1, Lcom/google/android/test/Test;.mRunner:Ljava/lang/Runnable; // field@0014 +00116c: 28ed |0034: goto 0021 // -0013 +00116e: d800 02ff |0035: add-int/lit8 v0, v2, #int -1 // #ff +001172: 7030 1200 2100 |0037: invoke-direct {v1, v2, v0}, Lcom/google/android/test/Test;.q:(II)V // method@0012 +001178: 28f4 |003a: goto 002e // -000c +00117a: 0000 |003b: nop // spacer +00117c: 0001 0200 0000 0000 0900 0000 1b00 ... |003c: packed-switch-data (8 units) +00118c: 0002 0200 2dfb ffff 0ba2 0700 0d00 ... |0044: sparse-switch-data (10 units) + catches : (none) + positions : + 0x0000 line=98 + 0x0002 line=99 + 0x0007 line=101 + 0x000a line=104 + 0x0010 line=102 + 0x0015 line=106 + 0x0019 line=107 + 0x001e line=109 + 0x0021 line=117 + 0x0022 line=103 + 0x002b line=110 + 0x002e line=114 + 0x0032 line=115 + 0x0035 line=111 + 0x003b line=101 + 0x0044 line=109 + locals : + 0x0000 - 0x004e reg=1 this Lcom/google/android/test/Test; + 0x0000 - 0x004e reg=2 x I + + #6 : (in Lcom/google/android/test/Test;) + name : 'geta' + type : '()Z' + access : 0x0002 (PRIVATE) + code - + registers : 8 + ins : 1 + outs : 0 + insns size : 73 16-bit code units +0011a0: |[0011a0] com.google.android.test.Test.geta:()Z +0011b0: 1226 |0000: const/4 v6, #int 2 // #2 +0011b2: 1210 |0001: const/4 v0, #int 1 // #1 +0011b4: 5471 0200 |0002: iget-object v1, v7, Lcom/google/android/test/Test;.aBool:[Z // field@0002 +0011b8: 4701 0106 |0004: aget-boolean v1, v1, v6 +0011bc: 3801 0300 |0006: if-eqz v1, 0009 // +0003 +0011c0: 0f00 |0008: return v0 +0011c2: 5471 0300 |0009: iget-object v1, v7, Lcom/google/android/test/Test;.aByte:[B // field@0003 +0011c6: 4801 0106 |000b: aget-byte v1, v1, v6 +0011ca: 3201 fbff |000d: if-eq v1, v0, 0008 // -0005 +0011ce: 5471 0400 |000f: iget-object v1, v7, Lcom/google/android/test/Test;.aChar:[C // field@0004 +0011d2: 4901 0106 |0011: aget-char v1, v1, v6 +0011d6: 1302 6400 |0013: const/16 v2, #int 100 // #64 +0011da: 3221 f3ff |0015: if-eq v1, v2, 0008 // -000d +0011de: 5471 0a00 |0017: iget-object v1, v7, Lcom/google/android/test/Test;.aShort:[S // field@000a +0011e2: 4a01 0106 |0019: aget-short v1, v1, v6 +0011e6: 3201 edff |001b: if-eq v1, v0, 0008 // -0013 +0011ea: 5471 0700 |001d: iget-object v1, v7, Lcom/google/android/test/Test;.aInt:[I // field@0007 +0011ee: 4401 0106 |001f: aget v1, v1, v6 +0011f2: 3201 e7ff |0021: if-eq v1, v0, 0008 // -0019 +0011f6: 5471 0800 |0023: iget-object v1, v7, Lcom/google/android/test/Test;.aLong:[J // field@0008 +0011fa: 4502 0106 |0025: aget-wide v2, v1, v6 +0011fe: 1604 0100 |0027: const-wide/16 v4, #int 1 // #1 +001202: 3101 0204 |0029: cmp-long v1, v2, v4 +001206: 3801 ddff |002b: if-eqz v1, 0008 // -0023 +00120a: 5471 0600 |002d: iget-object v1, v7, Lcom/google/android/test/Test;.aFloat:[F // field@0006 +00120e: 4401 0106 |002f: aget v1, v1, v6 +001212: 1502 803f |0031: const/high16 v2, #int 1065353216 // #3f80 +001216: 2d01 0102 
|0033: cmpl-float v1, v1, v2 +00121a: 3801 d3ff |0035: if-eqz v1, 0008 // -002d +00121e: 5471 0500 |0037: iget-object v1, v7, Lcom/google/android/test/Test;.aDouble:[D // field@0005 +001222: 4502 0106 |0039: aget-wide v2, v1, v6 +001226: 1904 f03f |003b: const-wide/high16 v4, #long 4607182418800017408 // #3ff0 +00122a: 2f01 0204 |003d: cmpl-double v1, v2, v4 +00122e: 3801 c9ff |003f: if-eqz v1, 0008 // -0037 +001232: 5471 0900 |0041: iget-object v1, v7, Lcom/google/android/test/Test;.aObject:[Ljava/lang/Object; // field@0009 +001236: 4601 0106 |0043: aget-object v1, v1, v6 +00123a: 3271 c3ff |0045: if-eq v1, v7, 0008 // -003d +00123e: 1200 |0047: const/4 v0, #int 0 // #0 +001240: 28c0 |0048: goto 0008 // -0040 + catches : (none) + positions : + 0x0002 line=72 + 0x0008 line=81 + 0x0009 line=73 + 0x000f line=74 + 0x0017 line=75 + 0x001d line=76 + 0x0023 line=77 + 0x002d line=78 + 0x0037 line=79 + 0x0041 line=80 + 0x0047 line=81 + locals : + 0x0000 - 0x0049 reg=7 this Lcom/google/android/test/Test; + + #7 : (in Lcom/google/android/test/Test;) + name : 'p' + type : '(I)V' + access : 0x0002 (PRIVATE) + code - + registers : 6 + ins : 2 + outs : 0 + insns size : 19 16-bit code units +001244: |[001244] com.google.android.test.Test.p:(I)V +001254: 0151 |0000: move v1, v5 +001256: 1200 |0001: const/4 v0, #int 0 // #0 +001258: 5442 0b00 |0002: iget-object v2, v4, Lcom/google/android/test/Test;.mArray:[I // field@000b +00125c: 2122 |0004: array-length v2, v2 +00125e: 3420 0300 |0005: if-lt v0, v2, 0008 // +0003 +001262: 0e00 |0007: return-void +001264: 5442 0b00 |0008: iget-object v2, v4, Lcom/google/android/test/Test;.mArray:[I // field@000b +001268: 5243 1100 |000a: iget v3, v4, Lcom/google/android/test/Test;.mI:I // field@0011 +00126c: 9303 0103 |000c: div-int v3, v1, v3 +001270: 4b03 0200 |000e: aput v3, v2, v0 +001274: d800 0001 |0010: add-int/lit8 v0, v0, #int 1 // #01 +001278: 28f0 |0012: goto 0002 // -0010 + catches : (none) + positions : + 0x0000 line=120 + 0x0001 line=121 + 0x0007 line=124 + 0x0008 line=122 + 0x0010 line=121 + locals : + 0x0002 - 0x0013 reg=0 i I + 0x0001 - 0x0013 reg=1 y I + 0x0000 - 0x0013 reg=4 this Lcom/google/android/test/Test; + 0x0000 - 0x0013 reg=5 x I + + #8 : (in Lcom/google/android/test/Test;) + name : 'params' + type : '(BCSIJFDLjava/lang/Object;[I)J' + access : 0x0002 (PRIVATE) + code - + registers : 38 + ins : 12 + outs : 2 + insns size : 318 16-bit code units +00127c: |[00127c] com.google.android.test.Test.params:(BCSIJFDLjava/lang/Object;[I)J +00128c: 0800 2400 |0000: move-object/from16 v0, v36 +001290: 2000 1500 |0002: instance-of v0, v0, Ljava/lang/Runnable; // type@0015 +001294: 0215 0000 |0004: move/from16 v21, v0 +001298: 3815 0c00 |0006: if-eqz v21, 0012 // +000c +00129c: 0815 2400 |0008: move-object/from16 v21, v36 +0012a0: 1f15 1500 |000a: check-cast v21, Ljava/lang/Runnable; // type@0015 +0012a4: 0800 1500 |000c: move-object/from16 v0, v21 +0012a8: 0801 1a00 |000e: move-object/from16 v1, v26 +0012ac: 5b10 1400 |0010: iput-object v0, v1, Lcom/google/android/test/Test;.mRunner:Ljava/lang/Runnable; // field@0014 +0012b0: 3825 0a00 |0012: if-eqz v37, 001c // +000a +0012b4: 3824 0800 |0014: if-eqz v36, 001c // +0008 +0012b8: 7402 1a00 2400 |0016: invoke-virtual/range {v36, v37}, Ljava/lang/Object;.equals:(Ljava/lang/Object;)Z // method@001a +0012be: 0a15 |0019: move-result v21 +0012c0: 3915 3800 |001a: if-nez v21, 0052 // +0038 +0012c4: 1315 0200 |001c: const/16 v21, #int 2 // #2 +0012c8: 0200 1500 |001e: move/from16 v0, v21 +0012cc: 2304 2400 |0020: 
new-array v4, v0, [I // type@0024 +0012d0: 2604 0801 0000 |0022: fill-array-data v4, 0000012a // +00000108 +0012d6: 0800 1a00 |0025: move-object/from16 v0, v26 +0012da: 5b04 0700 |0027: iput-object v4, v0, Lcom/google/android/test/Test;.aInt:[I // field@0007 +0012de: 1315 0200 |0029: const/16 v21, #int 2 // #2 +0012e2: 0200 1500 |002b: move/from16 v0, v21 +0012e6: 2305 2500 |002d: new-array v5, v0, [J // type@0025 +0012ea: 2605 0301 0000 |002f: fill-array-data v5, 00000132 // +00000103 +0012f0: 0800 1a00 |0032: move-object/from16 v0, v26 +0012f4: 5b05 0800 |0034: iput-object v5, v0, Lcom/google/android/test/Test;.aLong:[J // field@0008 +0012f8: 9015 1b1c |0036: add-int v21, v27, v28 +0012fc: 9015 151d |0038: add-int v21, v21, v29 +001300: 9015 151e |003a: add-int v21, v21, v30 +001304: 0200 1500 |003c: move/from16 v0, v21 +001308: 8100 |003e: int-to-long v0, v0 +00130a: 0516 0000 |003f: move-wide/from16 v22, v0 +00130e: 9b16 161f |0041: add-long v22, v22, v31 +001312: 0200 2100 |0043: move/from16 v0, v33 +001316: 8800 |0045: float-to-long v0, v0 +001318: 0518 0000 |0046: move-wide/from16 v24, v0 +00131c: 9b16 1618 |0048: add-long v22, v22, v24 +001320: 0500 2200 |004a: move-wide/from16 v0, v34 +001324: 8b00 |004c: double-to-long v0, v0 +001326: 0518 0000 |004d: move-wide/from16 v24, v0 +00132a: 9b16 1618 |004f: add-long v22, v22, v24 +00132e: 1016 |0051: return-wide v22 +001330: 0200 1e00 |0052: move/from16 v0, v30 +001334: 8200 |0054: int-to-float v0, v0 +001336: 0221 0000 |0055: move/from16 v33, v0 +00133a: 0200 1e00 |0057: move/from16 v0, v30 +00133e: 8300 |0059: int-to-double v0, v0 +001340: 0522 0000 |005a: move-wide/from16 v34, v0 +001344: 0800 1a00 |005c: move-object/from16 v0, v26 +001348: 5300 1200 |005e: iget-wide v0, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +00134c: 0516 0000 |0060: move-wide/from16 v22, v0 +001350: 0500 1600 |0062: move-wide/from16 v0, v22 +001354: 8400 |0064: long-to-int v0, v0 +001356: 0215 0000 |0065: move/from16 v21, v0 +00135a: 0200 1500 |0067: move/from16 v0, v21 +00135e: 0801 1a00 |0069: move-object/from16 v1, v26 +001362: 5910 1100 |006b: iput v0, v1, Lcom/google/android/test/Test;.mI:I // field@0011 +001366: 0800 1a00 |006d: move-object/from16 v0, v26 +00136a: 5300 1200 |006f: iget-wide v0, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +00136e: 0516 0000 |0071: move-wide/from16 v22, v0 +001372: 0500 1600 |0073: move-wide/from16 v0, v22 +001376: 7d00 |0075: neg-long v0, v0 +001378: 0516 0000 |0076: move-wide/from16 v22, v0 +00137c: 0500 1600 |0078: move-wide/from16 v0, v22 +001380: 8500 |007a: long-to-float v0, v0 +001382: 0221 0000 |007b: move/from16 v33, v0 +001386: 0800 1a00 |007d: move-object/from16 v0, v26 +00138a: 5300 1200 |007f: iget-wide v0, v0, Lcom/google/android/test/Test;.mL:J // field@0012 +00138e: 0516 0000 |0081: move-wide/from16 v22, v0 +001392: 1618 ffff |0083: const-wide/16 v24, #int -1 // #ffff +001396: a216 1618 |0085: xor-long v22, v22, v24 +00139a: 0500 1600 |0087: move-wide/from16 v0, v22 +00139e: 8600 |0089: long-to-double v0, v0 +0013a0: 0522 0000 |008a: move-wide/from16 v34, v0 +0013a4: 0200 2100 |008c: move/from16 v0, v33 +0013a8: 8700 |008e: float-to-int v0, v0 +0013aa: 021e 0000 |008f: move/from16 v30, v0 +0013ae: 0500 2200 |0091: move-wide/from16 v0, v34 +0013b2: 8a00 |0093: double-to-int v0, v0 +0013b4: 0215 0000 |0094: move/from16 v21, v0 +0013b8: 0200 1500 |0096: move/from16 v0, v21 +0013bc: 0801 1a00 |0098: move-object/from16 v1, v26 +0013c0: 5910 1100 |009a: iput v0, v1, 
Lcom/google/android/test/Test;.mI:I // field@0011 +0013c4: 0800 1a00 |009c: move-object/from16 v0, v26 +0013c8: 5200 1000 |009e: iget v0, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +0013cc: 0215 0000 |00a0: move/from16 v21, v0 +0013d0: 6016 1b00 |00a2: sget v22, Lcom/google/android/test/Test;.sF:F // field@001b +0013d4: a610 1516 |00a4: add-float v16, v21, v22 +0013d8: 0800 1a00 |00a6: move-object/from16 v0, v26 +0013dc: 5200 1000 |00a8: iget v0, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +0013e0: 0215 0000 |00aa: move/from16 v21, v0 +0013e4: 6016 1b00 |00ac: sget v22, Lcom/google/android/test/Test;.sF:F // field@001b +0013e8: a711 1516 |00ae: sub-float v17, v21, v22 +0013ec: 0800 1a00 |00b0: move-object/from16 v0, v26 +0013f0: 5200 1000 |00b2: iget v0, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +0013f4: 0215 0000 |00b4: move/from16 v21, v0 +0013f8: 6016 1b00 |00b6: sget v22, Lcom/google/android/test/Test;.sF:F // field@001b +0013fc: a912 1516 |00b8: div-float v18, v21, v22 +001400: 0800 1a00 |00ba: move-object/from16 v0, v26 +001404: 5200 1000 |00bc: iget v0, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +001408: 0215 0000 |00be: move/from16 v21, v0 +00140c: 6016 1b00 |00c0: sget v22, Lcom/google/android/test/Test;.sF:F // field@001b +001410: a813 1516 |00c2: mul-float v19, v21, v22 +001414: 0800 1a00 |00c4: move-object/from16 v0, v26 +001418: 5200 1000 |00c6: iget v0, v0, Lcom/google/android/test/Test;.mF:F // field@0010 +00141c: 0215 0000 |00c8: move/from16 v21, v0 +001420: 6016 1b00 |00ca: sget v22, Lcom/google/android/test/Test;.sF:F // field@001b +001424: aa14 1516 |00cc: rem-float v20, v21, v22 +001428: 0800 1a00 |00ce: move-object/from16 v0, v26 +00142c: 5300 0f00 |00d0: iget-wide v0, v0, Lcom/google/android/test/Test;.mD:D // field@000f +001430: 0516 0000 |00d2: move-wide/from16 v22, v0 +001434: 6118 1a00 |00d4: sget-wide v24, Lcom/google/android/test/Test;.sD:D // field@001a +001438: ab06 1618 |00d6: add-double v6, v22, v24 +00143c: 0800 1a00 |00d8: move-object/from16 v0, v26 +001440: 5300 0f00 |00da: iget-wide v0, v0, Lcom/google/android/test/Test;.mD:D // field@000f +001444: 0516 0000 |00dc: move-wide/from16 v22, v0 +001448: 6118 1a00 |00de: sget-wide v24, Lcom/google/android/test/Test;.sD:D // field@001a +00144c: ac08 1618 |00e0: sub-double v8, v22, v24 +001450: 0800 1a00 |00e2: move-object/from16 v0, v26 +001454: 5300 0f00 |00e4: iget-wide v0, v0, Lcom/google/android/test/Test;.mD:D // field@000f +001458: 0516 0000 |00e6: move-wide/from16 v22, v0 +00145c: 6118 1a00 |00e8: sget-wide v24, Lcom/google/android/test/Test;.sD:D // field@001a +001460: ae0a 1618 |00ea: div-double v10, v22, v24 +001464: 0800 1a00 |00ec: move-object/from16 v0, v26 +001468: 5300 0f00 |00ee: iget-wide v0, v0, Lcom/google/android/test/Test;.mD:D // field@000f +00146c: 0516 0000 |00f0: move-wide/from16 v22, v0 +001470: 6118 1a00 |00f2: sget-wide v24, Lcom/google/android/test/Test;.sD:D // field@001a +001474: ad0c 1618 |00f4: mul-double v12, v22, v24 +001478: 0800 1a00 |00f6: move-object/from16 v0, v26 +00147c: 5300 0f00 |00f8: iget-wide v0, v0, Lcom/google/android/test/Test;.mD:D // field@000f +001480: 0516 0000 |00fa: move-wide/from16 v22, v0 +001484: 6118 1a00 |00fc: sget-wide v24, Lcom/google/android/test/Test;.sD:D // field@001a +001488: af0e 1618 |00fe: rem-double v14, v22, v24 +00148c: 0200 1000 |0100: move/from16 v0, v16 +001490: 7f00 |0102: neg-float v0, v0 +001492: 0215 0000 |0103: move/from16 v21, v0 +001496: a615 1511 |0105: add-float v21, v21, v17 
+00149a: a816 1213 |0107: mul-float v22, v18, v19 +00149e: a916 1614 |0109: div-float v22, v22, v20 +0014a2: aa16 1610 |010b: rem-float v22, v22, v16 +0014a6: a715 1516 |010d: sub-float v21, v21, v22 +0014aa: 0200 1500 |010f: move/from16 v0, v21 +0014ae: 0801 1a00 |0111: move-object/from16 v1, v26 +0014b2: 5910 1000 |0113: iput v0, v1, Lcom/google/android/test/Test;.mF:F // field@0010 +0014b6: 8060 |0115: neg-double v0, v6 +0014b8: 0516 0000 |0116: move-wide/from16 v22, v0 +0014bc: ab16 1608 |0118: add-double v22, v22, v8 +0014c0: ad18 0a0c |011a: mul-double v24, v10, v12 +0014c4: ae18 180e |011c: div-double v24, v24, v14 +0014c8: af18 1806 |011e: rem-double v24, v24, v6 +0014cc: ac16 1618 |0120: sub-double v22, v22, v24 +0014d0: 0500 1600 |0122: move-wide/from16 v0, v22 +0014d4: 0802 1a00 |0124: move-object/from16 v2, v26 +0014d8: 5a20 0f00 |0126: iput-wide v0, v2, Lcom/google/android/test/Test;.mD:D // field@000f +0014dc: 2900 eafe |0128: goto/16 0012 // -0116 +0014e0: 0003 0400 0200 0000 0100 0000 0100 ... |012a: array-data (8 units) +0014f0: 0003 0800 0200 0000 0100 0000 0000 ... |0132: array-data (12 units) + catches : (none) + positions : + 0x0000 line=232 + 0x000a line=233 + 0x0012 line=235 + 0x001c line=256 + 0x0025 line=257 + 0x0029 line=258 + 0x0032 line=259 + 0x0036 line=260 + 0x0052 line=236 + 0x0057 line=237 + 0x005c line=238 + 0x006d line=239 + 0x007d line=240 + 0x008c line=241 + 0x0091 line=242 + 0x009c line=243 + 0x00a6 line=244 + 0x00b0 line=245 + 0x00ba line=246 + 0x00c4 line=247 + 0x00ce line=248 + 0x00d8 line=249 + 0x00e2 line=250 + 0x00ec line=251 + 0x00f6 line=252 + 0x0100 line=253 + 0x0115 line=254 + 0x012a line=256 + 0x0132 line=258 + locals : + 0x0025 - 0x0052 reg=4 aa [I + 0x0032 - 0x0052 reg=5 bb [J + 0x00d8 - 0x013e reg=6 d1 D + 0x00e2 - 0x013e reg=8 d2 D + 0x00ec - 0x013e reg=10 d3 D + 0x00f6 - 0x013e reg=12 d4 D + 0x0100 - 0x013e reg=14 d5 D + 0x00a6 - 0x013e reg=16 f1 F + 0x00b0 - 0x013e reg=17 f2 F + 0x00ba - 0x013e reg=18 f3 F + 0x00c4 - 0x013e reg=19 f4 F + 0x00ce - 0x013e reg=20 f5 F + 0x0000 - 0x013e reg=26 this Lcom/google/android/test/Test; + 0x0000 - 0x013e reg=27 b B + 0x0000 - 0x013e reg=28 c C + 0x0000 - 0x013e reg=29 s S + 0x0000 - 0x013e reg=30 i I + 0x0000 - 0x013e reg=31 l J + 0x0000 - 0x013e reg=33 f F + 0x0000 - 0x013e reg=34 d D + 0x0000 - 0x013e reg=36 o Ljava/lang/Object; + 0x0000 - 0x013e reg=37 a [I + + #9 : (in Lcom/google/android/test/Test;) + name : 'q' + type : '(II)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 10 + ins : 3 + outs : 4 + insns size : 85 16-bit code units +001508: |[001508] com.google.android.test.Test.q:(II)V +001518: 1301 0a00 |0000: const/16 v1, #int 10 // #a +00151c: 1236 |0002: const/4 v6, #int 3 // #3 +00151e: 3218 0400 |0003: if-eq v8, v1, 0007 // +0004 +001522: 3568 1000 |0005: if-ge v8, v6, 0015 // +0010 +001526: 6200 1600 |0007: sget-object v0, Lcom/google/android/test/Test;.sArray:[I // field@0016 +00152a: 1221 |0009: const/4 v1, #int 2 // #2 +00152c: 5272 1100 |000a: iget v2, v7, Lcom/google/android/test/Test;.mI:I // field@0011 +001530: 7120 1300 2800 |000c: invoke-static {v8, v2}, Lcom/google/android/test/Test;.r:(II)I // method@0013 +001536: 0a02 |000f: move-result v2 +001538: b192 |0010: sub-int/2addr v2, v9 +00153a: b982 |0011: shr-int/2addr v2, v8 +00153c: 4b02 0001 |0012: aput v2, v0, v1 +001540: 0e00 |0014: return-void +001542: 3618 1600 |0015: if-gt v8, v1, 002b // +0016 +001546: 1300 9cff |0017: const/16 v0, #int -100 // #ff9c +00154a: 3208 1200 |0019: if-eq v8, v0, 002b // +0012 
+00154e: 6200 1600 |001b: sget-object v0, Lcom/google/android/test/Test;.sArray:[I // field@0016 +001552: 6102 1d00 |001d: sget-wide v2, Lcom/google/android/test/Test;.sL:J // field@001d +001556: 5374 1200 |001f: iget-wide v4, v7, Lcom/google/android/test/Test;.mL:J // field@0012 +00155a: 7140 1500 3254 |0021: invoke-static {v2, v3, v4, v5}, Lcom/google/android/test/Test;.s:(JJ)J // method@0015 +001560: 0b02 |0024: move-result-wide v2 +001562: 8421 |0025: long-to-int v1, v2 +001564: b291 |0026: mul-int/2addr v1, v9 +001566: ba81 |0027: ushr-int/2addr v1, v8 +001568: 4b01 0006 |0028: aput v1, v0, v6 +00156c: 28ea |002a: goto 0014 // -0016 +00156e: 1250 |002b: const/4 v0, #int 5 // #5 +001570: 3508 0400 |002c: if-ge v8, v0, 0030 // +0004 +001574: 3218 0e00 |002e: if-eq v8, v1, 003c // +000e +001578: 6200 1600 |0030: sget-object v0, Lcom/google/android/test/Test;.sArray:[I // field@0016 +00157c: 7120 1300 8900 |0032: invoke-static {v9, v8}, Lcom/google/android/test/Test;.r:(II)I // method@0013 +001582: 0a01 |0035: move-result v1 +001584: 9802 0809 |0036: shl-int v2, v8, v9 +001588: b721 |0038: xor-int/2addr v1, v2 +00158a: 4b01 0006 |0039: aput v1, v0, v6 +00158e: 28d9 |003b: goto 0014 // -0027 +001590: 3398 0a00 |003c: if-ne v8, v9, 0046 // +000a +001594: d800 0902 |003e: add-int/lit8 v0, v9, #int 2 // #02 +001598: 3708 0600 |0040: if-le v8, v0, 0046 // +0006 +00159c: 3b08 0400 |0042: if-gez v8, 0046 // +0004 +0015a0: 3c08 d0ff |0044: if-gtz v8, 0014 // -0030 +0015a4: 6200 1600 |0046: sget-object v0, Lcom/google/android/test/Test;.sArray:[I // field@0016 +0015a8: df01 09ff |0048: xor-int/lit8 v1, v9, #int -1 // #ff +0015ac: 9401 0801 |004a: rem-int v1, v8, v1 +0015b0: b081 |004c: add-int/2addr v1, v8 +0015b2: 9202 0909 |004d: mul-int v2, v9, v9 +0015b6: b382 |004f: div-int/2addr v2, v8 +0015b8: b121 |0050: sub-int/2addr v1, v2 +0015ba: b791 |0051: xor-int/2addr v1, v9 +0015bc: 4b01 0006 |0052: aput v1, v0, v6 +0015c0: 28c0 |0054: goto 0014 // -0040 + catches : (none) + positions : + 0x0003 line=127 + 0x0007 line=128 + 0x0014 line=136 + 0x0015 line=129 + 0x001b line=130 + 0x002b line=131 + 0x0030 line=132 + 0x003c line=133 + 0x0046 line=134 + locals : + 0x0000 - 0x0055 reg=7 this Lcom/google/android/test/Test; + 0x0000 - 0x0055 reg=8 x I + 0x0000 - 0x0055 reg=9 y I + + #10 : (in Lcom/google/android/test/Test;) + name : 'r' + type : '(II)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 15 + ins : 2 + outs : 0 + insns size : 93 16-bit code units +0015c4: |[0015c4] com.google.android.test.Test.r:(II)I +0015d4: e00d 0d01 |0000: shl-int/lit8 v13, v13, #int 1 // #01 +0015d8: e10d 0d03 |0002: shr-int/lit8 v13, v13, #int 3 // #03 +0015dc: e20d 0d04 |0004: ushr-int/lit8 v13, v13, #int 4 // #04 +0015e0: b8ed |0006: shl-int/2addr v13, v14 +0015e2: b9ed |0007: shr-int/2addr v13, v14 +0015e4: baed |0008: ushr-int/2addr v13, v14 +0015e6: df09 0eff |0009: xor-int/lit8 v9, v14, #int -1 // #ff +0015ea: 9000 0e09 |000b: add-int v0, v14, v9 +0015ee: 9101 0e09 |000d: sub-int v1, v14, v9 +0015f2: 9202 0e09 |000f: mul-int v2, v14, v9 +0015f6: 9303 0e09 |0011: div-int v3, v14, v9 +0015fa: 9704 0e09 |0013: xor-int v4, v14, v9 +0015fe: 9505 0e09 |0015: and-int v5, v14, v9 +001602: 9806 0e09 |0017: shl-int v6, v14, v9 +001606: 9907 0e09 |0019: shr-int v7, v14, v9 +00160a: 9a08 0e09 |001b: ushr-int v8, v14, v9 +00160e: d5da ff00 |001d: and-int/lit16 v10, v13, #int 255 // #00ff +001612: df0b 0d12 |001f: xor-int/lit8 v11, v13, #int 18 // #12 +001616: df0b 0bff |0021: xor-int/lit8 v11, v11, #int -1 // #ff 
+00161a: 960d 0a0b |0023: or-int v13, v10, v11 +00161e: df0a 00ff |0025: xor-int/lit8 v10, v0, #int -1 // #ff +001622: b01a |0027: add-int/2addr v10, v1 +001624: 920b 0203 |0028: mul-int v11, v2, v3 +001628: b34b |002a: div-int/2addr v11, v4 +00162a: b1ba |002b: sub-int/2addr v10, v11 +00162c: b65a |002c: or-int/2addr v10, v5 +00162e: df0b 05ff |002d: xor-int/lit8 v11, v5, #int -1 // #ff +001632: 920c 0607 |002f: mul-int v12, v6, v7 +001636: b48c |0031: rem-int/2addr v12, v8 +001638: b0cb |0032: add-int/2addr v11, v12 +00163a: b6ba |0033: or-int/2addr v10, v11 +00163c: b1ad |0034: sub-int/2addr v13, v10 +00163e: 7bda |0035: neg-int v10, v13 +001640: d80a 0a01 |0036: add-int/lit8 v10, v10, #int 1 // #01 +001644: da0b 0d03 |0038: mul-int/lit8 v11, v13, #int 3 // #03 +001648: db0b 0b02 |003a: div-int/lit8 v11, v11, #int 2 // #02 +00164c: b1ba |003c: sub-int/2addr v10, v11 +00164e: b1ea |003d: sub-int/2addr v10, v14 +001650: d5db ff00 |003e: and-int/lit16 v11, v13, #int 255 // #00ff +001654: b0ba |0040: add-int/2addr v10, v11 +001656: d4db ff00 |0041: rem-int/lit16 v11, v13, #int 255 // #00ff +00165a: b0ba |0043: add-int/2addr v10, v11 +00165c: d0db 01ff |0044: add-int/lit16 v11, v13, #int -255 // #ff01 +001660: b0ba |0046: add-int/2addr v10, v11 +001662: d2db ff00 |0047: mul-int/lit16 v11, v13, #int 255 // #00ff +001666: b0ba |0049: add-int/2addr v10, v11 +001668: d3db ff00 |004a: div-int/lit16 v11, v13, #int 255 // #00ff +00166c: b0ba |004c: add-int/2addr v10, v11 +00166e: d6db ff00 |004d: or-int/lit16 v11, v13, #int 255 // #00ff +001672: b0ba |004f: add-int/2addr v10, v11 +001674: d7db ff00 |0050: xor-int/lit16 v11, v13, #int 255 // #00ff +001678: b0ba |0052: add-int/2addr v10, v11 +00167a: dd0b 0d01 |0053: and-int/lit8 v11, v13, #int 1 // #01 +00167e: b0ba |0055: add-int/2addr v10, v11 +001680: dc0b 0d01 |0056: rem-int/lit8 v11, v13, #int 1 // #01 +001684: b0ba |0058: add-int/2addr v10, v11 +001686: d80b 0dff |0059: add-int/lit8 v11, v13, #int -1 // #ff +00168a: b0ba |005b: add-int/2addr v10, v11 +00168c: 0f0a |005c: return v10 + catches : (none) + positions : + 0x0000 line=139 + 0x0006 line=140 + 0x0009 line=141 + 0x000b line=142 + 0x000d line=143 + 0x000f line=144 + 0x0011 line=145 + 0x0013 line=146 + 0x0015 line=147 + 0x0017 line=148 + 0x0019 line=149 + 0x001b line=150 + 0x001d line=151 + 0x0025 line=152 + 0x0035 line=153 + 0x0047 line=154 + 0x0049 line=153 + 0x004a line=154 + 0x004c line=153 + 0x004d line=154 + 0x004f line=153 + 0x0050 line=154 + 0x0052 line=153 + 0x0053 line=155 + 0x0055 line=153 + 0x0056 line=155 + 0x0058 line=153 + 0x0059 line=155 + 0x005b line=153 + locals : + 0x000d - 0x005d reg=0 t1 I + 0x000f - 0x005d reg=1 t2 I + 0x0011 - 0x005d reg=2 t3 I + 0x0013 - 0x005d reg=3 t4 I + 0x0015 - 0x005d reg=4 t5 I + 0x0017 - 0x005d reg=5 t6 I + 0x0019 - 0x005d reg=6 t7 I + 0x001b - 0x005d reg=7 t8 I + 0x001d - 0x005d reg=8 t9 I + 0x000b - 0x005d reg=9 z I + 0x0000 - 0x005d reg=13 x I + 0x0000 - 0x005d reg=14 y I + + #11 : (in Lcom/google/android/test/Test;) + name : 's' + type : '(JJ)J' + access : 0x000a (PRIVATE STATIC) + code - + registers : 32 + ins : 4 + outs : 0 + insns size : 194 16-bit code units +001690: |[001690] com.google.android.test.Test.s:(JJ)J +0016a0: 1316 0100 |0000: const/16 v22, #int 1 // #1 +0016a4: a31c 1c16 |0002: shl-long v28, v28, v22 +0016a8: 1316 0300 |0004: const/16 v22, #int 3 // #3 +0016ac: a41c 1c16 |0006: shr-long v28, v28, v22 +0016b0: 1316 0400 |0008: const/16 v22, #int 4 // #4 +0016b4: a51c 1c16 |000a: ushr-long v28, v28, v22 +0016b8: 0500 1e00 
|000c: move-wide/from16 v0, v30 +0016bc: 8400 |000e: long-to-int v0, v0 +0016be: 0216 0000 |000f: move/from16 v22, v0 +0016c2: a31c 1c16 |0011: shl-long v28, v28, v22 +0016c6: 0500 1e00 |0013: move-wide/from16 v0, v30 +0016ca: 8400 |0015: long-to-int v0, v0 +0016cc: 0216 0000 |0016: move/from16 v22, v0 +0016d0: a41c 1c16 |0018: shr-long v28, v28, v22 +0016d4: 0500 1e00 |001a: move-wide/from16 v0, v30 +0016d8: 8400 |001c: long-to-int v0, v0 +0016da: 0216 0000 |001d: move/from16 v22, v0 +0016de: a51c 1c16 |001f: ushr-long v28, v28, v22 +0016e2: 1616 ffff |0021: const-wide/16 v22, #int -1 // #ffff +0016e6: a214 1e16 |0023: xor-long v20, v30, v22 +0016ea: 9b02 1e14 |0025: add-long v2, v30, v20 +0016ee: 9c04 1e14 |0027: sub-long v4, v30, v20 +0016f2: 9d06 1e14 |0029: mul-long v6, v30, v20 +0016f6: 9e08 1e14 |002b: div-long v8, v30, v20 +0016fa: a20a 1e14 |002d: xor-long v10, v30, v20 +0016fe: a00c 1e14 |002f: and-long v12, v30, v20 +001702: 0500 1400 |0031: move-wide/from16 v0, v20 +001706: 8400 |0033: long-to-int v0, v0 +001708: 0216 0000 |0034: move/from16 v22, v0 +00170c: a30e 1e16 |0036: shl-long v14, v30, v22 +001710: 0500 1400 |0038: move-wide/from16 v0, v20 +001714: 8400 |003a: long-to-int v0, v0 +001716: 0216 0000 |003b: move/from16 v22, v0 +00171a: a410 1e16 |003d: shr-long v16, v30, v22 +00171e: 0500 1400 |003f: move-wide/from16 v0, v20 +001722: 8400 |0041: long-to-int v0, v0 +001724: 0216 0000 |0042: move/from16 v22, v0 +001728: a512 1e16 |0044: ushr-long v18, v30, v22 +00172c: 1616 ff00 |0046: const-wide/16 v22, #int 255 // #ff +001730: a016 161c |0048: and-long v22, v22, v28 +001734: 1618 1200 |004a: const-wide/16 v24, #int 18 // #12 +001738: a218 181c |004c: xor-long v24, v24, v28 +00173c: 161a ffff |004e: const-wide/16 v26, #int -1 // #ffff +001740: a218 181a |0050: xor-long v24, v24, v26 +001744: a11c 1618 |0052: or-long v28, v22, v24 +001748: 1616 ffff |0054: const-wide/16 v22, #int -1 // #ffff +00174c: a216 1602 |0056: xor-long v22, v22, v2 +001750: 9b16 1604 |0058: add-long v22, v22, v4 +001754: 9d18 0608 |005a: mul-long v24, v6, v8 +001758: 9e18 180a |005c: div-long v24, v24, v10 +00175c: 9c16 1618 |005e: sub-long v22, v22, v24 +001760: a116 160c |0060: or-long v22, v22, v12 +001764: 1618 ffff |0062: const-wide/16 v24, #int -1 // #ffff +001768: a218 180c |0064: xor-long v24, v24, v12 +00176c: 9d1a 0e10 |0066: mul-long v26, v14, v16 +001770: 9f1a 1a12 |0068: rem-long v26, v26, v18 +001774: 9b18 181a |006a: add-long v24, v24, v26 +001778: a116 1618 |006c: or-long v22, v22, v24 +00177c: 9c1c 1c16 |006e: sub-long v28, v28, v22 +001780: 0500 1c00 |0070: move-wide/from16 v0, v28 +001784: 7d00 |0072: neg-long v0, v0 +001786: 0516 0000 |0073: move-wide/from16 v22, v0 +00178a: 1618 0100 |0075: const-wide/16 v24, #int 1 // #1 +00178e: 9b16 1618 |0077: add-long v22, v22, v24 +001792: 1618 0300 |0079: const-wide/16 v24, #int 3 // #3 +001796: 9d18 181c |007b: mul-long v24, v24, v28 +00179a: 161a 0200 |007d: const-wide/16 v26, #int 2 // #2 +00179e: 9e18 181a |007f: div-long v24, v24, v26 +0017a2: 9c16 1618 |0081: sub-long v22, v22, v24 +0017a6: 9c16 161e |0083: sub-long v22, v22, v30 +0017aa: 1618 ff00 |0085: const-wide/16 v24, #int 255 // #ff +0017ae: a018 181c |0087: and-long v24, v24, v28 +0017b2: 9b16 1618 |0089: add-long v22, v22, v24 +0017b6: 1618 ff00 |008b: const-wide/16 v24, #int 255 // #ff +0017ba: 9f18 1c18 |008d: rem-long v24, v28, v24 +0017be: 9b16 1618 |008f: add-long v22, v22, v24 +0017c2: 1618 ff00 |0091: const-wide/16 v24, #int 255 // #ff +0017c6: 9c18 1c18 |0093: 
sub-long v24, v28, v24 +0017ca: 9b16 1618 |0095: add-long v22, v22, v24 +0017ce: 1618 ff00 |0097: const-wide/16 v24, #int 255 // #ff +0017d2: 9d18 181c |0099: mul-long v24, v24, v28 +0017d6: 9b16 1618 |009b: add-long v22, v22, v24 +0017da: 1618 ff00 |009d: const-wide/16 v24, #int 255 // #ff +0017de: 9e18 1c18 |009f: div-long v24, v28, v24 +0017e2: 9b16 1618 |00a1: add-long v22, v22, v24 +0017e6: 1618 ff00 |00a3: const-wide/16 v24, #int 255 // #ff +0017ea: a118 181c |00a5: or-long v24, v24, v28 +0017ee: 9b16 1618 |00a7: add-long v22, v22, v24 +0017f2: 1618 ff00 |00a9: const-wide/16 v24, #int 255 // #ff +0017f6: a218 181c |00ab: xor-long v24, v24, v28 +0017fa: 9b16 1618 |00ad: add-long v22, v22, v24 +0017fe: 1618 0100 |00af: const-wide/16 v24, #int 1 // #1 +001802: a018 181c |00b1: and-long v24, v24, v28 +001806: 9b16 1618 |00b3: add-long v22, v22, v24 +00180a: 1618 0100 |00b5: const-wide/16 v24, #int 1 // #1 +00180e: 9f18 1c18 |00b7: rem-long v24, v28, v24 +001812: 9b16 1618 |00b9: add-long v22, v22, v24 +001816: 1618 0100 |00bb: const-wide/16 v24, #int 1 // #1 +00181a: 9c18 1c18 |00bd: sub-long v24, v28, v24 +00181e: 9b16 1618 |00bf: add-long v22, v22, v24 +001822: 1016 |00c1: return-wide v22 + catches : (none) + positions : + 0x0000 line=159 + 0x000c line=160 + 0x0021 line=161 + 0x0025 line=162 + 0x0027 line=163 + 0x0029 line=164 + 0x002b line=165 + 0x002d line=166 + 0x002f line=167 + 0x0031 line=168 + 0x0038 line=169 + 0x003f line=170 + 0x0046 line=171 + 0x0054 line=172 + 0x0070 line=173 + 0x0097 line=174 + 0x009b line=173 + 0x009d line=174 + 0x00a1 line=173 + 0x00a3 line=174 + 0x00a7 line=173 + 0x00a9 line=174 + 0x00ad line=173 + 0x00af line=175 + 0x00b3 line=173 + 0x00b5 line=175 + 0x00b9 line=173 + 0x00bb line=175 + 0x00bf line=173 + locals : + 0x0027 - 0x00c2 reg=2 t1 J + 0x0029 - 0x00c2 reg=4 t2 J + 0x002b - 0x00c2 reg=6 t3 J + 0x002d - 0x00c2 reg=8 t4 J + 0x002f - 0x00c2 reg=10 t5 J + 0x0031 - 0x00c2 reg=12 t6 J + 0x0038 - 0x00c2 reg=14 t7 J + 0x003f - 0x00c2 reg=16 t8 J + 0x0046 - 0x00c2 reg=18 t9 J + 0x0025 - 0x00c2 reg=20 z J + 0x0000 - 0x00c2 reg=28 x J + 0x0000 - 0x00c2 reg=30 y J + + #12 : (in Lcom/google/android/test/Test;) + name : 'seta' + type : '()V' + access : 0x0002 (PRIVATE) + code - + registers : 6 + ins : 1 + outs : 0 + insns size : 48 16-bit code units +001824: |[001824] com.google.android.test.Test.seta:()V +001834: 1211 |0000: const/4 v1, #int 1 // #1 +001836: 1224 |0001: const/4 v4, #int 2 // #2 +001838: 5450 0200 |0002: iget-object v0, v5, Lcom/google/android/test/Test;.aBool:[Z // field@0002 +00183c: 4e01 0004 |0004: aput-boolean v1, v0, v4 +001840: 5450 0300 |0006: iget-object v0, v5, Lcom/google/android/test/Test;.aByte:[B // field@0003 +001844: 4f01 0004 |0008: aput-byte v1, v0, v4 +001848: 5450 0400 |000a: iget-object v0, v5, Lcom/google/android/test/Test;.aChar:[C // field@0004 +00184c: 5004 0004 |000c: aput-char v4, v0, v4 +001850: 5450 0a00 |000e: iget-object v0, v5, Lcom/google/android/test/Test;.aShort:[S // field@000a +001854: 1301 8600 |0010: const/16 v1, #int 134 // #86 +001858: 5101 0004 |0012: aput-short v1, v0, v4 +00185c: 5450 0700 |0014: iget-object v0, v5, Lcom/google/android/test/Test;.aInt:[I // field@0007 +001860: 12f1 |0016: const/4 v1, #int -1 // #ff +001862: 4b01 0004 |0017: aput v1, v0, v4 +001866: 5450 0800 |0019: iget-object v0, v5, Lcom/google/android/test/Test;.aLong:[J // field@0008 +00186a: 1602 ffff |001b: const-wide/16 v2, #int -1 // #ffff +00186e: 4c02 0004 |001d: aput-wide v2, v0, v4 +001872: 5450 0600 |001f: iget-object v0, 
v5, Lcom/google/android/test/Test;.aFloat:[F // field@0006 +001876: 1501 8841 |0021: const/high16 v1, #int 1099431936 // #4188 +00187a: 4b01 0004 |0023: aput v1, v0, v4 +00187e: 5450 0500 |0025: iget-object v0, v5, Lcom/google/android/test/Test;.aDouble:[D // field@0005 +001882: 1902 3240 |0027: const-wide/high16 v2, #long 4625759767262920704 // #4032 +001886: 4c02 0004 |0029: aput-wide v2, v0, v4 +00188a: 5450 0900 |002b: iget-object v0, v5, Lcom/google/android/test/Test;.aObject:[Ljava/lang/Object; // field@0009 +00188e: 4d05 0004 |002d: aput-object v5, v0, v4 +001892: 0e00 |002f: return-void + catches : (none) + positions : + 0x0002 line=60 + 0x0006 line=61 + 0x000a line=62 + 0x000e line=63 + 0x0014 line=64 + 0x0019 line=65 + 0x001f line=66 + 0x0025 line=67 + 0x002b line=68 + 0x002f line=69 + locals : + 0x0000 - 0x0030 reg=5 this Lcom/google/android/test/Test; + + Virtual methods - + #0 : (in Lcom/google/android/test/Test;) + name : 'onStart' + type : '()V' + access : 0x0004 (PROTECTED) + code - + registers : 2 + ins : 1 + outs : 1 + insns size : 7 16-bit code units +001894: |[001894] com.google.android.test.Test.onStart:()V +0018a4: 6f10 0300 0100 |0000: invoke-super {v1}, Landroid/app/Activity;.onStart:()V // method@0003 +0018aa: 1200 |0003: const/4 v0, #int 0 // #0 +0018ac: 5b10 0b00 |0004: iput-object v0, v1, Lcom/google/android/test/Test;.mArray:[I // field@000b +0018b0: 0e00 |0006: return-void + catches : (none) + positions : + 0x0000 line=86 + 0x0003 line=87 + 0x0006 line=88 + locals : + 0x0000 - 0x0007 reg=1 this Lcom/google/android/test/Test; + + #1 : (in Lcom/google/android/test/Test;) + name : 'run' + type : '()V' + access : 0x0001 (PUBLIC) + code - + registers : 3 + ins : 1 + outs : 0 + insns size : 9 16-bit code units +0018b4: |[0018b4] com.google.android.test.Test.run:()V +0018c4: 1301 6400 |0000: const/16 v1, #int 100 // #64 +0018c8: 2310 2400 |0002: new-array v0, v1, [I // type@0024 +0018cc: 5b20 0b00 |0004: iput-object v0, v2, Lcom/google/android/test/Test;.mArray:[I // field@000b +0018d0: 6900 1600 |0006: sput-object v0, Lcom/google/android/test/Test;.sArray:[I // field@0016 +0018d4: 0e00 |0008: return-void + catches : (none) + positions : + 0x0000 line=92 + 0x0004 line=93 + 0x0006 line=94 + 0x0008 line=95 + locals : + 0x0004 - 0x0009 reg=0 x [I + 0x0000 - 0x0009 reg=2 this Lcom/google/android/test/Test; + + source_file_idx : 49 (Test.java) + diff --git a/test/dexdump/bytecodes.xml b/test/dexdump/bytecodes.xml new file mode 100755 index 0000000000..0581677f6a --- /dev/null +++ b/test/dexdump/bytecodes.xml @@ -0,0 +1,163 @@ +<api> +<package name="android.annotation" +> +<class name="SuppressLint" + extends="java.lang.Object" + abstract="true" + static="false" + final="false" + visibility="public" +> +<implements name="java.lang.annotation.Annotation"> +</implements> +<method name="value" + return="java.lang.String[]" + abstract="true" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +</method> +</class> +<class name="TargetApi" + extends="java.lang.Object" + abstract="true" + static="false" + final="false" + visibility="public" +> +<implements name="java.lang.annotation.Annotation"> +</implements> +<method name="value" + return="int" + abstract="true" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +</method> +</class> +</package> +<package name="com.google.android.test" +> +<class name="BuildConfig" + extends="java.lang.Object" + abstract="false" + static="false" + 
final="true" + visibility="public" +> +<field name="DEBUG" + type="boolean" + transient="false" + volatile="false" + static="true" + final="true" + visibility="public" +> +</field> +<constructor name="BuildConfig" + type="com.google.android.test.BuildConfig" + static="false" + final="false" + visibility="public" +> +</constructor> +</class> +<class name="R.attr" + extends="java.lang.Object" + abstract="false" + static="false" + final="true" + visibility="public" +> +<constructor name="R.attr" + type="com.google.android.test.R.attr" + static="false" + final="false" + visibility="public" +> +</constructor> +</class> +<class name="R.drawable" + extends="java.lang.Object" + abstract="false" + static="false" + final="true" + visibility="public" +> +<field name="icon" + type="int" + transient="false" + volatile="false" + static="true" + final="true" + visibility="public" +> +</field> +<constructor name="R.drawable" + type="com.google.android.test.R.drawable" + static="false" + final="false" + visibility="public" +> +</constructor> +</class> +<class name="R" + extends="java.lang.Object" + abstract="false" + static="false" + final="true" + visibility="public" +> +<constructor name="R" + type="com.google.android.test.R" + static="false" + final="false" + visibility="public" +> +</constructor> +</class> +<class name="Test" + extends="android.app.Activity" + abstract="false" + static="false" + final="false" + visibility="public" +> +<implements name="java.lang.Runnable"> +</implements> +<constructor name="Test" + type="com.google.android.test.Test" + static="false" + final="false" + visibility="public" +> +</constructor> +<method name="onStart" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="protected" +> +</method> +<method name="run" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +</method> +</class> +</package> +</api> diff --git a/test/dexdump/checkers.dex b/test/dexdump/checkers.dex Binary files differ new file mode 100755 index 0000000000..f8e93b4574 --- /dev/null +++ b/test/dexdump/checkers.dex diff --git a/test/dexdump/checkers.lst b/test/dexdump/checkers.lst new file mode 100644 index 0000000000..daef13819b --- /dev/null +++ b/test/dexdump/checkers.lst @@ -0,0 +1,82 @@ +#checkers.dex +0x0000149c 8 com.google.android.checkers.Checkers <init> ()V (none) -1 +0x000014b4 66 com.google.android.checkers.Checkers a (Z)V (none) -1 +0x00001508 8 com.google.android.checkers.Checkers onConfigurationChanged (Landroid/content/res/Configuration;)V (none) -1 +0x00001520 118 com.google.android.checkers.Checkers onCreate (Landroid/os/Bundle;)V (none) -1 +0x000015a8 432 com.google.android.checkers.Checkers onCreateOptionsMenu (Landroid/view/Menu;)Z (none) -1 +0x00001768 116 com.google.android.checkers.Checkers onKeyDown (ILandroid/view/KeyEvent;)Z (none) -1 +0x000017ec 316 com.google.android.checkers.Checkers onOptionsItemSelected (Landroid/view/MenuItem;)Z (none) -1 +0x00001938 42 com.google.android.checkers.Checkers onPause ()V (none) -1 +0x00001974 16 com.google.android.checkers.Checkers onStop ()V (none) -1 +0x00001994 88 com.google.android.checkers.Checkers onTrackballEvent (Landroid/view/MotionEvent;)Z (none) -1 +0x000019fc 1324 com.google.android.checkers.CheckersView <init> (Landroid/content/Context;Landroid/content/SharedPreferences;)V (none) -1 +0x00001f48 62 com.google.android.checkers.CheckersView a 
(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V (none) -1 +0x00001f98 66 com.google.android.checkers.CheckersView a (Landroid/content/SharedPreferences;Ljava/lang/String;[I)V (none) -1 +0x00001fec 126 com.google.android.checkers.CheckersView a (Landroid/graphics/Canvas;IIII)V (none) -1 +0x0000207c 162 com.google.android.checkers.CheckersView a (Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V (none) -1 +0x00002130 8 com.google.android.checkers.CheckersView a (Lcom/google/android/checkers/CheckersView;I)V (none) -1 +0x00002148 588 com.google.android.checkers.CheckersView a (Landroid/content/SharedPreferences;)Z (none) -1 +0x000023d0 22 com.google.android.checkers.CheckersView a (Lcom/google/android/checkers/CheckersView;)Z (none) -1 +0x000023f8 1290 com.google.android.checkers.CheckersView a (ZIIII)Z (none) -1 +0x00002930 204 com.google.android.checkers.CheckersView b (FF)I (none) -1 +0x00002a20 36 com.google.android.checkers.CheckersView b (I)V (none) -1 +0x00002a60 198 com.google.android.checkers.CheckersView b (Landroid/graphics/Canvas;IIII)V (none) -1 +0x00002b38 524 com.google.android.checkers.CheckersView c (I)V (none) -1 +0x00002d54 176 com.google.android.checkers.CheckersView d ()V (none) -1 +0x00002e14 20 com.google.android.checkers.CheckersView e ()Z (none) -1 +0x00002e38 128 com.google.android.checkers.CheckersView a ()V (none) -1 +0x00002ec8 226 com.google.android.checkers.CheckersView a (FF)V (none) -1 +0x00002fd8 32 com.google.android.checkers.CheckersView a (IIII)V (none) -1 +0x00003008 340 com.google.android.checkers.CheckersView a (Landroid/content/SharedPreferences$Editor;)V (none) -1 +0x00003178 34 com.google.android.checkers.CheckersView a (I)Z (none) -1 +0x000031ac 44 com.google.android.checkers.CheckersView a (Z)Z (none) -1 +0x000031f4 60 com.google.android.checkers.CheckersView b ()V (none) -1 +0x0000324c 138 com.google.android.checkers.CheckersView b (Z)Z (none) -1 +0x000032f4 16 com.google.android.checkers.CheckersView c ()I (none) -1 +0x00003320 68 com.google.android.checkers.CheckersView c (Z)Z (none) -1 +0x00003380 38 com.google.android.checkers.CheckersView d (Z)Z (none) -1 +0x000033c4 2528 com.google.android.checkers.CheckersView draw (Landroid/graphics/Canvas;)V (none) -1 +0x00003dd0 38 com.google.android.checkers.CheckersView e (Z)Z (none) -1 +0x00003e14 104 com.google.android.checkers.CheckersView onSizeChanged (IIII)V (none) -1 +0x00003e98 82 com.google.android.checkers.CheckersView onTouchEvent (Landroid/view/MotionEvent;)Z (none) -1 +0x00003efc 128 com.google.android.checkers.CheckersView setLevel (I)V (none) -1 +0x00003f98 2780 com.google.android.checkers.a <clinit> ()V (none) -1 +0x00004a84 188 com.google.android.checkers.a <init> (Lcom/google/android/checkers/CheckersView;)V (none) -1 +0x00004b5c 28 com.google.android.checkers.a a (II)I (none) -1 +0x00004b88 2592 com.google.android.checkers.a a (IIIIIZ)I (none) -1 +0x000055b8 110 com.google.android.checkers.a a (IZ)I (none) -1 +0x00005638 196 com.google.android.checkers.a a (Z)I (none) -1 +0x0000570c 112 com.google.android.checkers.a a (ZII)I (none) -1 +0x0000578c 88 com.google.android.checkers.a a (ZIIIZ)I (none) -1 +0x000057f4 68 com.google.android.checkers.a a (ZIIZ)I (none) -1 +0x00005848 152 com.google.android.checkers.a a (IIII)V (none) -1 +0x000058f0 78 com.google.android.checkers.a a (IIIII)V (none) -1 +0x00005950 198 com.google.android.checkers.a a (IIIIIIII)V (none) -1 +0x00005a28 1750 com.google.android.checkers.a a (IZI)Z (none) -1 
+0x00006110 92 com.google.android.checkers.a b (ZIIIZ)I (none) -1 +0x0000617c 112 com.google.android.checkers.a b (ZIIZ)I (none) -1 +0x000061fc 38 com.google.android.checkers.a b ()V (none) -1 +0x0000624c 736 com.google.android.checkers.a b (I)V (none) -1 +0x0000653c 198 com.google.android.checkers.a b (IIIIIIII)V (none) -1 +0x00006614 922 com.google.android.checkers.a b (IZI)Z (none) -1 +0x000069c0 108 com.google.android.checkers.a c (ZIIZ)I (none) -1 +0x00006a3c 16 com.google.android.checkers.a c ()V (none) -1 +0x00006a68 406 com.google.android.checkers.a c (IIIIIIII)V (none) -1 +0x00006c10 112 com.google.android.checkers.a d (ZIIZ)I (none) -1 +0x00006c90 16 com.google.android.checkers.a a (ZZ)I (none) -1 +0x00006cb0 90 com.google.android.checkers.a a ()V (none) -1 +0x00006d1c 8 com.google.android.checkers.a a (I)V (none) -1 +0x00006d34 74 com.google.android.checkers.a a (IIIIZ)V (none) -1 +0x00006d90 32 com.google.android.checkers.a b (ZZ)V (none) -1 +0x00006dcc 1052 com.google.android.checkers.a run ()V (none) -1 +0x000071f8 12 com.google.android.checkers.b <init> (Lcom/google/android/checkers/CheckersView;)V (none) -1 +0x00007214 28 com.google.android.checkers.b onClick (Landroid/content/DialogInterface;I)V (none) -1 +0x00007240 12 com.google.android.checkers.c <init> (Lcom/google/android/checkers/CheckersView;)V (none) -1 +0x0000725c 2 com.google.android.checkers.c onClick (Landroid/content/DialogInterface;I)V (none) -1 +0x00007270 12 com.google.android.checkers.d <init> (Lcom/google/android/checkers/CheckersView;)V (none) -1 +0x0000728c 2 com.google.android.checkers.d onClick (Landroid/content/DialogInterface;I)V (none) -1 +0x000072a0 12 com.google.android.checkers.e <init> (Lcom/google/android/checkers/CheckersView;)V (none) -1 +0x000072bc 14 com.google.android.checkers.e onClick (Landroid/content/DialogInterface;I)V (none) -1 +0x000072dc 12 com.google.android.checkers.f <init> (Lcom/google/android/checkers/CheckersView;)V (none) -1 +0x000072f8 12 com.google.android.checkers.f onClick (Landroid/content/DialogInterface;I)V (none) -1 +0x00007314 58 com.google.android.checkers.g a ([B)Z (none) -1 diff --git a/test/dexdump/checkers.txt b/test/dexdump/checkers.txt new file mode 100755 index 0000000000..5c8336f94c --- /dev/null +++ b/test/dexdump/checkers.txt @@ -0,0 +1,7821 @@ +Processing 'checkers.dex'... 
+Opened 'checkers.dex', DEX version '035' +DEX file header: +magic : 'dex\n035\0' +checksum : 3ce07f0d +signature : 6aca...3cae +file_size : 35384 +header_size : 112 +link_size : 0 +link_off : 0 (0x000000) +string_ids_size : 323 +string_ids_off : 112 (0x000070) +type_ids_size : 58 +type_ids_off : 1404 (0x00057c) +proto_ids_size : 88 +proto_ids_off : 1636 (0x000664) +field_ids_size : 108 +field_ids_off : 2692 (0x000a84) +method_ids_size : 177 +method_ids_off : 3556 (0x000de4) +class_defs_size : 9 +class_defs_off : 4972 (0x00136c) +data_size : 30124 +data_off : 5260 (0x00148c) + +Class #0 header: +class_idx : 30 +access_flags : 1 (0x0001) +superclass_idx : 4 +interfaces_off : 0 (0x000000) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 34554 (0x0086fa) +static_fields_size : 0 +instance_fields_size: 1 +direct_methods_size : 2 +virtual_methods_size: 8 + +Class #0 - + Class descriptor : 'Lcom/google/android/checkers/Checkers;' + Access flags : 0x0001 (PUBLIC) + Superclass : 'Landroid/app/Activity;' + Interfaces - + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/Checkers;) + name : 'a' + type : 'Lcom/google/android/checkers/CheckersView;' + access : 0x0002 (PRIVATE) + Direct methods - + #0 : (in Lcom/google/android/checkers/Checkers;) + name : '<init>' + type : '()V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 1 + ins : 1 + outs : 1 + insns size : 4 16-bit code units +00148c: |[00148c] com.google.android.checkers.Checkers.<init>:()V +00149c: 7010 0000 0000 |0000: invoke-direct {v0}, Landroid/app/Activity;.<init>:()V // method@0000 +0014a2: 0e00 |0003: return-void + catches : (none) + positions : + locals : + + #1 : (in Lcom/google/android/checkers/Checkers;) + name : 'a' + type : '(Z)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 4 + ins : 2 + outs : 2 + insns size : 33 16-bit code units +0014a4: |[0014a4] com.google.android.checkers.Checkers.a:(Z)V +0014b4: 6e10 4100 0200 |0000: invoke-virtual {v2}, Lcom/google/android/checkers/Checkers;.getWindow:()Landroid/view/Window; // method@0041 +0014ba: 0c00 |0003: move-result-object v0 +0014bc: 3803 1200 |0004: if-eqz v3, 0016 // +0012 +0014c0: 1301 8004 |0006: const/16 v1, #int 1152 // #480 +0014c4: 6e20 3a00 1000 |0008: invoke-virtual {v0, v1}, Landroid/view/Window;.addFlags:(I)V // method@003a +0014ca: 1301 0008 |000b: const/16 v1, #int 2048 // #800 +0014ce: 6e20 3b00 1000 |000d: invoke-virtual {v0, v1}, Landroid/view/Window;.clearFlags:(I)V // method@003b +0014d4: 5420 0100 |0010: iget-object v0, v2, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0014d8: 6e10 6f00 0000 |0012: invoke-virtual {v0}, Lcom/google/android/checkers/CheckersView;.requestLayout:()V // method@006f +0014de: 0e00 |0015: return-void +0014e0: 1301 8008 |0016: const/16 v1, #int 2176 // #880 +0014e4: 6e20 3a00 1000 |0018: invoke-virtual {v0, v1}, Landroid/view/Window;.addFlags:(I)V // method@003a +0014ea: 1301 0004 |001b: const/16 v1, #int 1024 // #400 +0014ee: 6e20 3b00 1000 |001d: invoke-virtual {v0, v1}, Landroid/view/Window;.clearFlags:(I)V // method@003b +0014f4: 28f0 |0020: goto 0010 // -0010 + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/Checkers;) + name : 'onConfigurationChanged' + type : '(Landroid/content/res/Configuration;)V' + access : 0x0001 (PUBLIC) + code - + registers : 2 + ins : 2 + outs : 2 + insns size : 4 16-bit code units +0014f8: |[0014f8] 
com.google.android.checkers.Checkers.onConfigurationChanged:(Landroid/content/res/Configuration;)V +001508: 6f20 0100 1000 |0000: invoke-super {v0, v1}, Landroid/app/Activity;.onConfigurationChanged:(Landroid/content/res/Configuration;)V // method@0001 +00150e: 0e00 |0003: return-void + catches : (none) + positions : + locals : + + #1 : (in Lcom/google/android/checkers/Checkers;) + name : 'onCreate' + type : '(Landroid/os/Bundle;)V' + access : 0x0001 (PUBLIC) + code - + registers : 8 + ins : 2 + outs : 3 + insns size : 59 16-bit code units +001510: |[001510] com.google.android.checkers.Checkers.onCreate:(Landroid/os/Bundle;)V +001520: 1215 |0000: const/4 v5, #int 1 // #1 +001522: 1201 |0001: const/4 v1, #int 0 // #0 +001524: 6f20 0200 7600 |0002: invoke-super {v6, v7}, Landroid/app/Activity;.onCreate:(Landroid/os/Bundle;)V // method@0002 +00152a: 6e20 4a00 5600 |0005: invoke-virtual {v6, v5}, Lcom/google/android/checkers/Checkers;.requestWindowFeature:(I)Z // method@004a +001530: 2200 1f00 |0008: new-instance v0, Lcom/google/android/checkers/CheckersView; // type@001f +001534: 6e20 4000 1600 |000a: invoke-virtual {v6, v1}, Lcom/google/android/checkers/Checkers;.getPreferences:(I)Landroid/content/SharedPreferences; // method@0040 +00153a: 0c02 |000d: move-result-object v2 +00153c: 7030 4d00 6002 |000e: invoke-direct {v0, v6, v2}, Lcom/google/android/checkers/CheckersView;.<init>:(Landroid/content/Context;Landroid/content/SharedPreferences;)V // method@004d +001542: 5b60 0100 |0011: iput-object v0, v6, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001546: 1a03 b500 |0013: const-string v3, "by Aart J.C. Bik" // string@00b5 +00154a: 0110 |0015: move v0, v1 +00154c: 0112 |0016: move v2, v1 +00154e: 6e10 a400 0300 |0017: invoke-virtual {v3}, Ljava/lang/String;.length:()I // method@00a4 +001554: 0a04 |001a: move-result v4 +001556: 3440 1800 |001b: if-lt v0, v4, 0033 // +0018 +00155a: 1300 c204 |001d: const/16 v0, #int 1218 // #4c2 +00155e: 3202 0500 |001f: if-eq v2, v0, 0024 // +0005 +001562: 7110 ac00 0500 |0021: invoke-static {v5}, Ljava/lang/System;.exit:(I)V // method@00ac +001568: 5460 0100 |0024: iget-object v0, v6, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +00156c: 6e20 4b00 0600 |0026: invoke-virtual {v6, v0}, Lcom/google/android/checkers/Checkers;.setContentView:(Landroid/view/View;)V // method@004b +001572: 5460 0100 |0029: iget-object v0, v6, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001576: 6e20 6500 1000 |002b: invoke-virtual {v0, v1}, Lcom/google/android/checkers/CheckersView;.d:(Z)Z // method@0065 +00157c: 0a00 |002e: move-result v0 +00157e: 7020 3f00 0600 |002f: invoke-direct {v6, v0}, Lcom/google/android/checkers/Checkers;.a:(Z)V // method@003f +001584: 0e00 |0032: return-void +001586: 6e20 a300 0300 |0033: invoke-virtual {v3, v0}, Ljava/lang/String;.charAt:(I)C // method@00a3 +00158c: 0a04 |0036: move-result v4 +00158e: b042 |0037: add-int/2addr v2, v4 +001590: d800 0001 |0038: add-int/lit8 v0, v0, #int 1 // #01 +001594: 28dd |003a: goto 0017 // -0023 + catches : (none) + positions : + locals : + + #2 : (in Lcom/google/android/checkers/Checkers;) + name : 'onCreateOptionsMenu' + type : '(Landroid/view/Menu;)Z' + access : 0x0001 (PUBLIC) + code - + registers : 11 + ins : 2 + outs : 5 + insns size : 216 16-bit code units +001598: |[001598] com.google.android.checkers.Checkers.onCreateOptionsMenu:(Landroid/view/Menu;)Z 
+0015a8: 1248 |0000: const/4 v8, #int 4 // #4 +0015aa: 1237 |0001: const/4 v7, #int 3 // #3 +0015ac: 1226 |0002: const/4 v6, #int 2 // #2 +0015ae: 1205 |0003: const/4 v5, #int 0 // #0 +0015b0: 1214 |0004: const/4 v4, #int 1 // #1 +0015b2: 6f20 0300 a900 |0005: invoke-super {v9, v10}, Landroid/app/Activity;.onCreateOptionsMenu:(Landroid/view/Menu;)Z // method@0003 +0015b8: 1a00 7400 |0008: const-string v0, "New Game" // string@0074 +0015bc: 7250 2b00 5a55 |000a: invoke-interface {v10, v5, v5, v5, v0}, Landroid/view/Menu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@002b +0015c2: 1a00 8200 |000d: const-string v0, "Undo" // string@0082 +0015c6: 7250 2b00 5a44 |000f: invoke-interface {v10, v5, v4, v4, v0}, Landroid/view/Menu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@002b +0015cc: 1a00 8000 |0012: const-string v0, "Switch Side" // string@0080 +0015d0: 7250 2b00 5a66 |0014: invoke-interface {v10, v5, v6, v6, v0}, Landroid/view/Menu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@002b +0015d6: 1a00 7800 |0017: const-string v0, "Options" // string@0078 +0015da: 7250 2c00 5a77 |0019: invoke-interface {v10, v5, v7, v7, v0}, Landroid/view/Menu;.addSubMenu:(IIILjava/lang/CharSequence;)Landroid/view/SubMenu; // method@002c +0015e0: 0c00 |001c: move-result-object v0 +0015e2: 1a01 7100 |001d: const-string v1, "Move Coach" // string@0071 +0015e6: 7251 3400 4055 |001f: invoke-interface {v0, v4, v5, v5, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0015ec: 0c01 |0022: move-result-object v1 +0015ee: 7220 2f00 4100 |0023: invoke-interface {v1, v4}, Landroid/view/MenuItem;.setCheckable:(Z)Landroid/view/MenuItem; // method@002f +0015f4: 0c01 |0026: move-result-object v1 +0015f6: 5492 0100 |0027: iget-object v2, v9, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0015fa: 6e20 5a00 5200 |0029: invoke-virtual {v2, v5}, Lcom/google/android/checkers/CheckersView;.a:(Z)Z // method@005a +001600: 0a02 |002c: move-result v2 +001602: 7220 3000 2100 |002d: invoke-interface {v1, v2}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +001608: 1a01 7700 |0030: const-string v1, "Optional Jumps" // string@0077 +00160c: 7251 3400 4044 |0032: invoke-interface {v0, v4, v4, v4, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +001612: 0c01 |0035: move-result-object v1 +001614: 7220 2f00 4100 |0036: invoke-interface {v1, v4}, Landroid/view/MenuItem;.setCheckable:(Z)Landroid/view/MenuItem; // method@002f +00161a: 0c01 |0039: move-result-object v1 +00161c: 5492 0100 |003a: iget-object v2, v9, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001620: 6e20 6000 5200 |003c: invoke-virtual {v2, v5}, Lcom/google/android/checkers/CheckersView;.b:(Z)Z // method@0060 +001626: 0a02 |003f: move-result v2 +001628: 7220 3000 2100 |0040: invoke-interface {v1, v2}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +00162e: 1a01 9800 |0043: const-string v1, "View from White" // string@0098 +001632: 7251 3400 4066 |0045: invoke-interface {v0, v4, v6, v6, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +001638: 0c01 |0048: move-result-object v1 +00163a: 7220 2f00 4100 |0049: invoke-interface {v1, v4}, Landroid/view/MenuItem;.setCheckable:(Z)Landroid/view/MenuItem; // 
method@002f +001640: 0c01 |004c: move-result-object v1 +001642: 5492 0100 |004d: iget-object v2, v9, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001646: 6e20 6300 5200 |004f: invoke-virtual {v2, v5}, Lcom/google/android/checkers/CheckersView;.c:(Z)Z // method@0063 +00164c: 0a02 |0052: move-result v2 +00164e: 7220 3000 2100 |0053: invoke-interface {v1, v2}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +001654: 1a01 1800 |0056: const-string v1, "Full Screen" // string@0018 +001658: 7251 3400 4077 |0058: invoke-interface {v0, v4, v7, v7, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +00165e: 0c01 |005b: move-result-object v1 +001660: 7220 2f00 4100 |005c: invoke-interface {v1, v4}, Landroid/view/MenuItem;.setCheckable:(Z)Landroid/view/MenuItem; // method@002f +001666: 0c01 |005f: move-result-object v1 +001668: 5492 0100 |0060: iget-object v2, v9, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +00166c: 6e20 6500 5200 |0062: invoke-virtual {v2, v5}, Lcom/google/android/checkers/CheckersView;.d:(Z)Z // method@0065 +001672: 0a02 |0065: move-result v2 +001674: 7220 3000 2100 |0066: invoke-interface {v1, v2}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +00167a: 1a01 7e00 |0069: const-string v1, "Start Screen" // string@007e +00167e: 7251 3400 4088 |006b: invoke-interface {v0, v4, v8, v8, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +001684: 0c01 |006e: move-result-object v1 +001686: 7220 2f00 4100 |006f: invoke-interface {v1, v4}, Landroid/view/MenuItem;.setCheckable:(Z)Landroid/view/MenuItem; // method@002f +00168c: 0c01 |0072: move-result-object v1 +00168e: 5492 0100 |0073: iget-object v2, v9, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001692: 6e20 6800 5200 |0075: invoke-virtual {v2, v5}, Lcom/google/android/checkers/CheckersView;.e:(Z)Z // method@0068 +001698: 0a02 |0078: move-result v2 +00169a: 7220 3000 2100 |0079: invoke-interface {v1, v2}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +0016a0: 1251 |007c: const/4 v1, #int 5 // #5 +0016a2: 1252 |007d: const/4 v2, #int 5 // #5 +0016a4: 1a03 0d00 |007e: const-string v3, "Board Color" // string@000d +0016a8: 7253 3400 4021 |0080: invoke-interface {v0, v4, v1, v2, v3}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016ae: 1a00 5800 |0083: const-string v0, "Level" // string@0058 +0016b2: 7250 2c00 5a88 |0085: invoke-interface {v10, v5, v8, v8, v0}, Landroid/view/Menu;.addSubMenu:(IIILjava/lang/CharSequence;)Landroid/view/SubMenu; // method@002c +0016b8: 0c00 |0088: move-result-object v0 +0016ba: 1a01 1700 |0089: const-string v1, "Free Play" // string@0017 +0016be: 7251 3400 6055 |008b: invoke-interface {v0, v6, v5, v5, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016c4: 1a01 7b00 |008e: const-string v1, "Random (0s)" // string@007b +0016c8: 7251 3400 6044 |0090: invoke-interface {v0, v6, v4, v4, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016ce: 1a01 5900 |0093: const-string v1, "Level 1 (fast)" // string@0059 +0016d2: 7251 3400 6066 |0095: invoke-interface {v0, v6, v6, v6, v1}, 
Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016d8: 1a01 5a00 |0098: const-string v1, "Level 2 (1s)" // string@005a +0016dc: 7251 3400 6077 |009a: invoke-interface {v0, v6, v7, v7, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016e2: 1a01 5b00 |009d: const-string v1, "Level 3 (5s)" // string@005b +0016e6: 7251 3400 6088 |009f: invoke-interface {v0, v6, v8, v8, v1}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016ec: 1251 |00a2: const/4 v1, #int 5 // #5 +0016ee: 1252 |00a3: const/4 v2, #int 5 // #5 +0016f0: 1a03 5c00 |00a4: const-string v3, "Level 4 (10s)" // string@005c +0016f4: 7253 3400 6021 |00a6: invoke-interface {v0, v6, v1, v2, v3}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +0016fa: 1261 |00a9: const/4 v1, #int 6 // #6 +0016fc: 1262 |00aa: const/4 v2, #int 6 // #6 +0016fe: 1a03 5d00 |00ab: const-string v3, "Level 5 (15s)" // string@005d +001702: 7253 3400 6021 |00ad: invoke-interface {v0, v6, v1, v2, v3}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +001708: 1271 |00b0: const/4 v1, #int 7 // #7 +00170a: 1272 |00b1: const/4 v2, #int 7 // #7 +00170c: 1a03 5e00 |00b2: const-string v3, "Level 6 (30s)" // string@005e +001710: 7253 3400 6021 |00b4: invoke-interface {v0, v6, v1, v2, v3}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +001716: 1301 0800 |00b7: const/16 v1, #int 8 // #8 +00171a: 1302 0800 |00b9: const/16 v2, #int 8 // #8 +00171e: 1a03 5f00 |00bb: const-string v3, "Level 7 (60s)" // string@005f +001722: 7253 3400 6021 |00bd: invoke-interface {v0, v6, v1, v2, v3}, Landroid/view/SubMenu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@0034 +001728: 7240 3600 6044 |00c0: invoke-interface {v0, v6, v4, v4}, Landroid/view/SubMenu;.setGroupCheckable:(IZZ)V // method@0036 +00172e: 5491 0100 |00c3: iget-object v1, v9, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001732: 6e10 6100 0100 |00c5: invoke-virtual {v1}, Lcom/google/android/checkers/CheckersView;.c:()I // method@0061 +001738: 0a01 |00c8: move-result v1 +00173a: 7220 3500 1000 |00c9: invoke-interface {v0, v1}, Landroid/view/SubMenu;.findItem:(I)Landroid/view/MenuItem; // method@0035 +001740: 0c00 |00cc: move-result-object v0 +001742: 7220 3000 4000 |00cd: invoke-interface {v0, v4}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +001748: 1250 |00d0: const/4 v0, #int 5 // #5 +00174a: 1251 |00d1: const/4 v1, #int 5 // #5 +00174c: 1a02 9b00 |00d2: const-string v2, "Website" // string@009b +001750: 7252 2b00 5a10 |00d4: invoke-interface {v10, v5, v0, v1, v2}, Landroid/view/Menu;.add:(IIILjava/lang/CharSequence;)Landroid/view/MenuItem; // method@002b +001756: 0f04 |00d7: return v4 + catches : (none) + positions : + locals : + + #3 : (in Lcom/google/android/checkers/Checkers;) + name : 'onKeyDown' + type : '(ILandroid/view/KeyEvent;)Z' + access : 0x0001 (PUBLIC) + code - + registers : 8 + ins : 3 + outs : 3 + insns size : 58 16-bit code units +001758: |[001758] com.google.android.checkers.Checkers.onKeyDown:(ILandroid/view/KeyEvent;)Z +001768: 1504 803f |0000: const/high16 v4, #int 1065353216 // #3f80 +00176c: 1503 80bf |0002: const/high16 v3, #int -1082130432 // #bf80 +001770: 1202 |0004: const/4 v2, #int 0 // #0 +001772: 1210 
|0005: const/4 v0, #int 1 // #1 +001774: 2b06 2600 0000 |0006: packed-switch v6, 0000002c // +00000026 +00177a: 6f30 0400 6507 |0009: invoke-super {v5, v6, v7}, Landroid/app/Activity;.onKeyDown:(ILandroid/view/KeyEvent;)Z // method@0004 +001780: 0a00 |000c: move-result v0 +001782: 0f00 |000d: return v0 +001784: 5451 0100 |000e: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001788: 6e10 5d00 0100 |0010: invoke-virtual {v1}, Lcom/google/android/checkers/CheckersView;.b:()V // method@005d +00178e: 28fa |0013: goto 000d // -0006 +001790: 5451 0100 |0014: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001794: 6e30 4f00 3102 |0016: invoke-virtual {v1, v3, v2}, Lcom/google/android/checkers/CheckersView;.a:(FF)V // method@004f +00179a: 28f4 |0019: goto 000d // -000c +00179c: 5451 0100 |001a: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0017a0: 6e30 4f00 4102 |001c: invoke-virtual {v1, v4, v2}, Lcom/google/android/checkers/CheckersView;.a:(FF)V // method@004f +0017a6: 28ee |001f: goto 000d // -0012 +0017a8: 5451 0100 |0020: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0017ac: 6e30 4f00 2103 |0022: invoke-virtual {v1, v2, v3}, Lcom/google/android/checkers/CheckersView;.a:(FF)V // method@004f +0017b2: 28e8 |0025: goto 000d // -0018 +0017b4: 5451 0100 |0026: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0017b8: 6e30 4f00 2104 |0028: invoke-virtual {v1, v2, v4}, Lcom/google/android/checkers/CheckersView;.a:(FF)V // method@004f +0017be: 28e2 |002b: goto 000d // -001e +0017c0: 0001 0500 1300 0000 1a00 0000 2000 ... 
|002c: packed-switch-data (14 units) + catches : (none) + positions : + locals : + + #4 : (in Lcom/google/android/checkers/Checkers;) + name : 'onOptionsItemSelected' + type : '(Landroid/view/MenuItem;)Z' + access : 0x0001 (PUBLIC) + code - + registers : 7 + ins : 2 + outs : 3 + insns size : 158 16-bit code units +0017dc: |[0017dc] com.google.android.checkers.Checkers.onOptionsItemSelected:(Landroid/view/MenuItem;)Z +0017ec: 1254 |0000: const/4 v4, #int 5 // #5 +0017ee: 1223 |0001: const/4 v3, #int 2 // #2 +0017f0: 1210 |0002: const/4 v0, #int 1 // #1 +0017f2: 7210 2e00 0600 |0003: invoke-interface {v6}, Landroid/view/MenuItem;.getItemId:()I // method@002e +0017f8: 0a01 |0006: move-result v1 +0017fa: 7210 2d00 0600 |0007: invoke-interface {v6}, Landroid/view/MenuItem;.getGroupId:()I // method@002d +001800: 0a02 |000a: move-result v2 +001802: 2b02 8900 0000 |000b: packed-switch v2, 00000094 // +00000089 +001808: 6f20 0500 6500 |000e: invoke-super {v5, v6}, Landroid/app/Activity;.onOptionsItemSelected:(Landroid/view/MenuItem;)Z // method@0005 +00180e: 0a00 |0011: move-result v0 +001810: 0f00 |0012: return v0 +001812: 3901 0900 |0013: if-nez v1, 001c // +0009 +001816: 5451 0100 |0015: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +00181a: 12d2 |0017: const/4 v2, #int -3 // #fd +00181c: 6e20 5700 2100 |0018: invoke-virtual {v1, v2}, Lcom/google/android/checkers/CheckersView;.a:(I)Z // method@0057 +001822: 28f7 |001b: goto 0012 // -0009 +001824: 3301 0900 |001c: if-ne v1, v0, 0025 // +0009 +001828: 5451 0100 |001e: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +00182c: 12e2 |0020: const/4 v2, #int -2 // #fe +00182e: 6e20 5700 2100 |0021: invoke-virtual {v1, v2}, Lcom/google/android/checkers/CheckersView;.a:(I)Z // method@0057 +001834: 28ee |0024: goto 0012 // -0012 +001836: 3331 0900 |0025: if-ne v1, v3, 002e // +0009 +00183a: 5451 0100 |0027: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +00183e: 12c2 |0029: const/4 v2, #int -4 // #fc +001840: 6e20 5700 2100 |002a: invoke-virtual {v1, v2}, Lcom/google/android/checkers/CheckersView;.a:(I)Z // method@0057 +001846: 28e5 |002d: goto 0012 // -001b +001848: 3341 e0ff |002e: if-ne v1, v4, 000e // -0020 +00184c: 1a01 e300 |0030: const-string v1, "http://www.aartbik.com/MISC/android.html" // string@00e3 +001850: 7110 2900 0100 |0032: invoke-static {v1}, Landroid/net/Uri;.parse:(Ljava/lang/String;)Landroid/net/Uri; // method@0029 +001856: 0c01 |0035: move-result-object v1 +001858: 2202 0a00 |0036: new-instance v2, Landroid/content/Intent; // type@000a +00185c: 1a03 af00 |0038: const-string v3, "android.intent.action.VIEW" // string@00af +001860: 7030 1200 3201 |003a: invoke-direct {v2, v3, v1}, Landroid/content/Intent;.<init>:(Ljava/lang/String;Landroid/net/Uri;)V // method@0012 +001866: 6e20 4c00 2500 |003d: invoke-virtual {v5, v2}, Lcom/google/android/checkers/Checkers;.startActivity:(Landroid/content/Intent;)V // method@004c +00186c: 28d2 |0040: goto 0012 // -002e +00186e: 3901 0c00 |0041: if-nez v1, 004d // +000c +001872: 5451 0100 |0043: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001876: 6e20 5a00 0100 |0045: invoke-virtual {v1, v0}, Lcom/google/android/checkers/CheckersView;.a:(Z)Z // method@005a +00187c: 0a01 |0048: move-result v1 +00187e: 7220 3000 1600 
|0049: invoke-interface {v6, v1}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +001884: 28c6 |004c: goto 0012 // -003a +001886: 3301 0c00 |004d: if-ne v1, v0, 0059 // +000c +00188a: 5451 0100 |004f: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +00188e: 6e20 6000 0100 |0051: invoke-virtual {v1, v0}, Lcom/google/android/checkers/CheckersView;.b:(Z)Z // method@0060 +001894: 0a01 |0054: move-result v1 +001896: 7220 3000 1600 |0055: invoke-interface {v6, v1}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +00189c: 28ba |0058: goto 0012 // -0046 +00189e: 3331 0c00 |0059: if-ne v1, v3, 0065 // +000c +0018a2: 5451 0100 |005b: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0018a6: 6e20 6300 0100 |005d: invoke-virtual {v1, v0}, Lcom/google/android/checkers/CheckersView;.c:(Z)Z // method@0063 +0018ac: 0a01 |0060: move-result v1 +0018ae: 7220 3000 1600 |0061: invoke-interface {v6, v1}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +0018b4: 28ae |0064: goto 0012 // -0052 +0018b6: 1232 |0065: const/4 v2, #int 3 // #3 +0018b8: 3321 0f00 |0066: if-ne v1, v2, 0075 // +000f +0018bc: 5451 0100 |0068: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0018c0: 6e20 6500 0100 |006a: invoke-virtual {v1, v0}, Lcom/google/android/checkers/CheckersView;.d:(Z)Z // method@0065 +0018c6: 0a01 |006d: move-result v1 +0018c8: 7220 3000 1600 |006e: invoke-interface {v6, v1}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +0018ce: 7020 3f00 1500 |0071: invoke-direct {v5, v1}, Lcom/google/android/checkers/Checkers;.a:(Z)V // method@003f +0018d4: 289e |0074: goto 0012 // -0062 +0018d6: 1242 |0075: const/4 v2, #int 4 // #4 +0018d8: 3321 0c00 |0076: if-ne v1, v2, 0082 // +000c +0018dc: 5451 0100 |0078: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0018e0: 6e20 6800 0100 |007a: invoke-virtual {v1, v0}, Lcom/google/android/checkers/CheckersView;.e:(Z)Z // method@0068 +0018e6: 0a01 |007d: move-result v1 +0018e8: 7220 3000 1600 |007e: invoke-interface {v6, v1}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +0018ee: 2891 |0081: goto 0012 // -006f +0018f0: 3341 8cff |0082: if-ne v1, v4, 000e // -0074 +0018f4: 5451 0100 |0084: iget-object v1, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0018f8: 6e10 4e00 0100 |0086: invoke-virtual {v1}, Lcom/google/android/checkers/CheckersView;.a:()V // method@004e +0018fe: 2889 |0089: goto 0012 // -0077 +001900: 5452 0100 |008a: iget-object v2, v5, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001904: 6e20 7000 1200 |008c: invoke-virtual {v2, v1}, Lcom/google/android/checkers/CheckersView;.setLevel:(I)V // method@0070 +00190a: 7220 3000 0600 |008f: invoke-interface {v6, v0}, Landroid/view/MenuItem;.setChecked:(Z)Landroid/view/MenuItem; // method@0030 +001910: 2880 |0092: goto 0012 // -0080 +001912: 0000 |0093: nop // spacer +001914: 0001 0300 0000 0000 0800 0000 3600 ... 
|0094: packed-switch-data (10 units) + catches : (none) + positions : + locals : + + #5 : (in Lcom/google/android/checkers/Checkers;) + name : 'onPause' + type : '()V' + access : 0x0001 (PUBLIC) + code - + registers : 3 + ins : 1 + outs : 2 + insns size : 21 16-bit code units +001928: |[001928] com.google.android.checkers.Checkers.onPause:()V +001938: 6f10 0600 0200 |0000: invoke-super {v2}, Landroid/app/Activity;.onPause:()V // method@0006 +00193e: 1200 |0003: const/4 v0, #int 0 // #0 +001940: 6e20 4000 0200 |0004: invoke-virtual {v2, v0}, Lcom/google/android/checkers/Checkers;.getPreferences:(I)Landroid/content/SharedPreferences; // method@0040 +001946: 0c00 |0007: move-result-object v0 +001948: 7210 1700 0000 |0008: invoke-interface {v0}, Landroid/content/SharedPreferences;.edit:()Landroid/content/SharedPreferences$Editor; // method@0017 +00194e: 0c00 |000b: move-result-object v0 +001950: 5421 0100 |000c: iget-object v1, v2, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +001954: 6e20 5100 0100 |000e: invoke-virtual {v1, v0}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences$Editor;)V // method@0051 +00195a: 7210 1400 0000 |0011: invoke-interface {v0}, Landroid/content/SharedPreferences$Editor;.commit:()Z // method@0014 +001960: 0e00 |0014: return-void + catches : (none) + positions : + locals : + + #6 : (in Lcom/google/android/checkers/Checkers;) + name : 'onStop' + type : '()V' + access : 0x0001 (PUBLIC) + code - + registers : 2 + ins : 1 + outs : 1 + insns size : 8 16-bit code units +001964: |[001964] com.google.android.checkers.Checkers.onStop:()V +001974: 6f10 0700 0100 |0000: invoke-super {v1}, Landroid/app/Activity;.onStop:()V // method@0007 +00197a: 1200 |0003: const/4 v0, #int 0 // #0 +00197c: 7110 ac00 0000 |0004: invoke-static {v0}, Ljava/lang/System;.exit:(I)V // method@00ac +001982: 0e00 |0007: return-void + catches : (none) + positions : + locals : + + #7 : (in Lcom/google/android/checkers/Checkers;) + name : 'onTrackballEvent' + type : '(Landroid/view/MotionEvent;)Z' + access : 0x0001 (PUBLIC) + code - + registers : 6 + ins : 2 + outs : 3 + insns size : 44 16-bit code units +001984: |[001984] com.google.android.checkers.Checkers.onTrackballEvent:(Landroid/view/MotionEvent;)Z +001994: 1210 |0000: const/4 v0, #int 1 // #1 +001996: 6e10 3100 0500 |0001: invoke-virtual {v5}, Landroid/view/MotionEvent;.getAction:()I // method@0031 +00199c: 0a01 |0004: move-result v1 +00199e: 2b01 1d00 0000 |0005: packed-switch v1, 00000022 // +0000001d +0019a4: 6f20 0800 5400 |0008: invoke-super {v4, v5}, Landroid/app/Activity;.onTrackballEvent:(Landroid/view/MotionEvent;)Z // method@0008 +0019aa: 0a00 |000b: move-result v0 +0019ac: 0f00 |000c: return v0 +0019ae: 5441 0100 |000d: iget-object v1, v4, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0019b2: 6e10 5d00 0100 |000f: invoke-virtual {v1}, Lcom/google/android/checkers/CheckersView;.b:()V // method@005d +0019b8: 28fa |0012: goto 000c // -0006 +0019ba: 5441 0100 |0013: iget-object v1, v4, Lcom/google/android/checkers/Checkers;.a:Lcom/google/android/checkers/CheckersView; // field@0001 +0019be: 6e10 3200 0500 |0015: invoke-virtual {v5}, Landroid/view/MotionEvent;.getX:()F // method@0032 +0019c4: 0a02 |0018: move-result v2 +0019c6: 6e10 3300 0500 |0019: invoke-virtual {v5}, Landroid/view/MotionEvent;.getY:()F // method@0033 +0019cc: 0a03 |001c: move-result v3 +0019ce: 6e30 4f00 2103 |001d: invoke-virtual {v1, 
v2, v3}, Lcom/google/android/checkers/CheckersView;.a:(FF)V // method@004f +0019d4: 28ec |0020: goto 000c // -0014 +0019d6: 0000 |0021: nop // spacer +0019d8: 0001 0300 0000 0000 0800 0000 0300 ... |0022: packed-switch-data (10 units) + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #1 header: +class_idx : 31 +access_flags : 1 (0x0001) +superclass_idx : 27 +interfaces_off : 0 (0x000000) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 34602 (0x00872a) +static_fields_size : 0 +instance_fields_size: 43 +direct_methods_size : 15 +virtual_methods_size: 16 + +Class #1 - + Class descriptor : 'Lcom/google/android/checkers/CheckersView;' + Access flags : 0x0001 (PUBLIC) + Superclass : 'Landroid/view/View;' + Interfaces - + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/CheckersView;) + name : 'A' + type : 'Z' + access : 0x0002 (PRIVATE) + #1 : (in Lcom/google/android/checkers/CheckersView;) + name : 'B' + type : 'Z' + access : 0x0002 (PRIVATE) + #2 : (in Lcom/google/android/checkers/CheckersView;) + name : 'C' + type : 'Z' + access : 0x0002 (PRIVATE) + #3 : (in Lcom/google/android/checkers/CheckersView;) + name : 'D' + type : 'Z' + access : 0x0002 (PRIVATE) + #4 : (in Lcom/google/android/checkers/CheckersView;) + name : 'E' + type : 'Z' + access : 0x0002 (PRIVATE) + #5 : (in Lcom/google/android/checkers/CheckersView;) + name : 'F' + type : 'I' + access : 0x0002 (PRIVATE) + #6 : (in Lcom/google/android/checkers/CheckersView;) + name : 'G' + type : '[I' + access : 0x0002 (PRIVATE) + #7 : (in Lcom/google/android/checkers/CheckersView;) + name : 'H' + type : '[I' + access : 0x0002 (PRIVATE) + #8 : (in Lcom/google/android/checkers/CheckersView;) + name : 'I' + type : '[I' + access : 0x0002 (PRIVATE) + #9 : (in Lcom/google/android/checkers/CheckersView;) + name : 'J' + type : '[I' + access : 0x0002 (PRIVATE) + #10 : (in Lcom/google/android/checkers/CheckersView;) + name : 'K' + type : 'I' + access : 0x0002 (PRIVATE) + #11 : (in Lcom/google/android/checkers/CheckersView;) + name : 'L' + type : 'I' + access : 0x0002 (PRIVATE) + #12 : (in Lcom/google/android/checkers/CheckersView;) + name : 'M' + type : 'I' + access : 0x0002 (PRIVATE) + #13 : (in Lcom/google/android/checkers/CheckersView;) + name : 'N' + type : 'Ljava/lang/String;' + access : 0x0002 (PRIVATE) + #14 : (in Lcom/google/android/checkers/CheckersView;) + name : 'O' + type : 'F' + access : 0x0002 (PRIVATE) + #15 : (in Lcom/google/android/checkers/CheckersView;) + name : 'P' + type : 'F' + access : 0x0002 (PRIVATE) + #16 : (in Lcom/google/android/checkers/CheckersView;) + name : 'Q' + type : 'I' + access : 0x0002 (PRIVATE) + #17 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : 'Landroid/content/Context;' + access : 0x0002 (PRIVATE) + #18 : (in Lcom/google/android/checkers/CheckersView;) + name : 'b' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #19 : (in Lcom/google/android/checkers/CheckersView;) + name : 'c' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #20 : (in Lcom/google/android/checkers/CheckersView;) + name : 'd' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #21 : (in Lcom/google/android/checkers/CheckersView;) + name : 'e' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #22 : (in Lcom/google/android/checkers/CheckersView;) + name : 'f' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #23 : (in 
Lcom/google/android/checkers/CheckersView;) + name : 'g' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #24 : (in Lcom/google/android/checkers/CheckersView;) + name : 'h' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #25 : (in Lcom/google/android/checkers/CheckersView;) + name : 'i' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #26 : (in Lcom/google/android/checkers/CheckersView;) + name : 'j' + type : 'Landroid/graphics/Paint;' + access : 0x0002 (PRIVATE) + #27 : (in Lcom/google/android/checkers/CheckersView;) + name : 'k' + type : 'I' + access : 0x0002 (PRIVATE) + #28 : (in Lcom/google/android/checkers/CheckersView;) + name : 'l' + type : 'F' + access : 0x0002 (PRIVATE) + #29 : (in Lcom/google/android/checkers/CheckersView;) + name : 'm' + type : 'I' + access : 0x0002 (PRIVATE) + #30 : (in Lcom/google/android/checkers/CheckersView;) + name : 'n' + type : 'I' + access : 0x0002 (PRIVATE) + #31 : (in Lcom/google/android/checkers/CheckersView;) + name : 'o' + type : 'Landroid/graphics/drawable/Drawable;' + access : 0x0002 (PRIVATE) + #32 : (in Lcom/google/android/checkers/CheckersView;) + name : 'p' + type : 'Lcom/google/android/checkers/a;' + access : 0x0002 (PRIVATE) + #33 : (in Lcom/google/android/checkers/CheckersView;) + name : 'q' + type : 'I' + access : 0x0002 (PRIVATE) + #34 : (in Lcom/google/android/checkers/CheckersView;) + name : 'r' + type : 'I' + access : 0x0002 (PRIVATE) + #35 : (in Lcom/google/android/checkers/CheckersView;) + name : 's' + type : 'I' + access : 0x0002 (PRIVATE) + #36 : (in Lcom/google/android/checkers/CheckersView;) + name : 't' + type : 'I' + access : 0x0002 (PRIVATE) + #37 : (in Lcom/google/android/checkers/CheckersView;) + name : 'u' + type : 'I' + access : 0x0002 (PRIVATE) + #38 : (in Lcom/google/android/checkers/CheckersView;) + name : 'v' + type : 'I' + access : 0x0002 (PRIVATE) + #39 : (in Lcom/google/android/checkers/CheckersView;) + name : 'w' + type : 'I' + access : 0x0002 (PRIVATE) + #40 : (in Lcom/google/android/checkers/CheckersView;) + name : 'x' + type : 'I' + access : 0x0002 (PRIVATE) + #41 : (in Lcom/google/android/checkers/CheckersView;) + name : 'y' + type : 'Z' + access : 0x0002 (PRIVATE) + #42 : (in Lcom/google/android/checkers/CheckersView;) + name : 'z' + type : 'I' + access : 0x0002 (PRIVATE) + Direct methods - + #0 : (in Lcom/google/android/checkers/CheckersView;) + name : '<init>' + type : '(Landroid/content/Context;Landroid/content/SharedPreferences;)V' + access : 0x10001 (PUBLIC CONSTRUCTOR) + code - + registers : 12 + ins : 3 + outs : 5 + insns size : 662 16-bit code units +0019ec: |[0019ec] com.google.android.checkers.CheckersView.<init>:(Landroid/content/Context;Landroid/content/SharedPreferences;)V +0019fc: 1308 0800 |0000: const/16 v8, #int 8 // #8 +001a00: 1217 |0002: const/4 v7, #int 1 // #1 +001a02: 1306 ff00 |0003: const/16 v6, #int 255 // #ff +001a06: 1205 |0005: const/4 v5, #int 0 // #0 +001a08: 1204 |0006: const/4 v4, #int 0 // #0 +001a0a: 7020 3700 a900 |0007: invoke-direct {v9, v10}, Landroid/view/View;.<init>:(Landroid/content/Context;)V // method@0037 +001a10: 5b9a 1300 |000a: iput-object v10, v9, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013 +001a14: 2200 1100 |000c: new-instance v0, Landroid/graphics/Paint; // type@0011 +001a18: 7010 2100 0000 |000e: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001a1e: 5b90 1400 |0011: iput-object v0, v9, 
Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +001a22: 5490 1400 |0013: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +001a26: 6e20 2300 7000 |0015: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001a2c: 5490 1400 |0018: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +001a30: 6e54 2200 6044 |001a: invoke-virtual {v0, v6, v4, v4, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001a36: 2200 1100 |001d: new-instance v0, Landroid/graphics/Paint; // type@0011 +001a3a: 7010 2100 0000 |001f: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001a40: 5b90 1500 |0022: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +001a44: 5490 1500 |0024: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +001a48: 6e20 2300 7000 |0026: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001a4e: 5490 1500 |0029: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +001a52: 6e56 2200 6066 |002b: invoke-virtual {v0, v6, v6, v6, v6}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001a58: 2200 1100 |002e: new-instance v0, Landroid/graphics/Paint; // type@0011 +001a5c: 7010 2100 0000 |0030: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001a62: 5b90 1600 |0033: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.d:Landroid/graphics/Paint; // field@0016 +001a66: 5490 1600 |0035: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.d:Landroid/graphics/Paint; // field@0016 +001a6a: 6e20 2300 7000 |0037: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001a70: 5490 1600 |003a: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.d:Landroid/graphics/Paint; // field@0016 +001a74: 1301 fb00 |003c: const/16 v1, #int 251 // #fb +001a78: 1302 d700 |003e: const/16 v2, #int 215 // #d7 +001a7c: 1303 ae00 |0040: const/16 v3, #int 174 // #ae +001a80: 6e53 2200 6021 |0042: invoke-virtual {v0, v6, v1, v2, v3}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001a86: 2200 1100 |0045: new-instance v0, Landroid/graphics/Paint; // type@0011 +001a8a: 7010 2100 0000 |0047: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001a90: 5b90 1700 |004a: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +001a94: 5490 1700 |004c: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +001a98: 6e20 2300 7000 |004e: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001a9e: 5490 1700 |0051: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +001aa2: 6e54 2200 6044 |0053: invoke-virtual {v0, v6, v4, v4, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001aa8: 2200 1100 |0056: new-instance v0, Landroid/graphics/Paint; // type@0011 +001aac: 7010 2100 0000 |0058: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001ab2: 5b90 1800 |005b: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.f:Landroid/graphics/Paint; // field@0018 +001ab6: 5490 1800 |005d: iget-object v0, v9, 
Lcom/google/android/checkers/CheckersView;.f:Landroid/graphics/Paint; // field@0018 +001aba: 6e20 2300 7000 |005f: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001ac0: 5490 1800 |0062: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.f:Landroid/graphics/Paint; // field@0018 +001ac4: 6e54 2200 6046 |0064: invoke-virtual {v0, v6, v6, v4, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001aca: 2200 1100 |0067: new-instance v0, Landroid/graphics/Paint; // type@0011 +001ace: 7010 2100 0000 |0069: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001ad4: 5b90 1900 |006c: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.g:Landroid/graphics/Paint; // field@0019 +001ad8: 5490 1900 |006e: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.g:Landroid/graphics/Paint; // field@0019 +001adc: 6e20 2300 7000 |0070: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001ae2: 5490 1900 |0073: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.g:Landroid/graphics/Paint; // field@0019 +001ae6: 1301 a500 |0075: const/16 v1, #int 165 // #a5 +001aea: 6e54 2200 6016 |0077: invoke-virtual {v0, v6, v6, v1, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001af0: 2200 1100 |007a: new-instance v0, Landroid/graphics/Paint; // type@0011 +001af4: 7010 2100 0000 |007c: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001afa: 5b90 1a00 |007f: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.h:Landroid/graphics/Paint; // field@001a +001afe: 5490 1a00 |0081: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.h:Landroid/graphics/Paint; // field@001a +001b02: 6e20 2300 7000 |0083: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001b08: 5490 1a00 |0086: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.h:Landroid/graphics/Paint; // field@001a +001b0c: 1301 c800 |0088: const/16 v1, #int 200 // #c8 +001b10: 6e54 2200 6016 |008a: invoke-virtual {v0, v6, v6, v1, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001b16: 5490 1a00 |008d: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.h:Landroid/graphics/Paint; // field@001a +001b1a: 1501 0040 |008f: const/high16 v1, #int 1073741824 // #4000 +001b1e: 6e20 2400 1000 |0091: invoke-virtual {v0, v1}, Landroid/graphics/Paint;.setStrokeWidth:(F)V // method@0024 +001b24: 2200 1100 |0094: new-instance v0, Landroid/graphics/Paint; // type@0011 +001b28: 7010 2100 0000 |0096: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001b2e: 5b90 1b00 |0099: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.i:Landroid/graphics/Paint; // field@001b +001b32: 5490 1b00 |009b: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.i:Landroid/graphics/Paint; // field@001b +001b36: 6e20 2300 7000 |009d: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001b3c: 5490 1b00 |00a0: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.i:Landroid/graphics/Paint; // field@001b +001b40: 6e54 2200 6064 |00a2: invoke-virtual {v0, v6, v4, v6, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001b46: 2200 1100 |00a5: new-instance v0, Landroid/graphics/Paint; // type@0011 +001b4a: 7010 2100 0000 |00a7: invoke-direct {v0}, Landroid/graphics/Paint;.<init>:()V // method@0021 +001b50: 5b90 1c00 |00aa: iput-object v0, v9, 
Lcom/google/android/checkers/CheckersView;.j:Landroid/graphics/Paint; // field@001c +001b54: 5490 1c00 |00ac: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.j:Landroid/graphics/Paint; // field@001c +001b58: 6e20 2300 7000 |00ae: invoke-virtual {v0, v7}, Landroid/graphics/Paint;.setAntiAlias:(Z)V // method@0023 +001b5e: 5490 1c00 |00b1: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.j:Landroid/graphics/Paint; // field@001c +001b62: 6e54 2200 6044 |00b3: invoke-virtual {v0, v6, v4, v4, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +001b68: 5490 1c00 |00b6: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.j:Landroid/graphics/Paint; // field@001c +001b6c: 6201 0000 |00b8: sget-object v1, Landroid/graphics/Paint$Style;.STROKE:Landroid/graphics/Paint$Style; // field@0000 +001b70: 6e20 2500 1000 |00ba: invoke-virtual {v0, v1}, Landroid/graphics/Paint;.setStyle:(Landroid/graphics/Paint$Style;)V // method@0025 +001b76: 5490 1c00 |00bd: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.j:Landroid/graphics/Paint; // field@001c +001b7a: 1501 a040 |00bf: const/high16 v1, #int 1084227584 // #40a0 +001b7e: 6e20 2400 1000 |00c1: invoke-virtual {v0, v1}, Landroid/graphics/Paint;.setStrokeWidth:(F)V // method@0024 +001b84: 1300 0c00 |00c4: const/16 v0, #int 12 // #c +001b88: 5990 1d00 |00c6: iput v0, v9, Lcom/google/android/checkers/CheckersView;.k:I // field@001d +001b8c: 1200 |00c8: const/4 v0, #int 0 // #0 +001b8e: 5990 1e00 |00c9: iput v0, v9, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +001b92: 5994 1f00 |00cb: iput v4, v9, Lcom/google/android/checkers/CheckersView;.m:I // field@001f +001b96: 5994 2000 |00cd: iput v4, v9, Lcom/google/android/checkers/CheckersView;.n:I // field@0020 +001b9a: 5490 1300 |00cf: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013 +001b9e: 6e10 1100 0000 |00d1: invoke-virtual {v0}, Landroid/content/Context;.getResources:()Landroid/content/res/Resources; // method@0011 +001ba4: 0c00 |00d4: move-result-object v0 +001ba6: 1501 027f |00d5: const/high16 v1, #int 2130837504 // #7f02 +001baa: 6e20 1a00 1000 |00d7: invoke-virtual {v0, v1}, Landroid/content/res/Resources;.getDrawable:(I)Landroid/graphics/drawable/Drawable; // method@001a +001bb0: 0c00 |00da: move-result-object v0 +001bb2: 5b90 2100 |00db: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.o:Landroid/graphics/drawable/Drawable; // field@0021 +001bb6: 2380 3700 |00dd: new-array v0, v8, [I // type@0037 +001bba: 5b90 0800 |00df: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.G:[I // field@0008 +001bbe: 2380 3700 |00e1: new-array v0, v8, [I // type@0037 +001bc2: 5b90 0900 |00e3: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.H:[I // field@0009 +001bc6: 2380 3700 |00e5: new-array v0, v8, [I // type@0037 +001bca: 5b90 0a00 |00e7: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.I:[I // field@000a +001bce: 2380 3700 |00e9: new-array v0, v8, [I // type@0037 +001bd2: 5b90 0b00 |00eb: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.J:[I // field@000b +001bd6: 1300 0002 |00ed: const/16 v0, #int 512 // #200 +001bda: 2300 3600 |00ef: new-array v0, v0, [B // type@0036 +001bde: 6900 5900 |00f1: sput-object v0, Lcom/google/android/checkers/g;.a:[B // field@0059 +001be2: 1300 8003 |00f3: const/16 v0, #int 896 // #380 +001be6: 2300 3600 |00f5: new-array v0, v0, [B // type@0036 +001bea: 6900 5a00 |00f7: sput-object v0, 
Lcom/google/android/checkers/g;.b:[B // field@005a +001bee: 1300 8003 |00f9: const/16 v0, #int 896 // #380 +001bf2: 2300 3600 |00fb: new-array v0, v0, [B // type@0036 +001bf6: 6900 5b00 |00fd: sput-object v0, Lcom/google/android/checkers/g;.c:[B // field@005b +001bfa: 1300 1003 |00ff: const/16 v0, #int 784 // #310 +001bfe: 2300 3600 |0101: new-array v0, v0, [B // type@0036 +001c02: 6900 5c00 |0103: sput-object v0, Lcom/google/android/checkers/g;.d:[B // field@005c +001c06: 1300 001f |0105: const/16 v0, #int 7936 // #1f00 +001c0a: 2300 3600 |0107: new-array v0, v0, [B // type@0036 +001c0e: 6900 5d00 |0109: sput-object v0, Lcom/google/android/checkers/g;.e:[B // field@005d +001c12: 1300 001f |010b: const/16 v0, #int 7936 // #1f00 +001c16: 2300 3600 |010d: new-array v0, v0, [B // type@0036 +001c1a: 6900 5e00 |010f: sput-object v0, Lcom/google/android/checkers/g;.f:[B // field@005e +001c1e: 1300 4036 |0111: const/16 v0, #int 13888 // #3640 +001c22: 2300 3600 |0113: new-array v0, v0, [B // type@0036 +001c26: 6900 5f00 |0115: sput-object v0, Lcom/google/android/checkers/g;.g:[B // field@005f +001c2a: 1300 4036 |0117: const/16 v0, #int 13888 // #3640 +001c2e: 2300 3600 |0119: new-array v0, v0, [B // type@0036 +001c32: 6900 6000 |011b: sput-object v0, Lcom/google/android/checkers/g;.h:[B // field@0060 +001c36: 1300 0070 |011d: const/16 v0, #int 28672 // #7000 +001c3a: 2300 3600 |011f: new-array v0, v0, [B // type@0036 +001c3e: 6900 6100 |0121: sput-object v0, Lcom/google/android/checkers/g;.i:[B // field@0061 +001c42: 1300 0070 |0123: const/16 v0, #int 28672 // #7000 +001c46: 2300 3600 |0125: new-array v0, v0, [B // type@0036 +001c4a: 6900 6200 |0127: sput-object v0, Lcom/google/android/checkers/g;.j:[B // field@0062 +001c4e: 1300 0062 |0129: const/16 v0, #int 25088 // #6200 +001c52: 2300 3600 |012b: new-array v0, v0, [B // type@0036 +001c56: 6900 6300 |012d: sput-object v0, Lcom/google/android/checkers/g;.k:[B // field@0063 +001c5a: 1300 0062 |012f: const/16 v0, #int 25088 // #6200 +001c5e: 2300 3600 |0131: new-array v0, v0, [B // type@0036 +001c62: 6900 6400 |0133: sput-object v0, Lcom/google/android/checkers/g;.l:[B // field@0064 +001c66: 1300 402f |0135: const/16 v0, #int 12096 // #2f40 +001c6a: 2300 3600 |0137: new-array v0, v0, [B // type@0036 +001c6e: 6900 6500 |0139: sput-object v0, Lcom/google/android/checkers/g;.m:[B // field@0065 +001c72: 1300 402f |013b: const/16 v0, #int 12096 // #2f40 +001c76: 2300 3600 |013d: new-array v0, v0, [B // type@0036 +001c7a: 6900 6600 |013f: sput-object v0, Lcom/google/android/checkers/g;.n:[B // field@0066 +001c7e: 1300 5829 |0141: const/16 v0, #int 10584 // #2958 +001c82: 2300 3600 |0143: new-array v0, v0, [B // type@0036 +001c86: 6900 6700 |0145: sput-object v0, Lcom/google/android/checkers/g;.o:[B // field@0067 +001c8a: 1300 5829 |0147: const/16 v0, #int 10584 // #2958 +001c8e: 2300 3600 |0149: new-array v0, v0, [B // type@0036 +001c92: 6900 6800 |014b: sput-object v0, Lcom/google/android/checkers/g;.p:[B // field@0068 +001c96: 1400 00c1 0300 |014d: const v0, #float 0.000000 // #0003c100 +001c9c: 2300 3600 |0150: new-array v0, v0, [B // type@0036 +001ca0: 6900 6900 |0152: sput-object v0, Lcom/google/android/checkers/g;.q:[B // field@0069 +001ca4: 6e10 1100 0a00 |0154: invoke-virtual {v10}, Landroid/content/Context;.getResources:()Landroid/content/res/Resources; // method@0011 +001caa: 0c00 |0157: move-result-object v0 +001cac: 1501 037f |0158: const/high16 v1, #int 2130903040 // #7f03 +001cb0: 6e20 1b00 1000 |015a: invoke-virtual {v0, v1}, 
Landroid/content/res/Resources;.openRawResource:(I)Ljava/io/InputStream; // method@001b
+001cb6: 0c00 |015d: move-result-object v0
+001cb8: 2201 2700 |015e: new-instance v1, Ljava/io/BufferedInputStream; // type@0027
+001cbc: 1302 0020 |0160: const/16 v2, #int 8192 // #2000
+001cc0: 7030 9a00 0102 |0162: invoke-direct {v1, v0, v2}, Ljava/io/BufferedInputStream;.<init>:(Ljava/io/InputStream;I)V // method@009a
+001cc6: 6901 6b00 |0165: sput-object v1, Lcom/google/android/checkers/g;.s:Ljava/io/BufferedInputStream; // field@006b
+001cca: 6200 5900 |0167: sget-object v0, Lcom/google/android/checkers/g;.a:[B // field@0059
+001cce: 7110 9900 0000 |0169: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001cd4: 6200 5a00 |016c: sget-object v0, Lcom/google/android/checkers/g;.b:[B // field@005a
+001cd8: 7110 9900 0000 |016e: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001cde: 6200 5b00 |0171: sget-object v0, Lcom/google/android/checkers/g;.c:[B // field@005b
+001ce2: 7110 9900 0000 |0173: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001ce8: 6200 5c00 |0176: sget-object v0, Lcom/google/android/checkers/g;.d:[B // field@005c
+001cec: 7110 9900 0000 |0178: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001cf2: 6200 5d00 |017b: sget-object v0, Lcom/google/android/checkers/g;.e:[B // field@005d
+001cf6: 7110 9900 0000 |017d: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001cfc: 6200 5e00 |0180: sget-object v0, Lcom/google/android/checkers/g;.f:[B // field@005e
+001d00: 7110 9900 0000 |0182: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d06: 6200 5f00 |0185: sget-object v0, Lcom/google/android/checkers/g;.g:[B // field@005f
+001d0a: 7110 9900 0000 |0187: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d10: 6200 6000 |018a: sget-object v0, Lcom/google/android/checkers/g;.h:[B // field@0060
+001d14: 7110 9900 0000 |018c: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d1a: 6200 6100 |018f: sget-object v0, Lcom/google/android/checkers/g;.i:[B // field@0061
+001d1e: 7110 9900 0000 |0191: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d24: 6200 6200 |0194: sget-object v0, Lcom/google/android/checkers/g;.j:[B // field@0062
+001d28: 7110 9900 0000 |0196: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d2e: 6200 6300 |0199: sget-object v0, Lcom/google/android/checkers/g;.k:[B // field@0063
+001d32: 7110 9900 0000 |019b: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d38: 6200 6400 |019e: sget-object v0, Lcom/google/android/checkers/g;.l:[B // field@0064
+001d3c: 7110 9900 0000 |01a0: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d42: 6200 6500 |01a3: sget-object v0, Lcom/google/android/checkers/g;.m:[B // field@0065
+001d46: 7110 9900 0000 |01a5: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d4c: 6200 6600 |01a8: sget-object v0, Lcom/google/android/checkers/g;.n:[B // field@0066
+001d50: 7110 9900 0000 |01aa: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d56: 6200 6700 |01ad: sget-object v0, Lcom/google/android/checkers/g;.o:[B // field@0067
+001d5a: 7110 9900 0000 |01af: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d60: 6200 6800 |01b2: sget-object v0, Lcom/google/android/checkers/g;.p:[B // field@0068
+001d64: 7110 9900 0000 |01b4: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d6a: 6200 6900 |01b7: sget-object v0, Lcom/google/android/checkers/g;.q:[B // field@0069
+001d6e: 7110 9900 0000 |01b9: invoke-static {v0}, Lcom/google/android/checkers/g;.a:([B)Z // method@0099
+001d74: 6200 6b00 |01bc: sget-object v0, Lcom/google/android/checkers/g;.s:Ljava/io/BufferedInputStream; // field@006b
+001d78: 6e10 9b00 0000 |01be: invoke-virtual {v0}, Ljava/io/BufferedInputStream;.close:()V // method@009b
+001d7e: 1200 |01c1: const/4 v0, #int 0 // #0
+001d80: 6900 6b00 |01c2: sput-object v0, Lcom/google/android/checkers/g;.s:Ljava/io/BufferedInputStream; // field@006b
+001d84: 1210 |01c4: const/4 v0, #int 1 // #1
+001d86: 6a00 6a00 |01c5: sput-boolean v0, Lcom/google/android/checkers/g;.r:Z // field@006a
+001d8a: 2200 2000 |01c7: new-instance v0, Lcom/google/android/checkers/a; // type@0020
+001d8e: 7020 7200 9000 |01c9: invoke-direct {v0, v9}, Lcom/google/android/checkers/a;.<init>:(Lcom/google/android/checkers/CheckersView;)V // method@0072
+001d94: 5b90 2200 |01cc: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+001d98: 5b95 0f00 |01ce: iput-object v5, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+001d9c: 7020 5800 b900 |01d0: invoke-direct {v9, v11}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences;)Z // method@0058
+001da2: 0a00 |01d3: move-result v0
+001da4: 3800 8500 |01d4: if-eqz v0, 0259 // +0085
+001da8: 7010 6700 0900 |01d6: invoke-direct {v9}, Lcom/google/android/checkers/CheckersView;.e:()Z // method@0067
+001dae: 0a00 |01d9: move-result v0
+001db0: 3900 0600 |01da: if-nez v0, 01e0 // +0006
+001db4: 1a00 1701 |01dc: const-string v0, "restored game" // string@0117
+001db8: 5b90 0f00 |01de: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+001dbc: 7010 6400 0900 |01e0: invoke-direct {v9}, Lcom/google/android/checkers/CheckersView;.d:()V // method@0064
+001dc2: 1500 20c1 |01e3: const/high16 v0, #int -1054867456 // #c120
+001dc6: 5990 1000 |01e5: iput v0, v9, Lcom/google/android/checkers/CheckersView;.O:F // field@0010
+001dca: 1500 20c1 |01e7: const/high16 v0, #int -1054867456 // #c120
+001dce: 5990 1100 |01e9: iput v0, v9, Lcom/google/android/checkers/CheckersView;.P:F // field@0011
+001dd2: 5994 1200 |01eb: iput v4, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+001dd6: 5590 0600 |01ed: iget-boolean v0, v9, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006
+001dda: 3800 2e00 |01ef: if-eqz v0, 021d // +002e
+001dde: 7010 6700 0900 |01f1: invoke-direct {v9}, Lcom/google/android/checkers/CheckersView;.e:()Z // method@0067
+001de4: 0a00 |01f4: move-result v0
+001de6: 3800 2800 |01f5: if-eqz v0, 021d // +0028
+001dea: 2200 0500 |01f7: new-instance v0, Landroid/app/AlertDialog$Builder; // type@0005
+001dee: 5491 1300 |01f9: iget-object v1, v9, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013
+001df2: 7020 0900 1000 |01fb: invoke-direct {v0, v1}, Landroid/app/AlertDialog$Builder;.<init>:(Landroid/content/Context;)V // method@0009
+001df8: 1a01 1200 |01fe: const-string v1, "Checkers for Android was written by Aart J.C. Bik.
+
+Use the touch screen or trackball to make a move. Press the MENU button for more options, such as making captures optional instead of mandatory.
+
+The application complies with the official American checkers rules, where black moves first, captures are mandatory, men only move and jump forward, and kings move and jump forward and backward (but not over a distance). Please note that many variants of checkers exist, and this game may not use the rules you are most familiar with.
+" // string@0012
+001dfc: 6e20 0c00 1000 |0200: invoke-virtual {v0, v1}, Landroid/app/AlertDialog$Builder;.setMessage:(Ljava/lang/CharSequence;)Landroid/app/AlertDialog$Builder; // method@000c
+001e02: 0c00 |0203: move-result-object v0
+001e04: 1a01 2b00 |0204: const-string v1, "KEEP SHOWING" // string@002b
+001e08: 2202 2300 |0206: new-instance v2, Lcom/google/android/checkers/d; // type@0023
+001e0c: 7020 9300 9200 |0208: invoke-direct {v2, v9}, Lcom/google/android/checkers/d;.<init>:(Lcom/google/android/checkers/CheckersView;)V // method@0093
+001e12: 6e30 0e00 1002 |020b: invoke-virtual {v0, v1, v2}, Landroid/app/AlertDialog$Builder;.setPositiveButton:(Ljava/lang/CharSequence;Landroid/content/DialogInterface$OnClickListener;)Landroid/app/AlertDialog$Builder; // method@000e
+001e18: 0c00 |020e: move-result-object v0
+001e1a: 1a01 7c00 |020f: const-string v1, "STOP SHOWING" // string@007c
+001e1e: 2202 2400 |0211: new-instance v2, Lcom/google/android/checkers/e; // type@0024
+001e22: 7020 9500 9200 |0213: invoke-direct {v2, v9}, Lcom/google/android/checkers/e;.<init>:(Lcom/google/android/checkers/CheckersView;)V // method@0095
+001e28: 6e30 0d00 1002 |0216: invoke-virtual {v0, v1, v2}, Landroid/app/AlertDialog$Builder;.setNegativeButton:(Ljava/lang/CharSequence;Landroid/content/DialogInterface$OnClickListener;)Landroid/app/AlertDialog$Builder; // method@000d
+001e2e: 0c00 |0219: move-result-object v0
+001e30: 6e10 1000 0000 |021a: invoke-virtual {v0}, Landroid/app/AlertDialog$Builder;.show:()Landroid/app/AlertDialog; // method@0010
+001e36: 0e00 |021d: return-void
+001e38: 0d00 |021e: move-exception v0
+001e3a: 1a01 0800 |021f: const-string v1, "BIK" // string@0008
+001e3e: 2202 3000 |0221: new-instance v2, Ljava/lang/StringBuilder; // type@0030
+001e42: 1a03 b700 |0223: const-string v3, "cannot read tb: " // string@00b7
+001e46: 7020 a600 3200 |0225: invoke-direct {v2, v3}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6
+001e4c: 6e20 a800 0200 |0228: invoke-virtual {v2, v0}, Ljava/lang/StringBuilder;.append:(Ljava/lang/Object;)Ljava/lang/StringBuilder; // method@00a8
+001e52: 0c00 |022b: move-result-object v0
+001e54: 6e10 aa00 0000 |022c: invoke-virtual {v0}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa
+001e5a: 0c00 |022f: move-result-object v0
+001e5c: 7120 2a00 0100 |0230: invoke-static {v1, v0}, Landroid/util/Log;.d:(Ljava/lang/String;Ljava/lang/String;)I // method@002a
+001e62: 6905 5900 |0233: sput-object v5, Lcom/google/android/checkers/g;.a:[B // field@0059
+001e66: 6905 5a00 |0235: sput-object v5, Lcom/google/android/checkers/g;.b:[B // field@005a
+001e6a: 6905 5b00 |0237: sput-object v5, Lcom/google/android/checkers/g;.c:[B // field@005b
+001e6e: 6905 5c00 |0239: sput-object v5, Lcom/google/android/checkers/g;.d:[B // field@005c
+001e72: 6905 5d00 |023b: sput-object v5, Lcom/google/android/checkers/g;.e:[B // field@005d
+001e76: 6905 5e00 |023d: sput-object v5, Lcom/google/android/checkers/g;.f:[B // field@005e
+001e7a: 6905 5f00 |023f: sput-object v5, Lcom/google/android/checkers/g;.g:[B // field@005f
+001e7e: 6905 6000 |0241: sput-object v5, Lcom/google/android/checkers/g;.h:[B // field@0060
+001e82: 6905 6100 |0243: sput-object v5, Lcom/google/android/checkers/g;.i:[B // field@0061
+001e86: 6905 6200 |0245: sput-object v5, Lcom/google/android/checkers/g;.j:[B // field@0062
+001e8a: 6905 6300 |0247: sput-object v5, Lcom/google/android/checkers/g;.k:[B // field@0063
+001e8e: 6905 6400 |0249: sput-object v5, Lcom/google/android/checkers/g;.l:[B // field@0064
+001e92: 6905 6500 |024b: sput-object v5, Lcom/google/android/checkers/g;.m:[B // field@0065
+001e96: 6905 6600 |024d: sput-object v5, Lcom/google/android/checkers/g;.n:[B // field@0066
+001e9a: 6905 6700 |024f: sput-object v5, Lcom/google/android/checkers/g;.o:[B // field@0067
+001e9e: 6905 6800 |0251: sput-object v5, Lcom/google/android/checkers/g;.p:[B // field@0068
+001ea2: 6905 6900 |0253: sput-object v5, Lcom/google/android/checkers/g;.q:[B // field@0069
+001ea6: 6a04 6a00 |0255: sput-boolean v4, Lcom/google/android/checkers/g;.r:Z // field@006a
+001eaa: 2900 70ff |0257: goto/16 01c7 // -0090
+001eae: 5997 2300 |0259: iput v7, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+001eb2: 5490 2200 |025b: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+001eb6: 5200 3d00 |025d: iget v0, v0, Lcom/google/android/checkers/a;.d:I // field@003d
+001eba: 5990 2400 |025f: iput v0, v9, Lcom/google/android/checkers/CheckersView;.r:I // field@0024
+001ebe: 5490 2200 |0261: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+001ec2: 5200 3f00 |0263: iget v0, v0, Lcom/google/android/checkers/a;.f:I // field@003f
+001ec6: 5990 2500 |0265: iput v0, v9, Lcom/google/android/checkers/CheckersView;.s:I // field@0025
+001eca: 5490 2200 |0267: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+001ece: 5200 3e00 |0269: iget v0, v0, Lcom/google/android/checkers/a;.e:I // field@003e
+001ed2: 5990 2600 |026b: iput v0, v9, Lcom/google/android/checkers/CheckersView;.t:I // field@0026
+001ed6: 5490 2200 |026d: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+001eda: 5200 4000 |026f: iget v0, v0, Lcom/google/android/checkers/a;.g:I // field@0040
+001ede: 5990 2700 |0271: iput v0, v9, Lcom/google/android/checkers/CheckersView;.u:I // field@0027
+001ee2: 5994 2800 |0273: iput v4, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+001ee6: 5994 2900 |0275: iput v4, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+001eea: 5994 2a00 |0277: iput v4, v9, Lcom/google/android/checkers/CheckersView;.x:I // field@002a
+001eee: 5c94 2b00 |0279: iput-boolean v4, v9, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b
+001ef2: 1230 |027b: const/4 v0, #int 3 // #3
+001ef4: 5990 2c00 |027c: iput v0, v9, Lcom/google/android/checkers/CheckersView;.z:I // field@002c
+001ef8: 5c97 0200 |027e: iput-boolean v7, v9, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002
+001efc: 5c94 0300 |0280: iput-boolean v4, v9, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+001f00: 5c94 0400 |0282: iput-boolean v4, v9, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004
+001f04: 5c94 0500 |0284: iput-boolean v4, v9, Lcom/google/android/checkers/CheckersView;.D:Z // field@0005
+001f08: 5c97 0600 |0286: iput-boolean v7, v9, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006
+001f0c: 5994 0700 |0288: iput v4, v9, Lcom/google/android/checkers/CheckersView;.F:I // field@0007
+001f10: 5994 0c00 |028a: iput v4, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+001f14: 5994 0d00 |028c: iput v4, v9, Lcom/google/android/checkers/CheckersView;.L:I // field@000d
+001f18: 5490 2200 |028e: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+001f1c: 5200 3c00 |0290: iget v0, v0, Lcom/google/android/checkers/a;.c:I // field@003c
+001f20: 5990 0e00 |0292: iput v0, v9, Lcom/google/android/checkers/CheckersView;.M:I // field@000e
+001f24: 2900 4cff |0294: goto/16 01e0 // -00b4
+ catches : 1
+ 0x00ef - 0x01c7
+ Ljava/lang/Exception; -> 0x021e
+ positions :
+ locals :
+
+ #1 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V'
+ access : 0x000a (PRIVATE STATIC)
+ code -
+ registers : 6
+ ins : 3
+ outs : 3
+ insns size : 31 16-bit code units
+001f38: |[001f38] com.google.android.checkers.CheckersView.a:(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V
+001f48: 1200 |0000: const/4 v0, #int 0 // #0
+001f4a: 1301 0800 |0001: const/16 v1, #int 8 // #8
+001f4e: 3410 0300 |0003: if-lt v0, v1, 0006 // +0003
+001f52: 0e00 |0005: return-void
+001f54: 2201 3000 |0006: new-instance v1, Ljava/lang/StringBuilder; // type@0030
+001f58: 7110 a500 0400 |0008: invoke-static {v4}, Ljava/lang/String;.valueOf:(Ljava/lang/Object;)Ljava/lang/String; // method@00a5
+001f5e: 0c02 |000b: move-result-object v2
+001f60: 7020 a600 2100 |000c: invoke-direct {v1, v2}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6
+001f66: 6e20 a700 0100 |000f: invoke-virtual {v1, v0}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+001f6c: 0c01 |0012: move-result-object v1
+001f6e: 6e10 aa00 0100 |0013: invoke-virtual {v1}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa
+001f74: 0c01 |0016: move-result-object v1
+001f76: 4402 0500 |0017: aget v2, v5, v0
+001f7a: 7230 1600 1302 |0019: invoke-interface {v3, v1, v2}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016
+001f80: d800 0001 |001c: add-int/lit8 v0, v0, #int 1 // #01
+001f84: 28e3 |001e: goto 0001 // -001d
+ catches : (none)
+ positions :
+ locals :
+
+ #2 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Landroid/content/SharedPreferences;Ljava/lang/String;[I)V'
+ access : 0x000a (PRIVATE STATIC)
+ code -
+ registers : 7
+ ins : 3
+ outs : 3
+ insns size : 33 16-bit code units
+001f88: |[001f88] com.google.android.checkers.CheckersView.a:(Landroid/content/SharedPreferences;Ljava/lang/String;[I)V
+001f98: 1201 |0000: const/4 v1, #int 0 // #0
+001f9a: 0110 |0001: move v0, v1
+001f9c: 1302 0800 |0002: const/16 v2, #int 8 // #8
+001fa0: 3420 0300 |0004: if-lt v0, v2, 0007 // +0003
+001fa4: 0e00 |0006: return-void
+001fa6: 2202 3000 |0007: new-instance v2, Ljava/lang/StringBuilder; // type@0030
+001faa: 7110 a500 0500 |0009: invoke-static {v5}, Ljava/lang/String;.valueOf:(Ljava/lang/Object;)Ljava/lang/String; // method@00a5
+001fb0: 0c03 |000c: move-result-object v3
+001fb2: 7020 a600 3200 |000d: invoke-direct {v2, v3}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6
+001fb8: 6e20 a700 0200 |0010: invoke-virtual {v2, v0}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+001fbe: 0c02 |0013: move-result-object v2
+001fc0: 6e10 aa00 0200 |0014: invoke-virtual {v2}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa
+001fc6: 0c02 |0017: move-result-object v2
+001fc8: 7230 1900 2401 |0018: invoke-interface {v4, v2, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+001fce: 0a02 |001b: move-result v2
+001fd0: 4b02 0600 |001c: aput v2, v6, v0
+001fd4: d800 0001 |001e: add-int/lit8 v0, v0, #int 1 // #01
+001fd8: 28e2 |0020: goto 0002 // -001e
+ catches : (none)
+ positions :
+ locals :
+
+ #3 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Landroid/graphics/Canvas;IIII)V'
+ access : 0x0012 (PRIVATE FINAL)
+ code -
+ registers : 13
+ ins : 6
+ outs : 6
+ insns size : 63 16-bit code units
+001fdc: |[001fdc] com.google.android.checkers.CheckersView.a:(Landroid/graphics/Canvas;IIII)V
+001fec: e201 0902 |0000: ushr-int/lit8 v1, v9, #int 2 // #02
+001ff0: dd00 0903 |0002: and-int/lit8 v0, v9, #int 3 // #03
+001ff4: da00 0002 |0004: mul-int/lit8 v0, v0, #int 2 // #02
+001ff8: d800 0001 |0006: add-int/lit8 v0, v0, #int 1 // #01
+001ffc: dd02 0101 |0008: and-int/lit8 v2, v1, #int 1 // #01
+002000: 9103 0002 |000a: sub-int v3, v0, v2
+002004: e200 0a02 |000c: ushr-int/lit8 v0, v10, #int 2 // #02
+002008: dd02 0a03 |000e: and-int/lit8 v2, v10, #int 3 // #03
+00200c: da02 0202 |0010: mul-int/lit8 v2, v2, #int 2 // #02
+002010: d802 0201 |0012: add-int/lit8 v2, v2, #int 1 // #01
+002014: dd04 0001 |0014: and-int/lit8 v4, v0, #int 1 // #01
+002018: b142 |0016: sub-int/2addr v2, v4
+00201a: 5574 0400 |0017: iget-boolean v4, v7, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004
+00201e: 3804 2100 |0019: if-eqz v4, 003a // +0021
+002022: d901 0107 |001b: rsub-int/lit8 v1, v1, #int 7 // #07
+002026: d903 0307 |001d: rsub-int/lit8 v3, v3, #int 7 // #07
+00202a: d900 0007 |001f: rsub-int/lit8 v0, v0, #int 7 // #07
+00202e: d902 0207 |0021: rsub-int/lit8 v2, v2, #int 7 // #07
+002032: 0116 |0023: move v6, v1
+002034: 0131 |0024: move v1, v3
+002036: 0123 |0025: move v3, v2
+002038: 0162 |0026: move v2, v6
+00203a: b2b1 |0027: mul-int/2addr v1, v11
+00203c: b0c1 |0028: add-int/2addr v1, v12
+00203e: 8211 |0029: int-to-float v1, v1
+002040: b2b2 |002a: mul-int/2addr v2, v11
+002042: b0c2 |002b: add-int/2addr v2, v12
+002044: 8222 |002c: int-to-float v2, v2
+002046: b2b3 |002d: mul-int/2addr v3, v11
+002048: b0c3 |002e: add-int/2addr v3, v12
+00204a: 8233 |002f: int-to-float v3, v3
+00204c: b2b0 |0030: mul-int/2addr v0, v11
+00204e: b0c0 |0031: add-int/2addr v0, v12
+002050: 8204 |0032: int-to-float v4, v0
+002052: 5475 1a00 |0033: iget-object v5, v7, Lcom/google/android/checkers/CheckersView;.h:Landroid/graphics/Paint; // field@001a
+002056: 0780 |0035: move-object v0, v8
+002058: 7406 1d00 0000 |0036: invoke-virtual/range {v0, v1, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawLine:(FFFFLandroid/graphics/Paint;)V // method@001d
+00205e: 0e00 |0039: return-void
+002060: 0116 |003a: move v6, v1
+002062: 0131 |003b: move v1, v3
+002064: 0123 |003c: move v3, v2
+002066: 0162 |003d: move v2, v6
+002068: 28e9 |003e: goto 0027 // -0017
+ catches : (none)
+ positions :
+ locals :
+
+ #4 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V'
+ access : 0x000a (PRIVATE STATIC)
+ code -
+ registers : 13
+ ins : 8
+ outs : 5
+ insns size : 81 16-bit code units
+00206c: |[00206c] com.google.android.checkers.CheckersView.a:(Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V
+00207c: 8260 |0000: int-to-float v0, v6
+00207e: 8271 |0001: int-to-float v1, v7
+002080: d802 08fe |0002: add-int/lit8 v2, v8, #int -2 // #fe
+002084: 8222 |0004: int-to-float v2, v2
+002086: 6e5a 1c00 0521 |0005: invoke-virtual {v5, v0, v1, v2, v10}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+00208c: 8260 |0008: int-to-float v0, v6
+00208e: 8271 |0009: int-to-float v1, v7
+002090: d802 08fc |000a: add-int/lit8 v2, v8, #int -4 // #fc
+002094: 8222 |000c: int-to-float v2, v2
+002096: 6e5b 1c00 0521 |000d: invoke-virtual {v5, v0, v1, v2, v11}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+00209c: 8260 |0010: int-to-float v0, v6
+00209e: 8271 |0011: int-to-float v1, v7
+0020a0: d802 08f9 |0012: add-int/lit8 v2, v8, #int -7 // #f9
+0020a4: 8222 |0014: int-to-float v2, v2
+0020a6: 6e5a 1c00 0521 |0015: invoke-virtual {v5, v0, v1, v2, v10}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+0020ac: 8260 |0018: int-to-float v0, v6
+0020ae: 8271 |0019: int-to-float v1, v7
+0020b0: d802 08f7 |001a: add-int/lit8 v2, v8, #int -9 // #f7
+0020b4: 8222 |001c: int-to-float v2, v2
+0020b6: 6e5b 1c00 0521 |001d: invoke-virtual {v5, v0, v1, v2, v11}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+0020bc: 380c 3000 |0020: if-eqz v12, 0050 // +0030
+0020c0: e100 0802 |0022: shr-int/lit8 v0, v8, #int 2 // #02
+0020c4: 9101 0600 |0024: sub-int v1, v6, v0
+0020c8: 9100 0700 |0026: sub-int v0, v7, v0
+0020cc: 8212 |0028: int-to-float v2, v1
+0020ce: 8203 |0029: int-to-float v3, v0
+0020d0: d804 08fe |002a: add-int/lit8 v4, v8, #int -2 // #fe
+0020d4: 8244 |002c: int-to-float v4, v4
+0020d6: 6e5a 1c00 2543 |002d: invoke-virtual {v5, v2, v3, v4, v10}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+0020dc: 8212 |0030: int-to-float v2, v1
+0020de: 8203 |0031: int-to-float v3, v0
+0020e0: d804 08fc |0032: add-int/lit8 v4, v8, #int -4 // #fc
+0020e4: 8244 |0034: int-to-float v4, v4
+0020e6: 6e5b 1c00 2543 |0035: invoke-virtual {v5, v2, v3, v4, v11}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+0020ec: 8212 |0038: int-to-float v2, v1
+0020ee: 8203 |0039: int-to-float v3, v0
+0020f0: d804 08f9 |003a: add-int/lit8 v4, v8, #int -7 // #f9
+0020f4: 8244 |003c: int-to-float v4, v4
+0020f6: 6e5a 1c00 2543 |003d: invoke-virtual {v5, v2, v3, v4, v10}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+0020fc: 8212 |0040: int-to-float v2, v1
+0020fe: 8203 |0041: int-to-float v3, v0
+002100: d804 08f7 |0042: add-int/lit8 v4, v8, #int -9 // #f7
+002104: 8244 |0044: int-to-float v4, v4
+002106: 6e5b 1c00 2543 |0045: invoke-virtual {v5, v2, v3, v4, v11}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c
+00210c: 1a02 2a00 |0048: const-string v2, "K" // string@002a
+002110: b191 |004a: sub-int/2addr v1, v9
+002112: 8211 |004b: int-to-float v1, v1
+002114: 8200 |004c: int-to-float v0, v0
+002116: 6e5a 2000 2501 |004d: invoke-virtual {v5, v2, v1, v0, v10}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020
+00211c: 0e00 |0050: return-void
+ catches : (none)
+ positions :
+ locals :
+
+ #5 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Lcom/google/android/checkers/CheckersView;I)V'
+ access : 0x1008 (STATIC SYNTHETIC)
+ code -
+ registers : 2
+ ins : 2
+ outs : 2
+ insns size : 4 16-bit code units
+002120: |[002120] com.google.android.checkers.CheckersView.a:(Lcom/google/android/checkers/CheckersView;I)V
+002130: 7020 5e00 1000 |0000: invoke-direct {v0, v1}, Lcom/google/android/checkers/CheckersView;.b:(I)V // method@005e
+002136: 0e00 |0003: return-void
+ catches : (none)
+ positions :
+ locals :
+
+ #6 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Landroid/content/SharedPreferences;)Z'
+ access : 0x20012 (PRIVATE FINAL DECLARED_SYNCHRONIZED)
+ code -
+ registers : 12
+ ins : 2
+ outs : 6
+ insns size : 294 16-bit code units
+002138: |[002138] com.google.android.checkers.CheckersView.a:(Landroid/content/SharedPreferences;)Z
+002148: 1249 |0000: const/4 v9, #int 4 // #4
+00214a: 1232 |0001: const/4 v2, #int 3 // #3
+00214c: 1217 |0002: const/4 v7, #int 1 // #1
+00214e: 1206 |0003: const/4 v6, #int 0 // #0
+002150: 1d0a |0004: monitor-enter v10
+002152: 380b 1b01 |0005: if-eqz v11, 0120 // +011b
+002156: 1a00 d000 |0007: const-string v0, "format" // string@00d0
+00215a: 1201 |0009: const/4 v1, #int 0 // #0
+00215c: 7230 1900 0b01 |000a: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+002162: 0a00 |000d: move-result v0
+002164: 1301 2200 |000e: const/16 v1, #int 34 // #22
+002168: 3210 0500 |0010: if-eq v0, v1, 0015 // +0005
+00216c: 0160 |0012: move v0, v6
+00216e: 1e0a |0013: monitor-exit v10
+002170: 0f00 |0014: return v0
+002172: 1a00 3101 |0015: const-string v0, "state" // string@0131
+002176: 1211 |0017: const/4 v1, #int 1 // #1
+002178: 7230 1900 0b01 |0018: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+00217e: 0a00 |001b: move-result v0
+002180: 59a0 2300 |001c: iput v0, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002184: 1a00 3f01 |001e: const-string v0, "wp" // string@013f
+002188: 1201 |0020: const/4 v1, #int 0 // #0
+00218a: 7230 1900 0b01 |0021: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+002190: 0a00 |0024: move-result v0
+002192: 59a0 2400 |0025: iput v0, v10, Lcom/google/android/checkers/CheckersView;.r:I // field@0024
+002196: 1a00 b400 |0027: const-string v0, "bp" // string@00b4
+00219a: 1201 |0029: const/4 v1, #int 0 // #0
+00219c: 7230 1900 0b01 |002a: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0021a2: 0a00 |002d: move-result v0
+0021a4: 59a0 2500 |002e: iput v0, v10, Lcom/google/android/checkers/CheckersView;.s:I // field@0025
+0021a8: 1a00 3e01 |0030: const-string v0, "wk" // string@013e
+0021ac: 1201 |0032: const/4 v1, #int 0 // #0
+0021ae: 7230 1900 0b01 |0033: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0021b4: 0a00 |0036: move-result v0
+0021b6: 59a0 2600 |0037: iput v0, v10, Lcom/google/android/checkers/CheckersView;.t:I // field@0026
+0021ba: 1a00 b300 |0039: const-string v0, "bk" // string@00b3
+0021be: 1201 |003b: const/4 v1, #int 0 // #0
+0021c0: 7230 1900 0b01 |003c: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0021c6: 0a00 |003f: move-result v0
+0021c8: 59a0 2700 |0040: iput v0, v10, Lcom/google/android/checkers/CheckersView;.u:I // field@0027
+0021cc: 1a00 e800 |0042: const-string v0, "l1" // string@00e8
+0021d0: 1201 |0044: const/4 v1, #int 0 // #0
+0021d2: 7230 1900 0b01 |0045: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0021d8: 0a00 |0048: move-result v0
+0021da: 59a0 2800 |0049: iput v0, v10, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+0021de: 1a00 e900 |004b: const-string v0, "l2" // string@00e9
+0021e2: 1201 |004d: const/4 v1, #int 0 // #0
+0021e4: 7230 1900 0b01 |004e: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0021ea: 0a00 |0051: move-result v0
+0021ec: 59a0 2900 |0052: iput v0, v10, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+0021f0: 1a00 ef00 |0054: const-string v0, "lm" // string@00ef
+0021f4: 1201 |0056: const/4 v1, #int 0 // #0
+0021f6: 7230 1900 0b01 |0057: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0021fc: 0a00 |005a: move-result v0
+0021fe: 59a0 2a00 |005b: iput v0, v10, Lcom/google/android/checkers/CheckersView;.x:I // field@002a
+002202: 1a00 b800 |005d: const-string v0, "cap" // string@00b8
+002206: 1211 |005f: const/4 v1, #int 1 // #1
+002208: 7230 1800 0b01 |0060: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getBoolean:(Ljava/lang/String;Z)Z // method@0018
+00220e: 0a00 |0063: move-result v0
+002210: 5ca0 2b00 |0064: iput-boolean v0, v10, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b
+002214: 1a00 ee00 |0066: const-string v0, "level" // string@00ee
+002218: 1231 |0068: const/4 v1, #int 3 // #3
+00221a: 7230 1900 0b01 |0069: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+002220: 0a00 |006c: move-result v0
+002222: 59a0 2c00 |006d: iput v0, v10, Lcom/google/android/checkers/CheckersView;.z:I // field@002c
+002226: 1a00 2d01 |006f: const-string v0, "show" // string@012d
+00222a: 1211 |0071: const/4 v1, #int 1 // #1
+00222c: 7230 1800 0b01 |0072: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getBoolean:(Ljava/lang/String;Z)Z // method@0018
+002232: 0a00 |0075: move-result v0
+002234: 5ca0 0200 |0076: iput-boolean v0, v10, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002
+002238: 1a00 d100 |0078: const-string v0, "free" // string@00d1
+00223c: 1201 |007a: const/4 v1, #int 0 // #0
+00223e: 7230 1800 0b01 |007b: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getBoolean:(Ljava/lang/String;Z)Z // method@0018
+002244: 0a00 |007e: move-result v0
+002246: 5ca0 0300 |007f: iput-boolean v0, v10, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+00224a: 1a00 1801 |0081: const-string v0, "rot" // string@0118
+00224e: 1201 |0083: const/4 v1, #int 0 // #0
+002250: 7230 1800 0b01 |0084: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getBoolean:(Ljava/lang/String;Z)Z // method@0018
+002256: 0a00 |0087: move-result v0
+002258: 5ca0 0400 |0088: iput-boolean v0, v10, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004
+00225c: 1a00 d300 |008a: const-string v0, "full" // string@00d3
+002260: 1201 |008c: const/4 v1, #int 0 // #0
+002262: 7230 1800 0b01 |008d: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getBoolean:(Ljava/lang/String;Z)Z // method@0018
+002268: 0a00 |0090: move-result v0
+00226a: 5ca0 0500 |0091: iput-boolean v0, v10, Lcom/google/android/checkers/CheckersView;.D:Z // field@0005
+00226e: 1a00 2f01 |0093: const-string v0, "start" // string@012f
+002272: 1211 |0095: const/4 v1, #int 1 // #1
+002274: 7230 1800 0b01 |0096: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getBoolean:(Ljava/lang/String;Z)Z // method@0018
+00227a: 0a00 |0099: move-result v0
+00227c: 5ca0 0600 |009a: iput-boolean v0, v10, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006
+002280: 1a00 bd00 |009c: const-string v0, "color" // string@00bd
+002284: 1201 |009e: const/4 v1, #int 0 // #0
+002286: 7230 1900 0b01 |009f: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+00228c: 0a00 |00a2: move-result v0
+00228e: 59a0 0700 |00a3: iput v0, v10, Lcom/google/android/checkers/CheckersView;.F:I // field@0007
+002292: 1a00 f300 |00a5: const-string v0, "lwp" // string@00f3
+002296: 54a1 0800 |00a7: iget-object v1, v10, Lcom/google/android/checkers/CheckersView;.G:[I // field@0008
+00229a: 7130 5300 0b01 |00a9: invoke-static {v11, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences;Ljava/lang/String;[I)V // method@0053
+0022a0: 1a00 f200 |00ac: const-string v0, "lwk" // string@00f2
+0022a4: 54a1 0900 |00ae: iget-object v1, v10, Lcom/google/android/checkers/CheckersView;.H:[I // field@0009
+0022a8: 7130 5300 0b01 |00b0: invoke-static {v11, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences;Ljava/lang/String;[I)V // method@0053
+0022ae: 1a00 eb00 |00b3: const-string v0, "lbp" // string@00eb
+0022b2: 54a1 0a00 |00b5: iget-object v1, v10, Lcom/google/android/checkers/CheckersView;.I:[I // field@000a
+0022b6: 7130 5300 0b01 |00b7: invoke-static {v11, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences;Ljava/lang/String;[I)V // method@0053
+0022bc: 1a00 ea00 |00ba: const-string v0, "lbk" // string@00ea
+0022c0: 54a1 0b00 |00bc: iget-object v1, v10, Lcom/google/android/checkers/CheckersView;.J:[I // field@000b
+0022c4: 7130 5300 0b01 |00be: invoke-static {v11, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences;Ljava/lang/String;[I)V // method@0053
+0022ca: 1a00 f100 |00c1: const-string v0, "lp" // string@00f1
+0022ce: 1201 |00c3: const/4 v1, #int 0 // #0
+0022d0: 7230 1900 0b01 |00c4: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0022d6: 0a00 |00c7: move-result v0
+0022d8: 59a0 0c00 |00c8: iput v0, v10, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+0022dc: 1a00 ec00 |00ca: const-string v0, "lc" // string@00ec
+0022e0: 1201 |00cc: const/4 v1, #int 0 // #0
+0022e2: 7230 1900 0b01 |00cd: invoke-interface {v11, v0, v1}, Landroid/content/SharedPreferences;.getInt:(Ljava/lang/String;I)I // method@0019
+0022e8: 0a00 |00d0: move-result v0
+0022ea: 59a0 0d00 |00d1: iput v0, v10, Lcom/google/android/checkers/CheckersView;.L:I // field@000d
+0022ee: 52a0 2c00 |00d3: iget v0, v10, Lcom/google/android/checkers/CheckersView;.z:I // field@002c
+0022f2: 6e20 7000 0a00 |00d5: invoke-virtual {v10, v0}, Lcom/google/android/checkers/CheckersView;.setLevel:(I)V // method@0070
+0022f8: 52a0 2300 |00d8: iget v0, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0022fc: 3220 3d00 |00da: if-eq v0, v2, 0117 // +003d
+002300: 52a0 2300 |00dc: iget v0, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002304: 3290 3900 |00de: if-eq v0, v9, 0117 // +0039
+002308: 52a0 2300 |00e0: iget v0, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+00230c: 1261 |00e2: const/4 v1, #int 6 // #6
+00230e: 3210 3400 |00e3: if-eq v0, v1, 0117 // +0034
+002312: 0165 |00e5: move v5, v6
+002314: 54a8 2200 |00e6: iget-object v8, v10, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002318: 1d08 |00e8: monitor-enter v8
+00231a: 54a0 2200 |00e9: iget-object v0, v10, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00231e: 52a1 2400 |00eb: iget v1, v10, Lcom/google/android/checkers/CheckersView;.r:I // field@0024
+002322: 52a2 2600 |00ed: iget v2, v10, Lcom/google/android/checkers/CheckersView;.t:I // field@0026
+002326: 52a3 2500 |00ef: iget v3, v10, Lcom/google/android/checkers/CheckersView;.s:I // field@0025
+00232a: 52a4 2700 |00f1: iget v4, v10, Lcom/google/android/checkers/CheckersView;.u:I // field@0027
+00232e: 7406 8000 0000 |00f3: invoke-virtual/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIIZ)V // method@0080
+002334: 54a0 2200 |00f6: iget-object v0, v10, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002338: 55a1 0300 |00f8: iget-boolean v1, v10, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+00233c: 6e30 7a00 5001 |00fa: invoke-virtual {v0, v5, v1}, Lcom/google/android/checkers/a;.a:(ZZ)I // method@007a
+002342: 54a0 2200 |00fd: iget-object v0, v10, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002346: 5200 3c00 |00ff: iget v0, v0, Lcom/google/android/checkers/a;.c:I // field@003c
+00234a: 59a0 0e00 |0101: iput v0, v10, Lcom/google/android/checkers/CheckersView;.M:I // field@000e
+00234e: 1e08 |0103: monitor-exit v8
+002350: 52a0 2300 |0104: iget v0, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002354: 3290 0700 |0106: if-eq v0, v9, 010d // +0007
+002358: 52a0 2300 |0108: iget v0, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+00235c: 1221 |010a: const/4 v1, #int 2 // #2
+00235e: 3310 0900 |010b: if-ne v0, v1, 0114 // +0009
+002362: 54a0 2200 |010d: iget-object v0, v10, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002366: 55a1 0300 |010f: iget-boolean v1, v10, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+00236a: 6e30 8700 5001 |0111: invoke-virtual {v0, v5, v1}, Lcom/google/android/checkers/a;.b:(ZZ)V // method@0087
+002370: 0170 |0114: move v0, v7
+002372: 2900 fefe |0115: goto/16 0013 // -0102
+002376: 0175 |0117: move v5, v7
+002378: 28ce |0118: goto 00e6 // -0032
+00237a: 0d00 |0119: move-exception v0
+00237c: 1e08 |011a: monitor-exit v8
+00237e: 2700 |011b: throw v0
+002380: 0d00 |011c: move-exception v0
+002382: 0160 |011d: move v0, v6
+002384: 2900 f5fe |011e: goto/16 0013 // -010b
+002388: 0160 |0120: move v0, v6
+00238a: 2900 f2fe |0121: goto/16 0013 // -010e
+00238e: 0d00 |0123: move-exception v0
+002390: 1e0a |0124: monitor-exit v10
+002392: 2700 |0125: throw v0
+ catches : 4
+ 0x0007 - 0x000d
+ Ljava/lang/ClassCastException; -> 0x011c
+ <any> -> 0x0123
+ 0x0015 - 0x00e9
+ Ljava/lang/ClassCastException; -> 0x011c
+ <any> -> 0x0123
+ 0x00e9 - 0x0104
+ <any> -> 0x0119
+ 0x0104 - 0x011c
+ Ljava/lang/ClassCastException; -> 0x011c
+ <any> -> 0x0123
+ positions :
+ locals :
+
+ #7 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(Lcom/google/android/checkers/CheckersView;)Z'
+ access : 0x1008 (STATIC SYNTHETIC)
+ code -
+ registers : 7
+ ins : 1
+ outs : 6
+ insns size : 11 16-bit code units
+0023c0: |[0023c0] com.google.android.checkers.CheckersView.a:(Lcom/google/android/checkers/CheckersView;)Z
+0023d0: 1201 |0000: const/4 v1, #int 0 // #0
+0023d2: 12f2 |0001: const/4 v2, #int -1 // #ff
+0023d4: 0760 |0002: move-object v0, v6
+0023d6: 0113 |0003: move v3, v1
+0023d8: 0114 |0004: move v4, v1
+0023da: 0115 |0005: move v5, v1
+0023dc: 7606 5b00 0000 |0006: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(ZIIII)Z // method@005b
+0023e2: 0a00 |0009: move-result v0
+0023e4: 0f00 |000a: return v0
+ catches : (none)
+ positions :
+ locals :
+
+ #8 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'a'
+ type : '(ZIIII)Z'
+ access : 0x20012 (PRIVATE FINAL DECLARED_SYNCHRONIZED)
+ code -
+ registers : 15
+ ins : 6
+ outs : 6
+ insns size : 645 16-bit code units
+0023e8: |[0023e8] com.google.android.checkers.CheckersView.a:(ZIIII)Z
+0023f8: 1232 |0000: const/4 v2, #int 3 // #3
+0023fa: 12f1 |0001: const/4 v1, #int -1 // #ff
+0023fc: 1223 |0002: const/4 v3, #int 2 // #2
+0023fe: 1216 |0003: const/4 v6, #int 1 // #1
+002400: 1207 |0004: const/4 v7, #int 0 // #0
+002402: 1d09 |0005: monitor-enter v9
+002404: 380a 1b00 |0006: if-eqz v10, 0021 // +001b
+002408: 5290 1200 |0008: iget v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+00240c: 3d00 1700 |000a: if-lez v0, 0021 // +0017
+002410: 5290 1200 |000c: iget v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+002414: 3330 2b00 |000e: if-ne v0, v3, 0039 // +002b
+002418: 12eb |0010: const/4 v11, #int -2 // #fe
+00241a: 5290 2300 |0011: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+00241e: 3330 2800 |0013: if-ne v0, v3, 003b // +0028
+002422: 0160 |0015: move v0, v6
+002424: 5990 2300 |0016: iput v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002428: 5290 2c00 |0018: iget v0, v9, Lcom/google/android/checkers/CheckersView;.z:I // field@002c
+00242c: 6e20 7000 0900 |001a: invoke-virtual {v9, v0}, Lcom/google/android/checkers/CheckersView;.setLevel:(I)V // method@0070
+002432: 1200 |001d: const/4 v0, #int 0 // #0
+002434: 5990 1200 |001e: iput v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+002438: 017a |0020: move v10, v7
+00243a: 380a 8800 |0021: if-eqz v10, 00a9 // +0088
+00243e: 7020 6200 b900 |0023: invoke-direct {v9, v11}, Lcom/google/android/checkers/CheckersView;.c:(I)V // method@0062
+002444: 5290 2300 |0026: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002448: 1252 |0028: const/4 v2, #int 5 // #5
+00244a: 3220 0d00 |0029: if-eq v0, v2, 0036 // +000d
+00244e: 5290 2300 |002b: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002452: 1262 |002d: const/4 v2, #int 6 // #6
+002454: 3220 0800 |002e: if-eq v0, v2, 0036 // +0008
+002458: 390d 0d00 |0030: if-nez v13, 003d // +000d
+00245c: 1a00 1301 |0032: const-string v0, "random play" // string@0113
+002460: 5b90 0f00 |0034: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+002464: 0160 |0036: move v0, v6
+002466: 1e09 |0037: monitor-exit v9
+002468: 0f00 |0038: return v0
+00246a: 011b |0039: move v11, v1
+00246c: 28d7 |003a: goto 0011 // -0029
+00246e: 0120 |003b: move v0, v2
+002470: 28da |003c: goto 0016 // -0026
+002472: 331d 0a00 |003d: if-ne v13, v1, 0047 // +000a
+002476: 1a00 0801 |003f: const-string v0, "only reply" // string@0108
+00247a: 5b90 0f00 |0041: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+00247e: 28f3 |0043: goto 0036 // -000d
+002480: 0d00 |0044: move-exception v0
+002482: 1e09 |0045: monitor-exit v9
+002484: 2700 |0046: throw v0
+002486: 12e0 |0047: const/4 v0, #int -2 // #fe
+002488: 330d 0700 |0048: if-ne v13, v0, 004f // +0007
+00248c: 1a00 0a01 |004a: const-string v0, "opening" // string@010a
+002490: 5b90 0f00 |004c: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+002494: 28e8 |004e: goto 0036 // -0018
+002496: 1300 0083 |004f: const/16 v0, #int -32000 // #8300
+00249a: 360c 1800 |0051: if-gt v12, v0, 0069 // +0018
+00249e: 2200 3000 |0053: new-instance v0, Ljava/lang/StringBuilder; // type@0030
+0024a2: 1a01 f000 |0055: const-string v1, "loss in #" // string@00f0
+0024a6: 7020 a600 1000 |0057: invoke-direct {v0, v1}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6
+0024ac: d0c1 f47e |005a: add-int/lit16 v1, v12, #int 32500 // #7ef4
+0024b0: db01 0102 |005c: div-int/lit8 v1, v1, #int 2 // #02
+0024b4: 6e20 a700 1000 |005e: invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+0024ba: 0c00 |0061: move-result-object v0
+0024bc: 6e10 aa00 0000 |0062: invoke-virtual {v0}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa
+0024c2: 0c00 |0065: move-result-object v0
+0024c4: 5b90 0f00 |0066: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+0024c8: 28ce |0068: goto 0036 // -0032
+0024ca: 1300 007d |0069: const/16 v0, #int 32000 // #7d00
+0024ce: 340c 1800 |006b: if-lt v12, v0, 0083 // +0018
+0024d2: 2200 3000 |006d: new-instance v0, Ljava/lang/StringBuilder; // type@0030
+0024d6: 1a01 3d01 |006f: const-string v1, "win in #" // string@013d
+0024da: 7020 a600 1000 |0071: invoke-direct {v0, v1}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6
+0024e0: d1c1 f47e |0074: rsub-int v1, v12, #int 32500 // #7ef4
+0024e4: db01 0102 |0076: div-int/lit8 v1, v1, #int 2 // #02
+0024e8: 6e20 a700 1000 |0078: invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+0024ee: 0c00 |007b: move-result-object v0
+0024f0: 6e10 aa00 0000 |007c: invoke-virtual {v0}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa
+0024f6: 0c00 |007f: move-result-object v0
+0024f8: 5b90 0f00 |0080: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+0024fc: 28b4 |0082: goto 0036 // -004c
+0024fe: 2200 3000 |0083: new-instance v0, Ljava/lang/StringBuilder; // type@0030
+002502: 1a01 3901 |0085: const-string v1, "v=" // string@0139
+002506: 7020 a600 1000 |0087: invoke-direct {v0, v1}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6
+00250c: 6e20 a700 c000 |008a: invoke-virtual {v0, v12}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+002512: 0c00 |008d: move-result-object v0
+002514: 1a01 0000 |008e: const-string v1, " d=" // string@0000
+002518: 6e20 a900 1000 |0090: invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@00a9
+00251e: 0c00 |0093: move-result-object v0
+002520: 6e20 a700 d000 |0094: invoke-virtual {v0, v13}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+002526: 0c00 |0097: move-result-object v0
+002528: 1a01 0100 |0098: const-string v1, " n=" // string@0001
+00252c: 6e20 a900 1000 |009a: invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@00a9
+002532: 0c00 |009d: move-result-object v0
+002534: 6e20 a700 e000 |009e: invoke-virtual {v0, v14}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7
+00253a: 0c00 |00a1: move-result-object v0
+00253c: 6e10 aa00 0000 |00a2: invoke-virtual {v0}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa
+002542: 0c00 |00a5: move-result-object v0
+002544: 5b90 0f00 |00a6: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+002548: 288e |00a8: goto 0036 // -0072
+00254a: 12d0 |00a9: const/4 v0, #int -3 // #fd
+00254c: 330b 3000 |00aa: if-ne v11, v0, 00da // +0030
+002550: 2200 0500 |00ac: new-instance v0, Landroid/app/AlertDialog$Builder; // type@0005
+002554: 5491 1300 |00ae: iget-object v1, v9, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013
+002558: 7020 0900 1000 |00b0: invoke-direct {v0, v1}, Landroid/app/AlertDialog$Builder;.<init>:(Landroid/content/Context;)V // method@0009
+00255e: 1a01 7f00 |00b3: const-string v1, "Start a new game?" // string@007f
+002562: 6e20 0c00 1000 |00b5: invoke-virtual {v0, v1}, Landroid/app/AlertDialog$Builder;.setMessage:(Ljava/lang/CharSequence;)Landroid/app/AlertDialog$Builder; // method@000c
+002568: 0c00 |00b8: move-result-object v0
+00256a: 1201 |00b9: const/4 v1, #int 0 // #0
+00256c: 6e20 0a00 1000 |00ba: invoke-virtual {v0, v1}, Landroid/app/AlertDialog$Builder;.setCancelable:(Z)Landroid/app/AlertDialog$Builder; // method@000a
+002572: 0c00 |00bd: move-result-object v0
+002574: 1a01 9e00 |00be: const-string v1, "Yes" // string@009e
+002578: 2202 2100 |00c0: new-instance v2, Lcom/google/android/checkers/b; // type@0021
+00257c: 7020 8f00 9200 |00c2: invoke-direct {v2, v9}, Lcom/google/android/checkers/b;.<init>:(Lcom/google/android/checkers/CheckersView;)V // method@008f
+002582: 6e30 0e00 1002 |00c5: invoke-virtual {v0, v1, v2}, Landroid/app/AlertDialog$Builder;.setPositiveButton:(Ljava/lang/CharSequence;Landroid/content/DialogInterface$OnClickListener;)Landroid/app/AlertDialog$Builder; // method@000e
+002588: 0c00 |00c8: move-result-object v0
+00258a: 1a01 7500 |00c9: const-string v1, "No" // string@0075
+00258e: 2202 2200 |00cb: new-instance v2, Lcom/google/android/checkers/c; // type@0022
+002592: 7020 9100 9200 |00cd: invoke-direct {v2, v9}, Lcom/google/android/checkers/c;.<init>:(Lcom/google/android/checkers/CheckersView;)V // method@0091
+002598: 6e30 0d00 1002 |00d0: invoke-virtual {v0, v1, v2}, Landroid/app/AlertDialog$Builder;.setNegativeButton:(Ljava/lang/CharSequence;Landroid/content/DialogInterface$OnClickListener;)Landroid/app/AlertDialog$Builder; // method@000d
+00259e: 0c00 |00d3: move-result-object v0
+0025a0: 6e10 1000 0000 |00d4: invoke-virtual {v0}, Landroid/app/AlertDialog$Builder;.show:()Landroid/app/AlertDialog; // method@0010
+0025a6: 0160 |00d7: move v0, v6
+0025a8: 2900 5fff |00d8: goto/16 0037 // -00a1
+0025ac: 331b 5b00 |00da: if-ne v11, v1, 0135 // +005b
+0025b0: 5290 2300 |00dc: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0025b4: 3230 0700 |00de: if-eq v0, v3, 00e5 // +0007
+0025b8: 5290 2300 |00e0: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0025bc: 1241 |00e2: const/4 v1, #int 4 // #4
+0025be: 3310 1100 |00e3: if-ne v0, v1, 00f4 // +0011
+0025c2: 1210 |00e5: const/4 v0, #int 1 // #1
+0025c4: 5990 1200 |00e6: iput v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+0025c8: 5490 2200 |00e8: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+0025cc: 1211 |00ea: const/4 v1, #int 1 // #1
+0025ce: 5901 4100 |00eb: iput v1, v0, Lcom/google/android/checkers/a;.h:I // field@0041
+0025d2: 1a00 3201 |00ed: const-string v0, "stopping...." // string@0132
+0025d6: 5b90 0f00 |00ef: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+0025da: 0160 |00f1: move v0, v6
+0025dc: 2900 45ff |00f2: goto/16 0037 // -00bb
+0025e0: 1200 |00f4: const/4 v0, #int 0 // #0
+0025e2: 5990 1200 |00f5: iput v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+0025e6: 5490 2200 |00f7: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+0025ea: 6e10 7b00 0000 |00f9: invoke-virtual {v0}, Lcom/google/android/checkers/a;.a:()V // method@007b
+0025f0: 1210 |00fc: const/4 v0, #int 1 // #1
+0025f2: 5990 2300 |00fd: iput v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0025f6: 5490 2200 |00ff: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+0025fa: 5200 3d00 |0101: iget v0, v0, Lcom/google/android/checkers/a;.d:I // field@003d
+0025fe: 5990 2400 |0103: iput v0, v9, Lcom/google/android/checkers/CheckersView;.r:I // field@0024
+002602: 5490 2200 |0105: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002606: 5200 3f00 |0107: iget v0, v0, Lcom/google/android/checkers/a;.f:I // field@003f
+00260a: 5990 2500 |0109: iput v0, v9, Lcom/google/android/checkers/CheckersView;.s:I // field@0025
+00260e: 5490 2200 |010b: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002612: 5200 3e00 |010d: iget v0, v0, Lcom/google/android/checkers/a;.e:I // field@003e
+002616: 5990 2600 |010f: iput v0, v9, Lcom/google/android/checkers/CheckersView;.t:I // field@0026
+00261a: 5490 2200 |0111: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00261e: 5200 4000 |0113: iget v0, v0, Lcom/google/android/checkers/a;.g:I // field@0040
+002622: 5990 2700 |0115: iput v0, v9, Lcom/google/android/checkers/CheckersView;.u:I // field@0027
+002626: 1200 |0117: const/4 v0, #int 0 // #0
+002628: 5990 2800 |0118: iput v0, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+00262c: 1200 |011a: const/4 v0, #int 0 // #0
+00262e: 5990 2900 |011b: iput v0, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+002632: 1200 |011d: const/4 v0, #int 0 // #0
+002634: 5990 2a00 |011e: iput v0, v9, Lcom/google/android/checkers/CheckersView;.x:I // field@002a
+002638: 1200 |0120: const/4 v0, #int 0 // #0
+00263a: 5c90 2b00 |0121: iput-boolean v0, v9, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b
+00263e: 1200 |0123: const/4 v0, #int 0 // #0
+002640: 5990 0c00 |0124: iput v0, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+002644: 1200 |0126: const/4 v0, #int 0 // #0
+002646: 5990 0d00 |0127: iput v0, v9, Lcom/google/android/checkers/CheckersView;.L:I // field@000d
+00264a: 5490 2200 |0129: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00264e: 5200 3c00 |012b: iget v0, v0, Lcom/google/android/checkers/a;.c:I // field@003c
+002652: 5990 0e00 |012d: iput v0, v9, Lcom/google/android/checkers/CheckersView;.M:I // field@000e
+002656: 1200 |012f: const/4 v0, #int 0 // #0
+002658: 5b90 0f00 |0130: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+00265c: 0160 |0132: move v0, v6
+00265e: 2900 04ff |0133: goto/16 0037 // -00fc
+002662: 12e0 |0135: const/4 v0, #int -2 // #fe
+002664: 330b b100 |0136: if-ne v11, v0, 01e7 // +00b1
+002668: 1200 |0138: const/4 v0, #int 0 // #0
+00266a: 5990 2800 |0139: iput v0, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+00266e: 1200 |013b: const/4 v0, #int 0 // #0
+002670: 5990 2900 |013c: iput v0, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+002674: 1200 |013e: const/4 v0, #int 0 // #0
+002676: 5990 2a00 |013f: iput v0, v9, Lcom/google/android/checkers/CheckersView;.x:I // field@002a
+00267a: 5290 2300 |0141: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+00267e: 3230 0700 |0143: if-eq v0, v3, 014a // +0007
+002682: 5290 2300 |0145: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002686: 1241 |0147: const/4 v1, #int 4 // #4
+002688: 3310 1100 |0148: if-ne v0, v1, 0159 // +0011
+00268c: 1220 |014a: const/4 v0, #int 2 // #2
+00268e: 5990 1200 |014b: iput v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+002692: 5490 2200 |014d: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002696: 1211 |014f: const/4 v1, #int 1 // #1
+002698: 5901 4100 |0150: iput v1, v0, Lcom/google/android/checkers/a;.h:I // field@0041
+00269c: 1a00 3201 |0152: const-string v0, "stopping...." // string@0132
+0026a0: 5b90 0f00 |0154: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+0026a4: 0160 |0156: move v0, v6
+0026a6: 2900 e0fe |0157: goto/16 0037 // -0120
+0026aa: 1200 |0159: const/4 v0, #int 0 // #0
+0026ac: 5990 1200 |015a: iput v0, v9, Lcom/google/android/checkers/CheckersView;.Q:I // field@0012
+0026b0: 5290 2300 |015c: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0026b4: 3260 7600 |015e: if-eq v0, v6, 01d4 // +0076
+0026b8: 5290 2300 |0160: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0026bc: 1251 |0162: const/4 v1, #int 5 // #5
+0026be: 3210 7100 |0163: if-eq v0, v1, 01d4 // +0071
+0026c2: 0175 |0165: move v5, v7
+0026c4: 5290 0d00 |0166: iget v0, v9, Lcom/google/android/checkers/CheckersView;.L:I // field@000d
+0026c8: 3d00 7200 |0168: if-lez v0, 01da // +0072
+0026cc: 5290 0d00 |016a: iget v0, v9, Lcom/google/android/checkers/CheckersView;.L:I // field@000d
+0026d0: d800 00ff |016c: add-int/lit8 v0, v0, #int -1 // #ff
+0026d4: 5990 0d00 |016e: iput v0, v9, Lcom/google/android/checkers/CheckersView;.L:I // field@000d
+0026d8: 5290 0c00 |0170: iget v0, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+0026dc: 3d00 6400 |0172: if-lez v0, 01d6 // +0064
+0026e0: 5290 0c00 |0174: iget v0, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+0026e4: d800 00ff |0176: add-int/lit8 v0, v0, #int -1 // #ff
+0026e8: 5990 0c00 |0178: iput v0, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+0026ec: 5294 0c00 |017a: iget v4, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+0026f0: 5490 2200 |017c: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+0026f4: 5491 0800 |017e: iget-object v1, v9, Lcom/google/android/checkers/CheckersView;.G:[I // field@0008
+0026f8: 4401 0104 |0180: aget v1, v1, v4
+0026fc: 5492 0900 |0182: iget-object v2, v9, Lcom/google/android/checkers/CheckersView;.H:[I // field@0009
+002700: 4402 0204 |0184: aget v2, v2, v4
+002704: 5493 0a00 |0186: iget-object v3, v9, Lcom/google/android/checkers/CheckersView;.I:[I // field@000a
+002708: 4403 0304 |0188: aget v3, v3, v4
+00270c: 5498 0b00 |018a: iget-object v8, v9, Lcom/google/android/checkers/CheckersView;.J:[I // field@000b
+002710: 4404 0804 |018c: aget v4, v8, v4
+002714: 7406 8000 0000 |018e: invoke-virtual/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIIZ)V // method@0080
+00271a: 0160 |0191: move v0, v6
+00271c: 3800 4e00 |0192: if-eqz v0, 01e0 // +004e
+002720: 3805 4800 |0194: if-eqz v5, 01dc // +0048
+002724: 1230 |0196: const/4 v0, #int 3 // #3
+002726: 5990 2300 |0197: iput v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+00272a: 5490 2200 |0199: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00272e: 5200 3d00 |019b: iget v0, v0, Lcom/google/android/checkers/a;.d:I // field@003d
+002732: 5990 2400 |019d: iput v0, v9, Lcom/google/android/checkers/CheckersView;.r:I // field@0024
+002736: 5490 2200 |019f: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00273a: 5200 3f00 |01a1: iget v0, v0, Lcom/google/android/checkers/a;.f:I // field@003f
+00273e: 5990 2500 |01a3: iput v0, v9, Lcom/google/android/checkers/CheckersView;.s:I // field@0025
+002742: 5490 2200 |01a5: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002746: 5200 3e00 |01a7: iget v0, v0, Lcom/google/android/checkers/a;.e:I // field@003e
+00274a: 5990 2600 |01a9: iput v0, v9, Lcom/google/android/checkers/CheckersView;.t:I // field@0026
+00274e: 5490 2200 |01ab: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002752: 5200 4000 |01ad: iget v0, v0, Lcom/google/android/checkers/a;.g:I // field@0040
+002756: 5990 2700 |01af: iput v0, v9, Lcom/google/android/checkers/CheckersView;.u:I // field@0027
+00275a: 1200 |01b1: const/4 v0, #int 0 // #0
+00275c: 5990 2800 |01b2: iput v0, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+002760: 1200 |01b4: const/4 v0, #int 0 // #0
+002762: 5990 2900 |01b5: iput v0, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+002766: 1200 |01b7: const/4 v0, #int 0 // #0
+002768: 5990 2a00 |01b8: iput v0, v9, Lcom/google/android/checkers/CheckersView;.x:I // field@002a
+00276c: 5490 2200 |01ba: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002770: 5591 0300 |01bc: iget-boolean v1, v9, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+002774: 6e30 7a00 5001 |01be: invoke-virtual {v0, v5, v1}, Lcom/google/android/checkers/a;.a:(ZZ)I // method@007a
+00277a: 0a00 |01c1: move-result v0
+00277c: 3360 0300 |01c2: if-ne v0, v6, 01c5 // +0003
+002780: 0167 |01c4: move v7, v6
+002782: 5c97 2b00 |01c5: iput-boolean v7, v9, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b
+002786: 5490 2200 |01c7: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00278a: 5200 3c00 |01c9: iget v0, v0, Lcom/google/android/checkers/a;.c:I // field@003c
+00278e: 5990 0e00 |01cb: iput v0, v9, Lcom/google/android/checkers/CheckersView;.M:I // field@000e
+002792: 1a00 3701 |01cd: const-string v0, "undid half-move" // string@0137
+002796: 5b90 0f00 |01cf: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+00279a: 0160 |01d1: move v0, v6
+00279c: 2900 65fe |01d2: goto/16 0037 // -019b
+0027a0: 0165 |01d4: move v5, v6
+0027a2: 2891 |01d5: goto 0166 // -006f
+0027a4: 1270 |01d6: const/4 v0, #int 7 // #7
+0027a6: 5990 0c00 |01d7: iput v0, v9, Lcom/google/android/checkers/CheckersView;.K:I // field@000c
+0027aa: 28a1 |01d9: goto 017a // -005f
+0027ac: 0170 |01da: move v0, v7
+0027ae: 28b7 |01db: goto 0192 // -0049
+0027b0: 1210 |01dc: const/4 v0, #int 1 // #1
+0027b2: 5990 2300 |01dd: iput v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0027b6: 28ba |01df: goto 0199 // -0046
+0027b8: 1a00 f900 |01e0: const-string v0, "no more undo" // string@00f9
+0027bc: 5b90 0f00 |01e2: iput-object v0, v9, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f
+0027c0: 0160 |01e4: move v0, v6
+0027c2: 2900 52fe |01e5: goto/16 0037 // -01ae
+0027c6: 12c0 |01e7: const/4 v0, #int -4 // #fc
+0027c8: 330b 4c00 |01e8: if-ne v11, v0, 0234 // +004c
+0027cc: 5290 2300 |01ea: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0027d0: 3360 2300 |01ec: if-ne v0, v6, 020f // +0023
+0027d4: 1220 |01ee: const/4 v0, #int 2 // #2
+0027d6: 5990 2300 |01ef: iput v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+0027da: 5490 2200 |01f1: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+0027de: 1201 |01f3: const/4 v1, #int 0 // #0
+0027e0: 5592 0300 |01f4: iget-boolean v2, v9, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+0027e4: 6e30 8700 1002 |01f6: invoke-virtual {v0, v1, v2}, Lcom/google/android/checkers/a;.b:(ZZ)V // method@0087
+0027ea: 5491 1300 |01f9: iget-object v1, v9, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013
+0027ee: 5590 0400 |01fb: iget-boolean v0, v9, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004
+0027f2: 3800 0f00 |01fd: if-eqz v0, 020c // +000f
+0027f6: 1a00 bf00 |01ff: const-string v0, "computer now plays black" // string@00bf
+0027fa: 1202 |0201: const/4 v2, #int 0 // #0
+0027fc: 7130 3c00 0102 |0202: invoke-static {v1, v0, v2}, Landroid/widget/Toast;.makeText:(Landroid/content/Context;Ljava/lang/CharSequence;I)Landroid/widget/Toast; // method@003c
+002802: 0c00 |0205: move-result-object v0
+002804: 6e10 3d00 0000 |0206: invoke-virtual {v0}, Landroid/widget/Toast;.show:()V // method@003d
+00280a: 0160 |0209: move v0, v6
+00280c: 2900 2dfe |020a: goto/16 0037 // -01d3
+002810: 1a00 c000 |020c: const-string v0, "computer now plays black
+goto options to rotate board" // string@00c0
+002814: 28f3 |020e: goto 0201 // -000d
+002816: 5290 2300 |020f: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+00281a: 3320 6f00 |0211: if-ne v0, v2, 0280 // +006f
+00281e: 1240 |0213: const/4 v0, #int 4 // #4
+002820: 5990 2300 |0214: iput v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002824: 5490 2200 |0216: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002828: 1211 |0218: const/4 v1, #int 1 // #1
+00282a: 5592 0300 |0219: iget-boolean v2, v9, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003
+00282e: 6e30 8700 1002 |021b: invoke-virtual {v0, v1, v2}, Lcom/google/android/checkers/a;.b:(ZZ)V // method@0087
+002834: 5491 1300 |021e: iget-object v1, v9, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013
+002838: 5590 0400 |0220: iget-boolean v0, v9, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004
+00283c: 3800 0f00 |0222: if-eqz v0, 0231 // +000f
+002840: 1a00 c200 |0224: const-string v0, "computer now plays white
+goto options to rotate board" // string@00c2
+002844: 1202 |0226: const/4 v2, #int 0 // #0
+002846: 7130 3c00 0102 |0227: invoke-static {v1, v0, v2}, Landroid/widget/Toast;.makeText:(Landroid/content/Context;Ljava/lang/CharSequence;I)Landroid/widget/Toast; // method@003c
+00284c: 0c00 |022a: move-result-object v0
+00284e: 6e10 3d00 0000 |022b: invoke-virtual {v0}, Landroid/widget/Toast;.show:()V // method@003d
+002854: 0160 |022e: move v0, v6
+002856: 2900 08fe |022f: goto/16 0037 // -01f8
+00285a: 1a00 c100 |0231: const-string v0, "computer now plays white" // string@00c1
+00285e: 28f3 |0233: goto 0226 // -000d
+002860: 336b 4c00 |0234: if-ne v11, v6, 0280 // +004c
+002864: 5290 2300 |0236: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002868: 3260 0600 |0238: if-eq v0, v6, 023e // +0006
+00286c: 5290 2300 |023a: iget v0, v9, Lcom/google/android/checkers/CheckersView;.q:I // field@0023
+002870: 3320 4400 |023c: if-ne v0, v2, 0280 // +0044
+002874: 1200 |023e: const/4 v0, #int 0 // #0
+002876: 5990 2900 |023f: iput v0, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+00287a: 1200 |0241: const/4 v0, #int 0 // #0
+00287c: 5990 2a00 |0242: iput v0, v9, Lcom/google/android/checkers/CheckersView;.x:I // field@002a
+002880: 5490 2200 |0244: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+002884: 5204 3c00 |0246: iget v4, v0, Lcom/google/android/checkers/a;.c:I // field@003c
+002888: 5490 2200 |0248: iget-object v0, v9, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+00288c: 5405 3b00 |024a: iget-object v5, v0, Lcom/google/android/checkers/a;.b:[I // field@003b
+002890: 0172 |024c: move v2, v7
+002892: 0170 |024d: move v0, v7
+002894: 0173 |024e: move v3, v7
+002896: 3442 0a00 |024f: if-lt v2, v4, 0259 // +000a
+00289a: 3363 2500 |0251: if-ne v3, v6, 0276 // +0025
+00289e: 7020 6200 1900 |0253: invoke-direct {v9, v1}, Lcom/google/android/checkers/CheckersView;.c:(I)V // method@0062
+0028a4: 0160 |0256: move v0, v6
+0028a6: 2900 e0fd |0257: goto/16 0037 // -0220
+0028aa: 5297 2800 |0259: iget v7, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+0028ae: 4408 0502 |025b: aget v8, v5, v2
+0028b2: b587 |025d: and-int/2addr v7, v8
+0028b4: 5298 2800 |025e: iget v8, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+0028b8: 3387 1300 |0260: if-ne v7, v8, 0273 // +0013
+0028bc: 4401 0502 |0262: aget v1, v5, v2
+0028c0: 3201 1f00 |0264: if-eq v1, v0, 0283 // +001f
+0028c4: d801 0301 |0266: add-int/lit8 v1, v3, #int 1 // #01
+0028c8: 4400 0502 |0268: aget v0, v5, v2
+0028cc: 5293 2900 |026a: iget v3, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+0028d0: 4407 0502 |026c: aget v7, v5, v2
+0028d4: b673 |026e: or-int/2addr v3, v7
+0028d6: 5993 2900 |026f: iput v3, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+0028da: 0113 |0271: move v3, v1
+0028dc: 0121 |0272: move v1, v2
+0028de: d802 0201 |0273: add-int/lit8 v2, v2, #int 1 // #01
+0028e2: 28da |0275: goto 024f // -0026
+0028e4: 5290 2900 |0276: iget v0, v9, Lcom/google/android/checkers/CheckersView;.w:I // field@0029
+0028e8: 3900 0500 |0278: if-nez v0, 027d // +0005
+0028ec: 1200 |027a: const/4 v0, #int 0 // #0
+0028ee: 5990 2800 |027b: iput v0, v9, Lcom/google/android/checkers/CheckersView;.v:I // field@0028
+0028f2: 0160 |027d: move v0, v6
+0028f4: 2900 b9fd |027e: goto/16 0037 // -0247
+0028f8: 0170 |0280: move v0, v7
+0028fa: 2900 b6fd |0281: goto/16 0037 // -024a
+0028fe: 0131 |0283: move v1, v3
+002900: 28e4 |0284: goto 0268 // -001c
+ catches : 3
+ 0x0008 - 0x0036
+ <any> -> 0x0044
+ 0x003f - 0x0043
+ <any> -> 0x0044
+ 0x004a - 0x027d
+ <any> -> 0x0044
+ positions :
+ locals :
+
+ #9 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'b'
+ type : '(FF)I'
+ access : 0x20012 (PRIVATE FINAL DECLARED_SYNCHRONIZED)
+ code -
+ registers : 13
+ ins : 3
+ outs : 1
+ insns size : 102 16-bit code units
+002920: |[002920] com.google.android.checkers.CheckersView.b:(FF)I
+002930: 1309 0800 |0000: const/16 v9, #int 8 // #8
+002934: 1201 |0002: const/4 v1, #int 0 // #0
+002936: 1210 |0003: const/4 v0, #int 1 // #1
+002938: 1d0a |0004: monitor-enter v10
+00293a: 6e10 6a00 0a00 |0005: invoke-virtual {v10}, Lcom/google/android/checkers/CheckersView;.getWidth:()I // method@006a
+002940: 0a02 |0008: move-result v2
+002942: 6e10 6900 0a00 |0009: invoke-virtual {v10}, Lcom/google/android/checkers/CheckersView;.getHeight:()I // method@0069
+002948: 0a03 |000c: move-result v3
+00294a: 3532 1400 |000d: if-ge v2, v3, 0021 // +0014
+00294e: e207 0203 |000f:
ushr-int/lit8 v7, v2, #int 3 // #03 +002952: 52a2 2300 |0011: iget v2, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002956: 3202 0700 |0013: if-eq v2, v0, 001a // +0007 +00295a: 52a2 2300 |0015: iget v2, v10, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +00295e: 1233 |0017: const/4 v3, #int 3 // #3 +002960: 3332 0600 |0018: if-ne v2, v3, 001e // +0006 +002964: 0116 |001a: move v6, v1 +002966: 0103 |001b: move v3, v0 +002968: 3496 0700 |001c: if-lt v6, v9, 0023 // +0007 +00296c: 0110 |001e: move v0, v1 +00296e: 1e0a |001f: monitor-exit v10 +002970: 0f00 |0020: return v0 +002972: 0132 |0021: move v2, v3 +002974: 28ed |0022: goto 000f // -0013 +002976: d902 0601 |0023: rsub-int/lit8 v2, v6, #int 1 // #01 +00297a: dd02 0201 |0025: and-int/lit8 v2, v2, #int 1 // #01 +00297e: 0124 |0027: move v4, v2 +002980: 0135 |0028: move v5, v3 +002982: 3494 0700 |0029: if-lt v4, v9, 0030 // +0007 +002986: d802 0601 |002b: add-int/lit8 v2, v6, #int 1 // #01 +00298a: 0126 |002d: move v6, v2 +00298c: 0153 |002e: move v3, v5 +00298e: 28ed |002f: goto 001c // -0013 +002990: 55a2 0400 |0030: iget-boolean v2, v10, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004 +002994: 3802 2800 |0032: if-eqz v2, 005a // +0028 +002998: d902 0407 |0034: rsub-int/lit8 v2, v4, #int 7 // #07 +00299c: 9203 0702 |0036: mul-int v3, v7, v2 +0029a0: d902 0607 |0038: rsub-int/lit8 v2, v6, #int 7 // #07 +0029a4: b272 |003a: mul-int/2addr v2, v7 +0029a6: 8238 |003b: int-to-float v8, v3 +0029a8: 2e08 080b |003c: cmpg-float v8, v8, v11 +0029ac: 3c08 2100 |003e: if-gtz v8, 005f // +0021 +0029b0: b073 |0040: add-int/2addr v3, v7 +0029b2: 8233 |0041: int-to-float v3, v3 +0029b4: 2e03 0b03 |0042: cmpg-float v3, v11, v3 +0029b8: 3b03 1b00 |0044: if-gez v3, 005f // +001b +0029bc: 8223 |0046: int-to-float v3, v2 +0029be: 2e03 030c |0047: cmpg-float v3, v3, v12 +0029c2: 3c03 1600 |0049: if-gtz v3, 005f // +0016 +0029c6: b072 |004b: add-int/2addr v2, v7 +0029c8: 8222 |004c: int-to-float v2, v2 +0029ca: 2e02 0c02 |004d: cmpg-float v2, v12, v2 +0029ce: 3b02 1000 |004f: if-gez v2, 005f // +0010 +0029d2: 52a1 2800 |0051: iget v1, v10, Lcom/google/android/checkers/CheckersView;.v:I // field@0028 +0029d6: b651 |0053: or-int/2addr v1, v5 +0029d8: 59a1 2800 |0054: iput v1, v10, Lcom/google/android/checkers/CheckersView;.v:I // field@0028 +0029dc: 28c9 |0056: goto 001f // -0037 +0029de: 0d00 |0057: move-exception v0 +0029e0: 1e0a |0058: monitor-exit v10 +0029e2: 2700 |0059: throw v0 +0029e4: 9203 0704 |005a: mul-int v3, v7, v4 +0029e8: 9202 0706 |005c: mul-int v2, v7, v6 +0029ec: 28dd |005e: goto 003b // -0023 +0029ee: e003 0501 |005f: shl-int/lit8 v3, v5, #int 1 // #01 +0029f2: d802 0402 |0061: add-int/lit8 v2, v4, #int 2 // #02 +0029f6: 0124 |0063: move v4, v2 +0029f8: 0135 |0064: move v5, v3 +0029fa: 28c4 |0065: goto 0029 // -003c + catches : 2 + 0x0005 - 0x0017 + <any> -> 0x0057 + 0x0030 - 0x0056 + <any> -> 0x0057 + positions : + locals : + + #10 : (in Lcom/google/android/checkers/CheckersView;) + name : 'b' + type : '(I)V' + access : 0x20012 (PRIVATE FINAL DECLARED_SYNCHRONIZED) + code - + registers : 3 + ins : 2 + outs : 1 + insns size : 18 16-bit code units +002a10: |[002a10] com.google.android.checkers.CheckersView.b:(I)V +002a20: 1d01 |0000: monitor-enter v1 +002a22: 5210 0700 |0001: iget v0, v1, Lcom/google/android/checkers/CheckersView;.F:I // field@0007 +002a26: 3220 0a00 |0003: if-eq v0, v2, 000d // +000a +002a2a: 5912 0700 |0005: iput v2, v1, Lcom/google/android/checkers/CheckersView;.F:I // 
field@0007 +002a2e: 7010 6400 0100 |0007: invoke-direct {v1}, Lcom/google/android/checkers/CheckersView;.d:()V // method@0064 +002a34: 6e10 6d00 0100 |000a: invoke-virtual {v1}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +002a3a: 1e01 |000d: monitor-exit v1 +002a3c: 0e00 |000e: return-void +002a3e: 0d00 |000f: move-exception v0 +002a40: 1e01 |0010: monitor-exit v1 +002a42: 2700 |0011: throw v0 + catches : 1 + 0x0001 - 0x000d + <any> -> 0x000f + positions : + locals : + + #11 : (in Lcom/google/android/checkers/CheckersView;) + name : 'b' + type : '(Landroid/graphics/Canvas;IIII)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 16 + ins : 6 + outs : 6 + insns size : 99 16-bit code units +002a50: |[002a50] com.google.android.checkers.CheckersView.b:(Landroid/graphics/Canvas;IIII)V +002a60: df00 0cff |0000: xor-int/lit8 v0, v12, #int -1 // #ff +002a64: b5d0 |0002: and-int/2addr v0, v13 +002a66: 3900 5e00 |0003: if-nez v0, 0061 // +005e +002a6a: 7110 9f00 0c00 |0005: invoke-static {v12}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +002a70: 0a02 |0008: move-result v2 +002a72: d800 0cff |0009: add-int/lit8 v0, v12, #int -1 // #ff +002a76: b5c0 |000b: and-int/2addr v0, v12 +002a78: 7110 9f00 0000 |000c: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +002a7e: 0a03 |000f: move-result v3 +002a80: 07a0 |0010: move-object v0, v10 +002a82: 07b1 |0011: move-object v1, v11 +002a84: 01e4 |0012: move v4, v14 +002a86: 01f5 |0013: move v5, v15 +002a88: 7606 5400 0000 |0014: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIII)V // method@0054 +002a8e: 0e00 |0017: return-void +002a90: 7110 9f00 0800 |0018: invoke-static {v8}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +002a96: 0a03 |001b: move-result v3 +002a98: e200 0302 |001c: ushr-int/lit8 v0, v3, #int 2 // #02 +002a9c: dd00 0001 |001e: and-int/lit8 v0, v0, #int 1 // #01 +002aa0: 3800 3600 |0020: if-eqz v0, 0056 // +0036 +002aa4: d802 03fb |0022: add-int/lit8 v2, v3, #int -5 // #fb +002aa8: d801 03fc |0024: add-int/lit8 v1, v3, #int -4 // #fc +002aac: d800 0303 |0026: add-int/lit8 v0, v3, #int 3 // #03 +002ab0: d803 0304 |0028: add-int/lit8 v3, v3, #int 4 // #04 +002ab4: 0106 |002a: move v6, v0 +002ab6: 0117 |002b: move v7, v1 +002ab8: 1210 |002c: const/4 v0, #int 1 // #1 +002aba: b820 |002d: shl-int/2addr v0, v2 +002abc: 1211 |002e: const/4 v1, #int 1 // #1 +002abe: b831 |002f: shl-int/2addr v1, v3 +002ac0: b610 |0030: or-int/2addr v0, v1 +002ac2: 1211 |0031: const/4 v1, #int 1 // #1 +002ac4: b871 |0032: shl-int/2addr v1, v7 +002ac6: 1214 |0033: const/4 v4, #int 1 // #1 +002ac8: b864 |0034: shl-int/2addr v4, v6 +002aca: 9609 0104 |0035: or-int v9, v1, v4 +002ace: 9501 000c |0037: and-int v1, v0, v12 +002ad2: 3301 0900 |0039: if-ne v1, v0, 0042 // +0009 +002ad6: 07a0 |003b: move-object v0, v10 +002ad8: 07b1 |003c: move-object v1, v11 +002ada: 01e4 |003d: move v4, v14 +002adc: 01f5 |003e: move v5, v15 +002ade: 7606 5400 0000 |003f: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIII)V // method@0054 +002ae4: 9500 090c |0042: and-int v0, v9, v12 +002ae8: 3390 0b00 |0044: if-ne v0, v9, 004f // +000b +002aec: 07a0 |0046: move-object v0, v10 +002aee: 07b1 |0047: move-object v1, v11 +002af0: 0172 |0048: move v2, v7 +002af2: 0163 |0049: move v3, v6 +002af4: 01e4 |004a: move v4, v14 +002af6: 01f5 |004b: move v5, 
v15 +002af8: 7606 5400 0000 |004c: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIII)V // method@0054 +002afe: d800 08ff |004f: add-int/lit8 v0, v8, #int -1 // #ff +002b02: b580 |0051: and-int/2addr v0, v8 +002b04: 0108 |0052: move v8, v0 +002b06: 3908 c5ff |0053: if-nez v8, 0018 // -003b +002b0a: 28c2 |0055: goto 0017 // -003e +002b0c: d802 03fc |0056: add-int/lit8 v2, v3, #int -4 // #fc +002b10: d801 03fd |0058: add-int/lit8 v1, v3, #int -3 // #fd +002b14: d800 0304 |005a: add-int/lit8 v0, v3, #int 4 // #04 +002b18: d803 0305 |005c: add-int/lit8 v3, v3, #int 5 // #05 +002b1c: 0106 |005e: move v6, v0 +002b1e: 0117 |005f: move v7, v1 +002b20: 28cc |0060: goto 002c // -0034 +002b22: 0108 |0061: move v8, v0 +002b24: 28f1 |0062: goto 0053 // -000f + catches : (none) + positions : + locals : + + #12 : (in Lcom/google/android/checkers/CheckersView;) + name : 'c' + type : '(I)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 10 + ins : 2 + outs : 3 + insns size : 262 16-bit code units +002b28: |[002b28] com.google.android.checkers.CheckersView.c:(I)V +002b38: 1237 |0000: const/4 v7, #int 3 // #3 +002b3a: 1226 |0001: const/4 v6, #int 2 // #2 +002b3c: 1212 |0002: const/4 v2, #int 1 // #1 +002b3e: 1201 |0003: const/4 v1, #int 0 // #0 +002b40: 5280 0c00 |0004: iget v0, v8, Lcom/google/android/checkers/CheckersView;.K:I // field@000c +002b44: 5483 0800 |0006: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.G:[I // field@0008 +002b48: 5484 2200 |0008: iget-object v4, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002b4c: 5244 3d00 |000a: iget v4, v4, Lcom/google/android/checkers/a;.d:I // field@003d +002b50: 4b04 0300 |000c: aput v4, v3, v0 +002b54: 5483 0900 |000e: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.H:[I // field@0009 +002b58: 5484 2200 |0010: iget-object v4, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002b5c: 5244 3e00 |0012: iget v4, v4, Lcom/google/android/checkers/a;.e:I // field@003e +002b60: 4b04 0300 |0014: aput v4, v3, v0 +002b64: 5483 0a00 |0016: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.I:[I // field@000a +002b68: 5484 2200 |0018: iget-object v4, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002b6c: 5244 3f00 |001a: iget v4, v4, Lcom/google/android/checkers/a;.f:I // field@003f +002b70: 4b04 0300 |001c: aput v4, v3, v0 +002b74: 5483 0b00 |001e: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.J:[I // field@000b +002b78: 5484 2200 |0020: iget-object v4, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002b7c: 5244 4000 |0022: iget v4, v4, Lcom/google/android/checkers/a;.g:I // field@0040 +002b80: 4b04 0300 |0024: aput v4, v3, v0 +002b84: 5280 0c00 |0026: iget v0, v8, Lcom/google/android/checkers/CheckersView;.K:I // field@000c +002b88: 1273 |0028: const/4 v3, #int 7 // #7 +002b8a: 3530 6e00 |0029: if-ge v0, v3, 0097 // +006e +002b8e: 5280 0c00 |002b: iget v0, v8, Lcom/google/android/checkers/CheckersView;.K:I // field@000c +002b92: d800 0001 |002d: add-int/lit8 v0, v0, #int 1 // #01 +002b96: 5980 0c00 |002f: iput v0, v8, Lcom/google/android/checkers/CheckersView;.K:I // field@000c +002b9a: 5280 0d00 |0031: iget v0, v8, Lcom/google/android/checkers/CheckersView;.L:I // field@000d +002b9e: 1303 0800 |0033: const/16 v3, #int 8 // #8 +002ba2: 
3530 0800 |0035: if-ge v0, v3, 003d // +0008 +002ba6: 5280 0d00 |0037: iget v0, v8, Lcom/google/android/checkers/CheckersView;.L:I // field@000d +002baa: d800 0001 |0039: add-int/lit8 v0, v0, #int 1 // #01 +002bae: 5980 0d00 |003b: iput v0, v8, Lcom/google/android/checkers/CheckersView;.L:I // field@000d +002bb2: 5280 2300 |003d: iget v0, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002bb6: 3220 5b00 |003f: if-eq v0, v2, 009a // +005b +002bba: 5280 2300 |0041: iget v0, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002bbe: 3260 5700 |0043: if-eq v0, v6, 009a // +0057 +002bc2: 0110 |0045: move v0, v1 +002bc4: 5981 2800 |0046: iput v1, v8, Lcom/google/android/checkers/CheckersView;.v:I // field@0028 +002bc8: 5981 2900 |0048: iput v1, v8, Lcom/google/android/checkers/CheckersView;.w:I // field@0029 +002bcc: 5483 2200 |004a: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002bd0: 5433 3b00 |004c: iget-object v3, v3, Lcom/google/android/checkers/a;.b:[I // field@003b +002bd4: 4403 0309 |004e: aget v3, v3, v9 +002bd8: 5983 2a00 |0050: iput v3, v8, Lcom/google/android/checkers/CheckersView;.x:I // field@002a +002bdc: 1403 6666 663f |0052: const v3, #float 0.900000 // #3f666666 +002be2: 5983 1e00 |0055: iput v3, v8, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +002be6: 3800 4500 |0057: if-eqz v0, 009c // +0045 +002bea: 5483 2200 |0059: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002bee: 5433 3a00 |005b: iget-object v3, v3, Lcom/google/android/checkers/a;.a:[I // field@003a +002bf2: 4403 0309 |005d: aget v3, v3, v9 +002bf6: 5284 2400 |005f: iget v4, v8, Lcom/google/android/checkers/CheckersView;.r:I // field@0024 +002bfa: 5285 2600 |0061: iget v5, v8, Lcom/google/android/checkers/CheckersView;.t:I // field@0026 +002bfe: b654 |0063: or-int/2addr v4, v5 +002c00: b543 |0064: and-int/2addr v3, v4 +002c02: 5983 1f00 |0065: iput v3, v8, Lcom/google/android/checkers/CheckersView;.m:I // field@001f +002c06: 5981 2000 |0067: iput v1, v8, Lcom/google/android/checkers/CheckersView;.n:I // field@0020 +002c0a: 5483 2200 |0069: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002c0e: 6e20 7c00 9300 |006b: invoke-virtual {v3, v9}, Lcom/google/android/checkers/a;.a:(I)V // method@007c +002c14: 5483 2200 |006e: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002c18: 5233 3d00 |0070: iget v3, v3, Lcom/google/android/checkers/a;.d:I // field@003d +002c1c: 5983 2400 |0072: iput v3, v8, Lcom/google/android/checkers/CheckersView;.r:I // field@0024 +002c20: 5483 2200 |0074: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002c24: 5233 3f00 |0076: iget v3, v3, Lcom/google/android/checkers/a;.f:I // field@003f +002c28: 5983 2500 |0078: iput v3, v8, Lcom/google/android/checkers/CheckersView;.s:I // field@0025 +002c2c: 5483 2200 |007a: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002c30: 5233 3e00 |007c: iget v3, v3, Lcom/google/android/checkers/a;.e:I // field@003e +002c34: 5983 2600 |007e: iput v3, v8, Lcom/google/android/checkers/CheckersView;.t:I // field@0026 +002c38: 5483 2200 |0080: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // 
field@0022 +002c3c: 5233 4000 |0082: iget v3, v3, Lcom/google/android/checkers/a;.g:I // field@0040 +002c40: 5983 2700 |0084: iput v3, v8, Lcom/google/android/checkers/CheckersView;.u:I // field@0027 +002c44: 5c81 2b00 |0086: iput-boolean v1, v8, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b +002c48: 1203 |0088: const/4 v3, #int 0 // #0 +002c4a: 5b83 0f00 |0089: iput-object v3, v8, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f +002c4e: 5483 2200 |008b: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002c52: 5584 0300 |008d: iget-boolean v4, v8, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +002c56: 6e30 7a00 0304 |008f: invoke-virtual {v3, v0, v4}, Lcom/google/android/checkers/a;.a:(ZZ)I // method@007a +002c5c: 0a03 |0092: move-result v3 +002c5e: 2b03 6900 0000 |0093: packed-switch v3, 000000fc // +00000069 +002c64: 0e00 |0096: return-void +002c66: 5981 0c00 |0097: iput v1, v8, Lcom/google/android/checkers/CheckersView;.K:I // field@000c +002c6a: 2898 |0099: goto 0031 // -0068 +002c6c: 0120 |009a: move v0, v2 +002c6e: 28ab |009b: goto 0046 // -0055 +002c70: 5981 1f00 |009c: iput v1, v8, Lcom/google/android/checkers/CheckersView;.m:I // field@001f +002c74: 5483 2200 |009e: iget-object v3, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002c78: 5433 3a00 |00a0: iget-object v3, v3, Lcom/google/android/checkers/a;.a:[I // field@003a +002c7c: 4403 0309 |00a2: aget v3, v3, v9 +002c80: 5284 2500 |00a4: iget v4, v8, Lcom/google/android/checkers/CheckersView;.s:I // field@0025 +002c84: 5285 2700 |00a6: iget v5, v8, Lcom/google/android/checkers/CheckersView;.u:I // field@0027 +002c88: b654 |00a8: or-int/2addr v4, v5 +002c8a: b543 |00a9: and-int/2addr v3, v4 +002c8c: 5983 2000 |00aa: iput v3, v8, Lcom/google/android/checkers/CheckersView;.n:I // field@0020 +002c90: 28bd |00ac: goto 0069 // -0043 +002c92: 3800 0800 |00ad: if-eqz v0, 00b5 // +0008 +002c96: 1260 |00af: const/4 v0, #int 6 // #6 +002c98: 5980 2300 |00b0: iput v0, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002c9c: 5981 0e00 |00b2: iput v1, v8, Lcom/google/android/checkers/CheckersView;.M:I // field@000e +002ca0: 28e2 |00b4: goto 0096 // -001e +002ca2: 1250 |00b5: const/4 v0, #int 5 // #5 +002ca4: 28fa |00b6: goto 00b0 // -0006 +002ca6: 5c82 2b00 |00b7: iput-boolean v2, v8, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b +002caa: 5481 2200 |00b9: iget-object v1, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002cae: 5211 3c00 |00bb: iget v1, v1, Lcom/google/android/checkers/a;.c:I // field@003c +002cb2: 5981 0e00 |00bd: iput v1, v8, Lcom/google/android/checkers/CheckersView;.M:I // field@000e +002cb6: 5281 2300 |00bf: iget v1, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002cba: 3321 1800 |00c1: if-ne v1, v2, 00d9 // +0018 +002cbe: 5281 2c00 |00c3: iget v1, v8, Lcom/google/android/checkers/CheckersView;.z:I // field@002c +002cc2: 3901 0900 |00c5: if-nez v1, 00ce // +0009 +002cc6: 5987 2300 |00c7: iput v7, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002cca: 1a00 d200 |00c9: const-string v0, "free play" // string@00d2 +002cce: 5b80 0f00 |00cb: iput-object v0, v8, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f +002cd2: 28c9 |00cd: goto 0096 // -0037 +002cd4: 1241 |00ce: const/4 v1, #int 4 // #4 +002cd6: 5981 2300 
|00cf: iput v1, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002cda: 5481 2200 |00d1: iget-object v1, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002cde: 5582 0300 |00d3: iget-boolean v2, v8, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +002ce2: 6e30 8700 0102 |00d5: invoke-virtual {v1, v0, v2}, Lcom/google/android/checkers/a;.b:(ZZ)V // method@0087 +002ce8: 28be |00d8: goto 0096 // -0042 +002cea: 5281 2300 |00d9: iget v1, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002cee: 3371 1700 |00db: if-ne v1, v7, 00f2 // +0017 +002cf2: 5281 2c00 |00dd: iget v1, v8, Lcom/google/android/checkers/CheckersView;.z:I // field@002c +002cf6: 3901 0900 |00df: if-nez v1, 00e8 // +0009 +002cfa: 5982 2300 |00e1: iput v2, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002cfe: 1a00 d200 |00e3: const-string v0, "free play" // string@00d2 +002d02: 5b80 0f00 |00e5: iput-object v0, v8, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f +002d06: 28af |00e7: goto 0096 // -0051 +002d08: 5986 2300 |00e8: iput v6, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002d0c: 5481 2200 |00ea: iget-object v1, v8, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +002d10: 5582 0300 |00ec: iget-boolean v2, v8, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +002d14: 6e30 8700 0102 |00ee: invoke-virtual {v1, v0, v2}, Lcom/google/android/checkers/a;.b:(ZZ)V // method@0087 +002d1a: 28a5 |00f1: goto 0096 // -005b +002d1c: 5280 2300 |00f2: iget v0, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002d20: 3360 0500 |00f4: if-ne v0, v6, 00f9 // +0005 +002d24: 5987 2300 |00f6: iput v7, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002d28: 289e |00f8: goto 0096 // -0062 +002d2a: 5982 2300 |00f9: iput v2, v8, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +002d2e: 289b |00fb: goto 0096 // -0065 +002d30: 0001 0300 0000 0000 1a00 0000 2400 ... 
|00fc: packed-switch-data (10 units) + catches : (none) + positions : + locals : + + #13 : (in Lcom/google/android/checkers/CheckersView;) + name : 'd' + type : '()V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 6 + ins : 1 + outs : 5 + insns size : 88 16-bit code units +002d44: |[002d44] com.google.android.checkers.CheckersView.d:()V +002d54: 1303 8b00 |0000: const/16 v3, #int 139 // #8b +002d58: 1301 cc00 |0002: const/16 v1, #int 204 // #cc +002d5c: 1304 ff00 |0004: const/16 v4, #int 255 // #ff +002d60: 5250 0700 |0006: iget v0, v5, Lcom/google/android/checkers/CheckersView;.F:I // field@0007 +002d64: 2b00 4000 0000 |0008: packed-switch v0, 00000048 // +00000040 +002d6a: 0e00 |000b: return-void +002d6c: 5450 1700 |000c: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +002d70: 1301 6600 |000e: const/16 v1, #int 102 // #66 +002d74: 1302 cd00 |0010: const/16 v2, #int 205 // #cd +002d78: 1303 aa00 |0012: const/16 v3, #int 170 // #aa +002d7c: 6e53 2200 4021 |0014: invoke-virtual {v0, v4, v1, v2, v3}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +002d82: 28f4 |0017: goto 000b // -000c +002d84: 5450 1700 |0018: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +002d88: 1301 4500 |001a: const/16 v1, #int 69 // #45 +002d8c: 1302 7400 |001c: const/16 v2, #int 116 // #74 +002d90: 6e52 2200 4031 |001e: invoke-virtual {v0, v4, v1, v3, v2}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +002d96: 28ea |0021: goto 000b // -0016 +002d98: 5450 1700 |0022: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +002d9c: 1301 1e00 |0024: const/16 v1, #int 30 // #1e +002da0: 1302 9000 |0026: const/16 v2, #int 144 // #90 +002da4: 6e54 2200 4021 |0028: invoke-virtual {v0, v4, v1, v2, v4}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +002daa: 28e0 |002b: goto 000b // -0020 +002dac: 5450 1700 |002c: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +002db0: 1301 8e00 |002e: const/16 v1, #int 142 // #8e +002db4: 1302 6b00 |0030: const/16 v2, #int 107 // #6b +002db8: 1303 2300 |0032: const/16 v3, #int 35 // #23 +002dbc: 6e53 2200 4021 |0034: invoke-virtual {v0, v4, v1, v2, v3}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +002dc2: 28d4 |0037: goto 000b // -002c +002dc4: 5450 1700 |0038: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +002dc8: 1301 6c00 |003a: const/16 v1, #int 108 // #6c +002dcc: 1302 7b00 |003c: const/16 v2, #int 123 // #7b +002dd0: 6e53 2200 4021 |003e: invoke-virtual {v0, v4, v1, v2, v3}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +002dd6: 28ca |0041: goto 000b // -0036 +002dd8: 5450 1700 |0042: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +002ddc: 6e51 2200 4011 |0044: invoke-virtual {v0, v4, v1, v1, v1}, Landroid/graphics/Paint;.setARGB:(IIII)V // method@0022 +002de2: 28c4 |0047: goto 000b // -003c +002de4: 0001 0600 0100 0000 0400 0000 1000 ... 
|0048: packed-switch-data (16 units) + catches : (none) + positions : + locals : + + #14 : (in Lcom/google/android/checkers/CheckersView;) + name : 'e' + type : '()Z' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 3 + ins : 1 + outs : 0 + insns size : 10 16-bit code units +002e04: |[002e04] com.google.android.checkers.CheckersView.e:()Z +002e14: 5220 2500 |0000: iget v0, v2, Lcom/google/android/checkers/CheckersView;.s:I // field@0025 +002e18: 1501 f0ff |0002: const/high16 v1, #int -1048576 // #fff0 +002e1c: 3310 0400 |0004: if-ne v0, v1, 0008 // +0004 +002e20: 1210 |0006: const/4 v0, #int 1 // #1 +002e22: 0f00 |0007: return v0 +002e24: 1200 |0008: const/4 v0, #int 0 // #0 +002e26: 28fe |0009: goto 0007 // -0002 + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : '()V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 4 + ins : 1 + outs : 3 + insns size : 64 16-bit code units +002e28: |[002e28] com.google.android.checkers.CheckersView.a:()V +002e38: 1270 |0000: const/4 v0, #int 7 // #7 +002e3a: 2300 3800 |0001: new-array v0, v0, [Ljava/lang/CharSequence; // type@0038 +002e3e: 1201 |0003: const/4 v1, #int 0 // #0 +002e40: 1a02 9d00 |0004: const-string v2, "Wood" // string@009d +002e44: 4d02 0001 |0006: aput-object v2, v0, v1 +002e48: 1211 |0008: const/4 v1, #int 1 // #1 +002e4a: 1a02 6000 |0009: const-string v2, "Light Aquamarine" // string@0060 +002e4e: 4d02 0001 |000b: aput-object v2, v0, v1 +002e52: 1221 |000d: const/4 v1, #int 2 // #2 +002e54: 1a02 1400 |000e: const-string v2, "Dark Aquamarine" // string@0014 +002e58: 4d02 0001 |0010: aput-object v2, v0, v1 +002e5c: 1231 |0012: const/4 v1, #int 3 // #3 +002e5e: 1a02 0c00 |0013: const-string v2, "Blue" // string@000c +002e62: 4d02 0001 |0015: aput-object v2, v0, v1 +002e66: 1241 |0017: const/4 v1, #int 4 // #4 +002e68: 1a02 0e00 |0018: const-string v2, "Brown" // string@000e +002e6c: 4d02 0001 |001a: aput-object v2, v0, v1 +002e70: 1251 |001c: const/4 v1, #int 5 // #5 +002e72: 1a02 1a00 |001d: const-string v2, "Grey" // string@001a +002e76: 4d02 0001 |001f: aput-object v2, v0, v1 +002e7a: 1261 |0021: const/4 v1, #int 6 // #6 +002e7c: 1a02 6100 |0022: const-string v2, "Light Grey" // string@0061 +002e80: 4d02 0001 |0024: aput-object v2, v0, v1 +002e84: 2201 0500 |0026: new-instance v1, Landroid/app/AlertDialog$Builder; // type@0005 +002e88: 5432 1300 |0028: iget-object v2, v3, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013 +002e8c: 7020 0900 2100 |002a: invoke-direct {v1, v2}, Landroid/app/AlertDialog$Builder;.<init>:(Landroid/content/Context;)V // method@0009 +002e92: 1a02 0d00 |002d: const-string v2, "Board Color" // string@000d +002e96: 6e20 0f00 2100 |002f: invoke-virtual {v1, v2}, Landroid/app/AlertDialog$Builder;.setTitle:(Ljava/lang/CharSequence;)Landroid/app/AlertDialog$Builder; // method@000f +002e9c: 0c01 |0032: move-result-object v1 +002e9e: 2202 2500 |0033: new-instance v2, Lcom/google/android/checkers/f; // type@0025 +002ea2: 7020 9700 3200 |0035: invoke-direct {v2, v3}, Lcom/google/android/checkers/f;.<init>:(Lcom/google/android/checkers/CheckersView;)V // method@0097 +002ea8: 6e30 0b00 0102 |0038: invoke-virtual {v1, v0, v2}, Landroid/app/AlertDialog$Builder;.setItems:([Ljava/lang/CharSequence;Landroid/content/DialogInterface$OnClickListener;)Landroid/app/AlertDialog$Builder; // method@000b +002eae: 0c00 |003b: move-result-object v0 +002eb0: 6e10 1000 0000 |003c: invoke-virtual 
{v0}, Landroid/app/AlertDialog$Builder;.show:()Landroid/app/AlertDialog; // method@0010 +002eb6: 0e00 |003f: return-void + catches : (none) + positions : + locals : + + #1 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : '(FF)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 10 + ins : 3 + outs : 1 + insns size : 113 16-bit code units +002eb8: |[002eb8] com.google.android.checkers.CheckersView.a:(FF)V +002ec8: 1206 |0000: const/4 v6, #int 0 // #0 +002eca: 6e10 6a00 0700 |0001: invoke-virtual {v7}, Lcom/google/android/checkers/CheckersView;.getWidth:()I // method@006a +002ed0: 0a01 |0004: move-result v1 +002ed2: 6e10 6900 0700 |0005: invoke-virtual {v7}, Lcom/google/android/checkers/CheckersView;.getHeight:()I // method@0069 +002ed8: 0a02 |0008: move-result v2 +002eda: 3521 4900 |0009: if-ge v1, v2, 0052 // +0049 +002ede: 0110 |000b: move v0, v1 +002ee0: e200 0003 |000c: ushr-int/lit8 v0, v0, #int 3 // #03 +002ee4: e003 0003 |000e: shl-int/lit8 v3, v0, #int 3 // #03 +002ee8: 5274 1000 |0010: iget v4, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002eec: 2e04 0604 |0012: cmpg-float v4, v6, v4 +002ef0: 3c04 1600 |0014: if-gtz v4, 002a // +0016 +002ef4: 5274 1000 |0016: iget v4, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002ef8: 8235 |0018: int-to-float v5, v3 +002efa: 2e04 0405 |0019: cmpg-float v4, v4, v5 +002efe: 3b04 0f00 |001b: if-gez v4, 002a // +000f +002f02: 5274 1100 |001d: iget v4, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f06: 2e04 0604 |001f: cmpg-float v4, v6, v4 +002f0a: 3c04 0900 |0021: if-gtz v4, 002a // +0009 +002f0e: 5274 1100 |0023: iget v4, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f12: 8233 |0025: int-to-float v3, v3 +002f14: 2e03 0403 |0026: cmpg-float v3, v4, v3 +002f18: 3a03 0400 |0028: if-ltz v3, 002c // +0004 +002f1c: 1300 1000 |002a: const/16 v0, #int 16 // #10 +002f20: 1d07 |002c: monitor-enter v7 +002f22: 5273 1000 |002d: iget v3, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002f26: 8204 |002f: int-to-float v4, v0 +002f28: c884 |0030: mul-float/2addr v4, v8 +002f2a: c643 |0031: add-float/2addr v3, v4 +002f2c: 5973 1000 |0032: iput v3, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002f30: 5273 1100 |0034: iget v3, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f34: 8200 |0036: int-to-float v0, v0 +002f36: c890 |0037: mul-float/2addr v0, v9 +002f38: c630 |0038: add-float/2addr v0, v3 +002f3a: 5970 1100 |0039: iput v0, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f3e: 5270 1000 |003b: iget v0, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002f42: 2e00 0006 |003d: cmpg-float v0, v0, v6 +002f46: 3b00 1500 |003f: if-gez v0, 0054 // +0015 +002f4a: 1200 |0041: const/4 v0, #int 0 // #0 +002f4c: 5970 1000 |0042: iput v0, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002f50: 5270 1100 |0044: iget v0, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f54: 2e00 0006 |0046: cmpg-float v0, v0, v6 +002f58: 3b00 1c00 |0048: if-gez v0, 0064 // +001c +002f5c: 1200 |004a: const/4 v0, #int 0 // #0 +002f5e: 5970 1100 |004b: iput v0, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f62: 1e07 |004d: monitor-exit v7 +002f64: 6e10 6d00 0700 |004e: invoke-virtual {v7}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +002f6a: 0e00 |0051: return-void +002f6c: 0120 |0052: move 
v0, v2 +002f6e: 28b9 |0053: goto 000c // -0047 +002f70: 5270 1000 |0054: iget v0, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002f74: 8213 |0056: int-to-float v3, v1 +002f76: 2d00 0003 |0057: cmpl-float v0, v0, v3 +002f7a: 3a00 ebff |0059: if-ltz v0, 0044 // -0015 +002f7e: d800 01ff |005b: add-int/lit8 v0, v1, #int -1 // #ff +002f82: 8200 |005d: int-to-float v0, v0 +002f84: 5970 1000 |005e: iput v0, v7, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +002f88: 28e4 |0060: goto 0044 // -001c +002f8a: 0d00 |0061: move-exception v0 +002f8c: 1e07 |0062: monitor-exit v7 +002f8e: 2700 |0063: throw v0 +002f90: 5270 1100 |0064: iget v0, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002f94: 8221 |0066: int-to-float v1, v2 +002f96: 2d00 0001 |0067: cmpl-float v0, v0, v1 +002f9a: 3a00 e4ff |0069: if-ltz v0, 004d // -001c +002f9e: d800 02ff |006b: add-int/lit8 v0, v2, #int -1 // #ff +002fa2: 8200 |006d: int-to-float v0, v0 +002fa4: 5970 1100 |006e: iput v0, v7, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +002fa8: 28dd |0070: goto 004d // -0023 + catches : 3 + 0x002d - 0x004e + <any> -> 0x0061 + 0x0054 - 0x0060 + <any> -> 0x0061 + 0x0064 - 0x0070 + <any> -> 0x0061 + positions : + locals : + + #2 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : '(IIII)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 11 + ins : 5 + outs : 6 + insns size : 16 16-bit code units +002fc8: |[002fc8] com.google.android.checkers.CheckersView.a:(IIII)V +002fd8: 1211 |0000: const/4 v1, #int 1 // #1 +002fda: 0760 |0001: move-object v0, v6 +002fdc: 0172 |0002: move v2, v7 +002fde: 0183 |0003: move v3, v8 +002fe0: 0194 |0004: move v4, v9 +002fe2: 01a5 |0005: move v5, v10 +002fe4: 7606 5b00 0000 |0006: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(ZIIII)Z // method@005b +002fea: 0a00 |0009: move-result v0 +002fec: 3800 0500 |000a: if-eqz v0, 000f // +0005 +002ff0: 6e10 6d00 0600 |000c: invoke-virtual {v6}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +002ff6: 0e00 |000f: return-void + catches : (none) + positions : + locals : + + #3 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : '(Landroid/content/SharedPreferences$Editor;)V' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 4 + ins : 2 + outs : 3 + insns size : 170 16-bit code units +002ff8: |[002ff8] com.google.android.checkers.CheckersView.a:(Landroid/content/SharedPreferences$Editor;)V +003008: 1d02 |0000: monitor-enter v2 +00300a: 7210 1300 0300 |0001: invoke-interface {v3}, Landroid/content/SharedPreferences$Editor;.clear:()Landroid/content/SharedPreferences$Editor; // method@0013 +003010: 1a00 d000 |0004: const-string v0, "format" // string@00d0 +003014: 1301 2200 |0006: const/16 v1, #int 34 // #22 +003018: 7230 1600 0301 |0008: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +00301e: 1a00 3101 |000b: const-string v0, "state" // string@0131 +003022: 5221 2300 |000d: iget v1, v2, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +003026: 7230 1600 0301 |000f: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +00302c: 1a00 3f01 |0012: const-string v0, "wp" // string@013f +003030: 5221 2400 |0014: iget v1, v2, 
Lcom/google/android/checkers/CheckersView;.r:I // field@0024 +003034: 7230 1600 0301 |0016: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +00303a: 1a00 b400 |0019: const-string v0, "bp" // string@00b4 +00303e: 5221 2500 |001b: iget v1, v2, Lcom/google/android/checkers/CheckersView;.s:I // field@0025 +003042: 7230 1600 0301 |001d: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003048: 1a00 3e01 |0020: const-string v0, "wk" // string@013e +00304c: 5221 2600 |0022: iget v1, v2, Lcom/google/android/checkers/CheckersView;.t:I // field@0026 +003050: 7230 1600 0301 |0024: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003056: 1a00 b300 |0027: const-string v0, "bk" // string@00b3 +00305a: 5221 2700 |0029: iget v1, v2, Lcom/google/android/checkers/CheckersView;.u:I // field@0027 +00305e: 7230 1600 0301 |002b: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003064: 1a00 e800 |002e: const-string v0, "l1" // string@00e8 +003068: 5221 2800 |0030: iget v1, v2, Lcom/google/android/checkers/CheckersView;.v:I // field@0028 +00306c: 7230 1600 0301 |0032: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003072: 1a00 e900 |0035: const-string v0, "l2" // string@00e9 +003076: 5221 2900 |0037: iget v1, v2, Lcom/google/android/checkers/CheckersView;.w:I // field@0029 +00307a: 7230 1600 0301 |0039: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003080: 1a00 ef00 |003c: const-string v0, "lm" // string@00ef +003084: 5221 2a00 |003e: iget v1, v2, Lcom/google/android/checkers/CheckersView;.x:I // field@002a +003088: 7230 1600 0301 |0040: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +00308e: 1a00 b800 |0043: const-string v0, "cap" // string@00b8 +003092: 5521 2b00 |0045: iget-boolean v1, v2, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b +003096: 7230 1500 0301 |0047: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putBoolean:(Ljava/lang/String;Z)Landroid/content/SharedPreferences$Editor; // method@0015 +00309c: 1a00 ee00 |004a: const-string v0, "level" // string@00ee +0030a0: 5221 2c00 |004c: iget v1, v2, Lcom/google/android/checkers/CheckersView;.z:I // field@002c +0030a4: 7230 1600 0301 |004e: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +0030aa: 1a00 2d01 |0051: const-string v0, "show" // string@012d +0030ae: 5521 0200 |0053: iget-boolean v1, v2, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002 +0030b2: 7230 1500 0301 |0055: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putBoolean:(Ljava/lang/String;Z)Landroid/content/SharedPreferences$Editor; // method@0015 +0030b8: 1a00 d100 |0058: const-string v0, "free" // string@00d1 +0030bc: 5521 0300 
|005a: iget-boolean v1, v2, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +0030c0: 7230 1500 0301 |005c: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putBoolean:(Ljava/lang/String;Z)Landroid/content/SharedPreferences$Editor; // method@0015 +0030c6: 1a00 1801 |005f: const-string v0, "rot" // string@0118 +0030ca: 5521 0400 |0061: iget-boolean v1, v2, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004 +0030ce: 7230 1500 0301 |0063: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putBoolean:(Ljava/lang/String;Z)Landroid/content/SharedPreferences$Editor; // method@0015 +0030d4: 1a00 d300 |0066: const-string v0, "full" // string@00d3 +0030d8: 5521 0500 |0068: iget-boolean v1, v2, Lcom/google/android/checkers/CheckersView;.D:Z // field@0005 +0030dc: 7230 1500 0301 |006a: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putBoolean:(Ljava/lang/String;Z)Landroid/content/SharedPreferences$Editor; // method@0015 +0030e2: 1a00 2f01 |006d: const-string v0, "start" // string@012f +0030e6: 5521 0600 |006f: iget-boolean v1, v2, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006 +0030ea: 7230 1500 0301 |0071: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putBoolean:(Ljava/lang/String;Z)Landroid/content/SharedPreferences$Editor; // method@0015 +0030f0: 1a00 bd00 |0074: const-string v0, "color" // string@00bd +0030f4: 5221 0700 |0076: iget v1, v2, Lcom/google/android/checkers/CheckersView;.F:I // field@0007 +0030f8: 7230 1600 0301 |0078: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +0030fe: 1a00 f300 |007b: const-string v0, "lwp" // string@00f3 +003102: 5421 0800 |007d: iget-object v1, v2, Lcom/google/android/checkers/CheckersView;.G:[I // field@0008 +003106: 7130 5200 0301 |007f: invoke-static {v3, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V // method@0052 +00310c: 1a00 f200 |0082: const-string v0, "lwk" // string@00f2 +003110: 5421 0900 |0084: iget-object v1, v2, Lcom/google/android/checkers/CheckersView;.H:[I // field@0009 +003114: 7130 5200 0301 |0086: invoke-static {v3, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V // method@0052 +00311a: 1a00 eb00 |0089: const-string v0, "lbp" // string@00eb +00311e: 5421 0a00 |008b: iget-object v1, v2, Lcom/google/android/checkers/CheckersView;.I:[I // field@000a +003122: 7130 5200 0301 |008d: invoke-static {v3, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V // method@0052 +003128: 1a00 ea00 |0090: const-string v0, "lbk" // string@00ea +00312c: 5421 0b00 |0092: iget-object v1, v2, Lcom/google/android/checkers/CheckersView;.J:[I // field@000b +003130: 7130 5200 0301 |0094: invoke-static {v3, v0, v1}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/content/SharedPreferences$Editor;Ljava/lang/String;[I)V // method@0052 +003136: 1a00 f100 |0097: const-string v0, "lp" // string@00f1 +00313a: 5221 0c00 |0099: iget v1, v2, Lcom/google/android/checkers/CheckersView;.K:I // field@000c +00313e: 7230 1600 0301 |009b: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003144: 1a00 ec00 |009e: 
const-string v0, "lc" // string@00ec +003148: 5221 0d00 |00a0: iget v1, v2, Lcom/google/android/checkers/CheckersView;.L:I // field@000d +00314c: 7230 1600 0301 |00a2: invoke-interface {v3, v0, v1}, Landroid/content/SharedPreferences$Editor;.putInt:(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor; // method@0016 +003152: 1e02 |00a5: monitor-exit v2 +003154: 0e00 |00a6: return-void +003156: 0d00 |00a7: move-exception v0 +003158: 1e02 |00a8: monitor-exit v2 +00315a: 2700 |00a9: throw v0 + catches : 1 + 0x0001 - 0x00a5 + <any> -> 0x00a7 + positions : + locals : + + #4 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : '(I)Z' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 8 + ins : 2 + outs : 6 + insns size : 17 16-bit code units +003168: |[003168] com.google.android.checkers.CheckersView.a:(I)Z +003178: 1201 |0000: const/4 v1, #int 0 // #0 +00317a: 0760 |0001: move-object v0, v6 +00317c: 0172 |0002: move v2, v7 +00317e: 0113 |0003: move v3, v1 +003180: 0114 |0004: move v4, v1 +003182: 0115 |0005: move v5, v1 +003184: 7606 5b00 0000 |0006: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(ZIIII)Z // method@005b +00318a: 0a00 |0009: move-result v0 +00318c: 3800 0600 |000a: if-eqz v0, 0010 // +0006 +003190: 6e10 6d00 0600 |000c: invoke-virtual {v6}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +003196: 1211 |000f: const/4 v1, #int 1 // #1 +003198: 0f01 |0010: return v1 + catches : (none) + positions : + locals : + + #5 : (in Lcom/google/android/checkers/CheckersView;) + name : 'a' + type : '(Z)Z' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 3 + ins : 2 + outs : 1 + insns size : 22 16-bit code units +00319c: |[00319c] com.google.android.checkers.CheckersView.a:(Z)Z +0031ac: 1d01 |0000: monitor-enter v1 +0031ae: 3802 0c00 |0001: if-eqz v2, 000d // +000c +0031b2: 5510 0200 |0003: iget-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002 +0031b6: 3800 0c00 |0005: if-eqz v0, 0011 // +000c +0031ba: 1200 |0007: const/4 v0, #int 0 // #0 +0031bc: 5c10 0200 |0008: iput-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002 +0031c0: 6e10 6d00 0100 |000a: invoke-virtual {v1}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +0031c6: 5510 0200 |000d: iget-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002 +0031ca: 1e01 |000f: monitor-exit v1 +0031cc: 0f00 |0010: return v0 +0031ce: 1210 |0011: const/4 v0, #int 1 // #1 +0031d0: 28f6 |0012: goto 0008 // -000a +0031d2: 0d00 |0013: move-exception v0 +0031d4: 1e01 |0014: monitor-exit v1 +0031d6: 2700 |0015: throw v0 + catches : 1 + 0x0003 - 0x000f + <any> -> 0x0013 + positions : + locals : + + #6 : (in Lcom/google/android/checkers/CheckersView;) + name : 'b' + type : '()V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 7 + ins : 1 + outs : 6 + insns size : 30 16-bit code units +0031e4: |[0031e4] com.google.android.checkers.CheckersView.b:()V +0031f4: 1201 |0000: const/4 v1, #int 0 // #0 +0031f6: 1d06 |0001: monitor-enter v6 +0031f8: 5260 1000 |0002: iget v0, v6, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +0031fc: 5262 1100 |0004: iget v2, v6, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +003200: 7030 5c00 0602 |0006: invoke-direct {v6, v0, v2}, Lcom/google/android/checkers/CheckersView;.b:(FF)I // method@005c +003206: 0a02 |0009: move-result v2 +003208: 1e06 |000a: 
monitor-exit v6 +00320a: 3802 0f00 |000b: if-eqz v2, 001a // +000f +00320e: 0760 |000d: move-object v0, v6 +003210: 0113 |000e: move v3, v1 +003212: 0114 |000f: move v4, v1 +003214: 0115 |0010: move v5, v1 +003216: 7606 5b00 0000 |0011: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(ZIIII)Z // method@005b +00321c: 0a00 |0014: move-result v0 +00321e: 3800 0500 |0015: if-eqz v0, 001a // +0005 +003222: 6e10 6d00 0600 |0017: invoke-virtual {v6}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +003228: 0e00 |001a: return-void +00322a: 0d00 |001b: move-exception v0 +00322c: 1e06 |001c: monitor-exit v6 +00322e: 2700 |001d: throw v0 + catches : 1 + 0x0002 - 0x000b + <any> -> 0x001b + positions : + locals : + + #7 : (in Lcom/google/android/checkers/CheckersView;) + name : 'b' + type : '(Z)Z' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 7 + ins : 2 + outs : 3 + insns size : 69 16-bit code units +00323c: |[00323c] com.google.android.checkers.CheckersView.b:(Z)Z +00324c: 1233 |0000: const/4 v3, #int 3 // #3 +00324e: 1210 |0001: const/4 v0, #int 1 // #1 +003250: 1201 |0002: const/4 v1, #int 0 // #0 +003252: 1d05 |0003: monitor-enter v5 +003254: 3806 3400 |0004: if-eqz v6, 0038 // +0034 +003258: 5552 0300 |0006: iget-boolean v2, v5, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +00325c: 3802 3400 |0008: if-eqz v2, 003c // +0034 +003260: 0112 |000a: move v2, v1 +003262: 5c52 0300 |000b: iput-boolean v2, v5, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +003266: 5252 2300 |000d: iget v2, v5, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +00326a: 3232 0600 |000f: if-eq v2, v3, 0015 // +0006 +00326e: 5252 2300 |0011: iget v2, v5, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +003272: 3302 2500 |0013: if-ne v2, v0, 0038 // +0025 +003276: 5252 2300 |0015: iget v2, v5, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +00327a: 3332 2700 |0017: if-ne v2, v3, 003e // +0027 +00327e: 0102 |0019: move v2, v0 +003280: 1203 |001a: const/4 v3, #int 0 // #0 +003282: 5953 2800 |001b: iput v3, v5, Lcom/google/android/checkers/CheckersView;.v:I // field@0028 +003286: 1203 |001d: const/4 v3, #int 0 // #0 +003288: 5953 2900 |001e: iput v3, v5, Lcom/google/android/checkers/CheckersView;.w:I // field@0029 +00328c: 1203 |0020: const/4 v3, #int 0 // #0 +00328e: 5953 2a00 |0021: iput v3, v5, Lcom/google/android/checkers/CheckersView;.x:I // field@002a +003292: 5453 2200 |0023: iget-object v3, v5, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +003296: 5554 0300 |0025: iget-boolean v4, v5, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +00329a: 6e30 7a00 2304 |0027: invoke-virtual {v3, v2, v4}, Lcom/google/android/checkers/a;.a:(ZZ)I // method@007a +0032a0: 0a02 |002a: move-result v2 +0032a2: 3302 1500 |002b: if-ne v2, v0, 0040 // +0015 +0032a6: 5c50 2b00 |002d: iput-boolean v0, v5, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b +0032aa: 5450 2200 |002f: iget-object v0, v5, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +0032ae: 5200 3c00 |0031: iget v0, v0, Lcom/google/android/checkers/a;.c:I // field@003c +0032b2: 5950 0e00 |0033: iput v0, v5, Lcom/google/android/checkers/CheckersView;.M:I // field@000e +0032b6: 6e10 6d00 0500 |0035: invoke-virtual {v5}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +0032bc: 
5550 0300 |0038: iget-boolean v0, v5, Lcom/google/android/checkers/CheckersView;.B:Z // field@0003 +0032c0: 1e05 |003a: monitor-exit v5 +0032c2: 0f00 |003b: return v0 +0032c4: 0102 |003c: move v2, v0 +0032c6: 28ce |003d: goto 000b // -0032 +0032c8: 0112 |003e: move v2, v1 +0032ca: 28db |003f: goto 001a // -0025 +0032cc: 0110 |0040: move v0, v1 +0032ce: 28ec |0041: goto 002d // -0014 +0032d0: 0d00 |0042: move-exception v0 +0032d2: 1e05 |0043: monitor-exit v5 +0032d4: 2700 |0044: throw v0 + catches : 1 + 0x0006 - 0x003a + <any> -> 0x0042 + positions : + locals : + + #8 : (in Lcom/google/android/checkers/CheckersView;) + name : 'c' + type : '()I' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 2 + ins : 1 + outs : 0 + insns size : 8 16-bit code units +0032e4: |[0032e4] com.google.android.checkers.CheckersView.c:()I +0032f4: 1d01 |0000: monitor-enter v1 +0032f6: 5210 2c00 |0001: iget v0, v1, Lcom/google/android/checkers/CheckersView;.z:I // field@002c +0032fa: 1e01 |0003: monitor-exit v1 +0032fc: 0f00 |0004: return v0 +0032fe: 0d00 |0005: move-exception v0 +003300: 1e01 |0006: monitor-exit v1 +003302: 2700 |0007: throw v0 + catches : 1 + 0x0001 - 0x0003 + <any> -> 0x0005 + positions : + locals : + + #9 : (in Lcom/google/android/checkers/CheckersView;) + name : 'c' + type : '(Z)Z' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 5 + ins : 2 + outs : 3 + insns size : 34 16-bit code units +003310: |[003310] com.google.android.checkers.CheckersView.c:(Z)Z +003320: 1200 |0000: const/4 v0, #int 0 // #0 +003322: 1d03 |0001: monitor-enter v3 +003324: 3804 1700 |0002: if-eqz v4, 0019 // +0017 +003328: 5531 0400 |0004: iget-boolean v1, v3, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004 +00332c: 3801 1700 |0006: if-eqz v1, 001d // +0017 +003330: 5c30 0400 |0008: iput-boolean v0, v3, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004 +003334: 5430 1300 |000a: iget-object v0, v3, Lcom/google/android/checkers/CheckersView;.a:Landroid/content/Context; // field@0013 +003338: 1a01 1901 |000c: const-string v1, "rotated board" // string@0119 +00333c: 1202 |000e: const/4 v2, #int 0 // #0 +00333e: 7130 3c00 1002 |000f: invoke-static {v0, v1, v2}, Landroid/widget/Toast;.makeText:(Landroid/content/Context;Ljava/lang/CharSequence;I)Landroid/widget/Toast; // method@003c +003344: 0c00 |0012: move-result-object v0 +003346: 6e10 3d00 0000 |0013: invoke-virtual {v0}, Landroid/widget/Toast;.show:()V // method@003d +00334c: 6e10 6d00 0300 |0016: invoke-virtual {v3}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +003352: 5530 0400 |0019: iget-boolean v0, v3, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004 +003356: 1e03 |001b: monitor-exit v3 +003358: 0f00 |001c: return v0 +00335a: 1210 |001d: const/4 v0, #int 1 // #1 +00335c: 28ea |001e: goto 0008 // -0016 +00335e: 0d00 |001f: move-exception v0 +003360: 1e03 |0020: monitor-exit v3 +003362: 2700 |0021: throw v0 + catches : 1 + 0x0004 - 0x001b + <any> -> 0x001f + positions : + locals : + + #10 : (in Lcom/google/android/checkers/CheckersView;) + name : 'd' + type : '(Z)Z' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 3 + ins : 2 + outs : 0 + insns size : 19 16-bit code units +003370: |[003370] com.google.android.checkers.CheckersView.d:(Z)Z +003380: 1d01 |0000: monitor-enter v1 +003382: 3802 0900 |0001: if-eqz v2, 000a // +0009 +003386: 5510 0500 |0003: iget-boolean v0, v1, 
Lcom/google/android/checkers/CheckersView;.D:Z // field@0005 +00338a: 3800 0900 |0005: if-eqz v0, 000e // +0009 +00338e: 1200 |0007: const/4 v0, #int 0 // #0 +003390: 5c10 0500 |0008: iput-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.D:Z // field@0005 +003394: 5510 0500 |000a: iget-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.D:Z // field@0005 +003398: 1e01 |000c: monitor-exit v1 +00339a: 0f00 |000d: return v0 +00339c: 1210 |000e: const/4 v0, #int 1 // #1 +00339e: 28f9 |000f: goto 0008 // -0007 +0033a0: 0d00 |0010: move-exception v0 +0033a2: 1e01 |0011: monitor-exit v1 +0033a4: 2700 |0012: throw v0 + catches : 1 + 0x0003 - 0x000c + <any> -> 0x0010 + positions : + locals : + + #11 : (in Lcom/google/android/checkers/CheckersView;) + name : 'draw' + type : '(Landroid/graphics/Canvas;)V' + access : 0x20001 (PUBLIC DECLARED_SYNCHRONIZED) + code - + registers : 30 + ins : 2 + outs : 8 + insns size : 1264 16-bit code units +0033b4: |[0033b4] com.google.android.checkers.CheckersView.draw:(Landroid/graphics/Canvas;)V +0033c4: 1d1c |0000: monitor-enter v28 +0033c6: 7502 3800 1c00 |0001: invoke-super/range {v28, v29}, Landroid/view/View;.draw:(Landroid/graphics/Canvas;)V // method@0038 +0033cc: 7401 6a00 1c00 |0004: invoke-virtual/range {v28}, Lcom/google/android/checkers/CheckersView;.getWidth:()I // method@006a +0033d2: 0a03 |0007: move-result v3 +0033d4: 7401 6900 1c00 |0008: invoke-virtual/range {v28}, Lcom/google/android/checkers/CheckersView;.getHeight:()I // method@0069 +0033da: 0a04 |000b: move-result v4 +0033dc: 3543 bc01 |000c: if-ge v3, v4, 01c8 // +01bc +0033e0: 0132 |000e: move v2, v3 +0033e2: e216 0203 |000f: ushr-int/lit8 v22, v2, #int 3 // #03 +0033e6: e017 1603 |0011: shl-int/lit8 v23, v22, #int 3 // #03 +0033ea: e218 1601 |0013: ushr-int/lit8 v24, v22, #int 1 // #01 +0033ee: 0800 1c00 |0015: move-object/from16 v0, v28 +0033f2: 5200 1d00 |0017: iget v0, v0, Lcom/google/android/checkers/CheckersView;.k:I // field@001d +0033f6: 0214 0000 |0019: move/from16 v20, v0 +0033fa: db19 1403 |001b: div-int/lit8 v25, v20, #int 3 // #03 +0033fe: 3543 ae01 |001d: if-ge v3, v4, 01cb // +01ae +003402: 1224 |001f: const/4 v4, #int 2 // #2 +003404: da02 140b |0020: mul-int/lit8 v2, v20, #int 11 // #0b +003408: 9103 1702 |0022: sub-int v3, v23, v2 +00340c: 9002 1714 |0024: add-int v2, v23, v20 +003410: 0211 0200 |0026: move/from16 v17, v2 +003414: 0212 0300 |0028: move/from16 v18, v3 +003418: 0213 0200 |002a: move/from16 v19, v2 +00341c: 0215 0400 |002c: move/from16 v21, v4 +003420: 0800 1c00 |002e: move-object/from16 v0, v28 +003424: 5402 1400 |0030: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +003428: 0800 1d00 |0032: move-object/from16 v0, v29 +00342c: 6e20 1e00 2000 |0034: invoke-virtual {v0, v2}, Landroid/graphics/Canvas;.drawPaint:(Landroid/graphics/Paint;)V // method@001e +003432: 0800 1c00 |0037: move-object/from16 v0, v28 +003436: 5202 0700 |0039: iget v2, v0, Lcom/google/android/checkers/CheckersView;.F:I // field@0007 +00343a: 3902 a201 |003b: if-nez v2, 01dd // +01a2 +00343e: 0800 1c00 |003d: move-object/from16 v0, v28 +003442: 5407 1600 |003f: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.d:Landroid/graphics/Paint; // field@0016 +003446: 0800 1c00 |0041: move-object/from16 v0, v28 +00344a: 5402 2100 |0043: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.o:Landroid/graphics/drawable/Drawable; // field@0021 +00344e: 1203 |0045: const/4 v3, #int 0 // #0 +003450: 1204 |0046: 
const/4 v4, #int 0 // #0 +003452: 0200 1700 |0047: move/from16 v0, v23 +003456: 0201 1700 |0049: move/from16 v1, v23 +00345a: 6e51 2800 3204 |004b: invoke-virtual {v2, v3, v4, v0, v1}, Landroid/graphics/drawable/Drawable;.setBounds:(IIII)V // method@0028 +003460: 0800 1c00 |004e: move-object/from16 v0, v28 +003464: 5402 2100 |0050: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.o:Landroid/graphics/drawable/Drawable; // field@0021 +003468: 0800 1d00 |0052: move-object/from16 v0, v29 +00346c: 6e20 2700 0200 |0054: invoke-virtual {v2, v0}, Landroid/graphics/drawable/Drawable;.draw:(Landroid/graphics/Canvas;)V // method@0027 +003472: 1202 |0057: const/4 v2, #int 0 // #0 +003474: 0129 |0058: move v9, v2 +003476: 1302 0800 |0059: const/16 v2, #int 8 // #8 +00347a: 3429 9a01 |005b: if-lt v9, v2, 01f5 // +019a +00347e: 120d |005d: const/4 v13, #int 0 // #0 +003480: 120c |005e: const/4 v12, #int 0 // #0 +003482: 120b |005f: const/4 v11, #int 0 // #0 +003484: 120a |0060: const/4 v10, #int 0 // #0 +003486: 1213 |0061: const/4 v3, #int 1 // #1 +003488: 1202 |0062: const/4 v2, #int 0 // #0 +00348a: 0210 0200 |0063: move/from16 v16, v2 +00348e: 1302 0800 |0065: const/16 v2, #int 8 // #8 +003492: 0200 1000 |0067: move/from16 v0, v16 +003496: 3420 ad01 |0069: if-lt v0, v2, 0216 // +01ad +00349a: 7601 6700 1c00 |006b: invoke-direct/range {v28}, Lcom/google/android/checkers/CheckersView;.e:()Z // method@0067 +0034a0: 0a02 |006e: move-result v2 +0034a2: 3802 4d03 |006f: if-eqz v2, 03bc // +034d +0034a6: 1a02 1100 |0071: const-string v2, "Checkers for Android" // string@0011 +0034aa: 0200 1500 |0073: move/from16 v0, v21 +0034ae: 8203 |0075: int-to-float v3, v0 +0034b0: 0200 1300 |0076: move/from16 v0, v19 +0034b4: 8204 |0078: int-to-float v4, v0 +0034b6: 0800 1c00 |0079: move-object/from16 v0, v28 +0034ba: 5405 1500 |007b: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +0034be: 0800 1d00 |007d: move-object/from16 v0, v29 +0034c2: 6e55 2000 2043 |007f: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +0034c8: 1a02 b500 |0082: const-string v2, "by Aart J.C. 
Bik" // string@00b5 +0034cc: 0200 1500 |0084: move/from16 v0, v21 +0034d0: 8203 |0086: int-to-float v3, v0 +0034d2: 9004 1314 |0087: add-int v4, v19, v20 +0034d6: 8244 |0089: int-to-float v4, v4 +0034d8: 0800 1c00 |008a: move-object/from16 v0, v28 +0034dc: 5405 1500 |008c: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +0034e0: 0800 1d00 |008e: move-object/from16 v0, v29 +0034e4: 6e55 2000 2043 |0090: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +0034ea: 6302 6a00 |0093: sget-boolean v2, Lcom/google/android/checkers/g;.r:Z // field@006a +0034ee: 3902 1500 |0095: if-nez v2, 00aa // +0015 +0034f2: 1a02 f800 |0097: const-string v2, "no endgame TBs" // string@00f8 +0034f6: 0200 1500 |0099: move/from16 v0, v21 +0034fa: 8203 |009b: int-to-float v3, v0 +0034fc: da04 1402 |009c: mul-int/lit8 v4, v20, #int 2 // #02 +003500: 9004 0413 |009e: add-int v4, v4, v19 +003504: 8244 |00a0: int-to-float v4, v4 +003506: 0800 1c00 |00a1: move-object/from16 v0, v28 +00350a: 5405 1500 |00a3: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +00350e: 0800 1d00 |00a5: move-object/from16 v0, v29 +003512: 6e55 2000 2043 |00a7: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003518: 0800 1c00 |00aa: move-object/from16 v0, v28 +00351c: 5502 2b00 |00ac: iget-boolean v2, v0, Lcom/google/android/checkers/CheckersView;.y:Z // field@002b +003520: 3802 1300 |00ae: if-eqz v2, 00c1 // +0013 +003524: 1a02 7000 |00b0: const-string v2, "MUST CAPTURE" // string@0070 +003528: 0200 1200 |00b2: move/from16 v0, v18 +00352c: 8203 |00b4: int-to-float v3, v0 +00352e: 0200 1100 |00b5: move/from16 v0, v17 +003532: 8204 |00b7: int-to-float v4, v0 +003534: 0800 1c00 |00b8: move-object/from16 v0, v28 +003538: 5405 1800 |00ba: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.f:Landroid/graphics/Paint; // field@0018 +00353c: 0800 1d00 |00bc: move-object/from16 v0, v29 +003540: 6e55 2000 2043 |00be: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003546: 0800 1c00 |00c1: move-object/from16 v0, v28 +00354a: 5202 2300 |00c3: iget v2, v0, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +00354e: 2b02 1b04 0000 |00c5: packed-switch v2, 000004e0 // +0000041b +003554: 0800 1c00 |00c8: move-object/from16 v0, v28 +003558: 5502 0200 |00ca: iget-boolean v2, v0, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002 +00355c: 3802 2600 |00cc: if-eqz v2, 00f2 // +0026 +003560: 0800 1c00 |00ce: move-object/from16 v0, v28 +003564: 5202 2300 |00d0: iget v2, v0, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +003568: 1213 |00d2: const/4 v3, #int 1 // #1 +00356a: 3232 0900 |00d3: if-eq v2, v3, 00dc // +0009 +00356e: 0800 1c00 |00d5: move-object/from16 v0, v28 +003572: 5202 2300 |00d7: iget v2, v0, Lcom/google/android/checkers/CheckersView;.q:I // field@0023 +003576: 1233 |00d9: const/4 v3, #int 3 // #3 +003578: 3332 1800 |00da: if-ne v2, v3, 00f2 // +0018 +00357c: 0800 1c00 |00dc: move-object/from16 v0, v28 +003580: 5402 2200 |00de: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +003584: 5229 3c00 |00e0: iget v9, v2, Lcom/google/android/checkers/a;.c:I // field@003c +003588: 0800 1c00 
|00e2: move-object/from16 v0, v28 +00358c: 5402 2200 |00e4: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +003590: 542a 3b00 |00e6: iget-object v10, v2, Lcom/google/android/checkers/a;.b:[I // field@003b +003594: 0800 1c00 |00e8: move-object/from16 v0, v28 +003598: 5402 2200 |00ea: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022 +00359c: 542b 3a00 |00ec: iget-object v11, v2, Lcom/google/android/checkers/a;.a:[I // field@003a +0035a0: 1202 |00ee: const/4 v2, #int 0 // #0 +0035a2: 0128 |00ef: move v8, v2 +0035a4: 3498 dc03 |00f0: if-lt v8, v9, 04cc // +03dc +0035a8: 0800 1c00 |00f2: move-object/from16 v0, v28 +0035ac: 5402 0f00 |00f4: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f +0035b0: 3802 1700 |00f6: if-eqz v2, 010d // +0017 +0035b4: 0800 1c00 |00f8: move-object/from16 v0, v28 +0035b8: 5402 0f00 |00fa: iget-object v2, v0, Lcom/google/android/checkers/CheckersView;.N:Ljava/lang/String; // field@000f +0035bc: 0200 1200 |00fc: move/from16 v0, v18 +0035c0: 8203 |00fe: int-to-float v3, v0 +0035c2: da04 1402 |00ff: mul-int/lit8 v4, v20, #int 2 // #02 +0035c6: 9004 0411 |0101: add-int v4, v4, v17 +0035ca: 8244 |0103: int-to-float v4, v4 +0035cc: 0800 1c00 |0104: move-object/from16 v0, v28 +0035d0: 5405 1b00 |0106: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.i:Landroid/graphics/Paint; // field@001b +0035d4: 0800 1d00 |0108: move-object/from16 v0, v29 +0035d8: 6e55 2000 2043 |010a: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +0035de: 1202 |010d: const/4 v2, #int 0 // #0 +0035e0: 1203 |010e: const/4 v3, #int 0 // #0 +0035e2: 0800 1c00 |010f: move-object/from16 v0, v28 +0035e6: 5204 1000 |0111: iget v4, v0, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +0035ea: 2e03 0304 |0113: cmpg-float v3, v3, v4 +0035ee: 3c03 5100 |0115: if-gtz v3, 0166 // +0051 +0035f2: 0800 1c00 |0117: move-object/from16 v0, v28 +0035f6: 5203 1000 |0119: iget v3, v0, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +0035fa: 0200 1700 |011b: move/from16 v0, v23 +0035fe: 8204 |011d: int-to-float v4, v0 +003600: 2e03 0304 |011e: cmpg-float v3, v3, v4 +003604: 3b03 4600 |0120: if-gez v3, 0166 // +0046 +003608: 1203 |0122: const/4 v3, #int 0 // #0 +00360a: 0800 1c00 |0123: move-object/from16 v0, v28 +00360e: 5204 1100 |0125: iget v4, v0, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +003612: 2e03 0304 |0127: cmpg-float v3, v3, v4 +003616: 3c03 3d00 |0129: if-gtz v3, 0166 // +003d +00361a: 0800 1c00 |012b: move-object/from16 v0, v28 +00361e: 5203 1100 |012d: iget v3, v0, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +003622: 0200 1700 |012f: move/from16 v0, v23 +003626: 8204 |0131: int-to-float v4, v0 +003628: 2e03 0304 |0132: cmpg-float v3, v3, v4 +00362c: 3b03 3200 |0134: if-gez v3, 0166 // +0032 +003630: 0800 1c00 |0136: move-object/from16 v0, v28 +003634: 5203 1000 |0138: iget v3, v0, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +003638: 8733 |013a: float-to-int v3, v3 +00363a: 9303 0316 |013b: div-int v3, v3, v22 +00363e: 0800 1c00 |013d: move-object/from16 v0, v28 +003642: 5204 1100 |013f: iget v4, v0, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +003646: 8744 |0141: float-to-int v4, v4 +003648: 9304 0416 |0142: div-int v4, v4, v22 +00364c: 3a03 2200 
|0144: if-ltz v3, 0166 // +0022 +003650: 1305 0800 |0146: const/16 v5, #int 8 // #8 +003654: 3553 1e00 |0148: if-ge v3, v5, 0166 // +001e +003658: 3a04 1c00 |014a: if-ltz v4, 0166 // +001c +00365c: 1305 0800 |014c: const/16 v5, #int 8 // #8 +003660: 3554 1800 |014e: if-ge v4, v5, 0166 // +0018 +003664: 9202 1603 |0150: mul-int v2, v22, v3 +003668: 9206 1604 |0152: mul-int v6, v22, v4 +00366c: 8223 |0154: int-to-float v3, v2 +00366e: 8264 |0155: int-to-float v4, v6 +003670: 9002 0216 |0156: add-int v2, v2, v22 +003674: 8225 |0158: int-to-float v5, v2 +003676: 9002 0616 |0159: add-int v2, v6, v22 +00367a: 8226 |015b: int-to-float v6, v2 +00367c: 0800 1c00 |015c: move-object/from16 v0, v28 +003680: 5407 1c00 |015e: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.j:Landroid/graphics/Paint; // field@001c +003684: 0802 1d00 |0160: move-object/from16 v2, v29 +003688: 7406 1f00 0200 |0162: invoke-virtual/range {v2, v3, v4, v5, v6, v7}, Landroid/graphics/Canvas;.drawRect:(FFFFLandroid/graphics/Paint;)V // method@001f +00368e: 1212 |0165: const/4 v2, #int 1 // #1 +003690: 3902 2800 |0166: if-nez v2, 018e // +0028 +003694: 0800 1c00 |0168: move-object/from16 v0, v28 +003698: 5202 1000 |016a: iget v2, v0, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +00369c: 0800 1c00 |016c: move-object/from16 v0, v28 +0036a0: 5203 1100 |016e: iget v3, v0, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +0036a4: 1504 a040 |0170: const/high16 v4, #int 1084227584 // #40a0 +0036a8: 0800 1c00 |0172: move-object/from16 v0, v28 +0036ac: 5405 1500 |0174: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +0036b0: 0800 1d00 |0176: move-object/from16 v0, v29 +0036b4: 6e55 1c00 2043 |0178: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c +0036ba: 0800 1c00 |017b: move-object/from16 v0, v28 +0036be: 5202 1000 |017d: iget v2, v0, Lcom/google/android/checkers/CheckersView;.O:F // field@0010 +0036c2: 0800 1c00 |017f: move-object/from16 v0, v28 +0036c6: 5203 1100 |0181: iget v3, v0, Lcom/google/android/checkers/CheckersView;.P:F // field@0011 +0036ca: 1504 4040 |0183: const/high16 v4, #int 1077936128 // #4040 +0036ce: 0800 1c00 |0185: move-object/from16 v0, v28 +0036d2: 5405 1400 |0187: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +0036d6: 0800 1d00 |0189: move-object/from16 v0, v29 +0036da: 6e55 1c00 2043 |018b: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c +0036e0: 0800 1c00 |018e: move-object/from16 v0, v28 +0036e4: 5202 1e00 |0190: iget v2, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +0036e8: 1203 |0192: const/4 v3, #int 0 // #0 +0036ea: 2d02 0203 |0193: cmpl-float v2, v2, v3 +0036ee: 3d02 3100 |0195: if-lez v2, 01c6 // +0031 +0036f2: 0800 1c00 |0197: move-object/from16 v0, v28 +0036f6: 5202 1e00 |0199: iget v2, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +0036fa: 8922 |019b: float-to-double v2, v2 +0036fc: 1804 9a99 9999 9999 a93f |019c: const-wide v4, #double 0.050000 // #3fa999999999999a +003706: cc42 |01a1: sub-double/2addr v2, v4 +003708: 8c22 |01a2: double-to-float v2, v2 +00370a: 0800 1c00 |01a3: move-object/from16 v0, v28 +00370e: 5902 1e00 |01a5: iput v2, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +003712: 0800 1c00 |01a7: move-object/from16 v0, v28 +003716: 
5202 1e00 |01a9: iget v2, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +00371a: 1203 |01ab: const/4 v3, #int 0 // #0 +00371c: 2e02 0203 |01ac: cmpg-float v2, v2, v3 +003720: 3c02 1100 |01ae: if-gtz v2, 01bf // +0011 +003724: 1202 |01b0: const/4 v2, #int 0 // #0 +003726: 0800 1c00 |01b1: move-object/from16 v0, v28 +00372a: 5902 1e00 |01b3: iput v2, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +00372e: 1202 |01b5: const/4 v2, #int 0 // #0 +003730: 0800 1c00 |01b6: move-object/from16 v0, v28 +003734: 5902 1f00 |01b8: iput v2, v0, Lcom/google/android/checkers/CheckersView;.m:I // field@001f +003738: 1202 |01ba: const/4 v2, #int 0 // #0 +00373a: 0800 1c00 |01bb: move-object/from16 v0, v28 +00373e: 5902 2000 |01bd: iput v2, v0, Lcom/google/android/checkers/CheckersView;.n:I // field@0020 +003742: 1602 3200 |01bf: const-wide/16 v2, #int 50 // #32 +003746: 0800 1c00 |01c1: move-object/from16 v0, v28 +00374a: 6e30 6e00 2003 |01c3: invoke-virtual {v0, v2, v3}, Lcom/google/android/checkers/CheckersView;.postInvalidateDelayed:(J)V // method@006e +003750: 1e1c |01c6: monitor-exit v28 +003752: 0e00 |01c7: return-void +003754: 0142 |01c8: move v2, v4 +003756: 2900 46fe |01c9: goto/16 000f // -01ba +00375a: d803 1702 |01cb: add-int/lit8 v3, v23, #int 2 // #02 +00375e: da02 1402 |01cd: mul-int/lit8 v2, v20, #int 2 // #02 +003762: 9102 1702 |01cf: sub-int v2, v23, v2 +003766: 9102 0219 |01d1: sub-int v2, v2, v25 +00376a: 0211 0200 |01d3: move/from16 v17, v2 +00376e: 0212 0300 |01d5: move/from16 v18, v3 +003772: 0213 1400 |01d7: move/from16 v19, v20 +003776: 0215 0300 |01d9: move/from16 v21, v3 +00377a: 2900 53fe |01db: goto/16 002e // -01ad +00377e: 0800 1c00 |01dd: move-object/from16 v0, v28 +003782: 5408 1500 |01df: iget-object v8, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003786: 1203 |01e1: const/4 v3, #int 0 // #0 +003788: 1204 |01e2: const/4 v4, #int 0 // #0 +00378a: 0200 1700 |01e3: move/from16 v0, v23 +00378e: 8205 |01e5: int-to-float v5, v0 +003790: 0200 1700 |01e6: move/from16 v0, v23 +003794: 8206 |01e8: int-to-float v6, v0 +003796: 0800 1c00 |01e9: move-object/from16 v0, v28 +00379a: 5407 1700 |01eb: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.e:Landroid/graphics/Paint; // field@0017 +00379e: 0802 1d00 |01ed: move-object/from16 v2, v29 +0037a2: 7406 1f00 0200 |01ef: invoke-virtual/range {v2, v3, v4, v5, v6, v7}, Landroid/graphics/Canvas;.drawRect:(FFFFLandroid/graphics/Paint;)V // method@001f +0037a8: 0787 |01f2: move-object v7, v8 +0037aa: 2900 64fe |01f3: goto/16 0057 // -019c +0037ae: 920a 1609 |01f5: mul-int v10, v22, v9 +0037b2: dd02 0901 |01f7: and-int/lit8 v2, v9, #int 1 // #01 +0037b6: 0128 |01f9: move v8, v2 +0037b8: 1302 0800 |01fa: const/16 v2, #int 8 // #8 +0037bc: 3428 0700 |01fc: if-lt v8, v2, 0203 // +0007 +0037c0: d802 0901 |01fe: add-int/lit8 v2, v9, #int 1 // #01 +0037c4: 0129 |0200: move v9, v2 +0037c6: 2900 58fe |0201: goto/16 0059 // -01a8 +0037ca: 9202 1608 |0203: mul-int v2, v22, v8 +0037ce: 82a3 |0205: int-to-float v3, v10 +0037d0: 8224 |0206: int-to-float v4, v2 +0037d2: 9005 0a16 |0207: add-int v5, v10, v22 +0037d6: 8255 |0209: int-to-float v5, v5 +0037d8: 9002 0216 |020a: add-int v2, v2, v22 +0037dc: 8226 |020c: int-to-float v6, v2 +0037de: 0802 1d00 |020d: move-object/from16 v2, v29 +0037e2: 7406 1f00 0200 |020f: invoke-virtual/range {v2, v3, v4, v5, v6, v7}, Landroid/graphics/Canvas;.drawRect:(FFFFLandroid/graphics/Paint;)V // method@001f +0037e8: d802 0802 
|0212: add-int/lit8 v2, v8, #int 2 // #02 +0037ec: 0128 |0214: move v8, v2 +0037ee: 28e5 |0215: goto 01fa // -001b +0037f0: d902 1001 |0216: rsub-int/lit8 v2, v16, #int 1 // #01 +0037f4: dd02 0201 |0218: and-int/lit8 v2, v2, #int 1 // #01 +0037f8: 012e |021a: move v14, v2 +0037fa: 013f |021b: move v15, v3 +0037fc: 1302 0800 |021c: const/16 v2, #int 8 // #8 +003800: 342e 0900 |021e: if-lt v14, v2, 0227 // +0009 +003804: d802 1001 |0220: add-int/lit8 v2, v16, #int 1 // #01 +003808: 0210 0200 |0222: move/from16 v16, v2 +00380c: 01f3 |0224: move v3, v15 +00380e: 2900 40fe |0225: goto/16 0065 // -01c0 +003812: 0800 1c00 |0227: move-object/from16 v0, v28 +003816: 5502 0400 |0229: iget-boolean v2, v0, Lcom/google/android/checkers/CheckersView;.C:Z // field@0004 +00381a: 3802 5d00 |022b: if-eqz v2, 0288 // +005d +00381e: d902 0e07 |022d: rsub-int/lit8 v2, v14, #int 7 // #07 +003822: 9203 1602 |022f: mul-int v3, v22, v2 +003826: d902 1007 |0231: rsub-int/lit8 v2, v16, #int 7 // #07 +00382a: 9202 0216 |0233: mul-int v2, v2, v22 +00382e: 0135 |0235: move v5, v3 +003830: 901a 1805 |0236: add-int v26, v24, v5 +003834: 901b 1802 |0238: add-int v27, v24, v2 +003838: 0800 1c00 |023a: move-object/from16 v0, v28 +00383c: 5203 2800 |023c: iget v3, v0, Lcom/google/android/checkers/CheckersView;.v:I // field@0028 +003840: b5f3 |023e: and-int/2addr v3, v15 +003842: 3803 4f00 |023f: if-eqz v3, 028e // +004f +003846: d803 0501 |0241: add-int/lit8 v3, v5, #int 1 // #01 +00384a: 8233 |0243: int-to-float v3, v3 +00384c: d804 0201 |0244: add-int/lit8 v4, v2, #int 1 // #01 +003850: 8244 |0246: int-to-float v4, v4 +003852: 9005 0516 |0247: add-int v5, v5, v22 +003856: d805 05ff |0249: add-int/lit8 v5, v5, #int -1 // #ff +00385a: 8255 |024b: int-to-float v5, v5 +00385c: 9002 0216 |024c: add-int v2, v2, v22 +003860: d802 02ff |024e: add-int/lit8 v2, v2, #int -1 // #ff +003864: 8226 |0250: int-to-float v6, v2 +003866: 0800 1c00 |0251: move-object/from16 v0, v28 +00386a: 5407 1800 |0253: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.f:Landroid/graphics/Paint; // field@0018 +00386e: 0802 1d00 |0255: move-object/from16 v2, v29 +003872: 7406 1f00 0200 |0257: invoke-virtual/range {v2, v3, v4, v5, v6, v7}, Landroid/graphics/Canvas;.drawRect:(FFFFLandroid/graphics/Paint;)V // method@001f +003878: 0800 1c00 |025a: move-object/from16 v0, v28 +00387c: 5202 2400 |025c: iget v2, v0, Lcom/google/android/checkers/CheckersView;.r:I // field@0024 +003880: b5f2 |025e: and-int/2addr v2, v15 +003882: 3802 7a00 |025f: if-eqz v2, 02d9 // +007a +003886: 0800 1c00 |0261: move-object/from16 v0, v28 +00388a: 5407 1400 |0263: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +00388e: 0800 1c00 |0265: move-object/from16 v0, v28 +003892: 5408 1500 |0267: iget-object v8, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003896: 1209 |0269: const/4 v9, #int 0 // #0 +003898: 0802 1d00 |026a: move-object/from16 v2, v29 +00389c: 0203 1a00 |026c: move/from16 v3, v26 +0038a0: 0204 1b00 |026e: move/from16 v4, v27 +0038a4: 0205 1800 |0270: move/from16 v5, v24 +0038a8: 0206 1900 |0272: move/from16 v6, v25 +0038ac: 7708 5500 0200 |0274: invoke-static/range {v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V // method@0055 +0038b2: d802 0d01 |0277: add-int/lit8 v2, v13, #int 1 // #01 +0038b6: 01b3 |0279: move v3, v11 +0038b8: 01c4 |027a: move v4, 
v12 +0038ba: 0125 |027b: move v5, v2 +0038bc: 01a2 |027c: move v2, v10 +0038be: e007 0f01 |027d: shl-int/lit8 v7, v15, #int 1 // #01 +0038c2: d806 0e02 |027f: add-int/lit8 v6, v14, #int 2 // #02 +0038c6: 016e |0281: move v14, v6 +0038c8: 012a |0282: move v10, v2 +0038ca: 013b |0283: move v11, v3 +0038cc: 014c |0284: move v12, v4 +0038ce: 015d |0285: move v13, v5 +0038d0: 017f |0286: move v15, v7 +0038d2: 2895 |0287: goto 021c // -006b +0038d4: 9203 160e |0288: mul-int v3, v22, v14 +0038d8: 9202 1610 |028a: mul-int v2, v22, v16 +0038dc: 0135 |028c: move v5, v3 +0038de: 28a9 |028d: goto 0236 // -0057 +0038e0: 0800 1c00 |028e: move-object/from16 v0, v28 +0038e4: 5203 2900 |0290: iget v3, v0, Lcom/google/android/checkers/CheckersView;.w:I // field@0029 +0038e8: b5f3 |0292: and-int/2addr v3, v15 +0038ea: 3803 1f00 |0293: if-eqz v3, 02b2 // +001f +0038ee: d803 0501 |0295: add-int/lit8 v3, v5, #int 1 // #01 +0038f2: 8233 |0297: int-to-float v3, v3 +0038f4: d804 0201 |0298: add-int/lit8 v4, v2, #int 1 // #01 +0038f8: 8244 |029a: int-to-float v4, v4 +0038fa: 9005 0516 |029b: add-int v5, v5, v22 +0038fe: d805 05ff |029d: add-int/lit8 v5, v5, #int -1 // #ff +003902: 8255 |029f: int-to-float v5, v5 +003904: 9002 0216 |02a0: add-int v2, v2, v22 +003908: d802 02ff |02a2: add-int/lit8 v2, v2, #int -1 // #ff +00390c: 8226 |02a4: int-to-float v6, v2 +00390e: 0800 1c00 |02a5: move-object/from16 v0, v28 +003912: 5407 1900 |02a7: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.g:Landroid/graphics/Paint; // field@0019 +003916: 0802 1d00 |02a9: move-object/from16 v2, v29 +00391a: 7406 1f00 0200 |02ab: invoke-virtual/range {v2, v3, v4, v5, v6, v7}, Landroid/graphics/Canvas;.drawRect:(FFFFLandroid/graphics/Paint;)V // method@001f +003920: 28ac |02ae: goto 025a // -0054 +003922: 0d02 |02af: move-exception v2 +003924: 1e1c |02b0: monitor-exit v28 +003926: 2702 |02b1: throw v2 +003928: 0800 1c00 |02b2: move-object/from16 v0, v28 +00392c: 5503 0200 |02b4: iget-boolean v3, v0, Lcom/google/android/checkers/CheckersView;.A:Z // field@0002 +003930: 3803 a4ff |02b6: if-eqz v3, 025a // -005c +003934: 0800 1c00 |02b8: move-object/from16 v0, v28 +003938: 5203 2a00 |02ba: iget v3, v0, Lcom/google/android/checkers/CheckersView;.x:I // field@002a +00393c: b5f3 |02bc: and-int/2addr v3, v15 +00393e: 3803 9dff |02bd: if-eqz v3, 025a // -0063 +003942: d803 0501 |02bf: add-int/lit8 v3, v5, #int 1 // #01 +003946: 8233 |02c1: int-to-float v3, v3 +003948: d804 0201 |02c2: add-int/lit8 v4, v2, #int 1 // #01 +00394c: 8244 |02c4: int-to-float v4, v4 +00394e: 9005 0516 |02c5: add-int v5, v5, v22 +003952: d805 05ff |02c7: add-int/lit8 v5, v5, #int -1 // #ff +003956: 8255 |02c9: int-to-float v5, v5 +003958: 9002 0216 |02ca: add-int v2, v2, v22 +00395c: d802 02ff |02cc: add-int/lit8 v2, v2, #int -1 // #ff +003960: 8226 |02ce: int-to-float v6, v2 +003962: 0800 1c00 |02cf: move-object/from16 v0, v28 +003966: 5407 1b00 |02d1: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.i:Landroid/graphics/Paint; // field@001b +00396a: 0802 1d00 |02d3: move-object/from16 v2, v29 +00396e: 7406 1f00 0200 |02d5: invoke-virtual/range {v2, v3, v4, v5, v6, v7}, Landroid/graphics/Canvas;.drawRect:(FFFFLandroid/graphics/Paint;)V // method@001f +003974: 2882 |02d8: goto 025a // -007e +003976: 0800 1c00 |02d9: move-object/from16 v0, v28 +00397a: 5202 2500 |02db: iget v2, v0, Lcom/google/android/checkers/CheckersView;.s:I // field@0025 +00397e: b5f2 |02dd: and-int/2addr v2, v15 +003980: 3802 1f00 |02de: if-eqz v2, 02fd // +001f +003984: 
0800 1c00 |02e0: move-object/from16 v0, v28 +003988: 5407 1500 |02e2: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +00398c: 0800 1c00 |02e4: move-object/from16 v0, v28 +003990: 5408 1400 |02e6: iget-object v8, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +003994: 1209 |02e8: const/4 v9, #int 0 // #0 +003996: 0802 1d00 |02e9: move-object/from16 v2, v29 +00399a: 0203 1a00 |02eb: move/from16 v3, v26 +00399e: 0204 1b00 |02ed: move/from16 v4, v27 +0039a2: 0205 1800 |02ef: move/from16 v5, v24 +0039a6: 0206 1900 |02f1: move/from16 v6, v25 +0039aa: 7708 5500 0200 |02f3: invoke-static/range {v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V // method@0055 +0039b0: d802 0b01 |02f6: add-int/lit8 v2, v11, #int 1 // #01 +0039b4: 0123 |02f8: move v3, v2 +0039b6: 01c4 |02f9: move v4, v12 +0039b8: 01d5 |02fa: move v5, v13 +0039ba: 01a2 |02fb: move v2, v10 +0039bc: 2881 |02fc: goto 027d // -007f +0039be: 0800 1c00 |02fd: move-object/from16 v0, v28 +0039c2: 5202 2600 |02ff: iget v2, v0, Lcom/google/android/checkers/CheckersView;.t:I // field@0026 +0039c6: b5f2 |0301: and-int/2addr v2, v15 +0039c8: 3802 2000 |0302: if-eqz v2, 0322 // +0020 +0039cc: 0800 1c00 |0304: move-object/from16 v0, v28 +0039d0: 5407 1400 |0306: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +0039d4: 0800 1c00 |0308: move-object/from16 v0, v28 +0039d8: 5408 1500 |030a: iget-object v8, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +0039dc: 1219 |030c: const/4 v9, #int 1 // #1 +0039de: 0802 1d00 |030d: move-object/from16 v2, v29 +0039e2: 0203 1a00 |030f: move/from16 v3, v26 +0039e6: 0204 1b00 |0311: move/from16 v4, v27 +0039ea: 0205 1800 |0313: move/from16 v5, v24 +0039ee: 0206 1900 |0315: move/from16 v6, v25 +0039f2: 7708 5500 0200 |0317: invoke-static/range {v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V // method@0055 +0039f8: d802 0c01 |031a: add-int/lit8 v2, v12, #int 1 // #01 +0039fc: 01b3 |031c: move v3, v11 +0039fe: 0124 |031d: move v4, v2 +003a00: 01d5 |031e: move v5, v13 +003a02: 01a2 |031f: move v2, v10 +003a04: 2900 5dff |0320: goto/16 027d // -00a3 +003a08: 0800 1c00 |0322: move-object/from16 v0, v28 +003a0c: 5202 2700 |0324: iget v2, v0, Lcom/google/android/checkers/CheckersView;.u:I // field@0027 +003a10: b5f2 |0326: and-int/2addr v2, v15 +003a12: 3802 1f00 |0327: if-eqz v2, 0346 // +001f +003a16: 0800 1c00 |0329: move-object/from16 v0, v28 +003a1a: 5407 1500 |032b: iget-object v7, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003a1e: 0800 1c00 |032d: move-object/from16 v0, v28 +003a22: 5408 1400 |032f: iget-object v8, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +003a26: 1219 |0331: const/4 v9, #int 1 // #1 +003a28: 0802 1d00 |0332: move-object/from16 v2, v29 +003a2c: 0203 1a00 |0334: move/from16 v3, v26 +003a30: 0204 1b00 |0336: move/from16 v4, v27 +003a34: 0205 1800 |0338: move/from16 v5, v24 +003a38: 0206 1900 |033a: move/from16 v6, v25 +003a3c: 7708 5500 0200 |033c: invoke-static/range {v2, v3, v4, v5, v6, v7, v8, v9}, 
Lcom/google/android/checkers/CheckersView;.a:(Landroid/graphics/Canvas;IIIILandroid/graphics/Paint;Landroid/graphics/Paint;Z)V // method@0055 +003a42: d802 0a01 |033f: add-int/lit8 v2, v10, #int 1 // #01 +003a46: 01b3 |0341: move v3, v11 +003a48: 01c4 |0342: move v4, v12 +003a4a: 01d5 |0343: move v5, v13 +003a4c: 2900 39ff |0344: goto/16 027d // -00c7 +003a50: 0800 1c00 |0346: move-object/from16 v0, v28 +003a54: 5202 2000 |0348: iget v2, v0, Lcom/google/android/checkers/CheckersView;.n:I // field@0020 +003a58: b5f2 |034a: and-int/2addr v2, v15 +003a5a: 3802 3600 |034b: if-eqz v2, 0381 // +0036 +003a5e: 0200 1a00 |034d: move/from16 v0, v26 +003a62: 8202 |034f: int-to-float v2, v0 +003a64: 0200 1b00 |0350: move/from16 v0, v27 +003a68: 8203 |0352: int-to-float v3, v0 +003a6a: d804 18fe |0353: add-int/lit8 v4, v24, #int -2 // #fe +003a6e: 8244 |0355: int-to-float v4, v4 +003a70: 0800 1c00 |0356: move-object/from16 v0, v28 +003a74: 5205 1e00 |0358: iget v5, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +003a78: c854 |035a: mul-float/2addr v4, v5 +003a7a: 0800 1c00 |035b: move-object/from16 v0, v28 +003a7e: 5405 1500 |035d: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003a82: 0800 1d00 |035f: move-object/from16 v0, v29 +003a86: 6e55 1c00 2043 |0361: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c +003a8c: 0200 1a00 |0364: move/from16 v0, v26 +003a90: 8202 |0366: int-to-float v2, v0 +003a92: 0200 1b00 |0367: move/from16 v0, v27 +003a96: 8203 |0369: int-to-float v3, v0 +003a98: d804 18fc |036a: add-int/lit8 v4, v24, #int -4 // #fc +003a9c: 8244 |036c: int-to-float v4, v4 +003a9e: 0800 1c00 |036d: move-object/from16 v0, v28 +003aa2: 5205 1e00 |036f: iget v5, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +003aa6: c854 |0371: mul-float/2addr v4, v5 +003aa8: 0800 1c00 |0372: move-object/from16 v0, v28 +003aac: 5405 1400 |0374: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +003ab0: 0800 1d00 |0376: move-object/from16 v0, v29 +003ab4: 6e55 1c00 2043 |0378: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c +003aba: 01a2 |037b: move v2, v10 +003abc: 01b3 |037c: move v3, v11 +003abe: 01c4 |037d: move v4, v12 +003ac0: 01d5 |037e: move v5, v13 +003ac2: 2900 fefe |037f: goto/16 027d // -0102 +003ac6: 0800 1c00 |0381: move-object/from16 v0, v28 +003aca: 5202 1f00 |0383: iget v2, v0, Lcom/google/android/checkers/CheckersView;.m:I // field@001f +003ace: b5f2 |0385: and-int/2addr v2, v15 +003ad0: 3802 3000 |0386: if-eqz v2, 03b6 // +0030 +003ad4: 0200 1a00 |0388: move/from16 v0, v26 +003ad8: 8202 |038a: int-to-float v2, v0 +003ada: 0200 1b00 |038b: move/from16 v0, v27 +003ade: 8203 |038d: int-to-float v3, v0 +003ae0: d804 18fe |038e: add-int/lit8 v4, v24, #int -2 // #fe +003ae4: 8244 |0390: int-to-float v4, v4 +003ae6: 0800 1c00 |0391: move-object/from16 v0, v28 +003aea: 5205 1e00 |0393: iget v5, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +003aee: c854 |0395: mul-float/2addr v4, v5 +003af0: 0800 1c00 |0396: move-object/from16 v0, v28 +003af4: 5405 1400 |0398: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014 +003af8: 0800 1d00 |039a: move-object/from16 v0, v29 +003afc: 6e55 1c00 2043 |039c: invoke-virtual {v0, v2, v3, v4, v5}, 
Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c +003b02: 0200 1a00 |039f: move/from16 v0, v26 +003b06: 8202 |03a1: int-to-float v2, v0 +003b08: 0200 1b00 |03a2: move/from16 v0, v27 +003b0c: 8203 |03a4: int-to-float v3, v0 +003b0e: d804 18fc |03a5: add-int/lit8 v4, v24, #int -4 // #fc +003b12: 8244 |03a7: int-to-float v4, v4 +003b14: 0800 1c00 |03a8: move-object/from16 v0, v28 +003b18: 5205 1e00 |03aa: iget v5, v0, Lcom/google/android/checkers/CheckersView;.l:F // field@001e +003b1c: c854 |03ac: mul-float/2addr v4, v5 +003b1e: 0800 1c00 |03ad: move-object/from16 v0, v28 +003b22: 5405 1500 |03af: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003b26: 0800 1d00 |03b1: move-object/from16 v0, v29 +003b2a: 6e55 1c00 2043 |03b3: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawCircle:(FFFLandroid/graphics/Paint;)V // method@001c +003b30: 01a2 |03b6: move v2, v10 +003b32: 01b3 |03b7: move v3, v11 +003b34: 01c4 |03b8: move v4, v12 +003b36: 01d5 |03b9: move v5, v13 +003b38: 2900 c3fe |03ba: goto/16 027d // -013d +003b3c: 1a02 9c00 |03bc: const-string v2, "White" // string@009c +003b40: 0200 1500 |03be: move/from16 v0, v21 +003b44: 8203 |03c0: int-to-float v3, v0 +003b46: 0200 1300 |03c1: move/from16 v0, v19 +003b4a: 8204 |03c3: int-to-float v4, v0 +003b4c: 0800 1c00 |03c4: move-object/from16 v0, v28 +003b50: 5405 1500 |03c6: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003b54: 0800 1d00 |03c8: move-object/from16 v0, v29 +003b58: 6e55 2000 2043 |03ca: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003b5e: 1a02 0b00 |03cd: const-string v2, "Black" // string@000b +003b62: 0200 1500 |03cf: move/from16 v0, v21 +003b66: 8203 |03d1: int-to-float v3, v0 +003b68: 9004 1314 |03d2: add-int v4, v19, v20 +003b6c: 8244 |03d4: int-to-float v4, v4 +003b6e: 0800 1c00 |03d5: move-object/from16 v0, v28 +003b72: 5405 1500 |03d7: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003b76: 0800 1d00 |03d9: move-object/from16 v0, v29 +003b7a: 6e55 2000 2043 |03db: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003b80: 1a02 7200 |03de: const-string v2, "Moves" // string@0072 +003b84: 0200 1500 |03e0: move/from16 v0, v21 +003b88: 8203 |03e2: int-to-float v3, v0 +003b8a: da04 1402 |03e3: mul-int/lit8 v4, v20, #int 2 // #02 +003b8e: 9004 0413 |03e5: add-int v4, v4, v19 +003b92: 8244 |03e7: int-to-float v4, v4 +003b94: 0800 1c00 |03e8: move-object/from16 v0, v28 +003b98: 5405 1500 |03ea: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003b9c: 0800 1d00 |03ec: move-object/from16 v0, v29 +003ba0: 6e55 2000 2043 |03ee: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003ba6: 2202 3000 |03f1: new-instance v2, Ljava/lang/StringBuilder; // type@0030 +003baa: 1a03 0300 |03f3: const-string v3, ": " // string@0003 +003bae: 7020 a600 3200 |03f5: invoke-direct {v2, v3}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6 +003bb4: 6e20 a700 d200 |03f8: invoke-virtual {v2, v13}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7 +003bba: 0c02 |03fb: 
move-result-object v2 +003bbc: 1a03 0200 |03fc: const-string v3, "+" // string@0002 +003bc0: 6e20 a900 3200 |03fe: invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@00a9 +003bc6: 0c02 |0401: move-result-object v2 +003bc8: 6e20 a700 c200 |0402: invoke-virtual {v2, v12}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7 +003bce: 0c02 |0405: move-result-object v2 +003bd0: 6e10 aa00 0200 |0406: invoke-virtual {v2}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa +003bd6: 0c02 |0409: move-result-object v2 +003bd8: da03 1403 |040a: mul-int/lit8 v3, v20, #int 3 // #03 +003bdc: 9003 0315 |040c: add-int v3, v3, v21 +003be0: 8233 |040e: int-to-float v3, v3 +003be2: 0200 1300 |040f: move/from16 v0, v19 +003be6: 8204 |0411: int-to-float v4, v0 +003be8: 0800 1c00 |0412: move-object/from16 v0, v28 +003bec: 5405 1500 |0414: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003bf0: 0800 1d00 |0416: move-object/from16 v0, v29 +003bf4: 6e55 2000 2043 |0418: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003bfa: 2202 3000 |041b: new-instance v2, Ljava/lang/StringBuilder; // type@0030 +003bfe: 1a03 0300 |041d: const-string v3, ": " // string@0003 +003c02: 7020 a600 3200 |041f: invoke-direct {v2, v3}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6 +003c08: 6e20 a700 b200 |0422: invoke-virtual {v2, v11}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7 +003c0e: 0c02 |0425: move-result-object v2 +003c10: 1a03 0200 |0426: const-string v3, "+" // string@0002 +003c14: 6e20 a900 3200 |0428: invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@00a9 +003c1a: 0c02 |042b: move-result-object v2 +003c1c: 6e20 a700 a200 |042c: invoke-virtual {v2, v10}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7 +003c22: 0c02 |042f: move-result-object v2 +003c24: 6e10 aa00 0200 |0430: invoke-virtual {v2}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa +003c2a: 0c02 |0433: move-result-object v2 +003c2c: da03 1403 |0434: mul-int/lit8 v3, v20, #int 3 // #03 +003c30: 9003 0315 |0436: add-int v3, v3, v21 +003c34: 8233 |0438: int-to-float v3, v3 +003c36: 9004 1314 |0439: add-int v4, v19, v20 +003c3a: 8244 |043b: int-to-float v4, v4 +003c3c: 0800 1c00 |043c: move-object/from16 v0, v28 +003c40: 5405 1500 |043e: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003c44: 0800 1d00 |0440: move-object/from16 v0, v29 +003c48: 6e55 2000 2043 |0442: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003c4e: 2202 3000 |0445: new-instance v2, Ljava/lang/StringBuilder; // type@0030 +003c52: 1a03 0300 |0447: const-string v3, ": " // string@0003 +003c56: 7020 a600 3200 |0449: invoke-direct {v2, v3}, Ljava/lang/StringBuilder;.<init>:(Ljava/lang/String;)V // method@00a6 +003c5c: 0800 1c00 |044c: move-object/from16 v0, v28 +003c60: 5203 0e00 |044e: iget v3, v0, Lcom/google/android/checkers/CheckersView;.M:I // field@000e +003c64: 6e20 a700 3200 |0450: invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;.append:(I)Ljava/lang/StringBuilder; // method@00a7 +003c6a: 0c02 |0453: move-result-object v2 +003c6c: 6e10 
aa00 0200 |0454: invoke-virtual {v2}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@00aa +003c72: 0c02 |0457: move-result-object v2 +003c74: da03 1403 |0458: mul-int/lit8 v3, v20, #int 3 // #03 +003c78: 9003 0315 |045a: add-int v3, v3, v21 +003c7c: 8233 |045c: int-to-float v3, v3 +003c7e: da04 1402 |045d: mul-int/lit8 v4, v20, #int 2 // #02 +003c82: 9004 0413 |045f: add-int v4, v4, v19 +003c86: 8244 |0461: int-to-float v4, v4 +003c88: 0800 1c00 |0462: move-object/from16 v0, v28 +003c8c: 5405 1500 |0464: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003c90: 0800 1d00 |0466: move-object/from16 v0, v29 +003c94: 6e55 2000 2043 |0468: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003c9a: 2900 3ffc |046b: goto/16 00aa // -03c1 +003c9e: 1a02 0a00 |046d: const-string v2, "BLACK'S MOVE?" // string@000a +003ca2: 0200 1200 |046f: move/from16 v0, v18 +003ca6: 8203 |0471: int-to-float v3, v0 +003ca8: 9004 1114 |0472: add-int v4, v17, v20 +003cac: 8244 |0474: int-to-float v4, v4 +003cae: 0800 1c00 |0475: move-object/from16 v0, v28 +003cb2: 5405 1500 |0477: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003cb6: 0800 1d00 |0479: move-object/from16 v0, v29 +003cba: 6e55 2000 2043 |047b: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003cc0: 2900 4afc |047e: goto/16 00c8 // -03b6 +003cc4: 1a02 9a00 |0480: const-string v2, "WHITE'S MOVE?" // string@009a +003cc8: 0200 1200 |0482: move/from16 v0, v18 +003ccc: 8203 |0484: int-to-float v3, v0 +003cce: 9004 1114 |0485: add-int v4, v17, v20 +003cd2: 8244 |0487: int-to-float v4, v4 +003cd4: 0800 1c00 |0488: move-object/from16 v0, v28 +003cd8: 5405 1500 |048a: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003cdc: 0800 1d00 |048c: move-object/from16 v0, v29 +003ce0: 6e55 2000 2043 |048e: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003ce6: 2900 37fc |0491: goto/16 00c8 // -03c9 +003cea: 1a02 8100 |0493: const-string v2, "THINKING...." 
// string@0081 +003cee: 0200 1200 |0495: move/from16 v0, v18 +003cf2: 8203 |0497: int-to-float v3, v0 +003cf4: 9004 1114 |0498: add-int v4, v17, v20 +003cf8: 8244 |049a: int-to-float v4, v4 +003cfa: 0800 1c00 |049b: move-object/from16 v0, v28 +003cfe: 5405 1500 |049d: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003d02: 0800 1d00 |049f: move-object/from16 v0, v29 +003d06: 6e55 2000 2043 |04a1: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003d0c: 2900 24fc |04a4: goto/16 00c8 // -03dc +003d10: 1a02 9900 |04a6: const-string v2, "WHITE WINS" // string@0099 +003d14: 0200 1200 |04a8: move/from16 v0, v18 +003d18: 8203 |04aa: int-to-float v3, v0 +003d1a: 9004 1114 |04ab: add-int v4, v17, v20 +003d1e: 8244 |04ad: int-to-float v4, v4 +003d20: 0800 1c00 |04ae: move-object/from16 v0, v28 +003d24: 5405 1500 |04b0: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003d28: 0800 1d00 |04b2: move-object/from16 v0, v29 +003d2c: 6e55 2000 2043 |04b4: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003d32: 2900 11fc |04b7: goto/16 00c8 // -03ef +003d36: 1a02 0900 |04b9: const-string v2, "BLACK WINS" // string@0009 +003d3a: 0200 1200 |04bb: move/from16 v0, v18 +003d3e: 8203 |04bd: int-to-float v3, v0 +003d40: 9004 1114 |04be: add-int v4, v17, v20 +003d44: 8244 |04c0: int-to-float v4, v4 +003d46: 0800 1c00 |04c1: move-object/from16 v0, v28 +003d4a: 5405 1500 |04c3: iget-object v5, v0, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015 +003d4e: 0800 1d00 |04c5: move-object/from16 v0, v29 +003d52: 6e55 2000 2043 |04c7: invoke-virtual {v0, v2, v3, v4, v5}, Landroid/graphics/Canvas;.drawText:(Ljava/lang/String;FFLandroid/graphics/Paint;)V // method@0020 +003d58: 2900 fefb |04ca: goto/16 00c8 // -0402 +003d5c: 4404 0a08 |04cc: aget v4, v10, v8 +003d60: 4405 0b08 |04ce: aget v5, v11, v8 +003d64: 0802 1c00 |04d0: move-object/from16 v2, v28 +003d68: 0803 1d00 |04d2: move-object/from16 v3, v29 +003d6c: 0206 1600 |04d4: move/from16 v6, v22 +003d70: 0207 1800 |04d6: move/from16 v7, v24 +003d74: 7606 5f00 0200 |04d8: invoke-direct/range {v2, v3, v4, v5, v6, v7}, Lcom/google/android/checkers/CheckersView;.b:(Landroid/graphics/Canvas;IIII)V // method@005f +003d7a: d802 0801 |04db: add-int/lit8 v2, v8, #int 1 // #01 +003d7e: 0128 |04dd: move v8, v2 +003d80: 2900 12fc |04de: goto/16 00f0 // -03ee +003d84: 0001 0600 0100 0000 a803 0000 ce03 ... 
|04e0: packed-switch-data (16 units)
+ catches : 3
+ 0x0001 - 0x01c6
+ <any> -> 0x02af
+ 0x01dd - 0x02ae
+ <any> -> 0x02af
+ 0x02b2 - 0x04db
+ <any> -> 0x02af
+ positions :
+ locals :
+
+ #12 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'e'
+ type : '(Z)Z'
+ access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED)
+ code -
+ registers : 3
+ ins : 2
+ outs : 0
+ insns size : 19 16-bit code units
+003dc0: |[003dc0] com.google.android.checkers.CheckersView.e:(Z)Z
+003dd0: 1d01 |0000: monitor-enter v1
+003dd2: 3802 0900 |0001: if-eqz v2, 000a // +0009
+003dd6: 5510 0600 |0003: iget-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006
+003dda: 3800 0900 |0005: if-eqz v0, 000e // +0009
+003dde: 1200 |0007: const/4 v0, #int 0 // #0
+003de0: 5c10 0600 |0008: iput-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006
+003de4: 5510 0600 |000a: iget-boolean v0, v1, Lcom/google/android/checkers/CheckersView;.E:Z // field@0006
+003de8: 1e01 |000c: monitor-exit v1
+003dea: 0f00 |000d: return v0
+003dec: 1210 |000e: const/4 v0, #int 1 // #1
+003dee: 28f9 |000f: goto 0008 // -0007
+003df0: 0d00 |0010: move-exception v0
+003df2: 1e01 |0011: monitor-exit v1
+003df4: 2700 |0012: throw v0
+ catches : 1
+ 0x0003 - 0x000c
+ <any> -> 0x0010
+ positions :
+ locals :
+
+ #13 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'onSizeChanged'
+ type : '(IIII)V'
+ access : 0x20004 (PROTECTED DECLARED_SYNCHRONIZED)
+ code -
+ registers : 8
+ ins : 5
+ outs : 2
+ insns size : 52 16-bit code units
+003e04: |[003e04] com.google.android.checkers.CheckersView.onSizeChanged:(IIII)V
+003e14: 1d03 |0000: monitor-enter v3
+003e16: 3554 2c00 |0001: if-ge v4, v5, 002d // +002c
+003e1a: 0140 |0003: move v0, v4
+003e1c: db01 0018 |0004: div-int/lit8 v1, v0, #int 24 // #18
+003e20: 8211 |0006: int-to-float v1, v1
+003e22: 3754 2b00 |0007: if-le v4, v5, 0032 // +002b
+003e26: 9100 0400 |0009: sub-int v0, v4, v0
+003e2a: db00 000a |000b: div-int/lit8 v0, v0, #int 10 // #0a
+003e2e: 8200 |000d: int-to-float v0, v0
+003e30: 2e02 0001 |000e: cmpg-float v2, v0, v1
+003e34: 3b02 2200 |0010: if-gez v2, 0032 // +0022
+003e38: 5431 1400 |0012: iget-object v1, v3, Lcom/google/android/checkers/CheckersView;.b:Landroid/graphics/Paint; // field@0014
+003e3c: 6e20 2600 0100 |0014: invoke-virtual {v1, v0}, Landroid/graphics/Paint;.setTextSize:(F)V // method@0026
+003e42: 5431 1500 |0017: iget-object v1, v3, Lcom/google/android/checkers/CheckersView;.c:Landroid/graphics/Paint; // field@0015
+003e46: 6e20 2600 0100 |0019: invoke-virtual {v1, v0}, Landroid/graphics/Paint;.setTextSize:(F)V // method@0026
+003e4c: 5431 1b00 |001c: iget-object v1, v3, Lcom/google/android/checkers/CheckersView;.i:Landroid/graphics/Paint; // field@001b
+003e50: 6e20 2600 0100 |001e: invoke-virtual {v1, v0}, Landroid/graphics/Paint;.setTextSize:(F)V // method@0026
+003e56: 5431 1800 |0021: iget-object v1, v3, Lcom/google/android/checkers/CheckersView;.f:Landroid/graphics/Paint; // field@0018
+003e5a: 6e20 2600 0100 |0023: invoke-virtual {v1, v0}, Landroid/graphics/Paint;.setTextSize:(F)V // method@0026
+003e60: 8700 |0026: float-to-int v0, v0
+003e62: d800 0001 |0027: add-int/lit8 v0, v0, #int 1 // #01
+003e66: 5930 1d00 |0029: iput v0, v3, Lcom/google/android/checkers/CheckersView;.k:I // field@001d
+003e6a: 1e03 |002b: monitor-exit v3
+003e6c: 0e00 |002c: return-void
+003e6e: 0150 |002d: move v0, v5
+003e70: 28d6 |002e: goto 0004 // -002a
+003e72: 0d00 |002f: move-exception v0
+003e74: 1e03 |0030: monitor-exit v3
+003e76: 2700 |0031: throw v0
+003e78: 0110 |0032: move v0, v1
+003e7a: 28df |0033: goto 0012 // -0021
+ catches : 1
+ 0x0004 - 0x002b
+ <any> -> 0x002f
+ positions :
+ locals :
+
+ #14 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'onTouchEvent'
+ type : '(Landroid/view/MotionEvent;)Z'
+ access : 0x0001 (PUBLIC)
+ code -
+ registers : 8
+ ins : 2
+ outs : 6
+ insns size : 41 16-bit code units
+003e88: |[003e88] com.google.android.checkers.CheckersView.onTouchEvent:(Landroid/view/MotionEvent;)Z
+003e98: 1201 |0000: const/4 v1, #int 0 // #0
+003e9a: 6e10 3100 0700 |0001: invoke-virtual {v7}, Landroid/view/MotionEvent;.getAction:()I // method@0031
+003ea0: 0a00 |0004: move-result v0
+003ea2: 3900 1f00 |0005: if-nez v0, 0024 // +001f
+003ea6: 6e10 3200 0700 |0007: invoke-virtual {v7}, Landroid/view/MotionEvent;.getX:()F // method@0032
+003eac: 0a00 |000a: move-result v0
+003eae: 6e10 3300 0700 |000b: invoke-virtual {v7}, Landroid/view/MotionEvent;.getY:()F // method@0033
+003eb4: 0a02 |000e: move-result v2
+003eb6: 7030 5c00 0602 |000f: invoke-direct {v6, v0, v2}, Lcom/google/android/checkers/CheckersView;.b:(FF)I // method@005c
+003ebc: 0a02 |0012: move-result v2
+003ebe: 3802 1100 |0013: if-eqz v2, 0024 // +0011
+003ec2: 0760 |0015: move-object v0, v6
+003ec4: 0113 |0016: move v3, v1
+003ec6: 0114 |0017: move v4, v1
+003ec8: 0115 |0018: move v5, v1
+003eca: 7606 5b00 0000 |0019: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/CheckersView;.a:(ZIIII)Z // method@005b
+003ed0: 0a00 |001c: move-result v0
+003ed2: 3800 0500 |001d: if-eqz v0, 0022 // +0005
+003ed6: 6e10 6d00 0600 |001f: invoke-virtual {v6}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d
+003edc: 1210 |0022: const/4 v0, #int 1 // #1
+003ede: 0f00 |0023: return v0
+003ee0: 6f20 3900 7600 |0024: invoke-super {v6, v7}, Landroid/view/View;.onTouchEvent:(Landroid/view/MotionEvent;)Z // method@0039
+003ee6: 0a00 |0027: move-result v0
+003ee8: 28fb |0028: goto 0023 // -0005
+ catches : (none)
+ positions :
+ locals :
+
+ #15 : (in Lcom/google/android/checkers/CheckersView;)
+ name : 'setLevel'
+ type : '(I)V'
+ access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED)
+ code -
+ registers : 4
+ ins : 2
+ outs : 0
+ insns size : 64 16-bit code units
+003eec: |[003eec] com.google.android.checkers.CheckersView.setLevel:(I)V
+003efc: 1300 e803 |0000: const/16 v0, #int 1000 // #3e8
+003f00: 1d02 |0002: monitor-enter v2
+003f02: 2b03 2700 0000 |0003: packed-switch v3, 0000002a // +00000027
+003f08: 1233 |0006: const/4 v3, #int 3 // #3
+003f0a: 5421 2200 |0007: iget-object v1, v2, Lcom/google/android/checkers/CheckersView;.p:Lcom/google/android/checkers/a; // field@0022
+003f0e: 5910 4100 |0009: iput v0, v1, Lcom/google/android/checkers/a;.h:I // field@0041
+003f12: 5923 2c00 |000b: iput v3, v2, Lcom/google/android/checkers/CheckersView;.z:I // field@002c
+003f16: 1e02 |000d: monitor-exit v2
+003f18: 0e00 |000e: return-void
+003f1a: 12f0 |000f: const/4 v0, #int -1 // #ff
+003f1c: 28f7 |0010: goto 0007 // -0009
+003f1e: 1200 |0011: const/4 v0, #int 0 // #0
+003f20: 28f5 |0012: goto 0007 // -000b
+003f22: 1300 6400 |0013: const/16 v0, #int 100 // #64
+003f26: 28f2 |0015: goto 0007 // -000e
+003f28: 1300 8813 |0016: const/16 v0, #int 5000 // #1388
+003f2c: 28ef |0018: goto 0007 // -0011
+003f2e: 1300 1027 |0019: const/16 v0, #int 10000 // #2710
+003f32: 28ec |001b: goto 0007 // -0014
+003f34: 1300 983a |001c: const/16 v0, #int 15000 // #3a98
+003f38: 28e9 |001e: goto 0007 // -0017
+003f3a: 1300 3075 |001f: const/16 v0, #int 30000 // #7530
+003f3e: 28e6 |0021: goto 0007 // -001a
+003f40: 1400 60ea 0000 |0022: const v0, #float 0.000000 // #0000ea60
+003f46: 28e2 |0025: goto 0007 // -001e
+003f48: 0d00 |0026: move-exception v0
+003f4a: 1e02 |0027: monitor-exit v2
+003f4c: 2700 |0028: throw v0
+003f4e: 0000 |0029: nop // spacer
+003f50: 0001 0900 0000 0000 0c00 0000 0e00 ... |002a: packed-switch-data (22 units)
+ catches : 1
+ 0x0007 - 0x000d
+ <any> -> 0x0026
+ positions :
+ locals :
+
+ source_file_idx : -1 (unknown)
+
+Class #2 header:
+class_idx : 32
+access_flags : 17 (0x0011)
+superclass_idx : 50
+interfaces_off : 0 (0x000000)
+source_file_idx : -1
+annotations_off : 0 (0x000000)
+class_data_off : 34848 (0x008820)
+static_fields_size : 10
+instance_fields_size: 29
+direct_methods_size : 23
+virtual_methods_size: 6
+
+Class #2 -
+ Class descriptor : 'Lcom/google/android/checkers/a;'
+ Access flags : 0x0011 (PUBLIC FINAL)
+ Superclass : 'Ljava/lang/Thread;'
+ Interfaces -
+ Static fields -
+ #0 : (in Lcom/google/android/checkers/a;)
+ name : 'D'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #1 : (in Lcom/google/android/checkers/a;)
+ name : 'E'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #2 : (in Lcom/google/android/checkers/a;)
+ name : 'F'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #3 : (in Lcom/google/android/checkers/a;)
+ name : 'G'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #4 : (in Lcom/google/android/checkers/a;)
+ name : 'H'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #5 : (in Lcom/google/android/checkers/a;)
+ name : 'I'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #6 : (in Lcom/google/android/checkers/a;)
+ name : 'J'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #7 : (in Lcom/google/android/checkers/a;)
+ name : 'K'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #8 : (in Lcom/google/android/checkers/a;)
+ name : 'L'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ #9 : (in Lcom/google/android/checkers/a;)
+ name : 'M'
+ type : '[I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ Instance fields -
+ #0 : (in Lcom/google/android/checkers/a;)
+ name : 'A'
+ type : '[B'
+ access : 0x0002 (PRIVATE)
+ #1 : (in Lcom/google/android/checkers/a;)
+ name : 'B'
+ type : 'Z'
+ access : 0x0002 (PRIVATE)
+ #2 : (in Lcom/google/android/checkers/a;)
+ name : 'C'
+ type : 'I'
+ access : 0x0002 (PRIVATE)
+ #3 : (in Lcom/google/android/checkers/a;)
+ name : 'a'
+ type : '[I'
+ access : 0x0001 (PUBLIC)
+ #4 : (in Lcom/google/android/checkers/a;)
+ name : 'b'
+ type : '[I'
+ access : 0x0001 (PUBLIC)
+ #5 : (in Lcom/google/android/checkers/a;)
+ name : 'c'
+ type : 'I'
+ access : 0x0001 (PUBLIC)
+ #6 : (in Lcom/google/android/checkers/a;)
+ name : 'd'
+ type : 'I'
+ access : 0x0001 (PUBLIC)
+ #7 : (in Lcom/google/android/checkers/a;)
+ name : 'e'
+ type : 'I'
+ access : 0x0001 (PUBLIC)
+ #8 : (in Lcom/google/android/checkers/a;)
+ name : 'f'
+ type : 'I'
+ access : 0x0001 (PUBLIC)
+ #9 : (in Lcom/google/android/checkers/a;)
+ name : 'g'
+ type : 'I'
+ access : 0x0001 (PUBLIC)
+ #10 : (in Lcom/google/android/checkers/a;)
+ name : 'h'
+ type : 'I'
+ access : 0x0041 (PUBLIC VOLATILE)
+ #11 : (in Lcom/google/android/checkers/a;)
+ name : 'i'
+ type : 'Ljava/util/Random;'
+ access : 0x0002 (PRIVATE)
+ #12 : (in Lcom/google/android/checkers/a;)
+ name : 'j'
+ type : 'Lcom/google/android/checkers/CheckersView;'
+ access : 0x0002 (PRIVATE)
+ #13 : (in Lcom/google/android/checkers/a;)
+ name : 'k'
+ type : 'Z'
+ access : 0x0002 (PRIVATE)
+ #14 : (in Lcom/google/android/checkers/a;)
+ name : 'l'
+ type : 'J'
+ access : 0x0002 (PRIVATE)
+ #15 : (in Lcom/google/android/checkers/a;)
+ name : 'm'
+ type : 'Z'
+ access : 0x0002 (PRIVATE)
+ #16 : (in Lcom/google/android/checkers/a;)
+ name : 'n'
+ type : 'I'
+ access : 0x0002 (PRIVATE)
+ #17 : (in Lcom/google/android/checkers/a;)
+ name : 'o'
+ type : '[I'
+ access : 0x0002 (PRIVATE)
+ #18 : (in Lcom/google/android/checkers/a;)
+ name : 'p'
+ type : '[I'
+ access : 0x0002 (PRIVATE)
+ #19 : (in Lcom/google/android/checkers/a;)
+ name : 'q'
+ type : '[I'
+ access : 0x0002 (PRIVATE)
+ #20 : (in Lcom/google/android/checkers/a;)
+ name : 'r'
+ type : 'I'
+ access : 0x0002 (PRIVATE)
+ #21 : (in Lcom/google/android/checkers/a;)
+ name : 's'
+ type : 'Z'
+ access : 0x0002 (PRIVATE)
+ #22 : (in Lcom/google/android/checkers/a;)
+ name : 't'
+ type : 'Z'
+ access : 0x0002 (PRIVATE)
+ #23 : (in Lcom/google/android/checkers/a;)
+ name : 'u'
+ type : 'Z'
+ access : 0x0002 (PRIVATE)
+ #24 : (in Lcom/google/android/checkers/a;)
+ name : 'v'
+ type : 'I'
+ access : 0x0002 (PRIVATE)
+ #25 : (in Lcom/google/android/checkers/a;)
+ name : 'w'
+ type : 'I'
+ access : 0x0002 (PRIVATE)
+ #26 : (in Lcom/google/android/checkers/a;)
+ name : 'x'
+ type : 'I'
+ access : 0x0002 (PRIVATE)
+ #27 : (in Lcom/google/android/checkers/a;)
+ name : 'y'
+ type : '[I'
+ access : 0x0002 (PRIVATE)
+ #28 : (in Lcom/google/android/checkers/a;)
+ name : 'z'
+ type : '[S'
+ access : 0x0002 (PRIVATE)
+ Direct methods -
+ #0 : (in Lcom/google/android/checkers/a;)
+ name : '<clinit>'
+ type : '()V'
+ access : 0x10008 (STATIC CONSTRUCTOR)
+ code -
+ registers : 8
+ ins : 0
+ outs : 0
+ insns size : 1390 16-bit code units
+003f88: |[003f88] com.google.android.checkers.a.<clinit>:()V
+003f98: 1227 |0000: const/4 v7, #int 2 // #2
+003f9a: 1306 1500 |0001: const/16 v6, #int 21 // #15
+003f9e: 1305 1000 |0003: const/16 v5, #int 16 // #10
+003fa2: 1304 0a00 |0005: const/16 v4, #int 10 // #a
+003fa6: 1303 2000 |0007: const/16 v3, #int 32 // #20
+003faa: 2330 3700 |0009: new-array v0, v3, [I // type@0037
+003fae: 1251 |000b: const/4 v1, #int 5 // #5
+003fb0: 1212 |000c: const/4 v2, #int 1 // #1
+003fb2: 4b02 0001 |000d: aput v2, v0, v1
+003fb6: 1261 |000f: const/4 v1, #int 6 // #6
+003fb8: 4b07 0001 |0010: aput v7, v0, v1
+003fbc: 1271 |0012: const/4 v1, #int 7 // #7
+003fbe: 1242 |0013: const/4 v2, #int 4 // #4
+003fc0: 4b02 0001 |0014: aput v2, v0, v1
+003fc4: 1301 0800 |0016: const/16 v1, #int 8 // #8
+003fc8: 4b05 0001 |0018: aput v5, v0, v1
+003fcc: 1301 0900 |001a: const/16 v1, #int 9 // #9
+003fd0: 4b03 0001 |001c: aput v3, v0, v1
+003fd4: 1301 4000 |001e: const/16 v1, #int 64 // #40
+003fd8: 4b01 0004 |0020: aput v1, v0, v4
+003fdc: 1301 0b00 |0022: const/16 v1, #int 11 // #b
+003fe0: 1302 8000 |0024: const/16 v2, #int 128 // #80
+003fe4: 4b02 0001 |0026: aput v2, v0, v1
+003fe8: 1301 0d00 |0028: const/16 v1, #int 13 // #d
+003fec: 1302 0001 |002a: const/16 v2, #int 256 // #100
+003ff0: 4b02 0001 |002c: aput v2, v0, v1
+003ff4: 1301 0e00 |002e: const/16 v1, #int 14 // #e
+003ff8: 1302 0002 |0030: const/16 v2, #int 512 // #200
+003ffc: 4b02 0001 |0032: aput v2, v0, v1
+004000: 1301 0f00 |0034: const/16 v1, #int 15 // #f
+004004: 1302 0004 |0036: const/16 v2, #int 1024 // #400
+004008: 4b02 0001 |0038: aput v2, v0, v1
+00400c: 1301 0010 |003a: const/16 v1, #int 4096 // #1000
+004010: 4b01 0005 |003c: aput v1, v0, v5
+004014: 1301 1100 |003e: const/16 v1, #int 17 // #11
+004018: 1302 0020 |0040: const/16 v2, #int 8192 // #2000
+00401c: 4b02 0001 |0042: aput v2, v0, v1
+004020: 1301 1200 |0044: const/16 v1, #int 18 // #12
+004024: 1302 0040 |0046: const/16 v2, #int 16384 // #4000
+004028: 4b02 0001 |0048: aput v2, v0, v1
+00402c: 1301 1300 |004a: const/16 v1, #int 19 // #13
+004030: 1402 0080 0000 |004c: const v2, #float 0.000000 // #00008000
+004036: 4b02 0001 |004f: aput v2, v0, v1
+00403a: 1501 0100 |0051: const/high16 v1, #int 65536 // #1
+00403e: 4b01 0006 |0053: aput v1, v0, v6
+004042: 1301 1600 |0055: const/16 v1, #int 22 // #16
+004046: 1502 0200 |0057: const/high16 v2, #int 131072 // #2
+00404a: 4b02 0001 |0059: aput v2, v0, v1
+00404e: 1301 1700 |005b: const/16 v1, #int 23 // #17
+004052: 1502 0400 |005d: const/high16 v2, #int 262144 // #4
+004056: 4b02 0001 |005f: aput v2, v0, v1
+00405a: 1301 1800 |0061: const/16 v1, #int 24 // #18
+00405e: 1502 1000 |0063: const/high16 v2, #int 1048576 // #10
+004062: 4b02 0001 |0065: aput v2, v0, v1
+004066: 1301 1900 |0067: const/16 v1, #int 25 // #19
+00406a: 1502 2000 |0069: const/high16 v2, #int 2097152 // #20
+00406e: 4b02 0001 |006b: aput v2, v0, v1
+004072: 1301 1a00 |006d: const/16 v1, #int 26 // #1a
+004076: 1502 4000 |006f: const/high16 v2, #int 4194304 // #40
+00407a: 4b02 0001 |0071: aput v2, v0, v1
+00407e: 1301 1b00 |0073: const/16 v1, #int 27 // #1b
+004082: 1502 8000 |0075: const/high16 v2, #int 8388608 // #80
+004086: 4b02 0001 |0077: aput v2, v0, v1
+00408a: 1301 1d00 |0079: const/16 v1, #int 29 // #1d
+00408e: 1502 0001 |007b: const/high16 v2, #int 16777216 // #100
+004092: 4b02 0001 |007d: aput v2, v0, v1
+004096: 1301 1e00 |007f: const/16 v1, #int 30 // #1e
+00409a: 1502 0002 |0081: const/high16 v2, #int 33554432 // #200
+00409e: 4b02 0001 |0083: aput v2, v0, v1
+0040a2: 1301 1f00 |0085: const/16 v1, #int 31 // #1f
+0040a6: 1502 0004 |0087: const/high16 v2, #int 67108864 // #400
+0040aa: 4b02 0001 |0089: aput v2, v0, v1
+0040ae: 6900 3000 |008b: sput-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030
+0040b2: 2330 3700 |008d: new-array v0, v3, [I // type@0037
+0040b6: 1301 0900 |008f: const/16 v1, #int 9 // #9
+0040ba: 1212 |0091: const/4 v2, #int 1 // #1
+0040bc: 4b02 0001 |0092: aput v2, v0, v1
+0040c0: 4b07 0004 |0094: aput v7, v0, v4
+0040c4: 1301 0b00 |0096: const/16 v1, #int 11 // #b
+0040c8: 1242 |0098: const/4 v2, #int 4 // #4
+0040ca: 4b02 0001 |0099: aput v2, v0, v1
+0040ce: 1301 0d00 |009b: const/16 v1, #int 13 // #d
+0040d2: 4b05 0001 |009d: aput v5, v0, v1
+0040d6: 1301 0e00 |009f: const/16 v1, #int 14 // #e
+0040da: 4b03 0001 |00a1: aput v3, v0, v1
+0040de: 1301 0f00 |00a3: const/16 v1, #int 15 // #f
+0040e2: 1302 4000 |00a5: const/16 v2, #int 64 // #40
+0040e6: 4b02 0001 |00a7: aput v2, v0, v1
+0040ea: 1301 1100 |00a9: const/16 v1, #int 17 // #11
+0040ee: 1302 0001 |00ab: const/16 v2, #int 256 // #100
+0040f2: 4b02 0001 |00ad: aput v2, v0, v1
+0040f6: 1301 1200 |00af: const/16 v1, #int 18 // #12
+0040fa: 1302 0002 |00b1: const/16 v2, #int 512 // #200
+0040fe: 4b02 0001 |00b3: aput v2, v0, v1
+004102: 1301 1300 |00b5: const/16 v1, #int 19 // #13
+004106: 1302 0004 |00b7: const/16 v2, #int 1024 // #400
+00410a: 4b02 0001 |00b9: aput v2, v0, v1
+00410e: 1301 0010 |00bb: const/16 v1, #int 4096 // #1000
+004112: 4b01 0006 |00bd: aput v1, v0, v6
+004116: 1301 1600 |00bf: const/16 v1, #int 22 // #16
+00411a: 1302 0020 |00c1: const/16 v2, #int 8192 // #2000
+00411e: 4b02 0001 |00c3: aput v2, v0, v1
+004122: 1301 1700 |00c5: const/16 v1, #int 
23 // #17 +004126: 1302 0040 |00c7: const/16 v2, #int 16384 // #4000 +00412a: 4b02 0001 |00c9: aput v2, v0, v1 +00412e: 1301 1900 |00cb: const/16 v1, #int 25 // #19 +004132: 1502 0100 |00cd: const/high16 v2, #int 65536 // #1 +004136: 4b02 0001 |00cf: aput v2, v0, v1 +00413a: 1301 1a00 |00d1: const/16 v1, #int 26 // #1a +00413e: 1502 0200 |00d3: const/high16 v2, #int 131072 // #2 +004142: 4b02 0001 |00d5: aput v2, v0, v1 +004146: 1301 1b00 |00d7: const/16 v1, #int 27 // #1b +00414a: 1502 0400 |00d9: const/high16 v2, #int 262144 // #4 +00414e: 4b02 0001 |00db: aput v2, v0, v1 +004152: 1301 1d00 |00dd: const/16 v1, #int 29 // #1d +004156: 1502 1000 |00df: const/high16 v2, #int 1048576 // #10 +00415a: 4b02 0001 |00e1: aput v2, v0, v1 +00415e: 1301 1e00 |00e3: const/16 v1, #int 30 // #1e +004162: 1502 2000 |00e5: const/high16 v2, #int 2097152 // #20 +004166: 4b02 0001 |00e7: aput v2, v0, v1 +00416a: 1301 1f00 |00e9: const/16 v1, #int 31 // #1f +00416e: 1502 4000 |00eb: const/high16 v2, #int 4194304 // #40 +004172: 4b02 0001 |00ed: aput v2, v0, v1 +004176: 6900 3100 |00ef: sput-object v0, Lcom/google/android/checkers/a;.E:[I // field@0031 +00417a: 2330 3700 |00f1: new-array v0, v3, [I // type@0037 +00417e: 1241 |00f3: const/4 v1, #int 4 // #4 +004180: 1212 |00f4: const/4 v2, #int 1 // #1 +004182: 4b02 0001 |00f5: aput v2, v0, v1 +004186: 1251 |00f7: const/4 v1, #int 5 // #5 +004188: 4b07 0001 |00f8: aput v7, v0, v1 +00418c: 1261 |00fa: const/4 v1, #int 6 // #6 +00418e: 1242 |00fb: const/4 v2, #int 4 // #4 +004190: 4b02 0001 |00fc: aput v2, v0, v1 +004194: 1271 |00fe: const/4 v1, #int 7 // #7 +004196: 1302 0800 |00ff: const/16 v2, #int 8 // #8 +00419a: 4b02 0001 |0101: aput v2, v0, v1 +00419e: 1301 0800 |0103: const/16 v1, #int 8 // #8 +0041a2: 4b03 0001 |0105: aput v3, v0, v1 +0041a6: 1301 0900 |0107: const/16 v1, #int 9 // #9 +0041aa: 1302 4000 |0109: const/16 v2, #int 64 // #40 +0041ae: 4b02 0001 |010b: aput v2, v0, v1 +0041b2: 1301 8000 |010d: const/16 v1, #int 128 // #80 +0041b6: 4b01 0004 |010f: aput v1, v0, v4 +0041ba: 1301 0c00 |0111: const/16 v1, #int 12 // #c +0041be: 1302 0001 |0113: const/16 v2, #int 256 // #100 +0041c2: 4b02 0001 |0115: aput v2, v0, v1 +0041c6: 1301 0d00 |0117: const/16 v1, #int 13 // #d +0041ca: 1302 0002 |0119: const/16 v2, #int 512 // #200 +0041ce: 4b02 0001 |011b: aput v2, v0, v1 +0041d2: 1301 0e00 |011d: const/16 v1, #int 14 // #e +0041d6: 1302 0004 |011f: const/16 v2, #int 1024 // #400 +0041da: 4b02 0001 |0121: aput v2, v0, v1 +0041de: 1301 0f00 |0123: const/16 v1, #int 15 // #f +0041e2: 1302 0008 |0125: const/16 v2, #int 2048 // #800 +0041e6: 4b02 0001 |0127: aput v2, v0, v1 +0041ea: 1301 0020 |0129: const/16 v1, #int 8192 // #2000 +0041ee: 4b01 0005 |012b: aput v1, v0, v5 +0041f2: 1301 1100 |012d: const/16 v1, #int 17 // #11 +0041f6: 1302 0040 |012f: const/16 v2, #int 16384 // #4000 +0041fa: 4b02 0001 |0131: aput v2, v0, v1 +0041fe: 1301 1200 |0133: const/16 v1, #int 18 // #12 +004202: 1402 0080 0000 |0135: const v2, #float 0.000000 // #00008000 +004208: 4b02 0001 |0138: aput v2, v0, v1 +00420c: 1301 1400 |013a: const/16 v1, #int 20 // #14 +004210: 1502 0100 |013c: const/high16 v2, #int 65536 // #1 +004214: 4b02 0001 |013e: aput v2, v0, v1 +004218: 1501 0200 |0140: const/high16 v1, #int 131072 // #2 +00421c: 4b01 0006 |0142: aput v1, v0, v6 +004220: 1301 1600 |0144: const/16 v1, #int 22 // #16 +004224: 1502 0400 |0146: const/high16 v2, #int 262144 // #4 +004228: 4b02 0001 |0148: aput v2, v0, v1 +00422c: 1301 1700 |014a: const/16 v1, #int 23 // #17 +004230: 
1502 0800 |014c: const/high16 v2, #int 524288 // #8 +004234: 4b02 0001 |014e: aput v2, v0, v1 +004238: 1301 1800 |0150: const/16 v1, #int 24 // #18 +00423c: 1502 2000 |0152: const/high16 v2, #int 2097152 // #20 +004240: 4b02 0001 |0154: aput v2, v0, v1 +004244: 1301 1900 |0156: const/16 v1, #int 25 // #19 +004248: 1502 4000 |0158: const/high16 v2, #int 4194304 // #40 +00424c: 4b02 0001 |015a: aput v2, v0, v1 +004250: 1301 1a00 |015c: const/16 v1, #int 26 // #1a +004254: 1502 8000 |015e: const/high16 v2, #int 8388608 // #80 +004258: 4b02 0001 |0160: aput v2, v0, v1 +00425c: 1301 1c00 |0162: const/16 v1, #int 28 // #1c +004260: 1502 0001 |0164: const/high16 v2, #int 16777216 // #100 +004264: 4b02 0001 |0166: aput v2, v0, v1 +004268: 1301 1d00 |0168: const/16 v1, #int 29 // #1d +00426c: 1502 0002 |016a: const/high16 v2, #int 33554432 // #200 +004270: 4b02 0001 |016c: aput v2, v0, v1 +004274: 1301 1e00 |016e: const/16 v1, #int 30 // #1e +004278: 1502 0004 |0170: const/high16 v2, #int 67108864 // #400 +00427c: 4b02 0001 |0172: aput v2, v0, v1 +004280: 1301 1f00 |0174: const/16 v1, #int 31 // #1f +004284: 1502 0008 |0176: const/high16 v2, #int 134217728 // #800 +004288: 4b02 0001 |0178: aput v2, v0, v1 +00428c: 6900 3200 |017a: sput-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +004290: 2330 3700 |017c: new-array v0, v3, [I // type@0037 +004294: 1301 0800 |017e: const/16 v1, #int 8 // #8 +004298: 4b07 0001 |0180: aput v7, v0, v1 +00429c: 1301 0900 |0182: const/16 v1, #int 9 // #9 +0042a0: 1242 |0184: const/4 v2, #int 4 // #4 +0042a2: 4b02 0001 |0185: aput v2, v0, v1 +0042a6: 1301 0800 |0187: const/16 v1, #int 8 // #8 +0042aa: 4b01 0004 |0189: aput v1, v0, v4 +0042ae: 1301 0c00 |018b: const/16 v1, #int 12 // #c +0042b2: 4b03 0001 |018d: aput v3, v0, v1 +0042b6: 1301 0d00 |018f: const/16 v1, #int 13 // #d +0042ba: 1302 4000 |0191: const/16 v2, #int 64 // #40 +0042be: 4b02 0001 |0193: aput v2, v0, v1 +0042c2: 1301 0e00 |0195: const/16 v1, #int 14 // #e +0042c6: 1302 8000 |0197: const/16 v2, #int 128 // #80 +0042ca: 4b02 0001 |0199: aput v2, v0, v1 +0042ce: 1301 0002 |019b: const/16 v1, #int 512 // #200 +0042d2: 4b01 0005 |019d: aput v1, v0, v5 +0042d6: 1301 1100 |019f: const/16 v1, #int 17 // #11 +0042da: 1302 0004 |01a1: const/16 v2, #int 1024 // #400 +0042de: 4b02 0001 |01a3: aput v2, v0, v1 +0042e2: 1301 1200 |01a5: const/16 v1, #int 18 // #12 +0042e6: 1302 0008 |01a7: const/16 v2, #int 2048 // #800 +0042ea: 4b02 0001 |01a9: aput v2, v0, v1 +0042ee: 1301 1400 |01ab: const/16 v1, #int 20 // #14 +0042f2: 1302 0020 |01ad: const/16 v2, #int 8192 // #2000 +0042f6: 4b02 0001 |01af: aput v2, v0, v1 +0042fa: 1301 0040 |01b1: const/16 v1, #int 16384 // #4000 +0042fe: 4b01 0006 |01b3: aput v1, v0, v6 +004302: 1301 1600 |01b5: const/16 v1, #int 22 // #16 +004306: 1402 0080 0000 |01b7: const v2, #float 0.000000 // #00008000 +00430c: 4b02 0001 |01ba: aput v2, v0, v1 +004310: 1301 1800 |01bc: const/16 v1, #int 24 // #18 +004314: 1502 0200 |01be: const/high16 v2, #int 131072 // #2 +004318: 4b02 0001 |01c0: aput v2, v0, v1 +00431c: 1301 1900 |01c2: const/16 v1, #int 25 // #19 +004320: 1502 0400 |01c4: const/high16 v2, #int 262144 // #4 +004324: 4b02 0001 |01c6: aput v2, v0, v1 +004328: 1301 1a00 |01c8: const/16 v1, #int 26 // #1a +00432c: 1502 0800 |01ca: const/high16 v2, #int 524288 // #8 +004330: 4b02 0001 |01cc: aput v2, v0, v1 +004334: 1301 1c00 |01ce: const/16 v1, #int 28 // #1c +004338: 1502 2000 |01d0: const/high16 v2, #int 2097152 // #20 +00433c: 4b02 0001 |01d2: aput v2, v0, v1 +004340: 
1301 1d00 |01d4: const/16 v1, #int 29 // #1d +004344: 1502 4000 |01d6: const/high16 v2, #int 4194304 // #40 +004348: 4b02 0001 |01d8: aput v2, v0, v1 +00434c: 1301 1e00 |01da: const/16 v1, #int 30 // #1e +004350: 1502 8000 |01dc: const/high16 v2, #int 8388608 // #80 +004354: 4b02 0001 |01de: aput v2, v0, v1 +004358: 6900 3300 |01e0: sput-object v0, Lcom/google/android/checkers/a;.G:[I // field@0033 +00435c: 2330 3700 |01e2: new-array v0, v3, [I // type@0037 +004360: 1201 |01e4: const/4 v1, #int 0 // #0 +004362: 4b05 0001 |01e5: aput v5, v0, v1 +004366: 1211 |01e7: const/4 v1, #int 1 // #1 +004368: 4b03 0001 |01e8: aput v3, v0, v1 +00436c: 1301 4000 |01ea: const/16 v1, #int 64 // #40 +004370: 4b01 0007 |01ec: aput v1, v0, v7 +004374: 1231 |01ee: const/4 v1, #int 3 // #3 +004376: 1302 8000 |01ef: const/16 v2, #int 128 // #80 +00437a: 4b02 0001 |01f1: aput v2, v0, v1 +00437e: 1251 |01f3: const/4 v1, #int 5 // #5 +004380: 1302 0001 |01f4: const/16 v2, #int 256 // #100 +004384: 4b02 0001 |01f6: aput v2, v0, v1 +004388: 1261 |01f8: const/4 v1, #int 6 // #6 +00438a: 1302 0002 |01f9: const/16 v2, #int 512 // #200 +00438e: 4b02 0001 |01fb: aput v2, v0, v1 +004392: 1271 |01fd: const/4 v1, #int 7 // #7 +004394: 1302 0004 |01fe: const/16 v2, #int 1024 // #400 +004398: 4b02 0001 |0200: aput v2, v0, v1 +00439c: 1301 0800 |0202: const/16 v1, #int 8 // #8 +0043a0: 1302 0010 |0204: const/16 v2, #int 4096 // #1000 +0043a4: 4b02 0001 |0206: aput v2, v0, v1 +0043a8: 1301 0900 |0208: const/16 v1, #int 9 // #9 +0043ac: 1302 0020 |020a: const/16 v2, #int 8192 // #2000 +0043b0: 4b02 0001 |020c: aput v2, v0, v1 +0043b4: 1301 0040 |020e: const/16 v1, #int 16384 // #4000 +0043b8: 4b01 0004 |0210: aput v1, v0, v4 +0043bc: 1301 0b00 |0212: const/16 v1, #int 11 // #b +0043c0: 1402 0080 0000 |0214: const v2, #float 0.000000 // #00008000 +0043c6: 4b02 0001 |0217: aput v2, v0, v1 +0043ca: 1301 0d00 |0219: const/16 v1, #int 13 // #d +0043ce: 1502 0100 |021b: const/high16 v2, #int 65536 // #1 +0043d2: 4b02 0001 |021d: aput v2, v0, v1 +0043d6: 1301 0e00 |021f: const/16 v1, #int 14 // #e +0043da: 1502 0200 |0221: const/high16 v2, #int 131072 // #2 +0043de: 4b02 0001 |0223: aput v2, v0, v1 +0043e2: 1301 0f00 |0225: const/16 v1, #int 15 // #f +0043e6: 1502 0400 |0227: const/high16 v2, #int 262144 // #4 +0043ea: 4b02 0001 |0229: aput v2, v0, v1 +0043ee: 1501 1000 |022b: const/high16 v1, #int 1048576 // #10 +0043f2: 4b01 0005 |022d: aput v1, v0, v5 +0043f6: 1301 1100 |022f: const/16 v1, #int 17 // #11 +0043fa: 1502 2000 |0231: const/high16 v2, #int 2097152 // #20 +0043fe: 4b02 0001 |0233: aput v2, v0, v1 +004402: 1301 1200 |0235: const/16 v1, #int 18 // #12 +004406: 1502 4000 |0237: const/high16 v2, #int 4194304 // #40 +00440a: 4b02 0001 |0239: aput v2, v0, v1 +00440e: 1301 1300 |023b: const/16 v1, #int 19 // #13 +004412: 1502 8000 |023d: const/high16 v2, #int 8388608 // #80 +004416: 4b02 0001 |023f: aput v2, v0, v1 +00441a: 1501 0001 |0241: const/high16 v1, #int 16777216 // #100 +00441e: 4b01 0006 |0243: aput v1, v0, v6 +004422: 1301 1600 |0245: const/16 v1, #int 22 // #16 +004426: 1502 0002 |0247: const/high16 v2, #int 33554432 // #200 +00442a: 4b02 0001 |0249: aput v2, v0, v1 +00442e: 1301 1700 |024b: const/16 v1, #int 23 // #17 +004432: 1502 0004 |024d: const/high16 v2, #int 67108864 // #400 +004436: 4b02 0001 |024f: aput v2, v0, v1 +00443a: 1301 1800 |0251: const/16 v1, #int 24 // #18 +00443e: 1502 0010 |0253: const/high16 v2, #int 268435456 // #1000 +004442: 4b02 0001 |0255: aput v2, v0, v1 +004446: 1301 1900 |0257: 
const/16 v1, #int 25 // #19 +00444a: 1502 0020 |0259: const/high16 v2, #int 536870912 // #2000 +00444e: 4b02 0001 |025b: aput v2, v0, v1 +004452: 1301 1a00 |025d: const/16 v1, #int 26 // #1a +004456: 1502 0040 |025f: const/high16 v2, #int 1073741824 // #4000 +00445a: 4b02 0001 |0261: aput v2, v0, v1 +00445e: 1301 1b00 |0263: const/16 v1, #int 27 // #1b +004462: 1502 0080 |0265: const/high16 v2, #int -2147483648 // #8000 +004466: 4b02 0001 |0267: aput v2, v0, v1 +00446a: 6900 3400 |0269: sput-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +00446e: 2330 3700 |026b: new-array v0, v3, [I // type@0037 +004472: 1211 |026d: const/4 v1, #int 1 // #1 +004474: 1302 0001 |026e: const/16 v2, #int 256 // #100 +004478: 4b02 0001 |0270: aput v2, v0, v1 +00447c: 1301 0002 |0272: const/16 v1, #int 512 // #200 +004480: 4b01 0007 |0274: aput v1, v0, v7 +004484: 1231 |0276: const/4 v1, #int 3 // #3 +004486: 1302 0004 |0277: const/16 v2, #int 1024 // #400 +00448a: 4b02 0001 |0279: aput v2, v0, v1 +00448e: 1251 |027b: const/4 v1, #int 5 // #5 +004490: 1302 0010 |027c: const/16 v2, #int 4096 // #1000 +004494: 4b02 0001 |027e: aput v2, v0, v1 +004498: 1261 |0280: const/4 v1, #int 6 // #6 +00449a: 1302 0020 |0281: const/16 v2, #int 8192 // #2000 +00449e: 4b02 0001 |0283: aput v2, v0, v1 +0044a2: 1271 |0285: const/4 v1, #int 7 // #7 +0044a4: 1302 0040 |0286: const/16 v2, #int 16384 // #4000 +0044a8: 4b02 0001 |0288: aput v2, v0, v1 +0044ac: 1301 0900 |028a: const/16 v1, #int 9 // #9 +0044b0: 1502 0100 |028c: const/high16 v2, #int 65536 // #1 +0044b4: 4b02 0001 |028e: aput v2, v0, v1 +0044b8: 1501 0200 |0290: const/high16 v1, #int 131072 // #2 +0044bc: 4b01 0004 |0292: aput v1, v0, v4 +0044c0: 1301 0b00 |0294: const/16 v1, #int 11 // #b +0044c4: 1502 0400 |0296: const/high16 v2, #int 262144 // #4 +0044c8: 4b02 0001 |0298: aput v2, v0, v1 +0044cc: 1301 0d00 |029a: const/16 v1, #int 13 // #d +0044d0: 1502 1000 |029c: const/high16 v2, #int 1048576 // #10 +0044d4: 4b02 0001 |029e: aput v2, v0, v1 +0044d8: 1301 0e00 |02a0: const/16 v1, #int 14 // #e +0044dc: 1502 2000 |02a2: const/high16 v2, #int 2097152 // #20 +0044e0: 4b02 0001 |02a4: aput v2, v0, v1 +0044e4: 1301 0f00 |02a6: const/16 v1, #int 15 // #f +0044e8: 1502 4000 |02a8: const/high16 v2, #int 4194304 // #40 +0044ec: 4b02 0001 |02aa: aput v2, v0, v1 +0044f0: 1301 1100 |02ac: const/16 v1, #int 17 // #11 +0044f4: 1502 0001 |02ae: const/high16 v2, #int 16777216 // #100 +0044f8: 4b02 0001 |02b0: aput v2, v0, v1 +0044fc: 1301 1200 |02b2: const/16 v1, #int 18 // #12 +004500: 1502 0002 |02b4: const/high16 v2, #int 33554432 // #200 +004504: 4b02 0001 |02b6: aput v2, v0, v1 +004508: 1301 1300 |02b8: const/16 v1, #int 19 // #13 +00450c: 1502 0004 |02ba: const/high16 v2, #int 67108864 // #400 +004510: 4b02 0001 |02bc: aput v2, v0, v1 +004514: 1501 0010 |02be: const/high16 v1, #int 268435456 // #1000 +004518: 4b01 0006 |02c0: aput v1, v0, v6 +00451c: 1301 1600 |02c2: const/16 v1, #int 22 // #16 +004520: 1502 0020 |02c4: const/high16 v2, #int 536870912 // #2000 +004524: 4b02 0001 |02c6: aput v2, v0, v1 +004528: 1301 1700 |02c8: const/16 v1, #int 23 // #17 +00452c: 1502 0040 |02ca: const/high16 v2, #int 1073741824 // #4000 +004530: 4b02 0001 |02cc: aput v2, v0, v1 +004534: 6900 3500 |02ce: sput-object v0, Lcom/google/android/checkers/a;.I:[I // field@0035 +004538: 2330 3700 |02d0: new-array v0, v3, [I // type@0037 +00453c: 1201 |02d2: const/4 v1, #int 0 // #0 +00453e: 4b03 0001 |02d3: aput v3, v0, v1 +004542: 1211 |02d5: const/4 v1, #int 1 // #1 +004544: 1302 
4000 |02d6: const/16 v2, #int 64 // #40 +004548: 4b02 0001 |02d8: aput v2, v0, v1 +00454c: 1301 8000 |02da: const/16 v1, #int 128 // #80 +004550: 4b01 0007 |02dc: aput v1, v0, v7 +004554: 1241 |02de: const/4 v1, #int 4 // #4 +004556: 1302 0001 |02df: const/16 v2, #int 256 // #100 +00455a: 4b02 0001 |02e1: aput v2, v0, v1 +00455e: 1251 |02e3: const/4 v1, #int 5 // #5 +004560: 1302 0002 |02e4: const/16 v2, #int 512 // #200 +004564: 4b02 0001 |02e6: aput v2, v0, v1 +004568: 1261 |02e8: const/4 v1, #int 6 // #6 +00456a: 1302 0004 |02e9: const/16 v2, #int 1024 // #400 +00456e: 4b02 0001 |02eb: aput v2, v0, v1 +004572: 1271 |02ed: const/4 v1, #int 7 // #7 +004574: 1302 0008 |02ee: const/16 v2, #int 2048 // #800 +004578: 4b02 0001 |02f0: aput v2, v0, v1 +00457c: 1301 0800 |02f2: const/16 v1, #int 8 // #8 +004580: 1302 0020 |02f4: const/16 v2, #int 8192 // #2000 +004584: 4b02 0001 |02f6: aput v2, v0, v1 +004588: 1301 0900 |02f8: const/16 v1, #int 9 // #9 +00458c: 1302 0040 |02fa: const/16 v2, #int 16384 // #4000 +004590: 4b02 0001 |02fc: aput v2, v0, v1 +004594: 1401 0080 0000 |02fe: const v1, #float 0.000000 // #00008000 +00459a: 4b01 0004 |0301: aput v1, v0, v4 +00459e: 1301 0c00 |0303: const/16 v1, #int 12 // #c +0045a2: 1502 0100 |0305: const/high16 v2, #int 65536 // #1 +0045a6: 4b02 0001 |0307: aput v2, v0, v1 +0045aa: 1301 0d00 |0309: const/16 v1, #int 13 // #d +0045ae: 1502 0200 |030b: const/high16 v2, #int 131072 // #2 +0045b2: 4b02 0001 |030d: aput v2, v0, v1 +0045b6: 1301 0e00 |030f: const/16 v1, #int 14 // #e +0045ba: 1502 0400 |0311: const/high16 v2, #int 262144 // #4 +0045be: 4b02 0001 |0313: aput v2, v0, v1 +0045c2: 1301 0f00 |0315: const/16 v1, #int 15 // #f +0045c6: 1502 0800 |0317: const/high16 v2, #int 524288 // #8 +0045ca: 4b02 0001 |0319: aput v2, v0, v1 +0045ce: 1501 2000 |031b: const/high16 v1, #int 2097152 // #20 +0045d2: 4b01 0005 |031d: aput v1, v0, v5 +0045d6: 1301 1100 |031f: const/16 v1, #int 17 // #11 +0045da: 1502 4000 |0321: const/high16 v2, #int 4194304 // #40 +0045de: 4b02 0001 |0323: aput v2, v0, v1 +0045e2: 1301 1200 |0325: const/16 v1, #int 18 // #12 +0045e6: 1502 8000 |0327: const/high16 v2, #int 8388608 // #80 +0045ea: 4b02 0001 |0329: aput v2, v0, v1 +0045ee: 1301 1400 |032b: const/16 v1, #int 20 // #14 +0045f2: 1502 0001 |032d: const/high16 v2, #int 16777216 // #100 +0045f6: 4b02 0001 |032f: aput v2, v0, v1 +0045fa: 1501 0002 |0331: const/high16 v1, #int 33554432 // #200 +0045fe: 4b01 0006 |0333: aput v1, v0, v6 +004602: 1301 1600 |0335: const/16 v1, #int 22 // #16 +004606: 1502 0004 |0337: const/high16 v2, #int 67108864 // #400 +00460a: 4b02 0001 |0339: aput v2, v0, v1 +00460e: 1301 1700 |033b: const/16 v1, #int 23 // #17 +004612: 1502 0008 |033d: const/high16 v2, #int 134217728 // #800 +004616: 4b02 0001 |033f: aput v2, v0, v1 +00461a: 1301 1800 |0341: const/16 v1, #int 24 // #18 +00461e: 1502 0020 |0343: const/high16 v2, #int 536870912 // #2000 +004622: 4b02 0001 |0345: aput v2, v0, v1 +004626: 1301 1900 |0347: const/16 v1, #int 25 // #19 +00462a: 1502 0040 |0349: const/high16 v2, #int 1073741824 // #4000 +00462e: 4b02 0001 |034b: aput v2, v0, v1 +004632: 1301 1a00 |034d: const/16 v1, #int 26 // #1a +004636: 1502 0080 |034f: const/high16 v2, #int -2147483648 // #8000 +00463a: 4b02 0001 |0351: aput v2, v0, v1 +00463e: 6900 3600 |0353: sput-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +004642: 2330 3700 |0355: new-array v0, v3, [I // type@0037 +004646: 1201 |0357: const/4 v1, #int 0 // #0 +004648: 1302 0002 |0358: const/16 v2, #int 512 // 
#200 +00464c: 4b02 0001 |035a: aput v2, v0, v1 +004650: 1211 |035c: const/4 v1, #int 1 // #1 +004652: 1302 0004 |035d: const/16 v2, #int 1024 // #400 +004656: 4b02 0001 |035f: aput v2, v0, v1 +00465a: 1301 0008 |0361: const/16 v1, #int 2048 // #800 +00465e: 4b01 0007 |0363: aput v1, v0, v7 +004662: 1241 |0365: const/4 v1, #int 4 // #4 +004664: 1302 0020 |0366: const/16 v2, #int 8192 // #2000 +004668: 4b02 0001 |0368: aput v2, v0, v1 +00466c: 1251 |036a: const/4 v1, #int 5 // #5 +00466e: 1302 0040 |036b: const/16 v2, #int 16384 // #4000 +004672: 4b02 0001 |036d: aput v2, v0, v1 +004676: 1261 |036f: const/4 v1, #int 6 // #6 +004678: 1402 0080 0000 |0370: const v2, #float 0.000000 // #00008000 +00467e: 4b02 0001 |0373: aput v2, v0, v1 +004682: 1301 0800 |0375: const/16 v1, #int 8 // #8 +004686: 1502 0200 |0377: const/high16 v2, #int 131072 // #2 +00468a: 4b02 0001 |0379: aput v2, v0, v1 +00468e: 1301 0900 |037b: const/16 v1, #int 9 // #9 +004692: 1502 0400 |037d: const/high16 v2, #int 262144 // #4 +004696: 4b02 0001 |037f: aput v2, v0, v1 +00469a: 1501 0800 |0381: const/high16 v1, #int 524288 // #8 +00469e: 4b01 0004 |0383: aput v1, v0, v4 +0046a2: 1301 0c00 |0385: const/16 v1, #int 12 // #c +0046a6: 1502 2000 |0387: const/high16 v2, #int 2097152 // #20 +0046aa: 4b02 0001 |0389: aput v2, v0, v1 +0046ae: 1301 0d00 |038b: const/16 v1, #int 13 // #d +0046b2: 1502 4000 |038d: const/high16 v2, #int 4194304 // #40 +0046b6: 4b02 0001 |038f: aput v2, v0, v1 +0046ba: 1301 0e00 |0391: const/16 v1, #int 14 // #e +0046be: 1502 8000 |0393: const/high16 v2, #int 8388608 // #80 +0046c2: 4b02 0001 |0395: aput v2, v0, v1 +0046c6: 1501 0002 |0397: const/high16 v1, #int 33554432 // #200 +0046ca: 4b01 0005 |0399: aput v1, v0, v5 +0046ce: 1301 1100 |039b: const/16 v1, #int 17 // #11 +0046d2: 1502 0004 |039d: const/high16 v2, #int 67108864 // #400 +0046d6: 4b02 0001 |039f: aput v2, v0, v1 +0046da: 1301 1200 |03a1: const/16 v1, #int 18 // #12 +0046de: 1502 0008 |03a3: const/high16 v2, #int 134217728 // #800 +0046e2: 4b02 0001 |03a5: aput v2, v0, v1 +0046e6: 1301 1400 |03a7: const/16 v1, #int 20 // #14 +0046ea: 1502 0020 |03a9: const/high16 v2, #int 536870912 // #2000 +0046ee: 4b02 0001 |03ab: aput v2, v0, v1 +0046f2: 1501 0040 |03ad: const/high16 v1, #int 1073741824 // #4000 +0046f6: 4b01 0006 |03af: aput v1, v0, v6 +0046fa: 1301 1600 |03b1: const/16 v1, #int 22 // #16 +0046fe: 1502 0080 |03b3: const/high16 v2, #int -2147483648 // #8000 +004702: 4b02 0001 |03b5: aput v2, v0, v1 +004706: 6900 3700 |03b7: sput-object v0, Lcom/google/android/checkers/a;.K:[I // field@0037 +00470a: 2330 3700 |03b9: new-array v0, v3, [I // type@0037 +00470e: 1211 |03bb: const/4 v1, #int 1 // #1 +004710: 4b01 0007 |03bc: aput v1, v0, v7 +004714: 1231 |03be: const/4 v1, #int 3 // #3 +004716: 1232 |03bf: const/4 v2, #int 3 // #3 +004718: 4b02 0001 |03c0: aput v2, v0, v1 +00471c: 1241 |03c2: const/4 v1, #int 4 // #4 +00471e: 1262 |03c3: const/4 v2, #int 6 // #6 +004720: 4b02 0001 |03c4: aput v2, v0, v1 +004724: 1251 |03c6: const/4 v1, #int 5 // #5 +004726: 4b04 0001 |03c7: aput v4, v0, v1 +00472a: 1261 |03c9: const/4 v1, #int 6 // #6 +00472c: 1302 0f00 |03ca: const/16 v2, #int 15 // #f +004730: 4b02 0001 |03cc: aput v2, v0, v1 +004734: 1271 |03ce: const/4 v1, #int 7 // #7 +004736: 4b06 0001 |03cf: aput v6, v0, v1 +00473a: 1301 0800 |03d1: const/16 v1, #int 8 // #8 +00473e: 1302 1c00 |03d3: const/16 v2, #int 28 // #1c +004742: 4b02 0001 |03d5: aput v2, v0, v1 +004746: 1301 0900 |03d7: const/16 v1, #int 9 // #9 +00474a: 1302 2400 |03d9: 
const/16 v2, #int 36 // #24 +00474e: 4b02 0001 |03db: aput v2, v0, v1 +004752: 1301 2d00 |03dd: const/16 v1, #int 45 // #2d +004756: 4b01 0004 |03df: aput v1, v0, v4 +00475a: 1301 0b00 |03e1: const/16 v1, #int 11 // #b +00475e: 1302 3700 |03e3: const/16 v2, #int 55 // #37 +004762: 4b02 0001 |03e5: aput v2, v0, v1 +004766: 1301 0c00 |03e7: const/16 v1, #int 12 // #c +00476a: 1302 4200 |03e9: const/16 v2, #int 66 // #42 +00476e: 4b02 0001 |03eb: aput v2, v0, v1 +004772: 1301 0d00 |03ed: const/16 v1, #int 13 // #d +004776: 1302 4e00 |03ef: const/16 v2, #int 78 // #4e +00477a: 4b02 0001 |03f1: aput v2, v0, v1 +00477e: 1301 0e00 |03f3: const/16 v1, #int 14 // #e +004782: 1302 5b00 |03f5: const/16 v2, #int 91 // #5b +004786: 4b02 0001 |03f7: aput v2, v0, v1 +00478a: 1301 0f00 |03f9: const/16 v1, #int 15 // #f +00478e: 1302 6900 |03fb: const/16 v2, #int 105 // #69 +004792: 4b02 0001 |03fd: aput v2, v0, v1 +004796: 1301 7800 |03ff: const/16 v1, #int 120 // #78 +00479a: 4b01 0005 |0401: aput v1, v0, v5 +00479e: 1301 1100 |0403: const/16 v1, #int 17 // #11 +0047a2: 1302 8800 |0405: const/16 v2, #int 136 // #88 +0047a6: 4b02 0001 |0407: aput v2, v0, v1 +0047aa: 1301 1200 |0409: const/16 v1, #int 18 // #12 +0047ae: 1302 9900 |040b: const/16 v2, #int 153 // #99 +0047b2: 4b02 0001 |040d: aput v2, v0, v1 +0047b6: 1301 1300 |040f: const/16 v1, #int 19 // #13 +0047ba: 1302 ab00 |0411: const/16 v2, #int 171 // #ab +0047be: 4b02 0001 |0413: aput v2, v0, v1 +0047c2: 1301 1400 |0415: const/16 v1, #int 20 // #14 +0047c6: 1302 be00 |0417: const/16 v2, #int 190 // #be +0047ca: 4b02 0001 |0419: aput v2, v0, v1 +0047ce: 1301 d200 |041b: const/16 v1, #int 210 // #d2 +0047d2: 4b01 0006 |041d: aput v1, v0, v6 +0047d6: 1301 1600 |041f: const/16 v1, #int 22 // #16 +0047da: 1302 e700 |0421: const/16 v2, #int 231 // #e7 +0047de: 4b02 0001 |0423: aput v2, v0, v1 +0047e2: 1301 1700 |0425: const/16 v1, #int 23 // #17 +0047e6: 1302 fd00 |0427: const/16 v2, #int 253 // #fd +0047ea: 4b02 0001 |0429: aput v2, v0, v1 +0047ee: 1301 1800 |042b: const/16 v1, #int 24 // #18 +0047f2: 1302 1401 |042d: const/16 v2, #int 276 // #114 +0047f6: 4b02 0001 |042f: aput v2, v0, v1 +0047fa: 1301 1900 |0431: const/16 v1, #int 25 // #19 +0047fe: 1302 2c01 |0433: const/16 v2, #int 300 // #12c +004802: 4b02 0001 |0435: aput v2, v0, v1 +004806: 1301 1a00 |0437: const/16 v1, #int 26 // #1a +00480a: 1302 4501 |0439: const/16 v2, #int 325 // #145 +00480e: 4b02 0001 |043b: aput v2, v0, v1 +004812: 1301 1b00 |043d: const/16 v1, #int 27 // #1b +004816: 1302 5f01 |043f: const/16 v2, #int 351 // #15f +00481a: 4b02 0001 |0441: aput v2, v0, v1 +00481e: 1301 1c00 |0443: const/16 v1, #int 28 // #1c +004822: 1302 7a01 |0445: const/16 v2, #int 378 // #17a +004826: 4b02 0001 |0447: aput v2, v0, v1 +00482a: 1301 1d00 |0449: const/16 v1, #int 29 // #1d +00482e: 1302 9601 |044b: const/16 v2, #int 406 // #196 +004832: 4b02 0001 |044d: aput v2, v0, v1 +004836: 1301 1e00 |044f: const/16 v1, #int 30 // #1e +00483a: 1302 b301 |0451: const/16 v2, #int 435 // #1b3 +00483e: 4b02 0001 |0453: aput v2, v0, v1 +004842: 1301 1f00 |0455: const/16 v1, #int 31 // #1f +004846: 1302 d101 |0457: const/16 v2, #int 465 // #1d1 +00484a: 4b02 0001 |0459: aput v2, v0, v1 +00484e: 6900 3800 |045b: sput-object v0, Lcom/google/android/checkers/a;.L:[I // field@0038 +004852: 1300 8100 |045d: const/16 v0, #int 129 // #81 +004856: 2300 3700 |045f: new-array v0, v0, [I // type@0037 +00485a: 2600 0700 0000 |0461: fill-array-data v0, 00000468 // +00000007 +004860: 6900 3900 |0464: sput-object v0, 
Lcom/google/android/checkers/a;.M:[I // field@0039
+004864: 0e00 |0466: return-void
+004866: 0000 |0467: nop // spacer
+004868: 0003 0400 8100 0000 6745 68ba ff5c ... |0468: array-data (262 units)
+ catches : (none)
+ positions :
+ locals :
+
+ #1 : (in Lcom/google/android/checkers/a;)
+ name : '<init>'
+ type : '(Lcom/google/android/checkers/CheckersView;)V'
+ access : 0x10001 (PUBLIC CONSTRUCTOR)
+ code -
+ registers : 7
+ ins : 2
+ outs : 1
+ insns size : 94 16-bit code units
+004a74: |[004a74] com.google.android.checkers.a.<init>:(Lcom/google/android/checkers/CheckersView;)V
+004a84: 1304 4000 |0000: const/16 v4, #int 64 // #40
+004a88: 1203 |0002: const/4 v3, #int 0 // #0
+004a8a: 1302 0010 |0003: const/16 v2, #int 4096 // #1000
+004a8e: 1200 |0005: const/4 v0, #int 0 // #0
+004a90: 7010 ad00 0500 |0006: invoke-direct {v5}, Ljava/lang/Thread;.<init>:()V // method@00ad
+004a96: 1301 e803 |0009: const/16 v1, #int 1000 // #3e8
+004a9a: 5951 4100 |000b: iput v1, v5, Lcom/google/android/checkers/a;.h:I // field@0041
+004a9e: 2201 3300 |000d: new-instance v1, Ljava/util/Random; // type@0033
+004aa2: 7010 af00 0100 |000f: invoke-direct {v1}, Ljava/util/Random;.<init>:()V // method@00af
+004aa8: 5b51 4200 |0012: iput-object v1, v5, Lcom/google/android/checkers/a;.i:Ljava/util/Random; // field@0042
+004aac: 5b56 4300 |0014: iput-object v6, v5, Lcom/google/android/checkers/a;.j:Lcom/google/android/checkers/CheckersView; // field@0043
+004ab0: 5c50 4400 |0016: iput-boolean v0, v5, Lcom/google/android/checkers/a;.k:Z // field@0044
+004ab4: 2321 3700 |0018: new-array v1, v2, [I // type@0037
+004ab8: 5b51 4800 |001a: iput-object v1, v5, Lcom/google/android/checkers/a;.o:[I // field@0048
+004abc: 2321 3700 |001c: new-array v1, v2, [I // type@0037
+004ac0: 5b51 3a00 |001e: iput-object v1, v5, Lcom/google/android/checkers/a;.a:[I // field@003a
+004ac4: 2321 3700 |0020: new-array v1, v2, [I // type@0037
+004ac8: 5b51 4900 |0022: iput-object v1, v5, Lcom/google/android/checkers/a;.p:[I // field@0049
+004acc: 2341 3700 |0024: new-array v1, v4, [I // type@0037
+004ad0: 5b51 3b00 |0026: iput-object v1, v5, Lcom/google/android/checkers/a;.b:[I // field@003b
+004ad4: 2341 3700 |0028: new-array v1, v4, [I // type@0037
+004ad8: 5b51 4a00 |002a: iput-object v1, v5, Lcom/google/android/checkers/a;.q:[I // field@004a
+004adc: 5c50 4c00 |002c: iput-boolean v0, v5, Lcom/google/android/checkers/a;.s:Z // field@004c
+004ae0: 1501 1000 |002e: const/high16 v1, #int 1048576 // #10
+004ae4: 2311 3700 |0030: new-array v1, v1, [I // type@0037
+004ae8: 5b51 5200 |0032: iput-object v1, v5, Lcom/google/android/checkers/a;.y:[I // field@0052
+004aec: 1501 1000 |0034: const/high16 v1, #int 1048576 // #10
+004af0: 2311 3900 |0036: new-array v1, v1, [S // type@0039
+004af4: 5b51 5300 |0038: iput-object v1, v5, Lcom/google/android/checkers/a;.z:[S // field@0053
+004af8: 1501 1000 |003a: const/high16 v1, #int 1048576 // #10
+004afc: 2311 3600 |003c: new-array v1, v1, [B // type@0036
+004b00: 5b51 2d00 |003e: iput-object v1, v5, Lcom/google/android/checkers/a;.A:[B // field@002d
+004b04: 5451 5200 |0040: iget-object v1, v5, Lcom/google/android/checkers/a;.y:[I // field@0052
+004b08: 3801 0b00 |0042: if-eqz v1, 004d // +000b
+004b0c: 5451 5300 |0044: iget-object v1, v5, Lcom/google/android/checkers/a;.z:[S // field@0053
+004b10: 3801 0700 |0046: if-eqz v1, 004d // +0007
+004b14: 5451 2d00 |0048: iget-object v1, v5, Lcom/google/android/checkers/a;.A:[B // field@002d
+004b18: 3801 0300 |004a: if-eqz v1, 004d // +0003
+004b1c: 1210 |004c: const/4 v0, #int 1 // #1
+004b1e: 5c50 2e00 |004d: iput-boolean v0, v5, Lcom/google/android/checkers/a;.B:Z // field@002e
+004b22: 6e10 7b00 0500 |004f: invoke-virtual {v5}, Lcom/google/android/checkers/a;.a:()V // method@007b
+004b28: 6e10 8e00 0500 |0052: invoke-virtual {v5}, Lcom/google/android/checkers/a;.start:()V // method@008e
+004b2e: 0e00 |0055: return-void
+004b30: 0d01 |0056: move-exception v1
+004b32: 5b53 5200 |0057: iput-object v3, v5, Lcom/google/android/checkers/a;.y:[I // field@0052
+004b36: 5b53 5300 |0059: iput-object v3, v5, Lcom/google/android/checkers/a;.z:[S // field@0053
+004b3a: 5b53 2d00 |005b: iput-object v3, v5, Lcom/google/android/checkers/a;.A:[B // field@002d
+004b3e: 28e3 |005d: goto 0040 // -001d
+ catches : 1
+ 0x0030 - 0x0040
+ Ljava/lang/Exception; -> 0x0056
+ positions :
+ locals :
+
+ #2 : (in Lcom/google/android/checkers/a;)
+ name : 'a'
+ type : '(II)I'
+ access : 0x001a (PRIVATE STATIC FINAL)
+ code -
+ registers : 3
+ ins : 2
+ outs : 0
+ insns size : 14 16-bit code units
+004b4c: |[004b4c] com.google.android.checkers.a.a:(II)I
+004b5c: 3d01 0600 |0000: if-lez v1, 0006 // +0006
+004b60: d010 757e |0002: add-int/lit16 v0, v1, #int 32373 // #7e75
+004b64: b120 |0004: sub-int/2addr v0, v2
+004b66: 0f00 |0005: return v0
+004b68: 3b01 0600 |0006: if-gez v1, 000c // +0006
+004b6c: d010 8b81 |0008: add-int/lit16 v0, v1, #int -32373 // #818b
+004b70: b020 |000a: add-int/2addr v0, v2
+004b72: 28fa |000b: goto 0005 // -0006
+004b74: 1200 |000c: const/4 v0, #int 0 // #0
+004b76: 28f8 |000d: goto 0005 // -0008
+ catches : (none)
+ positions :
+ locals :
+
+ #3 : (in Lcom/google/android/checkers/a;)
+ name : 'a'
+ type : '(IIIIIZ)I'
+ access : 0x0012 (PRIVATE FINAL)
+ code -
+ registers : 31
+ ins : 7
+ outs : 7
+ insns size : 1296 16-bit code units
+004b78: |[004b78] com.google.android.checkers.a.a:(IIIIIZ)I
+004b88: 0800 1800 |0000: move-object/from16 v0, v24
+004b8c: 5203 2f00 |0002: iget v3, v0, Lcom/google/android/checkers/a;.C:I // field@002f
+004b90: d803 0301 |0004: add-int/lit8 v3, v3, #int 1 // #01
+004b94: 0800 1800 |0006: move-object/from16 v0, v24
+004b98: 5903 2f00 |0008: iput v3, v0, Lcom/google/android/checkers/a;.C:I // field@002f
+004b9c: 0800 1800 |000a: move-object/from16 v0, v24
+004ba0: 5203 4f00 |000c: iget v3, v0, Lcom/google/android/checkers/a;.v:I // field@004f
+004ba4: 3803 0800 |000e: if-eqz v3, 0016 // +0008
+004ba8: 0800 1800 |0010: move-object/from16 v0, v24
+004bac: 5203 5000 |0012: iget v3, v0, Lcom/google/android/checkers/a;.w:I // field@0050
+004bb0: 3903 0700 |0014: if-nez v3, 001b // +0007
+004bb4: 0200 1b00 |0016: move/from16 v0, v27
+004bb8: d004 0c81 |0018: add-int/lit16 v4, v0, #int -32500 // #810c
+004bbc: 0f04 |001a: return v4
+004bbe: 0800 1800 |001b: move-object/from16 v0, v24
+004bc2: 5503 4e00 |001d: iget-boolean v3, v0, Lcom/google/android/checkers/a;.u:Z // field@004e
+004bc6: 3803 0e00 |001f: if-eqz v3, 002d // +000e
+004bca: 0800 1800 |0021: move-object/from16 v0, v24
+004bce: 5203 4f00 |0023: iget v3, v0, Lcom/google/android/checkers/a;.v:I // field@004f
+004bd2: 0800 1800 |0025: move-object/from16 v0, v24
+004bd6: 5204 5000 |0027: iget v4, v0, Lcom/google/android/checkers/a;.w:I // field@0050
+004bda: b043 |0029: add-int/2addr v3, v4
+004bdc: 2b03 c004 0000 |002a: packed-switch v3, 000004ea // +000004c0
+004be2: 1303 4000 |002d: const/16 v3, #int 64 // #40
+004be6: 0200 1c00 |002f: move/from16 v0, v28
+004bea: 3530 b404 |0031: if-ge v0, v3, 04e5 // +04b4
+004bee: 1303 6400 |0033: const/16 v3, #int 100
// #64 +004bf2: 0200 1d00 |0035: move/from16 v0, v29 +004bf6: 3430 ae04 |0037: if-lt v0, v3, 04e5 // +04ae +004bfa: d81d 1d9c |0039: add-int/lit8 v29, v29, #int -100 // #9c +004bfe: d81c 1c01 |003b: add-int/lit8 v28, v28, #int 1 // #01 +004c02: 0207 1c00 |003d: move/from16 v7, v28 +004c06: 0200 1b00 |003f: move/from16 v0, v27 +004c0a: 3570 8103 |0041: if-ge v0, v7, 03c2 // +0381 +004c0e: 0800 1800 |0043: move-object/from16 v0, v24 +004c12: 5503 2e00 |0045: iget-boolean v3, v0, Lcom/google/android/checkers/a;.B:Z // field@002e +004c16: 3803 3400 |0047: if-eqz v3, 007b // +0034 +004c1a: 0800 1800 |0049: move-object/from16 v0, v24 +004c1e: 5203 5100 |004b: iget v3, v0, Lcom/google/android/checkers/a;.x:I // field@0051 +004c22: 1404 ffff 0f00 |004d: const v4, #float 0.000000 // #000fffff +004c28: b534 |0050: and-int/2addr v4, v3 +004c2a: 0800 1800 |0051: move-object/from16 v0, v24 +004c2e: 5405 5200 |0053: iget-object v5, v0, Lcom/google/android/checkers/a;.y:[I // field@0052 +004c32: 4405 0504 |0055: aget v5, v5, v4 +004c36: 3335 d702 |0057: if-ne v5, v3, 032e // +02d7 +004c3a: 0800 1800 |0059: move-object/from16 v0, v24 +004c3e: 5403 2d00 |005b: iget-object v3, v0, Lcom/google/android/checkers/a;.A:[B // field@002d +004c42: 4803 0304 |005d: aget-byte v3, v3, v4 +004c46: dd03 033f |005f: and-int/lit8 v3, v3, #int 63 // #3f +004c4a: 9105 071b |0061: sub-int v5, v7, v27 +004c4e: 3453 cb02 |0063: if-lt v3, v5, 032e // +02cb +004c52: 0800 1800 |0065: move-object/from16 v0, v24 +004c56: 5403 2d00 |0067: iget-object v3, v0, Lcom/google/android/checkers/a;.A:[B // field@002d +004c5a: 4803 0304 |0069: aget-byte v3, v3, v4 +004c5e: d533 c000 |006b: and-int/lit16 v3, v3, #int 192 // #00c0 +004c62: 0800 1800 |006d: move-object/from16 v0, v24 +004c66: 5405 5300 |006f: iget-object v5, v0, Lcom/google/android/checkers/a;.z:[S // field@0053 +004c6a: 4a04 0504 |0071: aget-short v4, v5, v4 +004c6e: 2c03 8104 0000 |0073: sparse-switch v3, 000004f4 // +00000481 +004c74: 1403 3f42 0f00 |0076: const v3, #float 0.000000 // #000f423f +004c7a: 3334 a1ff |0079: if-ne v4, v3, 001a // -005f +004c7e: 0800 1800 |007b: move-object/from16 v0, v24 +004c82: 0201 1b00 |007d: move/from16 v1, v27 +004c86: 0202 1e00 |007f: move/from16 v2, v30 +004c8a: 7030 7500 1002 |0081: invoke-direct {v0, v1, v2}, Lcom/google/android/checkers/a;.a:(IZ)I // method@0075 +004c90: 0a03 |0084: move-result v3 +004c92: 2b03 7904 0000 |0085: packed-switch v3, 000004fe // +00000479 +004c98: 0800 1800 |0088: move-object/from16 v0, v24 +004c9c: 520d 3c00 |008a: iget v13, v0, Lcom/google/android/checkers/a;.c:I // field@003c +004ca0: 0800 1800 |008c: move-object/from16 v0, v24 +004ca4: 520e 4b00 |008e: iget v14, v0, Lcom/google/android/checkers/a;.r:I // field@004b +004ca8: 0800 1800 |0090: move-object/from16 v0, v24 +004cac: 520f 3d00 |0092: iget v15, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004cb0: 0800 1800 |0094: move-object/from16 v0, v24 +004cb4: 5200 3e00 |0096: iget v0, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004cb8: 0210 0000 |0098: move/from16 v16, v0 +004cbc: 0800 1800 |009a: move-object/from16 v0, v24 +004cc0: 5200 3f00 |009c: iget v0, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004cc4: 0211 0000 |009e: move/from16 v17, v0 +004cc8: 0800 1800 |00a0: move-object/from16 v0, v24 +004ccc: 5200 4000 |00a2: iget v0, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004cd0: 0212 0000 |00a4: move/from16 v18, v0 +004cd4: 0800 1800 |00a6: move-object/from16 v0, v24 +004cd8: 5200 4f00 |00a8: iget v0, v0, 
Lcom/google/android/checkers/a;.v:I // field@004f +004cdc: 0213 0000 |00aa: move/from16 v19, v0 +004ce0: 0800 1800 |00ac: move-object/from16 v0, v24 +004ce4: 5200 5000 |00ae: iget v0, v0, Lcom/google/android/checkers/a;.w:I // field@0050 +004ce8: 0214 0000 |00b0: move/from16 v20, v0 +004cec: 0800 1800 |00b2: move-object/from16 v0, v24 +004cf0: 5200 5100 |00b4: iget v0, v0, Lcom/google/android/checkers/a;.x:I // field@0051 +004cf4: 0215 0000 |00b6: move/from16 v21, v0 +004cf8: 2b0d 4e04 0000 |00b8: packed-switch v13, 00000506 // +0000044e +004cfe: 1208 |00bb: const/4 v8, #int 0 // #0 +004d00: 130a 4000 |00bc: const/16 v10, #int 64 // #40 +004d04: 1203 |00be: const/4 v3, #int 0 // #0 +004d06: 013c |00bf: move v12, v3 +004d08: 020b 1900 |00c0: move/from16 v11, v25 +004d0c: 34dc 9602 |00c2: if-lt v12, v13, 0358 // +0296 +004d10: 0800 1800 |00c4: move-object/from16 v0, v24 +004d14: 5503 2e00 |00c6: iget-boolean v3, v0, Lcom/google/android/checkers/a;.B:Z // field@002e +004d18: 3803 0900 |00c8: if-eqz v3, 00d1 // +0009 +004d1c: 0800 1800 |00ca: move-object/from16 v0, v24 +004d20: 0201 1b00 |00cc: move/from16 v1, v27 +004d24: 705b 7d00 10a7 |00ce: invoke-direct {v0, v1, v7, v10, v11}, Lcom/google/android/checkers/a;.a:(IIII)V // method@007d +004d2a: 01b4 |00d1: move v4, v11 +004d2c: 2900 48ff |00d2: goto/16 001a // -00b8 +004d30: 0800 1800 |00d4: move-object/from16 v0, v24 +004d34: 5203 3d00 |00d6: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004d38: 3803 3800 |00d8: if-eqz v3, 0110 // +0038 +004d3c: 0800 1800 |00da: move-object/from16 v0, v24 +004d40: 5203 3f00 |00dc: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004d44: 3803 3200 |00de: if-eqz v3, 0110 // +0032 +004d48: 0800 1800 |00e0: move-object/from16 v0, v24 +004d4c: 5203 3d00 |00e2: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004d50: 0800 1800 |00e4: move-object/from16 v0, v24 +004d54: 5205 3f00 |00e6: iget v5, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004d58: 381e 1b00 |00e8: if-eqz v30, 0103 // +001b +004d5c: 7110 9f00 0300 |00ea: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004d62: 0a04 |00ed: move-result v4 +004d64: 7110 9f00 0500 |00ee: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004d6a: 0a03 |00f1: move-result v3 +004d6c: d803 03fc |00f2: add-int/lit8 v3, v3, #int -4 // #fc +004d70: 6205 5c00 |00f4: sget-object v5, Lcom/google/android/checkers/g;.d:[B // field@005c +004d74: da03 031c |00f6: mul-int/lit8 v3, v3, #int 28 // #1c +004d78: b043 |00f8: add-int/2addr v3, v4 +004d7a: 4803 0503 |00f9: aget-byte v3, v5, v3 +004d7e: 0200 1b00 |00fb: move/from16 v0, v27 +004d82: 7120 7300 0300 |00fd: invoke-static {v3, v0}, Lcom/google/android/checkers/a;.a:(II)I // method@0073 +004d88: 0a04 |0100: move-result v4 +004d8a: 2900 19ff |0101: goto/16 001a // -00e7 +004d8e: 7110 9f00 0500 |0103: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004d94: 0a04 |0106: move-result v4 +004d96: d904 041f |0107: rsub-int/lit8 v4, v4, #int 31 // #1f +004d9a: 7110 9f00 0300 |0109: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004da0: 0a03 |010c: move-result v3 +004da2: d903 031f |010d: rsub-int/lit8 v3, v3, #int 31 // #1f +004da6: 28e3 |010f: goto 00f2 // -001d +004da8: 0800 1800 |0110: move-object/from16 v0, v24 +004dac: 5203 3f00 |0112: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004db0: 3803 1200 |0114: if-eqz v3, 0126 // 
+0012 +004db4: 0800 1800 |0116: move-object/from16 v0, v24 +004db8: 5203 3e00 |0118: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004dbc: 0800 1800 |011a: move-object/from16 v0, v24 +004dc0: 5204 3f00 |011c: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004dc4: 1205 |011e: const/4 v5, #int 0 // #0 +004dc6: 0200 1e00 |011f: move/from16 v0, v30 +004dca: 7140 7900 3054 |0121: invoke-static {v0, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(ZIIZ)I // method@0079 +004dd0: 0a03 |0124: move-result v3 +004dd2: 28d6 |0125: goto 00fb // -002a +004dd4: 0800 1800 |0126: move-object/from16 v0, v24 +004dd8: 5203 3d00 |0128: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004ddc: 3803 1500 |012a: if-eqz v3, 013f // +0015 +004de0: 381e 1100 |012c: if-eqz v30, 013d // +0011 +004de4: 1203 |012e: const/4 v3, #int 0 // #0 +004de6: 0800 1800 |012f: move-object/from16 v0, v24 +004dea: 5204 4000 |0131: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004dee: 0800 1800 |0133: move-object/from16 v0, v24 +004df2: 5205 3d00 |0135: iget v5, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004df6: 1216 |0137: const/4 v6, #int 1 // #1 +004df8: 7140 7900 4365 |0138: invoke-static {v3, v4, v5, v6}, Lcom/google/android/checkers/a;.a:(ZIIZ)I // method@0079 +004dfe: 0a03 |013b: move-result v3 +004e00: 28bf |013c: goto 00fb // -0041 +004e02: 1213 |013d: const/4 v3, #int 1 // #1 +004e04: 28f1 |013e: goto 012f // -000f +004e06: 0800 1800 |013f: move-object/from16 v0, v24 +004e0a: 5203 3e00 |0141: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004e0e: 0800 1800 |0143: move-object/from16 v0, v24 +004e12: 5205 4000 |0145: iget v5, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004e16: 381e 1a00 |0147: if-eqz v30, 0161 // +001a +004e1a: 7110 9f00 0300 |0149: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004e20: 0a04 |014c: move-result v4 +004e22: 7110 9f00 0500 |014d: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004e28: 0a03 |0150: move-result v3 +004e2a: 1305 1000 |0151: const/16 v5, #int 16 // #10 +004e2e: 3454 0600 |0153: if-lt v4, v5, 0159 // +0006 +004e32: d904 041f |0155: rsub-int/lit8 v4, v4, #int 31 // #1f +004e36: d903 031f |0157: rsub-int/lit8 v3, v3, #int 31 // #1f +004e3a: 6205 5900 |0159: sget-object v5, Lcom/google/android/checkers/g;.a:[B // field@0059 +004e3e: da03 0310 |015b: mul-int/lit8 v3, v3, #int 16 // #10 +004e42: b043 |015d: add-int/2addr v3, v4 +004e44: 4803 0503 |015e: aget-byte v3, v5, v3 +004e48: 289b |0160: goto 00fb // -0065 +004e4a: 7110 9f00 0500 |0161: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004e50: 0a04 |0164: move-result v4 +004e52: 7110 9f00 0300 |0165: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +004e58: 0a03 |0168: move-result v3 +004e5a: 28e8 |0169: goto 0151 // -0018 +004e5c: 0800 1800 |016a: move-object/from16 v0, v24 +004e60: 5203 4f00 |016c: iget v3, v0, Lcom/google/android/checkers/a;.v:I // field@004f +004e64: 1214 |016e: const/4 v4, #int 1 // #1 +004e66: 3343 a000 |016f: if-ne v3, v4, 020f // +00a0 +004e6a: 0800 1800 |0171: move-object/from16 v0, v24 +004e6e: 5203 3d00 |0173: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004e72: 3803 5200 |0175: if-eqz v3, 01c7 // +0052 +004e76: 0800 1800 |0177: move-object/from16 v0, v24 +004e7a: 5203 4000 |0179: iget v3, v0, Lcom/google/android/checkers/a;.g:I // field@0040 
+004e7e: 3903 1c00 |017b: if-nez v3, 0197 // +001c +004e82: 381e 1800 |017d: if-eqz v30, 0195 // +0018 +004e86: 1203 |017f: const/4 v3, #int 0 // #0 +004e88: 0800 1800 |0180: move-object/from16 v0, v24 +004e8c: 5204 3f00 |0182: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004e90: 0800 1800 |0184: move-object/from16 v0, v24 +004e94: 5205 3d00 |0186: iget v5, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004e98: 1216 |0188: const/4 v6, #int 1 // #1 +004e9a: 7140 8c00 4365 |0189: invoke-static {v3, v4, v5, v6}, Lcom/google/android/checkers/a;.d:(ZIIZ)I // method@008c +004ea0: 0a03 |018c: move-result v3 +004ea2: 0200 1b00 |018d: move/from16 v0, v27 +004ea6: 7120 7300 0300 |018f: invoke-static {v3, v0}, Lcom/google/android/checkers/a;.a:(II)I // method@0073 +004eac: 0a04 |0192: move-result v4 +004eae: 2900 87fe |0193: goto/16 001a // -0179 +004eb2: 1213 |0195: const/4 v3, #int 1 // #1 +004eb4: 28ea |0196: goto 0180 // -0016 +004eb6: 0800 1800 |0197: move-object/from16 v0, v24 +004eba: 5203 3f00 |0199: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004ebe: 3803 1900 |019b: if-eqz v3, 01b4 // +0019 +004ec2: 381e 1500 |019d: if-eqz v30, 01b2 // +0015 +004ec6: 1203 |019f: const/4 v3, #int 0 // #0 +004ec8: 0800 1800 |01a0: move-object/from16 v0, v24 +004ecc: 5204 3f00 |01a2: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004ed0: 0800 1800 |01a4: move-object/from16 v0, v24 +004ed4: 5205 4000 |01a6: iget v5, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004ed8: 0800 1800 |01a8: move-object/from16 v0, v24 +004edc: 5206 3d00 |01aa: iget v6, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004ee0: 1217 |01ac: const/4 v7, #int 1 // #1 +004ee2: 7157 8200 4365 |01ad: invoke-static {v3, v4, v5, v6, v7}, Lcom/google/android/checkers/a;.b:(ZIIIZ)I // method@0082 +004ee8: 0a03 |01b0: move-result v3 +004eea: 28dc |01b1: goto 018d // -0024 +004eec: 1213 |01b2: const/4 v3, #int 1 // #1 +004eee: 28ed |01b3: goto 01a0 // -0013 +004ef0: 381e 1100 |01b4: if-eqz v30, 01c5 // +0011 +004ef4: 1203 |01b6: const/4 v3, #int 0 // #0 +004ef6: 0800 1800 |01b7: move-object/from16 v0, v24 +004efa: 5204 4000 |01b9: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004efe: 0800 1800 |01bb: move-object/from16 v0, v24 +004f02: 5205 3d00 |01bd: iget v5, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004f06: 1216 |01bf: const/4 v6, #int 1 // #1 +004f08: 7140 8300 4365 |01c0: invoke-static {v3, v4, v5, v6}, Lcom/google/android/checkers/a;.b:(ZIIZ)I // method@0083 +004f0e: 0a03 |01c3: move-result v3 +004f10: 28c9 |01c4: goto 018d // -0037 +004f12: 1213 |01c5: const/4 v3, #int 1 // #1 +004f14: 28f1 |01c6: goto 01b7 // -000f +004f16: 0800 1800 |01c7: move-object/from16 v0, v24 +004f1a: 5203 4000 |01c9: iget v3, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004f1e: 3903 1500 |01cb: if-nez v3, 01e0 // +0015 +004f22: 381e 1100 |01cd: if-eqz v30, 01de // +0011 +004f26: 1203 |01cf: const/4 v3, #int 0 // #0 +004f28: 0800 1800 |01d0: move-object/from16 v0, v24 +004f2c: 5204 3f00 |01d2: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004f30: 0800 1800 |01d4: move-object/from16 v0, v24 +004f34: 5205 3e00 |01d6: iget v5, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004f38: 1216 |01d8: const/4 v6, #int 1 // #1 +004f3a: 7140 8900 4365 |01d9: invoke-static {v3, v4, v5, v6}, Lcom/google/android/checkers/a;.c:(ZIIZ)I // method@0089 +004f40: 0a03 |01dc: move-result v3 +004f42: 28b0 |01dd: goto 018d // -0050 +004f44: 1213 
|01de: const/4 v3, #int 1 // #1 +004f46: 28f1 |01df: goto 01d0 // -000f +004f48: 0800 1800 |01e0: move-object/from16 v0, v24 +004f4c: 5203 3f00 |01e2: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004f50: 3803 1900 |01e4: if-eqz v3, 01fd // +0019 +004f54: 381e 1500 |01e6: if-eqz v30, 01fb // +0015 +004f58: 1203 |01e8: const/4 v3, #int 0 // #0 +004f5a: 0800 1800 |01e9: move-object/from16 v0, v24 +004f5e: 5204 3f00 |01eb: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004f62: 0800 1800 |01ed: move-object/from16 v0, v24 +004f66: 5205 4000 |01ef: iget v5, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004f6a: 0800 1800 |01f1: move-object/from16 v0, v24 +004f6e: 5206 3e00 |01f3: iget v6, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004f72: 1217 |01f5: const/4 v7, #int 1 // #1 +004f74: 7157 7800 4365 |01f6: invoke-static {v3, v4, v5, v6, v7}, Lcom/google/android/checkers/a;.a:(ZIIIZ)I // method@0078 +004f7a: 0a03 |01f9: move-result v3 +004f7c: 2893 |01fa: goto 018d // -006d +004f7e: 1213 |01fb: const/4 v3, #int 1 // #1 +004f80: 28ed |01fc: goto 01e9 // -0013 +004f82: 381e 1000 |01fd: if-eqz v30, 020d // +0010 +004f86: 1203 |01ff: const/4 v3, #int 0 // #0 +004f88: 0800 1800 |0200: move-object/from16 v0, v24 +004f8c: 5204 4000 |0202: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +004f90: 0800 1800 |0204: move-object/from16 v0, v24 +004f94: 5205 3e00 |0206: iget v5, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004f98: 7130 7700 4305 |0208: invoke-static {v3, v4, v5}, Lcom/google/android/checkers/a;.a:(ZII)I // method@0077 +004f9e: 0a03 |020b: move-result v3 +004fa0: 2881 |020c: goto 018d // -007f +004fa2: 1213 |020d: const/4 v3, #int 1 // #1 +004fa4: 28f2 |020e: goto 0200 // -000e +004fa6: 0800 1800 |020f: move-object/from16 v0, v24 +004faa: 5203 3f00 |0211: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004fae: 3803 4500 |0213: if-eqz v3, 0258 // +0045 +004fb2: 0800 1800 |0215: move-object/from16 v0, v24 +004fb6: 5203 3e00 |0217: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004fba: 3903 1300 |0219: if-nez v3, 022c // +0013 +004fbe: 0800 1800 |021b: move-object/from16 v0, v24 +004fc2: 5203 3d00 |021d: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004fc6: 0800 1800 |021f: move-object/from16 v0, v24 +004fca: 5204 3f00 |0221: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +004fce: 1205 |0223: const/4 v5, #int 0 // #0 +004fd0: 0200 1e00 |0224: move/from16 v0, v30 +004fd4: 7140 8c00 3054 |0226: invoke-static {v0, v3, v4, v5}, Lcom/google/android/checkers/a;.d:(ZIIZ)I // method@008c +004fda: 0a03 |0229: move-result v3 +004fdc: 2900 63ff |022a: goto/16 018d // -009d +004fe0: 0800 1800 |022c: move-object/from16 v0, v24 +004fe4: 5203 3d00 |022e: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004fe8: 3803 1700 |0230: if-eqz v3, 0247 // +0017 +004fec: 0800 1800 |0232: move-object/from16 v0, v24 +004ff0: 5203 3d00 |0234: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +004ff4: 0800 1800 |0236: move-object/from16 v0, v24 +004ff8: 5204 3e00 |0238: iget v4, v0, Lcom/google/android/checkers/a;.e:I // field@003e +004ffc: 0800 1800 |023a: move-object/from16 v0, v24 +005000: 5205 3f00 |023c: iget v5, v0, Lcom/google/android/checkers/a;.f:I // field@003f +005004: 1206 |023e: const/4 v6, #int 0 // #0 +005006: 0200 1e00 |023f: move/from16 v0, v30 +00500a: 7156 8200 3054 |0241: invoke-static {v0, v3, v4, v5, v6}, 
Lcom/google/android/checkers/a;.b:(ZIIIZ)I // method@0082 +005010: 0a03 |0244: move-result v3 +005012: 2900 48ff |0245: goto/16 018d // -00b8 +005016: 0800 1800 |0247: move-object/from16 v0, v24 +00501a: 5203 3e00 |0249: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +00501e: 0800 1800 |024b: move-object/from16 v0, v24 +005022: 5204 3f00 |024d: iget v4, v0, Lcom/google/android/checkers/a;.f:I // field@003f +005026: 1205 |024f: const/4 v5, #int 0 // #0 +005028: 0200 1e00 |0250: move/from16 v0, v30 +00502c: 7140 8300 3054 |0252: invoke-static {v0, v3, v4, v5}, Lcom/google/android/checkers/a;.b:(ZIIZ)I // method@0083 +005032: 0a03 |0255: move-result v3 +005034: 2900 37ff |0256: goto/16 018d // -00c9 +005038: 0800 1800 |0258: move-object/from16 v0, v24 +00503c: 5203 3e00 |025a: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005040: 3903 1300 |025c: if-nez v3, 026f // +0013 +005044: 0800 1800 |025e: move-object/from16 v0, v24 +005048: 5203 3d00 |0260: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +00504c: 0800 1800 |0262: move-object/from16 v0, v24 +005050: 5204 4000 |0264: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005054: 1205 |0266: const/4 v5, #int 0 // #0 +005056: 0200 1e00 |0267: move/from16 v0, v30 +00505a: 7140 8900 3054 |0269: invoke-static {v0, v3, v4, v5}, Lcom/google/android/checkers/a;.c:(ZIIZ)I // method@0089 +005060: 0a03 |026c: move-result v3 +005062: 2900 20ff |026d: goto/16 018d // -00e0 +005066: 0800 1800 |026f: move-object/from16 v0, v24 +00506a: 5203 3d00 |0271: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +00506e: 3803 1700 |0273: if-eqz v3, 028a // +0017 +005072: 0800 1800 |0275: move-object/from16 v0, v24 +005076: 5203 3d00 |0277: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +00507a: 0800 1800 |0279: move-object/from16 v0, v24 +00507e: 5204 3e00 |027b: iget v4, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005082: 0800 1800 |027d: move-object/from16 v0, v24 +005086: 5205 4000 |027f: iget v5, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +00508a: 1206 |0281: const/4 v6, #int 0 // #0 +00508c: 0200 1e00 |0282: move/from16 v0, v30 +005090: 7156 7800 3054 |0284: invoke-static {v0, v3, v4, v5, v6}, Lcom/google/android/checkers/a;.a:(ZIIIZ)I // method@0078 +005096: 0a03 |0287: move-result v3 +005098: 2900 05ff |0288: goto/16 018d // -00fb +00509c: 0800 1800 |028a: move-object/from16 v0, v24 +0050a0: 5203 3e00 |028c: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +0050a4: 0800 1800 |028e: move-object/from16 v0, v24 +0050a8: 5204 4000 |0290: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +0050ac: 0200 1e00 |0292: move/from16 v0, v30 +0050b0: 7130 7700 3004 |0294: invoke-static {v0, v3, v4}, Lcom/google/android/checkers/a;.a:(ZII)I // method@0077 +0050b6: 0a03 |0297: move-result v3 +0050b8: 2900 f5fe |0298: goto/16 018d // -010b +0050bc: 0800 1800 |029a: move-object/from16 v0, v24 +0050c0: 5203 4f00 |029c: iget v3, v0, Lcom/google/android/checkers/a;.v:I // field@004f +0050c4: 1224 |029e: const/4 v4, #int 2 // #2 +0050c6: 3343 8efd |029f: if-ne v3, v4, 002d // -0272 +0050ca: 0800 1800 |02a1: move-object/from16 v0, v24 +0050ce: 5203 5000 |02a3: iget v3, v0, Lcom/google/android/checkers/a;.w:I // field@0050 +0050d2: 1224 |02a5: const/4 v4, #int 2 // #2 +0050d4: 3343 87fd |02a6: if-ne v3, v4, 002d // -0279 +0050d8: 0800 1800 |02a8: move-object/from16 v0, v24 +0050dc: 5203 3d00 |02aa: iget v3, v0, 
Lcom/google/android/checkers/a;.d:I // field@003d +0050e0: 3903 81fd |02ac: if-nez v3, 002d // -027f +0050e4: 0800 1800 |02ae: move-object/from16 v0, v24 +0050e8: 5203 3f00 |02b0: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +0050ec: 3903 7bfd |02b2: if-nez v3, 002d // -0285 +0050f0: 0800 1800 |02b4: move-object/from16 v0, v24 +0050f4: 5203 3e00 |02b6: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +0050f8: 0800 1800 |02b8: move-object/from16 v0, v24 +0050fc: 5207 4000 |02ba: iget v7, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005100: 381e 4000 |02bc: if-eqz v30, 02fc // +0040 +005104: 7110 9f00 0300 |02be: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +00510a: 0a06 |02c1: move-result v6 +00510c: d804 03ff |02c2: add-int/lit8 v4, v3, #int -1 // #ff +005110: b543 |02c4: and-int/2addr v3, v4 +005112: 7110 9f00 0300 |02c5: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005118: 0a05 |02c8: move-result v5 +00511a: 7110 9f00 0700 |02c9: invoke-static {v7}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005120: 0a04 |02cc: move-result v4 +005122: d803 07ff |02cd: add-int/lit8 v3, v7, #int -1 // #ff +005126: b573 |02cf: and-int/2addr v3, v7 +005128: 7110 9f00 0300 |02d0: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +00512e: 0a03 |02d3: move-result v3 +005130: 0216 0300 |02d4: move/from16 v22, v3 +005134: 0153 |02d6: move v3, v5 +005136: 0205 1600 |02d7: move/from16 v5, v22 +00513a: 0217 0400 |02d9: move/from16 v23, v4 +00513e: 0164 |02db: move v4, v6 +005140: 0206 1700 |02dc: move/from16 v6, v23 +005144: 3534 3f00 |02de: if-ge v4, v3, 031d // +003f +005148: 6207 3800 |02e0: sget-object v7, Lcom/google/android/checkers/a;.L:[I // field@0038 +00514c: 4403 0703 |02e2: aget v3, v7, v3 +005150: b043 |02e4: add-int/2addr v3, v4 +005152: 0134 |02e5: move v4, v3 +005154: 3556 3e00 |02e6: if-ge v6, v5, 0324 // +003e +005158: 6203 3800 |02e8: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +00515c: 4403 0305 |02ea: aget v3, v3, v5 +005160: b063 |02ec: add-int/2addr v3, v6 +005162: 6205 6900 |02ed: sget-object v5, Lcom/google/android/checkers/g;.q:[B // field@0069 +005166: d244 f001 |02ef: mul-int/lit16 v4, v4, #int 496 // #01f0 +00516a: b043 |02f1: add-int/2addr v3, v4 +00516c: 4803 0503 |02f2: aget-byte v3, v5, v3 +005170: 0200 1b00 |02f4: move/from16 v0, v27 +005174: 7120 7300 0300 |02f6: invoke-static {v3, v0}, Lcom/google/android/checkers/a;.a:(II)I // method@0073 +00517a: 0a04 |02f9: move-result v4 +00517c: 2900 20fd |02fa: goto/16 001a // -02e0 +005180: 7110 9f00 0700 |02fc: invoke-static {v7}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005186: 0a06 |02ff: move-result v6 +005188: d804 07ff |0300: add-int/lit8 v4, v7, #int -1 // #ff +00518c: b574 |0302: and-int/2addr v4, v7 +00518e: 7110 9f00 0400 |0303: invoke-static {v4}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005194: 0a05 |0306: move-result v5 +005196: 7110 9f00 0300 |0307: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +00519c: 0a04 |030a: move-result v4 +00519e: d807 03ff |030b: add-int/lit8 v7, v3, #int -1 // #ff +0051a2: b573 |030d: and-int/2addr v3, v7 +0051a4: 7110 9f00 0300 |030e: invoke-static {v3}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0051aa: 0a03 |0311: move-result v3 +0051ac: 0216 0300 |0312: move/from16 v22, v3 +0051b0: 0153 |0314: move v3, v5 
+0051b2: 0205 1600 |0315: move/from16 v5, v22 +0051b6: 0217 0400 |0317: move/from16 v23, v4 +0051ba: 0164 |0319: move v4, v6 +0051bc: 0206 1700 |031a: move/from16 v6, v23 +0051c0: 28c2 |031c: goto 02de // -003e +0051c2: 6207 3800 |031d: sget-object v7, Lcom/google/android/checkers/a;.L:[I // field@0038 +0051c6: 4404 0704 |031f: aget v4, v7, v4 +0051ca: b043 |0321: add-int/2addr v3, v4 +0051cc: 0134 |0322: move v4, v3 +0051ce: 28c3 |0323: goto 02e6 // -003d +0051d0: 6203 3800 |0324: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +0051d4: 4403 0306 |0326: aget v3, v3, v6 +0051d8: b053 |0328: add-int/2addr v3, v5 +0051da: 28c4 |0329: goto 02ed // -003c +0051dc: 0200 1900 |032a: move/from16 v0, v25 +0051e0: 3704 4afd |032c: if-le v4, v0, 0076 // -02b6 +0051e4: 1404 3f42 0f00 |032e: const v4, #float 0.000000 // #000f423f +0051ea: 2900 45fd |0331: goto/16 0076 // -02bb +0051ee: 0200 1a00 |0333: move/from16 v0, v26 +0051f2: 3404 f9ff |0335: if-lt v4, v0, 032e // -0007 +0051f6: 2900 3ffd |0337: goto/16 0076 // -02c1 +0051fa: 0200 1b00 |0339: move/from16 v0, v27 +0051fe: d004 0c81 |033b: add-int/lit16 v4, v0, #int -32500 // #810c +005202: 2900 ddfc |033d: goto/16 001a // -0323 +005206: 9103 071b |033f: sub-int v3, v7, v27 +00520a: 1214 |0341: const/4 v4, #int 1 // #1 +00520c: 3643 46fd |0342: if-gt v3, v4, 0088 // -02ba +005210: 1303 4000 |0344: const/16 v3, #int 64 // #40 +005214: 3537 42fd |0346: if-ge v7, v3, 0088 // -02be +005218: d807 0701 |0348: add-int/lit8 v7, v7, #int 1 // #01 +00521c: 2900 3efd |034a: goto/16 0088 // -02c2 +005220: d808 1d1e |034c: add-int/lit8 v8, v29, #int 30 // #1e +005224: 2900 6efd |034e: goto/16 00bc // -0292 +005228: d808 1d0a |0350: add-int/lit8 v8, v29, #int 10 // #0a +00522c: 2900 6afd |0352: goto/16 00bc // -0296 +005230: d808 1d05 |0354: add-int/lit8 v8, v29, #int 5 // #05 +005234: 2900 66fd |0356: goto/16 00bc // -029a +005238: 9003 0e0c |0358: add-int v3, v14, v12 +00523c: 0800 1800 |035a: move-object/from16 v0, v24 +005240: 7020 8500 3000 |035c: invoke-direct {v0, v3}, Lcom/google/android/checkers/a;.b:(I)V // method@0085 +005246: 0200 1a00 |035f: move/from16 v0, v26 +00524a: 7b04 |0361: neg-int v4, v0 +00524c: 7bb5 |0362: neg-int v5, v11 +00524e: d806 1b01 |0363: add-int/lit8 v6, v27, #int 1 // #01 +005252: 381e 3b00 |0365: if-eqz v30, 03a0 // +003b +005256: 1209 |0367: const/4 v9, #int 0 // #0 +005258: 0803 1800 |0368: move-object/from16 v3, v24 +00525c: 7607 7400 0300 |036a: invoke-direct/range {v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/a;.a:(IIIIIZ)I // method@0074 +005262: 0a03 |036d: move-result v3 +005264: 7b34 |036e: neg-int v4, v3 +005266: 0800 1800 |036f: move-object/from16 v0, v24 +00526a: 590f 3d00 |0371: iput v15, v0, Lcom/google/android/checkers/a;.d:I // field@003d +00526e: 0200 1000 |0373: move/from16 v0, v16 +005272: 0801 1800 |0375: move-object/from16 v1, v24 +005276: 5910 3e00 |0377: iput v0, v1, Lcom/google/android/checkers/a;.e:I // field@003e +00527a: 0200 1100 |0379: move/from16 v0, v17 +00527e: 0801 1800 |037b: move-object/from16 v1, v24 +005282: 5910 3f00 |037d: iput v0, v1, Lcom/google/android/checkers/a;.f:I // field@003f +005286: 0200 1200 |037f: move/from16 v0, v18 +00528a: 0801 1800 |0381: move-object/from16 v1, v24 +00528e: 5910 4000 |0383: iput v0, v1, Lcom/google/android/checkers/a;.g:I // field@0040 +005292: 0200 1300 |0385: move/from16 v0, v19 +005296: 0801 1800 |0387: move-object/from16 v1, v24 +00529a: 5910 4f00 |0389: iput v0, v1, Lcom/google/android/checkers/a;.v:I // field@004f 
+00529e: 0200 1400 |038b: move/from16 v0, v20 +0052a2: 0801 1800 |038d: move-object/from16 v1, v24 +0052a6: 5910 5000 |038f: iput v0, v1, Lcom/google/android/checkers/a;.w:I // field@0050 +0052aa: 0200 1500 |0391: move/from16 v0, v21 +0052ae: 0801 1800 |0393: move-object/from16 v1, v24 +0052b2: 5910 5100 |0395: iput v0, v1, Lcom/google/android/checkers/a;.x:I // field@0051 +0052b6: 0800 1800 |0397: move-object/from16 v0, v24 +0052ba: 5503 4600 |0399: iget-boolean v3, v0, Lcom/google/android/checkers/a;.m:Z // field@0046 +0052be: 3803 0700 |039b: if-eqz v3, 03a2 // +0007 +0052c2: 1204 |039d: const/4 v4, #int 0 // #0 +0052c4: 2900 7cfc |039e: goto/16 001a // -0384 +0052c8: 1219 |03a0: const/4 v9, #int 1 // #1 +0052ca: 28c7 |03a1: goto 0368 // -0039 +0052cc: 37b4 3f01 |03a2: if-le v4, v11, 04e1 // +013f +0052d0: 0200 1a00 |03a4: move/from16 v0, v26 +0052d4: 3404 1300 |03a6: if-lt v4, v0, 03b9 // +0013 +0052d8: 0800 1800 |03a8: move-object/from16 v0, v24 +0052dc: 5503 2e00 |03aa: iget-boolean v3, v0, Lcom/google/android/checkers/a;.B:Z // field@002e +0052e0: 3803 6efc |03ac: if-eqz v3, 001a // -0392 +0052e4: 1303 8000 |03ae: const/16 v3, #int 128 // #80 +0052e8: 0800 1800 |03b0: move-object/from16 v0, v24 +0052ec: 0201 1b00 |03b2: move/from16 v1, v27 +0052f0: 7054 7d00 1037 |03b4: invoke-direct {v0, v1, v7, v3, v4}, Lcom/google/android/checkers/a;.a:(IIII)V // method@007d +0052f6: 2900 63fc |03b7: goto/16 001a // -039d +0052fa: 1303 c000 |03b9: const/16 v3, #int 192 // #c0 +0052fe: d805 0c01 |03bb: add-int/lit8 v5, v12, #int 1 // #01 +005302: 015c |03bd: move v12, v5 +005304: 013a |03be: move v10, v3 +005306: 014b |03bf: move v11, v4 +005308: 2900 02fd |03c0: goto/16 00c2 // -02fe +00530c: 0800 1800 |03c2: move-object/from16 v0, v24 +005310: 5203 4700 |03c4: iget v3, v0, Lcom/google/android/checkers/a;.n:I // field@0047 +005314: d804 0301 |03c6: add-int/lit8 v4, v3, #int 1 // #01 +005318: 0800 1800 |03c8: move-object/from16 v0, v24 +00531c: 5904 4700 |03ca: iput v4, v0, Lcom/google/android/checkers/a;.n:I // field@0047 +005320: 1304 8813 |03cc: const/16 v4, #int 5000 // #1388 +005324: 3743 2100 |03ce: if-le v3, v4, 03ef // +0021 +005328: 1203 |03d0: const/4 v3, #int 0 // #0 +00532a: 0800 1800 |03d1: move-object/from16 v0, v24 +00532e: 5903 4700 |03d3: iput v3, v0, Lcom/google/android/checkers/a;.n:I // field@0047 +005332: 7100 ab00 0000 |03d5: invoke-static {}, Ljava/lang/System;.currentTimeMillis:()J // method@00ab +005338: 0b03 |03d8: move-result-wide v3 +00533a: 0800 1800 |03d9: move-object/from16 v0, v24 +00533e: 5305 4500 |03db: iget-wide v5, v0, Lcom/google/android/checkers/a;.l:J // field@0045 +005342: 0800 1800 |03dd: move-object/from16 v0, v24 +005346: 5207 4100 |03df: iget v7, v0, Lcom/google/android/checkers/a;.h:I // field@0041 +00534a: 8177 |03e1: int-to-long v7, v7 +00534c: bb75 |03e2: add-long/2addr v5, v7 +00534e: 3103 0305 |03e3: cmp-long v3, v3, v5 +005352: 3d03 0a00 |03e5: if-lez v3, 03ef // +000a +005356: 1213 |03e7: const/4 v3, #int 1 // #1 +005358: 0800 1800 |03e8: move-object/from16 v0, v24 +00535c: 5c03 4600 |03ea: iput-boolean v3, v0, Lcom/google/android/checkers/a;.m:Z // field@0046 +005360: 1204 |03ec: const/4 v4, #int 0 // #0 +005362: 2900 2dfc |03ed: goto/16 001a // -03d3 +005366: 1204 |03ef: const/4 v4, #int 0 // #0 +005368: 0800 1800 |03f0: move-object/from16 v0, v24 +00536c: 5203 3d00 |03f2: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +005370: 3903 ad00 |03f4: if-nez v3, 04a1 // +00ad +005374: 0800 1800 |03f6: move-object/from16 v0, v24 
+005378: 5203 3e00 |03f8: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +00537c: 0145 |03fa: move v5, v4 +00537e: 3903 ad00 |03fb: if-nez v3, 04a8 // +00ad +005382: 1204 |03fd: const/4 v4, #int 0 // #0 +005384: 0800 1800 |03fe: move-object/from16 v0, v24 +005388: 5203 3f00 |0400: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +00538c: 3903 ae00 |0402: if-nez v3, 04b0 // +00ae +005390: 0800 1800 |0404: move-object/from16 v0, v24 +005394: 5203 4000 |0406: iget v3, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005398: 3903 af00 |0408: if-nez v3, 04b7 // +00af +00539c: 3345 b400 |040a: if-ne v5, v4, 04be // +00b4 +0053a0: 1203 |040c: const/4 v3, #int 0 // #0 +0053a2: 3545 bd00 |040d: if-ge v5, v4, 04ca // +00bd +0053a6: 0800 1800 |040f: move-object/from16 v0, v24 +0053aa: 5204 3e00 |0411: iget v4, v0, Lcom/google/android/checkers/a;.e:I // field@003e +0053ae: 1405 1100 0088 |0413: const v5, #float -0.000000 // #88000011 +0053b4: b554 |0416: and-int/2addr v4, v5 +0053b6: 3804 0900 |0417: if-eqz v4, 0420 // +0009 +0053ba: 7110 9e00 0400 |0419: invoke-static {v4}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +0053c0: 0a04 |041c: move-result v4 +0053c2: e004 0403 |041d: shl-int/lit8 v4, v4, #int 3 // #03 +0053c6: b043 |041f: add-int/2addr v3, v4 +0053c8: 0800 1800 |0420: move-object/from16 v0, v24 +0053cc: 5204 3d00 |0422: iget v4, v0, Lcom/google/android/checkers/a;.d:I // field@003d +0053d0: 0800 1800 |0424: move-object/from16 v0, v24 +0053d4: 5205 3e00 |0426: iget v5, v0, Lcom/google/android/checkers/a;.e:I // field@003e +0053d8: b654 |0428: or-int/2addr v4, v5 +0053da: 0800 1800 |0429: move-object/from16 v0, v24 +0053de: 5205 3f00 |042b: iget v5, v0, Lcom/google/android/checkers/a;.f:I // field@003f +0053e2: 0800 1800 |042d: move-object/from16 v0, v24 +0053e6: 5206 4000 |042f: iget v6, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +0053ea: b665 |0431: or-int/2addr v5, v6 +0053ec: 0800 1800 |0432: move-object/from16 v0, v24 +0053f0: 5206 3f00 |0434: iget v6, v0, Lcom/google/android/checkers/a;.f:I // field@003f +0053f4: 3806 0900 |0436: if-eqz v6, 043f // +0009 +0053f8: dd06 0405 |0438: and-int/lit8 v6, v4, #int 5 // #05 +0053fc: 1257 |043a: const/4 v7, #int 5 // #5 +0053fe: 3376 0400 |043b: if-ne v6, v7, 043f // +0004 +005402: d803 030c |043d: add-int/lit8 v3, v3, #int 12 // #0c +005406: 0800 1800 |043f: move-object/from16 v0, v24 +00540a: 5206 3d00 |0441: iget v6, v0, Lcom/google/android/checkers/a;.d:I // field@003d +00540e: 3806 0b00 |0443: if-eqz v6, 044e // +000b +005412: 1506 00a0 |0445: const/high16 v6, #int -1610612736 // #a000 +005416: b556 |0447: and-int/2addr v6, v5 +005418: 1507 00a0 |0448: const/high16 v7, #int -1610612736 // #a000 +00541c: 3376 0400 |044a: if-ne v6, v7, 044e // +0004 +005420: d803 03f4 |044c: add-int/lit8 v3, v3, #int -12 // #f4 +005424: 1406 0066 6600 |044e: const v6, #float 0.000000 // #00666600 +00542a: b564 |0451: and-int/2addr v4, v6 +00542c: 7110 9e00 0400 |0452: invoke-static {v4}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +005432: 0a04 |0455: move-result v4 +005434: 1406 0066 6600 |0456: const v6, #float 0.000000 // #00666600 +00543a: b565 |0459: and-int/2addr v5, v6 +00543c: 7110 9e00 0500 |045a: invoke-static {v5}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +005442: 0a05 |045d: move-result v5 +005444: b154 |045e: sub-int/2addr v4, v5 +005446: b043 |045f: add-int/2addr v3, v4 +005448: 0800 1800 |0460: move-object/from16 v0, v24 +00544c: 5204 3d00 |0462: iget v4, v0, 
Lcom/google/android/checkers/a;.d:I // field@003d +005450: 1405 1818 1818 |0464: const v5, #float 0.000000 // #18181818 +005456: b554 |0467: and-int/2addr v4, v5 +005458: 7110 9e00 0400 |0468: invoke-static {v4}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +00545e: 0a04 |046b: move-result v4 +005460: 0800 1800 |046c: move-object/from16 v0, v24 +005464: 5205 3f00 |046e: iget v5, v0, Lcom/google/android/checkers/a;.f:I // field@003f +005468: 1406 1818 1818 |0470: const v6, #float 0.000000 // #18181818 +00546e: b565 |0473: and-int/2addr v5, v6 +005470: 7110 9e00 0500 |0474: invoke-static {v5}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +005476: 0a05 |0477: move-result v5 +005478: b154 |0478: sub-int/2addr v4, v5 +00547a: b143 |0479: sub-int/2addr v3, v4 +00547c: 0800 1800 |047a: move-object/from16 v0, v24 +005480: 5204 3e00 |047c: iget v4, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005484: 1405 0800 0010 |047e: const v5, #float 0.000000 // #10000008 +00548a: b554 |0481: and-int/2addr v4, v5 +00548c: 3804 0900 |0482: if-eqz v4, 048b // +0009 +005490: 7110 9e00 0400 |0484: invoke-static {v4}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +005496: 0a04 |0487: move-result v4 +005498: e004 0405 |0488: shl-int/lit8 v4, v4, #int 5 // #05 +00549c: b143 |048a: sub-int/2addr v3, v4 +00549e: 0800 1800 |048b: move-object/from16 v0, v24 +0054a2: 5204 4000 |048d: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +0054a6: 1405 0800 0010 |048f: const v5, #float 0.000000 // #10000008 +0054ac: b554 |0492: and-int/2addr v4, v5 +0054ae: 3804 4c00 |0493: if-eqz v4, 04df // +004c +0054b2: 7110 9e00 0400 |0495: invoke-static {v4}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +0054b8: 0a04 |0498: move-result v4 +0054ba: e004 0405 |0499: shl-int/lit8 v4, v4, #int 5 // #05 +0054be: b034 |049b: add-int/2addr v4, v3 +0054c0: 391e 7efb |049c: if-nez v30, 001a // -0482 +0054c4: 7b44 |049e: neg-int v4, v4 +0054c6: 2900 7bfb |049f: goto/16 001a // -0485 +0054ca: d804 0464 |04a1: add-int/lit8 v4, v4, #int 100 // #64 +0054ce: d805 03ff |04a3: add-int/lit8 v5, v3, #int -1 // #ff +0054d2: b553 |04a5: and-int/2addr v3, v5 +0054d4: 2900 4eff |04a6: goto/16 03f4 // -00b2 +0054d8: d054 8600 |04a8: add-int/lit16 v4, v5, #int 134 // #0086 +0054dc: d805 03ff |04aa: add-int/lit8 v5, v3, #int -1 // #ff +0054e0: b553 |04ac: and-int/2addr v3, v5 +0054e2: 0145 |04ad: move v5, v4 +0054e4: 2900 4dff |04ae: goto/16 03fb // -00b3 +0054e8: d804 0464 |04b0: add-int/lit8 v4, v4, #int 100 // #64 +0054ec: d806 03ff |04b2: add-int/lit8 v6, v3, #int -1 // #ff +0054f0: b563 |04b4: and-int/2addr v3, v6 +0054f2: 2900 4dff |04b5: goto/16 0402 // -00b3 +0054f6: d044 8600 |04b7: add-int/lit16 v4, v4, #int 134 // #0086 +0054fa: d806 03ff |04b9: add-int/lit8 v6, v3, #int -1 // #ff +0054fe: b563 |04bb: and-int/2addr v3, v6 +005500: 2900 4cff |04bc: goto/16 0408 // -00b4 +005504: 9103 0504 |04be: sub-int v3, v5, v4 +005508: 9106 0504 |04c0: sub-int v6, v5, v4 +00550c: e006 0608 |04c2: shl-int/lit8 v6, v6, #int 8 // #08 +005510: 9007 0504 |04c4: add-int v7, v5, v4 +005514: b376 |04c6: div-int/2addr v6, v7 +005516: b063 |04c7: add-int/2addr v3, v6 +005518: 2900 45ff |04c8: goto/16 040d // -00bb +00551c: 3745 56ff |04ca: if-le v5, v4, 0420 // -00aa +005520: 0800 1800 |04cc: move-object/from16 v0, v24 +005524: 5204 4000 |04ce: iget v4, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005528: 1405 1100 0088 |04d0: const v5, #float -0.000000 // #88000011 +00552e: b554 |04d3: and-int/2addr v4, v5 +005530: 3804 4cff 
|04d4: if-eqz v4, 0420 // -00b4 +005534: 7110 9e00 0400 |04d6: invoke-static {v4}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +00553a: 0a04 |04d9: move-result v4 +00553c: e004 0403 |04da: shl-int/lit8 v4, v4, #int 3 // #03 +005540: b143 |04dc: sub-int/2addr v3, v4 +005542: 2900 43ff |04dd: goto/16 0420 // -00bd +005546: 0134 |04df: move v4, v3 +005548: 28bc |04e0: goto 049c // -0044 +00554a: 01a3 |04e1: move v3, v10 +00554c: 01b4 |04e2: move v4, v11 +00554e: 2900 d8fe |04e3: goto/16 03bb // -0128 +005552: 0207 1c00 |04e5: move/from16 v7, v28 +005556: 2900 58fb |04e7: goto/16 003f // -04a8 +00555a: 0000 |04e9: nop // spacer +00555c: 0001 0300 0200 0000 aa00 0000 4001 ... |04ea: packed-switch-data (10 units) +005570: 0002 0200 4000 0000 8000 0000 b702 ... |04f4: sparse-switch-data (10 units) +005584: 0001 0200 0000 0000 b402 0000 ba02 ... |04fe: packed-switch-data (8 units) +005594: 0001 0300 0100 0000 9402 0000 9802 ... |0506: packed-switch-data (10 units) + catches : (none) + positions : + locals : + + #4 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(IZ)I' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 7 + ins : 3 + outs : 4 + insns size : 55 16-bit code units +0055a8: |[0055a8] com.google.android.checkers.a.a:(IZ)I +0055b8: 1221 |0000: const/4 v1, #int 2 // #2 +0055ba: 1200 |0001: const/4 v0, #int 0 // #0 +0055bc: 5940 3c00 |0002: iput v0, v4, Lcom/google/android/checkers/a;.c:I // field@003c +0055c0: da02 0540 |0004: mul-int/lit8 v2, v5, #int 64 // #40 +0055c4: 5942 4b00 |0006: iput v2, v4, Lcom/google/android/checkers/a;.r:I // field@004b +0055c8: 5242 3d00 |0008: iget v2, v4, Lcom/google/android/checkers/a;.d:I // field@003d +0055cc: 5243 3e00 |000a: iget v3, v4, Lcom/google/android/checkers/a;.e:I // field@003e +0055d0: b632 |000c: or-int/2addr v2, v3 +0055d2: 5243 3f00 |000d: iget v3, v4, Lcom/google/android/checkers/a;.f:I // field@003f +0055d6: b632 |000f: or-int/2addr v2, v3 +0055d8: 5243 4000 |0010: iget v3, v4, Lcom/google/android/checkers/a;.g:I // field@0040 +0055dc: b632 |0012: or-int/2addr v2, v3 +0055de: df02 02ff |0013: xor-int/lit8 v2, v2, #int -1 // #ff +0055e2: 5543 4c00 |0015: iget-boolean v3, v4, Lcom/google/android/checkers/a;.s:Z // field@004c +0055e6: 3803 1000 |0017: if-eqz v3, 0027 // +0010 +0055ea: 7040 8100 5426 |0019: invoke-direct {v4, v5, v6, v2}, Lcom/google/android/checkers/a;.a:(IZI)Z // method@0081 +0055f0: 0a03 |001c: move-result v3 +0055f2: 7040 8800 5426 |001d: invoke-direct {v4, v5, v6, v2}, Lcom/google/android/checkers/a;.b:(IZI)Z // method@0088 +0055f8: 0a02 |0020: move-result v2 +0055fa: 3903 0400 |0021: if-nez v3, 0025 // +0004 +0055fe: 3802 0300 |0023: if-eqz v2, 0026 // +0003 +005602: 0110 |0025: move v0, v1 +005604: 0f00 |0026: return v0 +005606: 7040 8100 5426 |0027: invoke-direct {v4, v5, v6, v2}, Lcom/google/android/checkers/a;.a:(IZI)Z // method@0081 +00560c: 0a03 |002a: move-result v3 +00560e: 3803 0400 |002b: if-eqz v3, 002f // +0004 +005612: 1210 |002d: const/4 v0, #int 1 // #1 +005614: 28f8 |002e: goto 0026 // -0008 +005616: 7040 8800 5426 |002f: invoke-direct {v4, v5, v6, v2}, Lcom/google/android/checkers/a;.b:(IZI)Z // method@0088 +00561c: 0a02 |0032: move-result v2 +00561e: 3802 f3ff |0033: if-eqz v2, 0026 // -000d +005622: 0110 |0035: move v0, v1 +005624: 28f0 |0036: goto 0026 // -0010 + catches : (none) + positions : + locals : + + #5 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(Z)I' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 7 + ins : 2 + outs : 1 + insns size : 98 
16-bit code units +005628: |[005628] com.google.android.checkers.a.a:(Z)I +005638: 3806 1700 |0000: if-eqz v6, 0017 // +0017 +00563c: 1200 |0002: const/4 v0, #int 0 // #0 +00563e: 5251 3d00 |0003: iget v1, v5, Lcom/google/android/checkers/a;.d:I // field@003d +005642: 0114 |0005: move v4, v1 +005644: 0101 |0006: move v1, v0 +005646: 0140 |0007: move v0, v4 +005648: 3900 1600 |0008: if-nez v0, 001e // +0016 +00564c: 5250 3e00 |000a: iget v0, v5, Lcom/google/android/checkers/a;.e:I // field@003e +005650: 3900 2300 |000c: if-nez v0, 002f // +0023 +005654: 5250 3f00 |000e: iget v0, v5, Lcom/google/android/checkers/a;.f:I // field@003f +005658: 3900 3000 |0010: if-nez v0, 0040 // +0030 +00565c: 5250 4000 |0012: iget v0, v5, Lcom/google/android/checkers/a;.g:I // field@0040 +005660: 3900 3d00 |0014: if-nez v0, 0051 // +003d +005664: 0f01 |0016: return v1 +005666: 6200 3900 |0017: sget-object v0, Lcom/google/android/checkers/a;.M:[I // field@0039 +00566a: 1301 8000 |0019: const/16 v1, #int 128 // #80 +00566e: 4400 0001 |001b: aget v0, v0, v1 +005672: 28e6 |001d: goto 0003 // -001a +005674: 6202 3900 |001e: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +005678: 7110 9f00 0000 |0020: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +00567e: 0a03 |0023: move-result v3 +005680: da03 0304 |0024: mul-int/lit8 v3, v3, #int 4 // #04 +005684: d803 0300 |0026: add-int/lit8 v3, v3, #int 0 // #00 +005688: 4402 0203 |0028: aget v2, v2, v3 +00568c: b721 |002a: xor-int/2addr v1, v2 +00568e: d802 00ff |002b: add-int/lit8 v2, v0, #int -1 // #ff +005692: b520 |002d: and-int/2addr v0, v2 +005694: 28da |002e: goto 0008 // -0026 +005696: 6202 3900 |002f: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +00569a: 7110 9f00 0000 |0031: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0056a0: 0a03 |0034: move-result v3 +0056a2: da03 0304 |0035: mul-int/lit8 v3, v3, #int 4 // #04 +0056a6: d803 0301 |0037: add-int/lit8 v3, v3, #int 1 // #01 +0056aa: 4402 0203 |0039: aget v2, v2, v3 +0056ae: b721 |003b: xor-int/2addr v1, v2 +0056b0: d802 00ff |003c: add-int/lit8 v2, v0, #int -1 // #ff +0056b4: b520 |003e: and-int/2addr v0, v2 +0056b6: 28cd |003f: goto 000c // -0033 +0056b8: 6202 3900 |0040: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +0056bc: 7110 9f00 0000 |0042: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0056c2: 0a03 |0045: move-result v3 +0056c4: da03 0304 |0046: mul-int/lit8 v3, v3, #int 4 // #04 +0056c8: d803 0302 |0048: add-int/lit8 v3, v3, #int 2 // #02 +0056cc: 4402 0203 |004a: aget v2, v2, v3 +0056d0: b721 |004c: xor-int/2addr v1, v2 +0056d2: d802 00ff |004d: add-int/lit8 v2, v0, #int -1 // #ff +0056d6: b520 |004f: and-int/2addr v0, v2 +0056d8: 28c0 |0050: goto 0010 // -0040 +0056da: 6202 3900 |0051: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +0056de: 7110 9f00 0000 |0053: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0056e4: 0a03 |0056: move-result v3 +0056e6: da03 0304 |0057: mul-int/lit8 v3, v3, #int 4 // #04 +0056ea: d803 0303 |0059: add-int/lit8 v3, v3, #int 3 // #03 +0056ee: 4402 0203 |005b: aget v2, v2, v3 +0056f2: b721 |005d: xor-int/2addr v1, v2 +0056f4: d802 00ff |005e: add-int/lit8 v2, v0, #int -1 // #ff +0056f8: b520 |0060: and-int/2addr v0, v2 +0056fa: 28b3 |0061: goto 0014 // -004d + catches : (none) + positions : + locals : + + #6 : (in Lcom/google/android/checkers/a;) + 
name : 'a' + type : '(ZII)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 7 + ins : 3 + outs : 1 + insns size : 56 16-bit code units +0056fc: |[0056fc] com.google.android.checkers.a.a:(ZII)I +00570c: 7110 9f00 0500 |0000: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005712: 0a00 |0003: move-result v0 +005714: d801 05ff |0004: add-int/lit8 v1, v5, #int -1 // #ff +005718: b551 |0006: and-int/2addr v1, v5 +00571a: 7110 9f00 0100 |0007: invoke-static {v1}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005720: 0a02 |000a: move-result v2 +005722: 7110 9f00 0600 |000b: invoke-static {v6}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005728: 0a01 |000e: move-result v1 +00572a: 1303 1000 |000f: const/16 v3, #int 16 // #10 +00572e: 3431 0800 |0011: if-lt v1, v3, 0019 // +0008 +005732: d900 001f |0013: rsub-int/lit8 v0, v0, #int 31 // #1f +005736: d902 021f |0015: rsub-int/lit8 v2, v2, #int 31 // #1f +00573a: d901 011f |0017: rsub-int/lit8 v1, v1, #int 31 // #1f +00573e: 3520 1100 |0019: if-ge v0, v2, 002a // +0011 +005742: 6203 3800 |001b: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +005746: 4402 0302 |001d: aget v2, v3, v2 +00574a: b020 |001f: add-int/2addr v0, v2 +00574c: 3804 1000 |0020: if-eqz v4, 0030 // +0010 +005750: 6202 5d00 |0022: sget-object v2, Lcom/google/android/checkers/g;.e:[B // field@005d +005754: da00 0010 |0024: mul-int/lit8 v0, v0, #int 16 // #10 +005758: b010 |0026: add-int/2addr v0, v1 +00575a: 4800 0200 |0027: aget-byte v0, v2, v0 +00575e: 0f00 |0029: return v0 +005760: 6203 3800 |002a: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +005764: 4400 0300 |002c: aget v0, v3, v0 +005768: b020 |002e: add-int/2addr v0, v2 +00576a: 28f1 |002f: goto 0020 // -000f +00576c: 6202 5e00 |0030: sget-object v2, Lcom/google/android/checkers/g;.f:[B // field@005e +005770: da00 0010 |0032: mul-int/lit8 v0, v0, #int 16 // #10 +005774: b010 |0034: add-int/2addr v0, v1 +005776: 4800 0200 |0035: aget-byte v0, v2, v0 +00577a: 28f2 |0037: goto 0029 // -000e + catches : (none) + positions : + locals : + + #7 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(ZIIIZ)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 9 + ins : 5 + outs : 1 + insns size : 44 16-bit code units +00577c: |[00577c] com.google.android.checkers.a.a:(ZIIIZ)I +00578c: 7110 9f00 0500 |0000: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005792: 0a02 |0003: move-result v2 +005794: 7110 9f00 0600 |0004: invoke-static {v6}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +00579a: 0a01 |0007: move-result v1 +00579c: 7110 9f00 0700 |0008: invoke-static {v7}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0057a2: 0a00 |000b: move-result v0 +0057a4: 3808 0800 |000c: if-eqz v8, 0014 // +0008 +0057a8: d902 021f |000e: rsub-int/lit8 v2, v2, #int 31 // #1f +0057ac: d901 011f |0010: rsub-int/lit8 v1, v1, #int 31 // #1f +0057b0: d900 001f |0012: rsub-int/lit8 v0, v0, #int 31 // #1f +0057b4: 3804 0d00 |0014: if-eqz v4, 0021 // +000d +0057b8: 6203 6100 |0016: sget-object v3, Lcom/google/android/checkers/g;.i:[B // field@0061 +0057bc: d222 0004 |0018: mul-int/lit16 v2, v2, #int 1024 // #0400 +0057c0: da01 0120 |001a: mul-int/lit8 v1, v1, #int 32 // #20 +0057c4: b021 |001c: add-int/2addr v1, v2 +0057c6: b010 |001d: add-int/2addr v0, v1 +0057c8: 4800 0300 |001e: aget-byte v0, v3, v0 +0057cc: 0f00 |0020: return v0 +0057ce: 6203 6200 
|0021: sget-object v3, Lcom/google/android/checkers/g;.j:[B // field@0062 +0057d2: d222 0004 |0023: mul-int/lit16 v2, v2, #int 1024 // #0400 +0057d6: da01 0120 |0025: mul-int/lit8 v1, v1, #int 32 // #20 +0057da: b021 |0027: add-int/2addr v1, v2 +0057dc: b010 |0028: add-int/2addr v0, v1 +0057de: 4800 0300 |0029: aget-byte v0, v3, v0 +0057e2: 28f5 |002b: goto 0020 // -000b + catches : (none) + positions : + locals : + + #8 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(ZIIZ)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 7 + ins : 4 + outs : 1 + insns size : 34 16-bit code units +0057e4: |[0057e4] com.google.android.checkers.a.a:(ZIIZ)I +0057f4: 7110 9f00 0400 |0000: invoke-static {v4}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0057fa: 0a01 |0003: move-result v1 +0057fc: 7110 9f00 0500 |0004: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005802: 0a00 |0007: move-result v0 +005804: 3806 0600 |0008: if-eqz v6, 000e // +0006 +005808: d901 011f |000a: rsub-int/lit8 v1, v1, #int 31 // #1f +00580c: d900 001f |000c: rsub-int/lit8 v0, v0, #int 31 // #1f +005810: d800 00fc |000e: add-int/lit8 v0, v0, #int -4 // #fc +005814: 3803 0a00 |0010: if-eqz v3, 001a // +000a +005818: 6202 5a00 |0012: sget-object v2, Lcom/google/android/checkers/g;.b:[B // field@005a +00581c: da00 0020 |0014: mul-int/lit8 v0, v0, #int 32 // #20 +005820: b010 |0016: add-int/2addr v0, v1 +005822: 4800 0200 |0017: aget-byte v0, v2, v0 +005826: 0f00 |0019: return v0 +005828: 6202 5b00 |001a: sget-object v2, Lcom/google/android/checkers/g;.c:[B // field@005b +00582c: da00 0020 |001c: mul-int/lit8 v0, v0, #int 32 // #20 +005830: b010 |001e: add-int/2addr v0, v1 +005832: 4800 0200 |001f: aget-byte v0, v2, v0 +005836: 28f8 |0021: goto 0019 // -0008 + catches : (none) + positions : + locals : + + #9 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(IIII)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 9 + ins : 5 + outs : 0 + insns size : 76 16-bit code units +005838: |[005838] com.google.android.checkers.a.a:(IIII)V +005848: 1301 007d |0000: const/16 v1, #int 32000 // #7d00 +00584c: 1302 3f00 |0002: const/16 v2, #int 63 // #3f +005850: 1203 |0004: const/4 v3, #int 0 // #0 +005852: 1300 0083 |0005: const/16 v0, #int -32000 // #8300 +005856: 3608 2300 |0007: if-gt v8, v0, 002a // +0023 +00585a: 2c07 2f00 0000 |0009: sparse-switch v7, 00000038 // +0000002f +005860: 0e00 |000c: return-void +005862: 1307 4000 |000d: const/16 v7, #int 64 // #40 +005866: 0108 |000f: move v8, v0 +005868: 0126 |0010: move v6, v2 +00586a: 0135 |0011: move v5, v3 +00586c: 5240 5100 |0012: iget v0, v4, Lcom/google/android/checkers/a;.x:I // field@0051 +005870: 1401 ffff 0f00 |0014: const v1, #float 0.000000 // #000fffff +005876: b501 |0017: and-int/2addr v1, v0 +005878: 5442 5200 |0018: iget-object v2, v4, Lcom/google/android/checkers/a;.y:[I // field@0052 +00587c: 4b00 0201 |001a: aput v0, v2, v1 +005880: 5440 5300 |001c: iget-object v0, v4, Lcom/google/android/checkers/a;.z:[S // field@0053 +005884: 8f82 |001e: int-to-short v2, v8 +005886: 5102 0001 |001f: aput-short v2, v0, v1 +00588a: 5440 2d00 |0021: iget-object v0, v4, Lcom/google/android/checkers/a;.A:[B // field@002d +00588e: 9102 0605 |0023: sub-int v2, v6, v5 +005892: b672 |0025: or-int/2addr v2, v7 +005894: 8d22 |0026: int-to-byte v2, v2 +005896: 4f02 0001 |0027: aput-byte v2, v0, v1 +00589a: 28e3 |0029: goto 000c // -001d +00589c: 3418 e8ff |002a: if-lt v8, v1, 0012 // -0018 +0058a0: 2c07 1600 
0000 |002c: sparse-switch v7, 00000042 // +00000016 +0058a6: 28dd |002f: goto 000c // -0023 +0058a8: 0118 |0030: move v8, v1 +0058aa: 0126 |0031: move v6, v2 +0058ac: 0135 |0032: move v5, v3 +0058ae: 28df |0033: goto 0012 // -0021 +0058b0: 1307 8000 |0034: const/16 v7, #int 128 // #80 +0058b4: 28fa |0036: goto 0030 // -0006 +0058b6: 0000 |0037: nop // spacer +0058b8: 0002 0200 4000 0000 c000 0000 0600 ... |0038: sparse-switch-data (10 units) +0058cc: 0002 0200 8000 0000 c000 0000 0400 ... |0042: sparse-switch-data (10 units) + catches : (none) + positions : + locals : + + #10 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(IIIII)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 10 + ins : 6 + outs : 0 + insns size : 39 16-bit code units +0058e0: |[0058e0] com.google.android.checkers.a.a:(IIIII)V +0058f0: 5240 3c00 |0000: iget v0, v4, Lcom/google/android/checkers/a;.c:I // field@003c +0058f4: 1301 4000 |0002: const/16 v1, #int 64 // #40 +0058f8: 3410 0300 |0004: if-lt v0, v1, 0007 // +0003 +0058fc: 0e00 |0006: return-void +0058fe: 5240 3c00 |0007: iget v0, v4, Lcom/google/android/checkers/a;.c:I // field@003c +005902: d801 0001 |0009: add-int/lit8 v1, v0, #int 1 // #01 +005906: 5941 3c00 |000b: iput v1, v4, Lcom/google/android/checkers/a;.c:I // field@003c +00590a: 5241 4b00 |000d: iget v1, v4, Lcom/google/android/checkers/a;.r:I // field@004b +00590e: 5442 4800 |000f: iget-object v2, v4, Lcom/google/android/checkers/a;.o:[I // field@0048 +005912: 9003 0100 |0011: add-int v3, v1, v0 +005916: 4b06 0203 |0013: aput v6, v2, v3 +00591a: 5442 3a00 |0015: iget-object v2, v4, Lcom/google/android/checkers/a;.a:[I // field@003a +00591e: 9003 0100 |0017: add-int v3, v1, v0 +005922: 4b07 0203 |0019: aput v7, v2, v3 +005926: 5442 4900 |001b: iget-object v2, v4, Lcom/google/android/checkers/a;.p:[I // field@0049 +00592a: b001 |001d: add-int/2addr v1, v0 +00592c: 4b08 0201 |001e: aput v8, v2, v1 +005930: 3905 e6ff |0020: if-nez v5, 0006 // -001a +005934: 5441 3b00 |0022: iget-object v1, v4, Lcom/google/android/checkers/a;.b:[I // field@003b +005938: 4b09 0100 |0024: aput v9, v1, v0 +00593c: 28e0 |0026: goto 0006 // -0020 + catches : (none) + positions : + locals : + + #11 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(IIIIIIII)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 18 + ins : 9 + outs : 9 + insns size : 99 16-bit code units +005940: |[005940] com.google.android.checkers.a.a:(IIIIIIII)V +005950: 1210 |0000: const/4 v0, #int 1 // #1 +005952: 6201 3000 |0001: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005956: 4401 010d |0003: aget v1, v1, v13 +00595a: b5c1 |0005: and-int/2addr v1, v12 +00595c: 3801 2500 |0006: if-eqz v1, 002b // +0025 +005960: 6201 3100 |0008: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005964: 4401 010d |000a: aget v1, v1, v13 +005968: b5b1 |000c: and-int/2addr v1, v11 +00596a: 3801 1e00 |000d: if-eqz v1, 002b // +001e +00596e: d804 0df7 |000f: add-int/lit8 v4, v13, #int -9 // #f7 +005972: 6200 3100 |0011: sget-object v0, Lcom/google/android/checkers/a;.E:[I // field@0031 +005976: 4405 000d |0013: aget v5, v0, v13 +00597a: 6200 3000 |0015: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +00597e: 4400 000d |0017: aget v0, v0, v13 +005982: 9606 0f00 |0019: or-int v6, v15, v0 +005986: d807 1001 |001b: add-int/lit8 v7, v16, #int 1 // #01 +00598a: 6200 3100 |001d: sget-object v0, Lcom/google/android/checkers/a;.E:[I // field@0031 +00598e: 4400 000d |001f: aget v0, 
v0, v13 +005992: 9608 1100 |0021: or-int v8, v17, v0 +005996: 0790 |0023: move-object v0, v9 +005998: 01a1 |0024: move v1, v10 +00599a: 01b2 |0025: move v2, v11 +00599c: 01c3 |0026: move v3, v12 +00599e: 7609 7f00 0000 |0027: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.a:(IIIIIIII)V // method@007f +0059a4: 1200 |002a: const/4 v0, #int 0 // #0 +0059a6: 6201 3200 |002b: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +0059aa: 4401 010d |002d: aget v1, v1, v13 +0059ae: b5c1 |002f: and-int/2addr v1, v12 +0059b0: 3801 2500 |0030: if-eqz v1, 0055 // +0025 +0059b4: 6201 3300 |0032: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +0059b8: 4401 010d |0034: aget v1, v1, v13 +0059bc: b5b1 |0036: and-int/2addr v1, v11 +0059be: 3801 1e00 |0037: if-eqz v1, 0055 // +001e +0059c2: d804 0df9 |0039: add-int/lit8 v4, v13, #int -7 // #f9 +0059c6: 6200 3300 |003b: sget-object v0, Lcom/google/android/checkers/a;.G:[I // field@0033 +0059ca: 4405 000d |003d: aget v5, v0, v13 +0059ce: 6200 3200 |003f: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +0059d2: 4400 000d |0041: aget v0, v0, v13 +0059d6: 9606 0f00 |0043: or-int v6, v15, v0 +0059da: d807 1001 |0045: add-int/lit8 v7, v16, #int 1 // #01 +0059de: 6200 3300 |0047: sget-object v0, Lcom/google/android/checkers/a;.G:[I // field@0033 +0059e2: 4400 000d |0049: aget v0, v0, v13 +0059e6: 9608 1100 |004b: or-int v8, v17, v0 +0059ea: 0790 |004d: move-object v0, v9 +0059ec: 01a1 |004e: move v1, v10 +0059ee: 01b2 |004f: move v2, v11 +0059f0: 01c3 |0050: move v3, v12 +0059f2: 7609 7f00 0000 |0051: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.a:(IIIIIIII)V // method@007f +0059f8: 1200 |0054: const/4 v0, #int 0 // #0 +0059fa: 3800 0d00 |0055: if-eqz v0, 0062 // +000d +0059fe: 0790 |0057: move-object v0, v9 +005a00: 01a1 |0058: move v1, v10 +005a02: 01e2 |0059: move v2, v14 +005a04: 01f3 |005a: move v3, v15 +005a06: 0204 1000 |005b: move/from16 v4, v16 +005a0a: 0205 1100 |005d: move/from16 v5, v17 +005a0e: 7606 7e00 0000 |005f: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +005a14: 0e00 |0062: return-void + catches : (none) + positions : + locals : + + #12 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(IZI)Z' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 21 + ins : 4 + outs : 9 + insns size : 875 16-bit code units +005a18: |[005a18] com.google.android.checkers.a.a:(IZI)Z +005a28: 3813 b901 |0000: if-eqz v19, 01b9 // +01b9 +005a2c: 0800 1100 |0002: move-object/from16 v0, v17 +005a30: 5201 3e00 |0004: iget v1, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005a34: 0800 1100 |0006: move-object/from16 v0, v17 +005a38: 5202 3d00 |0008: iget v2, v0, Lcom/google/android/checkers/a;.d:I // field@003d +005a3c: b612 |000a: or-int/2addr v2, v1 +005a3e: 0800 1100 |000b: move-object/from16 v0, v17 +005a42: 5201 4000 |000d: iget v1, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005a46: 0800 1100 |000f: move-object/from16 v0, v17 +005a4a: 5203 3f00 |0011: iget v3, v0, Lcom/google/android/checkers/a;.f:I // field@003f +005a4e: 9604 0103 |0013: or-int v4, v1, v3 +005a52: 1201 |0015: const/4 v1, #int 0 // #0 +005a54: e203 1404 |0016: ushr-int/lit8 v3, v20, #int 4 // #04 +005a58: b543 |0018: and-int/2addr v3, v4 +005a5a: 3803 1200 |0019: if-eqz v3, 002b // +0012 +005a5e: 1401 e0e0 e0e0 |001b: const v1, #float -129633581999069331456.000000 // 
#e0e0e0e0 +005a64: b531 |001e: and-int/2addr v1, v3 +005a66: e201 0105 |001f: ushr-int/lit8 v1, v1, #int 5 // #05 +005a6a: 1405 0007 0707 |0021: const v5, #float 0.000000 // #07070700 +005a70: b553 |0024: and-int/2addr v3, v5 +005a72: e203 0303 |0025: ushr-int/lit8 v3, v3, #int 3 // #03 +005a76: b631 |0027: or-int/2addr v1, v3 +005a78: b521 |0028: and-int/2addr v1, v2 +005a7a: de01 0100 |0029: or-int/lit8 v1, v1, #int 0 // #00 +005a7e: 1403 e0e0 e0e0 |002b: const v3, #float -129633581999069331456.000000 // #e0e0e0e0 +005a84: 9503 0314 |002e: and-int v3, v3, v20 +005a88: e203 0305 |0030: ushr-int/lit8 v3, v3, #int 5 // #05 +005a8c: 1405 0007 0707 |0032: const v5, #float 0.000000 // #07070700 +005a92: 9505 0514 |0035: and-int v5, v5, v20 +005a96: e205 0503 |0037: ushr-int/lit8 v5, v5, #int 3 // #03 +005a9a: b653 |0039: or-int/2addr v3, v5 +005a9c: b543 |003a: and-int/2addr v3, v4 +005a9e: 3803 0600 |003b: if-eqz v3, 0041 // +0006 +005aa2: e203 0304 |003d: ushr-int/lit8 v3, v3, #int 4 // #04 +005aa6: b532 |003f: and-int/2addr v2, v3 +005aa8: b621 |0040: or-int/2addr v1, v2 +005aaa: 0800 1100 |0041: move-object/from16 v0, v17 +005aae: 5202 3e00 |0043: iget v2, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005ab2: 3802 3400 |0045: if-eqz v2, 0079 // +0034 +005ab6: e002 1404 |0047: shl-int/lit8 v2, v20, #int 4 // #04 +005aba: b542 |0049: and-int/2addr v2, v4 +005abc: 3802 1500 |004a: if-eqz v2, 005f // +0015 +005ac0: 0800 1100 |004c: move-object/from16 v0, v17 +005ac4: 5203 3e00 |004e: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005ac8: 1405 0707 0707 |0050: const v5, #float 0.000000 // #07070707 +005ace: b525 |0053: and-int/2addr v5, v2 +005ad0: e005 0505 |0054: shl-int/lit8 v5, v5, #int 5 // #05 +005ad4: 1406 e0e0 e000 |0056: const v6, #float 0.000000 // #00e0e0e0 +005ada: b562 |0059: and-int/2addr v2, v6 +005adc: e002 0203 |005a: shl-int/lit8 v2, v2, #int 3 // #03 +005ae0: b652 |005c: or-int/2addr v2, v5 +005ae2: b532 |005d: and-int/2addr v2, v3 +005ae4: b621 |005e: or-int/2addr v1, v2 +005ae6: 1402 0707 0707 |005f: const v2, #float 0.000000 // #07070707 +005aec: 9502 0214 |0062: and-int v2, v2, v20 +005af0: e002 0205 |0064: shl-int/lit8 v2, v2, #int 5 // #05 +005af4: 1403 e0e0 e000 |0066: const v3, #float 0.000000 // #00e0e0e0 +005afa: 9503 0314 |0069: and-int v3, v3, v20 +005afe: e003 0303 |006b: shl-int/lit8 v3, v3, #int 3 // #03 +005b02: b632 |006d: or-int/2addr v2, v3 +005b04: b542 |006e: and-int/2addr v2, v4 +005b06: 3802 0a00 |006f: if-eqz v2, 0079 // +000a +005b0a: 0800 1100 |0071: move-object/from16 v0, v17 +005b0e: 5203 3e00 |0073: iget v3, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005b12: e002 0204 |0075: shl-int/lit8 v2, v2, #int 4 // #04 +005b16: b532 |0077: and-int/2addr v2, v3 +005b18: b621 |0078: or-int/2addr v1, v2 +005b1a: 3901 0a00 |0079: if-nez v1, 0083 // +000a +005b1e: 0800 1100 |007b: move-object/from16 v0, v17 +005b22: 5201 3c00 |007d: iget v1, v0, Lcom/google/android/checkers/a;.c:I // field@003c +005b26: 3801 e902 |007f: if-eqz v1, 0368 // +02e9 +005b2a: 1211 |0081: const/4 v1, #int 1 // #1 +005b2c: 0f01 |0082: return v1 +005b2e: 7110 9f00 0100 |0083: invoke-static {v1}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005b34: 0a0f |0086: move-result v15 +005b36: 1212 |0087: const/4 v2, #int 1 // #1 +005b38: 9810 020f |0088: shl-int v16, v2, v15 +005b3c: 970e 0110 |008a: xor-int v14, v1, v16 +005b40: 0800 1100 |008c: move-object/from16 v0, v17 +005b44: 5201 3e00 |008e: iget v1, v0, 
Lcom/google/android/checkers/a;.e:I // field@003e +005b48: 9501 0110 |0090: and-int v1, v1, v16 +005b4c: 3901 5c00 |0092: if-nez v1, 00ee // +005c +005b50: 6201 3400 |0094: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +005b54: 4401 010f |0096: aget v1, v1, v15 +005b58: b541 |0098: and-int/2addr v1, v4 +005b5a: 3801 2700 |0099: if-eqz v1, 00c0 // +0027 +005b5e: 6201 3500 |009b: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +005b62: 4401 010f |009d: aget v1, v1, v15 +005b66: 9501 0114 |009f: and-int v1, v1, v20 +005b6a: 3801 1f00 |00a1: if-eqz v1, 00c0 // +001f +005b6e: d805 0f07 |00a3: add-int/lit8 v5, v15, #int 7 // #07 +005b72: 6201 3500 |00a5: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +005b76: 4406 010f |00a7: aget v6, v1, v15 +005b7a: 6201 3400 |00a9: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +005b7e: 4401 010f |00ab: aget v1, v1, v15 +005b82: 9607 1001 |00ad: or-int v7, v16, v1 +005b86: 1308 0101 |00af: const/16 v8, #int 257 // #101 +005b8a: 6201 3500 |00b1: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +005b8e: 4401 010f |00b3: aget v1, v1, v15 +005b92: 9609 1001 |00b5: or-int v9, v16, v1 +005b96: 0801 1100 |00b7: move-object/from16 v1, v17 +005b9a: 0202 1200 |00b9: move/from16 v2, v18 +005b9e: 0203 1400 |00bb: move/from16 v3, v20 +005ba2: 7609 8600 0100 |00bd: invoke-direct/range {v1, v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/a;.b:(IIIIIIII)V // method@0086 +005ba8: 6201 3600 |00c0: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +005bac: 4401 010f |00c2: aget v1, v1, v15 +005bb0: b541 |00c4: and-int/2addr v1, v4 +005bb2: 3801 f100 |00c5: if-eqz v1, 01b6 // +00f1 +005bb6: 6201 3700 |00c7: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +005bba: 4401 010f |00c9: aget v1, v1, v15 +005bbe: 9501 0114 |00cb: and-int v1, v1, v20 +005bc2: 3801 e900 |00cd: if-eqz v1, 01b6 // +00e9 +005bc6: d805 0f09 |00cf: add-int/lit8 v5, v15, #int 9 // #09 +005bca: 6201 3700 |00d1: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +005bce: 4406 010f |00d3: aget v6, v1, v15 +005bd2: 6201 3600 |00d5: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +005bd6: 4401 010f |00d7: aget v1, v1, v15 +005bda: 9607 1001 |00d9: or-int v7, v16, v1 +005bde: 1308 0101 |00db: const/16 v8, #int 257 // #101 +005be2: 6201 3700 |00dd: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +005be6: 4401 010f |00df: aget v1, v1, v15 +005bea: 9609 1001 |00e1: or-int v9, v16, v1 +005bee: 0801 1100 |00e3: move-object/from16 v1, v17 +005bf2: 0202 1200 |00e5: move/from16 v2, v18 +005bf6: 0203 1400 |00e7: move/from16 v3, v20 +005bfa: 7609 8600 0100 |00e9: invoke-direct/range {v1, v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/a;.b:(IIIIIIII)V // method@0086 +005c00: 01e1 |00ec: move v1, v14 +005c02: 288c |00ed: goto 0079 // -0074 +005c04: 6201 3000 |00ee: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005c08: 4401 010f |00f0: aget v1, v1, v15 +005c0c: b541 |00f2: and-int/2addr v1, v4 +005c0e: 3801 2d00 |00f3: if-eqz v1, 0120 // +002d +005c12: 6201 3100 |00f5: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005c16: 4401 010f |00f7: aget v1, v1, v15 +005c1a: 9501 0114 |00f9: and-int v1, v1, v20 +005c1e: 3801 2500 |00fb: if-eqz v1, 0120 // +0025 +005c22: 9607 1410 |00fd: or-int v7, v20, v16 +005c26: 6201 3000 |00ff: sget-object v1, Lcom/google/android/checkers/a;.D:[I 
// field@0030 +005c2a: 4401 010f |0101: aget v1, v1, v15 +005c2e: 9708 0401 |0103: xor-int v8, v4, v1 +005c32: d809 0ff7 |0105: add-int/lit8 v9, v15, #int -9 // #f7 +005c36: 6201 3100 |0107: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005c3a: 440a 010f |0109: aget v10, v1, v15 +005c3e: 6201 3000 |010b: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005c42: 4401 010f |010d: aget v1, v1, v15 +005c46: 960b 1001 |010f: or-int v11, v16, v1 +005c4a: 130c 0102 |0111: const/16 v12, #int 513 // #201 +005c4e: 6201 3100 |0113: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005c52: 4401 010f |0115: aget v1, v1, v15 +005c56: 960d 1001 |0117: or-int v13, v16, v1 +005c5a: 0805 1100 |0119: move-object/from16 v5, v17 +005c5e: 0206 1200 |011b: move/from16 v6, v18 +005c62: 7609 8b00 0500 |011d: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +005c68: 6201 3200 |0120: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005c6c: 4401 010f |0122: aget v1, v1, v15 +005c70: b541 |0124: and-int/2addr v1, v4 +005c72: 3801 2d00 |0125: if-eqz v1, 0152 // +002d +005c76: 6201 3300 |0127: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005c7a: 4401 010f |0129: aget v1, v1, v15 +005c7e: 9501 0114 |012b: and-int v1, v1, v20 +005c82: 3801 2500 |012d: if-eqz v1, 0152 // +0025 +005c86: 9607 1410 |012f: or-int v7, v20, v16 +005c8a: 6201 3200 |0131: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005c8e: 4401 010f |0133: aget v1, v1, v15 +005c92: 9708 0401 |0135: xor-int v8, v4, v1 +005c96: d809 0ff9 |0137: add-int/lit8 v9, v15, #int -7 // #f9 +005c9a: 6201 3300 |0139: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005c9e: 440a 010f |013b: aget v10, v1, v15 +005ca2: 6201 3200 |013d: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005ca6: 4401 010f |013f: aget v1, v1, v15 +005caa: 960b 1001 |0141: or-int v11, v16, v1 +005cae: 130c 0102 |0143: const/16 v12, #int 513 // #201 +005cb2: 6201 3300 |0145: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005cb6: 4401 010f |0147: aget v1, v1, v15 +005cba: 960d 1001 |0149: or-int v13, v16, v1 +005cbe: 0805 1100 |014b: move-object/from16 v5, v17 +005cc2: 0206 1200 |014d: move/from16 v6, v18 +005cc6: 7609 8b00 0500 |014f: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +005ccc: 6201 3400 |0152: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +005cd0: 4401 010f |0154: aget v1, v1, v15 +005cd4: b541 |0156: and-int/2addr v1, v4 +005cd6: 3801 2d00 |0157: if-eqz v1, 0184 // +002d +005cda: 6201 3500 |0159: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +005cde: 4401 010f |015b: aget v1, v1, v15 +005ce2: 9501 0114 |015d: and-int v1, v1, v20 +005ce6: 3801 2500 |015f: if-eqz v1, 0184 // +0025 +005cea: 9607 1410 |0161: or-int v7, v20, v16 +005cee: 6201 3400 |0163: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +005cf2: 4401 010f |0165: aget v1, v1, v15 +005cf6: 9708 0401 |0167: xor-int v8, v4, v1 +005cfa: d809 0f07 |0169: add-int/lit8 v9, v15, #int 7 // #07 +005cfe: 6201 3500 |016b: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +005d02: 440a 010f |016d: aget v10, v1, v15 +005d06: 6201 3400 |016f: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +005d0a: 4401 010f |0171: aget v1, 
v1, v15 +005d0e: 960b 1001 |0173: or-int v11, v16, v1 +005d12: 130c 0102 |0175: const/16 v12, #int 513 // #201 +005d16: 6201 3500 |0177: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +005d1a: 4401 010f |0179: aget v1, v1, v15 +005d1e: 960d 1001 |017b: or-int v13, v16, v1 +005d22: 0805 1100 |017d: move-object/from16 v5, v17 +005d26: 0206 1200 |017f: move/from16 v6, v18 +005d2a: 7609 8b00 0500 |0181: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +005d30: 6201 3600 |0184: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +005d34: 4401 010f |0186: aget v1, v1, v15 +005d38: b541 |0188: and-int/2addr v1, v4 +005d3a: 3801 2d00 |0189: if-eqz v1, 01b6 // +002d +005d3e: 6201 3700 |018b: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +005d42: 4401 010f |018d: aget v1, v1, v15 +005d46: 9501 0114 |018f: and-int v1, v1, v20 +005d4a: 3801 2500 |0191: if-eqz v1, 01b6 // +0025 +005d4e: 9607 1410 |0193: or-int v7, v20, v16 +005d52: 6201 3600 |0195: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +005d56: 4401 010f |0197: aget v1, v1, v15 +005d5a: 9708 0401 |0199: xor-int v8, v4, v1 +005d5e: d809 0f09 |019b: add-int/lit8 v9, v15, #int 9 // #09 +005d62: 6201 3700 |019d: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +005d66: 440a 010f |019f: aget v10, v1, v15 +005d6a: 6201 3600 |01a1: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +005d6e: 4401 010f |01a3: aget v1, v1, v15 +005d72: 960b 1001 |01a5: or-int v11, v16, v1 +005d76: 130c 0102 |01a7: const/16 v12, #int 513 // #201 +005d7a: 6201 3700 |01a9: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +005d7e: 4401 010f |01ab: aget v1, v1, v15 +005d82: 960d 1001 |01ad: or-int v13, v16, v1 +005d86: 0805 1100 |01af: move-object/from16 v5, v17 +005d8a: 0206 1200 |01b1: move/from16 v6, v18 +005d8e: 7609 8b00 0500 |01b3: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +005d94: 01e1 |01b6: move v1, v14 +005d96: 2900 c2fe |01b7: goto/16 0079 // -013e +005d9a: 0800 1100 |01b9: move-object/from16 v0, v17 +005d9e: 5201 4000 |01bb: iget v1, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005da2: 0800 1100 |01bd: move-object/from16 v0, v17 +005da6: 5202 3f00 |01bf: iget v2, v0, Lcom/google/android/checkers/a;.f:I // field@003f +005daa: b612 |01c1: or-int/2addr v2, v1 +005dac: 0800 1100 |01c2: move-object/from16 v0, v17 +005db0: 5201 3e00 |01c4: iget v1, v0, Lcom/google/android/checkers/a;.e:I // field@003e +005db4: 0800 1100 |01c6: move-object/from16 v0, v17 +005db8: 5203 3d00 |01c8: iget v3, v0, Lcom/google/android/checkers/a;.d:I // field@003d +005dbc: 9604 0103 |01ca: or-int v4, v1, v3 +005dc0: 1201 |01cc: const/4 v1, #int 0 // #0 +005dc2: 0800 1100 |01cd: move-object/from16 v0, v17 +005dc6: 5203 4000 |01cf: iget v3, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005dca: 3803 3500 |01d1: if-eqz v3, 0206 // +0035 +005dce: e203 1404 |01d3: ushr-int/lit8 v3, v20, #int 4 // #04 +005dd2: b543 |01d5: and-int/2addr v3, v4 +005dd4: 3803 1600 |01d6: if-eqz v3, 01ec // +0016 +005dd8: 0800 1100 |01d8: move-object/from16 v0, v17 +005ddc: 5201 4000 |01da: iget v1, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005de0: 1405 e0e0 e0e0 |01dc: const v5, #float -129633581999069331456.000000 // #e0e0e0e0 +005de6: b535 |01df: and-int/2addr v5, v3 +005de8: e205 0505 |01e0: ushr-int/lit8 
v5, v5, #int 5 // #05 +005dec: 1406 0007 0707 |01e2: const v6, #float 0.000000 // #07070700 +005df2: b563 |01e5: and-int/2addr v3, v6 +005df4: e203 0303 |01e6: ushr-int/lit8 v3, v3, #int 3 // #03 +005df8: b653 |01e8: or-int/2addr v3, v5 +005dfa: b531 |01e9: and-int/2addr v1, v3 +005dfc: de01 0100 |01ea: or-int/lit8 v1, v1, #int 0 // #00 +005e00: 1403 e0e0 e0e0 |01ec: const v3, #float -129633581999069331456.000000 // #e0e0e0e0 +005e06: 9503 0314 |01ef: and-int v3, v3, v20 +005e0a: e203 0305 |01f1: ushr-int/lit8 v3, v3, #int 5 // #05 +005e0e: 1405 0007 0707 |01f3: const v5, #float 0.000000 // #07070700 +005e14: 9505 0514 |01f6: and-int v5, v5, v20 +005e18: e205 0503 |01f8: ushr-int/lit8 v5, v5, #int 3 // #03 +005e1c: b653 |01fa: or-int/2addr v3, v5 +005e1e: b543 |01fb: and-int/2addr v3, v4 +005e20: 3803 0a00 |01fc: if-eqz v3, 0206 // +000a +005e24: 0800 1100 |01fe: move-object/from16 v0, v17 +005e28: 5205 4000 |0200: iget v5, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005e2c: e203 0304 |0202: ushr-int/lit8 v3, v3, #int 4 // #04 +005e30: b553 |0204: and-int/2addr v3, v5 +005e32: b631 |0205: or-int/2addr v1, v3 +005e34: e003 1404 |0206: shl-int/lit8 v3, v20, #int 4 // #04 +005e38: b543 |0208: and-int/2addr v3, v4 +005e3a: 3803 1100 |0209: if-eqz v3, 021a // +0011 +005e3e: 1405 0707 0707 |020b: const v5, #float 0.000000 // #07070707 +005e44: b535 |020e: and-int/2addr v5, v3 +005e46: e005 0505 |020f: shl-int/lit8 v5, v5, #int 5 // #05 +005e4a: 1406 e0e0 e000 |0211: const v6, #float 0.000000 // #00e0e0e0 +005e50: b563 |0214: and-int/2addr v3, v6 +005e52: e003 0303 |0215: shl-int/lit8 v3, v3, #int 3 // #03 +005e56: b653 |0217: or-int/2addr v3, v5 +005e58: b523 |0218: and-int/2addr v3, v2 +005e5a: b631 |0219: or-int/2addr v1, v3 +005e5c: 1403 0707 0707 |021a: const v3, #float 0.000000 // #07070707 +005e62: 9503 0314 |021d: and-int v3, v3, v20 +005e66: e003 0305 |021f: shl-int/lit8 v3, v3, #int 5 // #05 +005e6a: 1405 e0e0 e000 |0221: const v5, #float 0.000000 // #00e0e0e0 +005e70: 9505 0514 |0224: and-int v5, v5, v20 +005e74: e005 0503 |0226: shl-int/lit8 v5, v5, #int 3 // #03 +005e78: b653 |0228: or-int/2addr v3, v5 +005e7a: b543 |0229: and-int/2addr v3, v4 +005e7c: 3803 0600 |022a: if-eqz v3, 0230 // +0006 +005e80: e003 0304 |022c: shl-int/lit8 v3, v3, #int 4 // #04 +005e84: b532 |022e: and-int/2addr v2, v3 +005e86: b621 |022f: or-int/2addr v1, v2 +005e88: 3801 4bfe |0230: if-eqz v1, 007b // -01b5 +005e8c: 7110 9f00 0100 |0232: invoke-static {v1}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +005e92: 0a0f |0235: move-result v15 +005e94: 1212 |0236: const/4 v2, #int 1 // #1 +005e96: 9810 020f |0237: shl-int v16, v2, v15 +005e9a: 970e 0110 |0239: xor-int v14, v1, v16 +005e9e: 0800 1100 |023b: move-object/from16 v0, v17 +005ea2: 5201 4000 |023d: iget v1, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +005ea6: 9501 0110 |023f: and-int v1, v1, v16 +005eaa: 3901 5c00 |0241: if-nez v1, 029d // +005c +005eae: 6201 3000 |0243: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005eb2: 4401 010f |0245: aget v1, v1, v15 +005eb6: b541 |0247: and-int/2addr v1, v4 +005eb8: 3801 2700 |0248: if-eqz v1, 026f // +0027 +005ebc: 6201 3100 |024a: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005ec0: 4401 010f |024c: aget v1, v1, v15 +005ec4: 9501 0114 |024e: and-int v1, v1, v20 +005ec8: 3801 1f00 |0250: if-eqz v1, 026f // +001f +005ecc: d805 0ff7 |0252: add-int/lit8 v5, v15, #int -9 // #f7 +005ed0: 6201 3100 |0254: sget-object v1, 
Lcom/google/android/checkers/a;.E:[I // field@0031 +005ed4: 4406 010f |0256: aget v6, v1, v15 +005ed8: 6201 3000 |0258: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005edc: 4401 010f |025a: aget v1, v1, v15 +005ee0: 9607 1001 |025c: or-int v7, v16, v1 +005ee4: 1308 0104 |025e: const/16 v8, #int 1025 // #401 +005ee8: 6201 3100 |0260: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005eec: 4401 010f |0262: aget v1, v1, v15 +005ef0: 9609 1001 |0264: or-int v9, v16, v1 +005ef4: 0801 1100 |0266: move-object/from16 v1, v17 +005ef8: 0202 1200 |0268: move/from16 v2, v18 +005efc: 0203 1400 |026a: move/from16 v3, v20 +005f00: 7609 7f00 0100 |026c: invoke-direct/range {v1, v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/a;.a:(IIIIIIII)V // method@007f +005f06: 6201 3200 |026f: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005f0a: 4401 010f |0271: aget v1, v1, v15 +005f0e: b541 |0273: and-int/2addr v1, v4 +005f10: 3801 f100 |0274: if-eqz v1, 0365 // +00f1 +005f14: 6201 3300 |0276: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005f18: 4401 010f |0278: aget v1, v1, v15 +005f1c: 9501 0114 |027a: and-int v1, v1, v20 +005f20: 3801 e900 |027c: if-eqz v1, 0365 // +00e9 +005f24: d805 0ff9 |027e: add-int/lit8 v5, v15, #int -7 // #f9 +005f28: 6201 3300 |0280: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005f2c: 4406 010f |0282: aget v6, v1, v15 +005f30: 6201 3200 |0284: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005f34: 4401 010f |0286: aget v1, v1, v15 +005f38: 9607 1001 |0288: or-int v7, v16, v1 +005f3c: 1308 0104 |028a: const/16 v8, #int 1025 // #401 +005f40: 6201 3300 |028c: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005f44: 4401 010f |028e: aget v1, v1, v15 +005f48: 9609 1001 |0290: or-int v9, v16, v1 +005f4c: 0801 1100 |0292: move-object/from16 v1, v17 +005f50: 0202 1200 |0294: move/from16 v2, v18 +005f54: 0203 1400 |0296: move/from16 v3, v20 +005f58: 7609 7f00 0100 |0298: invoke-direct/range {v1, v2, v3, v4, v5, v6, v7, v8, v9}, Lcom/google/android/checkers/a;.a:(IIIIIIII)V // method@007f +005f5e: 01e1 |029b: move v1, v14 +005f60: 2894 |029c: goto 0230 // -006c +005f62: 6201 3000 |029d: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005f66: 4401 010f |029f: aget v1, v1, v15 +005f6a: b541 |02a1: and-int/2addr v1, v4 +005f6c: 3801 2d00 |02a2: if-eqz v1, 02cf // +002d +005f70: 6201 3100 |02a4: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005f74: 4401 010f |02a6: aget v1, v1, v15 +005f78: 9501 0114 |02a8: and-int v1, v1, v20 +005f7c: 3801 2500 |02aa: if-eqz v1, 02cf // +0025 +005f80: 9607 1410 |02ac: or-int v7, v20, v16 +005f84: 6201 3000 |02ae: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005f88: 4401 010f |02b0: aget v1, v1, v15 +005f8c: 9708 0401 |02b2: xor-int v8, v4, v1 +005f90: d809 0ff7 |02b4: add-int/lit8 v9, v15, #int -9 // #f7 +005f94: 6201 3100 |02b6: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005f98: 440a 010f |02b8: aget v10, v1, v15 +005f9c: 6201 3000 |02ba: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +005fa0: 4401 010f |02bc: aget v1, v1, v15 +005fa4: 960b 1001 |02be: or-int v11, v16, v1 +005fa8: 130c 0108 |02c0: const/16 v12, #int 2049 // #801 +005fac: 6201 3100 |02c2: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +005fb0: 4401 010f |02c4: aget v1, v1, v15 +005fb4: 960d 1001 |02c6: or-int 
v13, v16, v1 +005fb8: 0805 1100 |02c8: move-object/from16 v5, v17 +005fbc: 0206 1200 |02ca: move/from16 v6, v18 +005fc0: 7609 8b00 0500 |02cc: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +005fc6: 6201 3200 |02cf: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005fca: 4401 010f |02d1: aget v1, v1, v15 +005fce: b541 |02d3: and-int/2addr v1, v4 +005fd0: 3801 2d00 |02d4: if-eqz v1, 0301 // +002d +005fd4: 6201 3300 |02d6: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005fd8: 4401 010f |02d8: aget v1, v1, v15 +005fdc: 9501 0114 |02da: and-int v1, v1, v20 +005fe0: 3801 2500 |02dc: if-eqz v1, 0301 // +0025 +005fe4: 9607 1410 |02de: or-int v7, v20, v16 +005fe8: 6201 3200 |02e0: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +005fec: 4401 010f |02e2: aget v1, v1, v15 +005ff0: 9708 0401 |02e4: xor-int v8, v4, v1 +005ff4: d809 0ff9 |02e6: add-int/lit8 v9, v15, #int -7 // #f9 +005ff8: 6201 3300 |02e8: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +005ffc: 440a 010f |02ea: aget v10, v1, v15 +006000: 6201 3200 |02ec: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +006004: 4401 010f |02ee: aget v1, v1, v15 +006008: 960b 1001 |02f0: or-int v11, v16, v1 +00600c: 130c 0108 |02f2: const/16 v12, #int 2049 // #801 +006010: 6201 3300 |02f4: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +006014: 4401 010f |02f6: aget v1, v1, v15 +006018: 960d 1001 |02f8: or-int v13, v16, v1 +00601c: 0805 1100 |02fa: move-object/from16 v5, v17 +006020: 0206 1200 |02fc: move/from16 v6, v18 +006024: 7609 8b00 0500 |02fe: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +00602a: 6201 3400 |0301: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +00602e: 4401 010f |0303: aget v1, v1, v15 +006032: b541 |0305: and-int/2addr v1, v4 +006034: 3801 2d00 |0306: if-eqz v1, 0333 // +002d +006038: 6201 3500 |0308: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +00603c: 4401 010f |030a: aget v1, v1, v15 +006040: 9501 0114 |030c: and-int v1, v1, v20 +006044: 3801 2500 |030e: if-eqz v1, 0333 // +0025 +006048: 9607 1410 |0310: or-int v7, v20, v16 +00604c: 6201 3400 |0312: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +006050: 4401 010f |0314: aget v1, v1, v15 +006054: 9708 0401 |0316: xor-int v8, v4, v1 +006058: d809 0f07 |0318: add-int/lit8 v9, v15, #int 7 // #07 +00605c: 6201 3500 |031a: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +006060: 440a 010f |031c: aget v10, v1, v15 +006064: 6201 3400 |031e: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +006068: 4401 010f |0320: aget v1, v1, v15 +00606c: 960b 1001 |0322: or-int v11, v16, v1 +006070: 130c 0108 |0324: const/16 v12, #int 2049 // #801 +006074: 6201 3500 |0326: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +006078: 4401 010f |0328: aget v1, v1, v15 +00607c: 960d 1001 |032a: or-int v13, v16, v1 +006080: 0805 1100 |032c: move-object/from16 v5, v17 +006084: 0206 1200 |032e: move/from16 v6, v18 +006088: 7609 8b00 0500 |0330: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +00608e: 6201 3600 |0333: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +006092: 4401 010f |0335: aget v1, v1, v15 +006096: b541 |0337: 
and-int/2addr v1, v4 +006098: 3801 2d00 |0338: if-eqz v1, 0365 // +002d +00609c: 6201 3700 |033a: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +0060a0: 4401 010f |033c: aget v1, v1, v15 +0060a4: 9501 0114 |033e: and-int v1, v1, v20 +0060a8: 3801 2500 |0340: if-eqz v1, 0365 // +0025 +0060ac: 9607 1410 |0342: or-int v7, v20, v16 +0060b0: 6201 3600 |0344: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +0060b4: 4401 010f |0346: aget v1, v1, v15 +0060b8: 9708 0401 |0348: xor-int v8, v4, v1 +0060bc: d809 0f09 |034a: add-int/lit8 v9, v15, #int 9 // #09 +0060c0: 6201 3700 |034c: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +0060c4: 440a 010f |034e: aget v10, v1, v15 +0060c8: 6201 3600 |0350: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +0060cc: 4401 010f |0352: aget v1, v1, v15 +0060d0: 960b 1001 |0354: or-int v11, v16, v1 +0060d4: 130c 0108 |0356: const/16 v12, #int 2049 // #801 +0060d8: 6201 3700 |0358: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +0060dc: 4401 010f |035a: aget v1, v1, v15 +0060e0: 960d 1001 |035c: or-int v13, v16, v1 +0060e4: 0805 1100 |035e: move-object/from16 v5, v17 +0060e8: 0206 1200 |0360: move/from16 v6, v18 +0060ec: 7609 8b00 0500 |0362: invoke-direct/range {v5, v6, v7, v8, v9, v10, v11, v12, v13}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +0060f2: 01e1 |0365: move v1, v14 +0060f4: 2900 cafe |0366: goto/16 0230 // -0136 +0060f8: 1201 |0368: const/4 v1, #int 0 // #0 +0060fa: 2900 19fd |0369: goto/16 0082 // -02e7 + catches : (none) + positions : + locals : + + #13 : (in Lcom/google/android/checkers/a;) + name : 'b' + type : '(ZIIIZ)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 9 + ins : 5 + outs : 1 + insns size : 46 16-bit code units +006100: |[006100] com.google.android.checkers.a.b:(ZIIIZ)I +006110: 7110 9f00 0500 |0000: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006116: 0a02 |0003: move-result v2 +006118: 7110 9f00 0600 |0004: invoke-static {v6}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +00611e: 0a01 |0007: move-result v1 +006120: 7110 9f00 0700 |0008: invoke-static {v7}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006126: 0a00 |000b: move-result v0 +006128: 3808 0800 |000c: if-eqz v8, 0014 // +0008 +00612c: d902 021f |000e: rsub-int/lit8 v2, v2, #int 31 // #1f +006130: d901 011f |0010: rsub-int/lit8 v1, v1, #int 31 // #1f +006134: d900 001f |0012: rsub-int/lit8 v0, v0, #int 31 // #1f +006138: d800 00fc |0014: add-int/lit8 v0, v0, #int -4 // #fc +00613c: 3804 0d00 |0016: if-eqz v4, 0023 // +000d +006140: 6203 6300 |0018: sget-object v3, Lcom/google/android/checkers/g;.k:[B // field@0063 +006144: d200 8003 |001a: mul-int/lit16 v0, v0, #int 896 // #0380 +006148: da02 0220 |001c: mul-int/lit8 v2, v2, #int 32 // #20 +00614c: b020 |001e: add-int/2addr v0, v2 +00614e: b010 |001f: add-int/2addr v0, v1 +006150: 4800 0300 |0020: aget-byte v0, v3, v0 +006154: 0f00 |0022: return v0 +006156: 6203 6400 |0023: sget-object v3, Lcom/google/android/checkers/g;.l:[B // field@0064 +00615a: d200 8003 |0025: mul-int/lit16 v0, v0, #int 896 // #0380 +00615e: da02 0220 |0027: mul-int/lit8 v2, v2, #int 32 // #20 +006162: b020 |0029: add-int/2addr v0, v2 +006164: b010 |002a: add-int/2addr v0, v1 +006166: 4800 0300 |002b: aget-byte v0, v3, v0 +00616a: 28f5 |002d: goto 0022 // -000b + catches : (none) + positions : + locals : + + #14 : (in Lcom/google/android/checkers/a;) + 
name : 'b' + type : '(ZIIZ)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 8 + ins : 4 + outs : 1 + insns size : 56 16-bit code units +00616c: |[00616c] com.google.android.checkers.a.b:(ZIIZ)I +00617c: 7110 9f00 0500 |0000: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006182: 0a00 |0003: move-result v0 +006184: d801 05ff |0004: add-int/lit8 v1, v5, #int -1 // #ff +006188: b551 |0006: and-int/2addr v1, v5 +00618a: 7110 9f00 0100 |0007: invoke-static {v1}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006190: 0a02 |000a: move-result v2 +006192: 7110 9f00 0600 |000b: invoke-static {v6}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006198: 0a01 |000e: move-result v1 +00619a: 3807 0800 |000f: if-eqz v7, 0017 // +0008 +00619e: d900 001f |0011: rsub-int/lit8 v0, v0, #int 31 // #1f +0061a2: d902 021f |0013: rsub-int/lit8 v2, v2, #int 31 // #1f +0061a6: d901 011f |0015: rsub-int/lit8 v1, v1, #int 31 // #1f +0061aa: d801 01fc |0017: add-int/lit8 v1, v1, #int -4 // #fc +0061ae: 3520 1100 |0019: if-ge v0, v2, 002a // +0011 +0061b2: 6203 3800 |001b: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +0061b6: 4402 0302 |001d: aget v2, v3, v2 +0061ba: b020 |001f: add-int/2addr v0, v2 +0061bc: 3804 1000 |0020: if-eqz v4, 0030 // +0010 +0061c0: 6202 5f00 |0022: sget-object v2, Lcom/google/android/checkers/g;.g:[B // field@005f +0061c4: d211 f001 |0024: mul-int/lit16 v1, v1, #int 496 // #01f0 +0061c8: b010 |0026: add-int/2addr v0, v1 +0061ca: 4800 0200 |0027: aget-byte v0, v2, v0 +0061ce: 0f00 |0029: return v0 +0061d0: 6203 3800 |002a: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +0061d4: 4400 0300 |002c: aget v0, v3, v0 +0061d8: b020 |002e: add-int/2addr v0, v2 +0061da: 28f1 |002f: goto 0020 // -000f +0061dc: 6202 6000 |0030: sget-object v2, Lcom/google/android/checkers/g;.h:[B // field@0060 +0061e0: d211 f001 |0032: mul-int/lit16 v1, v1, #int 496 // #01f0 +0061e4: b010 |0034: add-int/2addr v0, v1 +0061e6: 4800 0200 |0035: aget-byte v0, v2, v0 +0061ea: 28f2 |0037: goto 0029 // -000e + catches : (none) + positions : + locals : + + #15 : (in Lcom/google/android/checkers/a;) + name : 'b' + type : '()V' + access : 0x20012 (PRIVATE FINAL DECLARED_SYNCHRONIZED) + code - + registers : 2 + ins : 1 + outs : 1 + insns size : 19 16-bit code units +0061ec: |[0061ec] com.google.android.checkers.a.b:()V +0061fc: 1d01 |0000: monitor-enter v1 +0061fe: 5510 4400 |0001: iget-boolean v0, v1, Lcom/google/android/checkers/a;.k:Z // field@0044 +006202: 3800 0700 |0003: if-eqz v0, 000a // +0007 +006206: 1200 |0005: const/4 v0, #int 0 // #0 +006208: 5c10 4400 |0006: iput-boolean v0, v1, Lcom/google/android/checkers/a;.k:Z // field@0044 +00620c: 1e01 |0008: monitor-exit v1 +00620e: 0e00 |0009: return-void +006210: 6e10 a200 0100 |000a: invoke-virtual {v1}, Ljava/lang/Object;.wait:()V // method@00a2 +006216: 28f4 |000d: goto 0001 // -000c +006218: 0d00 |000e: move-exception v0 +00621a: 28f2 |000f: goto 0001 // -000e +00621c: 0d00 |0010: move-exception v0 +00621e: 1e01 |0011: monitor-exit v1 +006220: 2700 |0012: throw v0 + catches : 2 + 0x0001 - 0x0008 + <any> -> 0x0010 + 0x000a - 0x000d + Ljava/lang/InterruptedException; -> 0x000e + <any> -> 0x0010 + positions : + locals : + + #16 : (in Lcom/google/android/checkers/a;) + name : 'b' + type : '(I)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 10 + ins : 2 + outs : 1 + insns size : 368 16-bit code units +00623c: |[00623c] 
com.google.android.checkers.a.b:(I)V +00624c: 5280 3d00 |0000: iget v0, v8, Lcom/google/android/checkers/a;.d:I // field@003d +006250: 5282 3e00 |0002: iget v2, v8, Lcom/google/android/checkers/a;.e:I // field@003e +006254: 5283 3f00 |0004: iget v3, v8, Lcom/google/android/checkers/a;.f:I // field@003f +006258: 5284 4000 |0006: iget v4, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +00625c: 5481 4900 |0008: iget-object v1, v8, Lcom/google/android/checkers/a;.p:[I // field@0049 +006260: 4401 0109 |000a: aget v1, v1, v9 +006264: d511 ff00 |000c: and-int/lit16 v1, v1, #int 255 // #00ff +006268: 5485 4900 |000e: iget-object v5, v8, Lcom/google/android/checkers/a;.p:[I // field@0049 +00626c: 4405 0509 |0010: aget v5, v5, v9 +006270: d555 000f |0012: and-int/lit16 v5, v5, #int 3840 // #0f00 +006274: 5486 4800 |0014: iget-object v6, v8, Lcom/google/android/checkers/a;.o:[I // field@0048 +006278: 4406 0609 |0016: aget v6, v6, v9 +00627c: 2c05 4601 0000 |0018: sparse-switch v5, 0000015e // +00000146 +006282: 5281 5100 |001b: iget v1, v8, Lcom/google/android/checkers/a;.x:I // field@0051 +006286: 6205 3900 |001d: sget-object v5, Lcom/google/android/checkers/a;.M:[I // field@0039 +00628a: 1306 8000 |001f: const/16 v6, #int 128 // #80 +00628e: 4405 0506 |0021: aget v5, v5, v6 +006292: b751 |0023: xor-int/2addr v1, v5 +006294: 5285 3d00 |0024: iget v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +006298: b750 |0026: xor-int/2addr v0, v5 +00629a: 3900 ee00 |0027: if-nez v0, 0115 // +00ee +00629e: 5280 3e00 |0029: iget v0, v8, Lcom/google/android/checkers/a;.e:I // field@003e +0062a2: b720 |002b: xor-int/2addr v0, v2 +0062a4: 3900 fb00 |002c: if-nez v0, 0127 // +00fb +0062a8: 5280 3f00 |002e: iget v0, v8, Lcom/google/android/checkers/a;.f:I // field@003f +0062ac: b730 |0030: xor-int/2addr v0, v3 +0062ae: 3900 0801 |0031: if-nez v0, 0139 // +0108 +0062b2: 5280 4000 |0033: iget v0, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +0062b6: b740 |0035: xor-int/2addr v0, v4 +0062b8: 3900 1501 |0036: if-nez v0, 014b // +0115 +0062bc: 5981 5100 |0038: iput v1, v8, Lcom/google/android/checkers/a;.x:I // field@0051 +0062c0: 0e00 |003a: return-void +0062c2: 1505 00f0 |003b: const/high16 v5, #int -268435456 // #f000 +0062c6: b565 |003d: and-int/2addr v5, v6 +0062c8: 3805 3100 |003e: if-eqz v5, 006f // +0031 +0062cc: 5285 3d00 |0040: iget v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +0062d0: 5487 3a00 |0042: iget-object v7, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +0062d4: 4407 0709 |0044: aget v7, v7, v9 +0062d8: df07 07ff |0046: xor-int/lit8 v7, v7, #int -1 // #ff +0062dc: b575 |0048: and-int/2addr v5, v7 +0062de: 5985 3d00 |0049: iput v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +0062e2: 5285 3e00 |004b: iget v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +0062e6: b665 |004d: or-int/2addr v5, v6 +0062e8: 5985 3e00 |004e: iput v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +0062ec: 3801 cbff |0050: if-eqz v1, 001b // -0035 +0062f0: 5285 3f00 |0052: iget v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +0062f4: 5486 3a00 |0054: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +0062f8: 4406 0609 |0056: aget v6, v6, v9 +0062fc: df06 06ff |0058: xor-int/lit8 v6, v6, #int -1 // #ff +006300: b565 |005a: and-int/2addr v5, v6 +006302: 5985 3f00 |005b: iput v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +006306: 5285 4000 |005d: iget v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 
+00630a: 5486 3a00 |005f: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +00630e: 4406 0609 |0061: aget v6, v6, v9 +006312: df06 06ff |0063: xor-int/lit8 v6, v6, #int -1 // #ff +006316: b565 |0065: and-int/2addr v5, v6 +006318: 5985 4000 |0066: iput v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +00631c: 5285 5000 |0068: iget v5, v8, Lcom/google/android/checkers/a;.w:I // field@0050 +006320: 9101 0501 |006a: sub-int v1, v5, v1 +006324: 5981 5000 |006c: iput v1, v8, Lcom/google/android/checkers/a;.w:I // field@0050 +006328: 28ad |006e: goto 001b // -0053 +00632a: 5285 3d00 |006f: iget v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +00632e: 5487 3a00 |0071: iget-object v7, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +006332: 4407 0709 |0073: aget v7, v7, v9 +006336: df07 07ff |0075: xor-int/lit8 v7, v7, #int -1 // #ff +00633a: b575 |0077: and-int/2addr v5, v7 +00633c: b665 |0078: or-int/2addr v5, v6 +00633e: 5985 3d00 |0079: iput v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +006342: 28d5 |007b: goto 0050 // -002b +006344: 5285 3e00 |007c: iget v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +006348: 5487 3a00 |007e: iget-object v7, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +00634c: 4407 0709 |0080: aget v7, v7, v9 +006350: df07 07ff |0082: xor-int/lit8 v7, v7, #int -1 // #ff +006354: b575 |0084: and-int/2addr v5, v7 +006356: b665 |0085: or-int/2addr v5, v6 +006358: 5985 3e00 |0086: iput v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +00635c: 3801 93ff |0088: if-eqz v1, 001b // -006d +006360: 5285 3f00 |008a: iget v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +006364: 5486 3a00 |008c: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +006368: 4406 0609 |008e: aget v6, v6, v9 +00636c: df06 06ff |0090: xor-int/lit8 v6, v6, #int -1 // #ff +006370: b565 |0092: and-int/2addr v5, v6 +006372: 5985 3f00 |0093: iput v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +006376: 5285 4000 |0095: iget v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +00637a: 5486 3a00 |0097: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +00637e: 4406 0609 |0099: aget v6, v6, v9 +006382: df06 06ff |009b: xor-int/lit8 v6, v6, #int -1 // #ff +006386: b565 |009d: and-int/2addr v5, v6 +006388: 5985 4000 |009e: iput v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +00638c: 5285 5000 |00a0: iget v5, v8, Lcom/google/android/checkers/a;.w:I // field@0050 +006390: 9101 0501 |00a2: sub-int v1, v5, v1 +006394: 5981 5000 |00a4: iput v1, v8, Lcom/google/android/checkers/a;.w:I // field@0050 +006398: 2900 75ff |00a6: goto/16 001b // -008b +00639c: dd05 060f |00a8: and-int/lit8 v5, v6, #int 15 // #0f +0063a0: 3805 3200 |00aa: if-eqz v5, 00dc // +0032 +0063a4: 5285 3f00 |00ac: iget v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +0063a8: 5487 3a00 |00ae: iget-object v7, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +0063ac: 4407 0709 |00b0: aget v7, v7, v9 +0063b0: df07 07ff |00b2: xor-int/lit8 v7, v7, #int -1 // #ff +0063b4: b575 |00b4: and-int/2addr v5, v7 +0063b6: 5985 3f00 |00b5: iput v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +0063ba: 5285 4000 |00b7: iget v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +0063be: b665 |00b9: or-int/2addr v5, v6 +0063c0: 5985 4000 |00ba: iput v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +0063c4: 3801 5fff |00bc: if-eqz v1, 001b // -00a1 +0063c8: 5285 3d00 |00be: 
iget v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +0063cc: 5486 3a00 |00c0: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +0063d0: 4406 0609 |00c2: aget v6, v6, v9 +0063d4: df06 06ff |00c4: xor-int/lit8 v6, v6, #int -1 // #ff +0063d8: b565 |00c6: and-int/2addr v5, v6 +0063da: 5985 3d00 |00c7: iput v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +0063de: 5285 3e00 |00c9: iget v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +0063e2: 5486 3a00 |00cb: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +0063e6: 4406 0609 |00cd: aget v6, v6, v9 +0063ea: df06 06ff |00cf: xor-int/lit8 v6, v6, #int -1 // #ff +0063ee: b565 |00d1: and-int/2addr v5, v6 +0063f0: 5985 3e00 |00d2: iput v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +0063f4: 5285 4f00 |00d4: iget v5, v8, Lcom/google/android/checkers/a;.v:I // field@004f +0063f8: 9101 0501 |00d6: sub-int v1, v5, v1 +0063fc: 5981 4f00 |00d8: iput v1, v8, Lcom/google/android/checkers/a;.v:I // field@004f +006400: 2900 41ff |00da: goto/16 001b // -00bf +006404: 5285 3f00 |00dc: iget v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +006408: 5487 3a00 |00de: iget-object v7, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +00640c: 4407 0709 |00e0: aget v7, v7, v9 +006410: df07 07ff |00e2: xor-int/lit8 v7, v7, #int -1 // #ff +006414: b575 |00e4: and-int/2addr v5, v7 +006416: b665 |00e5: or-int/2addr v5, v6 +006418: 5985 3f00 |00e6: iput v5, v8, Lcom/google/android/checkers/a;.f:I // field@003f +00641c: 28d4 |00e8: goto 00bc // -002c +00641e: 5285 4000 |00e9: iget v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +006422: 5487 3a00 |00eb: iget-object v7, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +006426: 4407 0709 |00ed: aget v7, v7, v9 +00642a: df07 07ff |00ef: xor-int/lit8 v7, v7, #int -1 // #ff +00642e: b575 |00f1: and-int/2addr v5, v7 +006430: b665 |00f2: or-int/2addr v5, v6 +006432: 5985 4000 |00f3: iput v5, v8, Lcom/google/android/checkers/a;.g:I // field@0040 +006436: 3801 26ff |00f5: if-eqz v1, 001b // -00da +00643a: 5285 3d00 |00f7: iget v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +00643e: 5486 3a00 |00f9: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +006442: 4406 0609 |00fb: aget v6, v6, v9 +006446: df06 06ff |00fd: xor-int/lit8 v6, v6, #int -1 // #ff +00644a: b565 |00ff: and-int/2addr v5, v6 +00644c: 5985 3d00 |0100: iput v5, v8, Lcom/google/android/checkers/a;.d:I // field@003d +006450: 5285 3e00 |0102: iget v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +006454: 5486 3a00 |0104: iget-object v6, v8, Lcom/google/android/checkers/a;.a:[I // field@003a +006458: 4406 0609 |0106: aget v6, v6, v9 +00645c: df06 06ff |0108: xor-int/lit8 v6, v6, #int -1 // #ff +006460: b565 |010a: and-int/2addr v5, v6 +006462: 5985 3e00 |010b: iput v5, v8, Lcom/google/android/checkers/a;.e:I // field@003e +006466: 5285 4f00 |010d: iget v5, v8, Lcom/google/android/checkers/a;.v:I // field@004f +00646a: 9101 0501 |010f: sub-int v1, v5, v1 +00646e: 5981 4f00 |0111: iput v1, v8, Lcom/google/android/checkers/a;.v:I // field@004f +006472: 2900 08ff |0113: goto/16 001b // -00f8 +006476: 6205 3900 |0115: sget-object v5, Lcom/google/android/checkers/a;.M:[I // field@0039 +00647a: 7110 9f00 0000 |0117: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006480: 0a06 |011a: move-result v6 +006482: da06 0604 |011b: mul-int/lit8 v6, v6, #int 4 // #04 +006486: d806 0600 |011d: 
add-int/lit8 v6, v6, #int 0 // #00 +00648a: 4405 0506 |011f: aget v5, v5, v6 +00648e: b751 |0121: xor-int/2addr v1, v5 +006490: d805 00ff |0122: add-int/lit8 v5, v0, #int -1 // #ff +006494: b550 |0124: and-int/2addr v0, v5 +006496: 2900 02ff |0125: goto/16 0027 // -00fe +00649a: 6202 3900 |0127: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +00649e: 7110 9f00 0000 |0129: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0064a4: 0a05 |012c: move-result v5 +0064a6: da05 0504 |012d: mul-int/lit8 v5, v5, #int 4 // #04 +0064aa: d805 0501 |012f: add-int/lit8 v5, v5, #int 1 // #01 +0064ae: 4402 0205 |0131: aget v2, v2, v5 +0064b2: b721 |0133: xor-int/2addr v1, v2 +0064b4: d802 00ff |0134: add-int/lit8 v2, v0, #int -1 // #ff +0064b8: b520 |0136: and-int/2addr v0, v2 +0064ba: 2900 f5fe |0137: goto/16 002c // -010b +0064be: 6202 3900 |0139: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +0064c2: 7110 9f00 0000 |013b: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0064c8: 0a03 |013e: move-result v3 +0064ca: da03 0304 |013f: mul-int/lit8 v3, v3, #int 4 // #04 +0064ce: d803 0302 |0141: add-int/lit8 v3, v3, #int 2 // #02 +0064d2: 4402 0203 |0143: aget v2, v2, v3 +0064d6: b721 |0145: xor-int/2addr v1, v2 +0064d8: d802 00ff |0146: add-int/lit8 v2, v0, #int -1 // #ff +0064dc: b520 |0148: and-int/2addr v0, v2 +0064de: 2900 e8fe |0149: goto/16 0031 // -0118 +0064e2: 6202 3900 |014b: sget-object v2, Lcom/google/android/checkers/a;.M:[I // field@0039 +0064e6: 7110 9f00 0000 |014d: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0064ec: 0a03 |0150: move-result v3 +0064ee: da03 0304 |0151: mul-int/lit8 v3, v3, #int 4 // #04 +0064f2: d803 0303 |0153: add-int/lit8 v3, v3, #int 3 // #03 +0064f6: 4402 0203 |0155: aget v2, v2, v3 +0064fa: b721 |0157: xor-int/2addr v1, v2 +0064fc: d802 00ff |0158: add-int/lit8 v2, v0, #int -1 // #ff +006500: b520 |015a: and-int/2addr v0, v2 +006502: 2900 dbfe |015b: goto/16 0036 // -0125 +006506: 0000 |015d: nop // spacer +006508: 0002 0400 0001 0000 0002 0000 0004 ... 
|015e: sparse-switch-data (18 units) + catches : (none) + positions : + locals : + + #17 : (in Lcom/google/android/checkers/a;) + name : 'b' + type : '(IIIIIIII)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 18 + ins : 9 + outs : 9 + insns size : 99 16-bit code units +00652c: |[00652c] com.google.android.checkers.a.b:(IIIIIIII)V +00653c: 1210 |0000: const/4 v0, #int 1 // #1 +00653e: 6201 3400 |0001: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +006542: 4401 010d |0003: aget v1, v1, v13 +006546: b5c1 |0005: and-int/2addr v1, v12 +006548: 3801 2500 |0006: if-eqz v1, 002b // +0025 +00654c: 6201 3500 |0008: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +006550: 4401 010d |000a: aget v1, v1, v13 +006554: b5b1 |000c: and-int/2addr v1, v11 +006556: 3801 1e00 |000d: if-eqz v1, 002b // +001e +00655a: d804 0d07 |000f: add-int/lit8 v4, v13, #int 7 // #07 +00655e: 6200 3500 |0011: sget-object v0, Lcom/google/android/checkers/a;.I:[I // field@0035 +006562: 4405 000d |0013: aget v5, v0, v13 +006566: 6200 3400 |0015: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +00656a: 4400 000d |0017: aget v0, v0, v13 +00656e: 9606 0f00 |0019: or-int v6, v15, v0 +006572: d807 1001 |001b: add-int/lit8 v7, v16, #int 1 // #01 +006576: 6200 3500 |001d: sget-object v0, Lcom/google/android/checkers/a;.I:[I // field@0035 +00657a: 4400 000d |001f: aget v0, v0, v13 +00657e: 9608 1100 |0021: or-int v8, v17, v0 +006582: 0790 |0023: move-object v0, v9 +006584: 01a1 |0024: move v1, v10 +006586: 01b2 |0025: move v2, v11 +006588: 01c3 |0026: move v3, v12 +00658a: 7609 8600 0000 |0027: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.b:(IIIIIIII)V // method@0086 +006590: 1200 |002a: const/4 v0, #int 0 // #0 +006592: 6201 3600 |002b: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +006596: 4401 010d |002d: aget v1, v1, v13 +00659a: b5c1 |002f: and-int/2addr v1, v12 +00659c: 3801 2500 |0030: if-eqz v1, 0055 // +0025 +0065a0: 6201 3700 |0032: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +0065a4: 4401 010d |0034: aget v1, v1, v13 +0065a8: b5b1 |0036: and-int/2addr v1, v11 +0065aa: 3801 1e00 |0037: if-eqz v1, 0055 // +001e +0065ae: d804 0d09 |0039: add-int/lit8 v4, v13, #int 9 // #09 +0065b2: 6200 3700 |003b: sget-object v0, Lcom/google/android/checkers/a;.K:[I // field@0037 +0065b6: 4405 000d |003d: aget v5, v0, v13 +0065ba: 6200 3600 |003f: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +0065be: 4400 000d |0041: aget v0, v0, v13 +0065c2: 9606 0f00 |0043: or-int v6, v15, v0 +0065c6: d807 1001 |0045: add-int/lit8 v7, v16, #int 1 // #01 +0065ca: 6200 3700 |0047: sget-object v0, Lcom/google/android/checkers/a;.K:[I // field@0037 +0065ce: 4400 000d |0049: aget v0, v0, v13 +0065d2: 9608 1100 |004b: or-int v8, v17, v0 +0065d6: 0790 |004d: move-object v0, v9 +0065d8: 01a1 |004e: move v1, v10 +0065da: 01b2 |004f: move v2, v11 +0065dc: 01c3 |0050: move v3, v12 +0065de: 7609 8600 0000 |0051: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.b:(IIIIIIII)V // method@0086 +0065e4: 1200 |0054: const/4 v0, #int 0 // #0 +0065e6: 3800 0d00 |0055: if-eqz v0, 0062 // +000d +0065ea: 0790 |0057: move-object v0, v9 +0065ec: 01a1 |0058: move v1, v10 +0065ee: 01e2 |0059: move v2, v14 +0065f0: 01f3 |005a: move v3, v15 +0065f2: 0204 1000 |005b: move/from16 v4, v16 +0065f6: 0205 1100 |005d: move/from16 v5, v17 +0065fa: 7606 7e00 0000 |005f: 
invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006600: 0e00 |0062: return-void + catches : (none) + positions : + locals : + + #18 : (in Lcom/google/android/checkers/a;) + name : 'b' + type : '(IZI)Z' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 15 + ins : 4 + outs : 6 + insns size : 461 16-bit code units +006604: |[006604] com.google.android.checkers.a.b:(IZI)Z +006614: 1404 e0e0 e000 |0000: const v4, #float 0.000000 // #00e0e0e0 +00661a: 1216 |0003: const/4 v6, #int 1 // #1 +00661c: 1403 e0e0 e0e0 |0004: const v3, #float -129633581999069331456.000000 // #e0e0e0e0 +006622: 130a 0008 |0007: const/16 v10, #int 2048 // #800 +006626: 1309 0002 |0009: const/16 v9, #int 512 // #200 +00662a: 380d e400 |000b: if-eqz v13, 00ef // +00e4 +00662e: 52b0 3e00 |000d: iget v0, v11, Lcom/google/android/checkers/a;.e:I // field@003e +006632: 3900 1b00 |000f: if-nez v0, 002a // +001b +006636: 52b0 3d00 |0011: iget v0, v11, Lcom/google/android/checkers/a;.d:I // field@003d +00663a: e201 0e04 |0013: ushr-int/lit8 v1, v14, #int 4 // #04 +00663e: 9502 0e03 |0015: and-int v2, v14, v3 +006642: e202 0205 |0017: ushr-int/lit8 v2, v2, #int 5 // #05 +006646: b621 |0019: or-int/2addr v1, v2 +006648: 1402 0007 0707 |001a: const v2, #float 0.000000 // #07070700 +00664e: b5e2 |001d: and-int/2addr v2, v14 +006650: e202 0203 |001e: ushr-int/lit8 v2, v2, #int 3 // #03 +006654: b621 |0020: or-int/2addr v1, v2 +006656: b510 |0021: and-int/2addr v0, v1 +006658: 3900 2f00 |0022: if-nez v0, 0051 // +002f +00665c: 52b0 3c00 |0024: iget v0, v11, Lcom/google/android/checkers/a;.c:I // field@003c +006660: 3800 a401 |0026: if-eqz v0, 01ca // +01a4 +006664: 0160 |0028: move v0, v6 +006666: 0f00 |0029: return v0 +006668: 52b0 3e00 |002a: iget v0, v11, Lcom/google/android/checkers/a;.e:I // field@003e +00666c: 52b1 3d00 |002c: iget v1, v11, Lcom/google/android/checkers/a;.d:I // field@003d +006670: b610 |002e: or-int/2addr v0, v1 +006672: e201 0e04 |002f: ushr-int/lit8 v1, v14, #int 4 // #04 +006676: 9502 0e03 |0031: and-int v2, v14, v3 +00667a: e202 0205 |0033: ushr-int/lit8 v2, v2, #int 5 // #05 +00667e: b621 |0035: or-int/2addr v1, v2 +006680: 1402 0007 0707 |0036: const v2, #float 0.000000 // #07070700 +006686: b5e2 |0039: and-int/2addr v2, v14 +006688: e202 0203 |003a: ushr-int/lit8 v2, v2, #int 3 // #03 +00668c: b621 |003c: or-int/2addr v1, v2 +00668e: b510 |003d: and-int/2addr v0, v1 +006690: 52b1 3e00 |003e: iget v1, v11, Lcom/google/android/checkers/a;.e:I // field@003e +006694: e002 0e04 |0040: shl-int/lit8 v2, v14, #int 4 // #04 +006698: 1403 0707 0707 |0042: const v3, #float 0.000000 // #07070707 +00669e: b5e3 |0045: and-int/2addr v3, v14 +0066a0: e003 0305 |0046: shl-int/lit8 v3, v3, #int 5 // #05 +0066a4: b632 |0048: or-int/2addr v2, v3 +0066a6: 9503 0e04 |0049: and-int v3, v14, v4 +0066aa: e003 0303 |004b: shl-int/lit8 v3, v3, #int 3 // #03 +0066ae: b632 |004d: or-int/2addr v2, v3 +0066b0: b521 |004e: and-int/2addr v1, v2 +0066b2: b610 |004f: or-int/2addr v0, v1 +0066b4: 28d2 |0050: goto 0022 // -002e +0066b6: 7110 9f00 0000 |0051: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0066bc: 0a08 |0054: move-result v8 +0066be: 9803 0608 |0055: shl-int v3, v6, v8 +0066c2: 9707 0003 |0057: xor-int v7, v0, v3 +0066c6: 52b0 3e00 |0059: iget v0, v11, Lcom/google/android/checkers/a;.e:I // field@003e +0066ca: b530 |005b: and-int/2addr v0, v3 +0066cc: 3900 3400 |005c: if-nez v0, 0090 // +0034 +0066d0: 6200 3400 |005e: sget-object 
v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +0066d4: 4400 0008 |0060: aget v0, v0, v8 +0066d8: b5e0 |0062: and-int/2addr v0, v14 +0066da: 3800 1300 |0063: if-eqz v0, 0076 // +0013 +0066de: 6200 3400 |0065: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +0066e2: 4402 0008 |0067: aget v2, v0, v8 +0066e6: 1304 0001 |0069: const/16 v4, #int 256 // #100 +0066ea: 6200 3400 |006b: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +0066ee: 4400 0008 |006d: aget v0, v0, v8 +0066f2: 9605 0300 |006f: or-int v5, v3, v0 +0066f6: 07b0 |0071: move-object v0, v11 +0066f8: 01c1 |0072: move v1, v12 +0066fa: 7606 7e00 0000 |0073: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006700: 6200 3600 |0076: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +006704: 4400 0008 |0078: aget v0, v0, v8 +006708: b5e0 |007a: and-int/2addr v0, v14 +00670a: 3800 7100 |007b: if-eqz v0, 00ec // +0071 +00670e: 6200 3600 |007d: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +006712: 4402 0008 |007f: aget v2, v0, v8 +006716: 1304 0001 |0081: const/16 v4, #int 256 // #100 +00671a: 6200 3600 |0083: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +00671e: 4400 0008 |0085: aget v0, v0, v8 +006722: 9605 0300 |0087: or-int v5, v3, v0 +006726: 07b0 |0089: move-object v0, v11 +006728: 01c1 |008a: move v1, v12 +00672a: 7606 7e00 0000 |008b: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006730: 0170 |008e: move v0, v7 +006732: 2893 |008f: goto 0022 // -006d +006734: 6200 3000 |0090: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +006738: 4400 0008 |0092: aget v0, v0, v8 +00673c: b5e0 |0094: and-int/2addr v0, v14 +00673e: 3800 1200 |0095: if-eqz v0, 00a7 // +0012 +006742: 6200 3000 |0097: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +006746: 4402 0008 |0099: aget v2, v0, v8 +00674a: 6200 3000 |009b: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +00674e: 4400 0008 |009d: aget v0, v0, v8 +006752: 9605 0300 |009f: or-int v5, v3, v0 +006756: 07b0 |00a1: move-object v0, v11 +006758: 01c1 |00a2: move v1, v12 +00675a: 0194 |00a3: move v4, v9 +00675c: 7606 7e00 0000 |00a4: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006762: 6200 3200 |00a7: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +006766: 4400 0008 |00a9: aget v0, v0, v8 +00676a: b5e0 |00ab: and-int/2addr v0, v14 +00676c: 3800 1200 |00ac: if-eqz v0, 00be // +0012 +006770: 6200 3200 |00ae: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +006774: 4402 0008 |00b0: aget v2, v0, v8 +006778: 6200 3200 |00b2: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +00677c: 4400 0008 |00b4: aget v0, v0, v8 +006780: 9605 0300 |00b6: or-int v5, v3, v0 +006784: 07b0 |00b8: move-object v0, v11 +006786: 01c1 |00b9: move v1, v12 +006788: 0194 |00ba: move v4, v9 +00678a: 7606 7e00 0000 |00bb: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006790: 6200 3400 |00be: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +006794: 4400 0008 |00c0: aget v0, v0, v8 +006798: b5e0 |00c2: and-int/2addr v0, v14 +00679a: 3800 1200 |00c3: if-eqz v0, 00d5 // +0012 +00679e: 6200 3400 |00c5: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 
+0067a2: 4402 0008 |00c7: aget v2, v0, v8 +0067a6: 6200 3400 |00c9: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +0067aa: 4400 0008 |00cb: aget v0, v0, v8 +0067ae: 9605 0300 |00cd: or-int v5, v3, v0 +0067b2: 07b0 |00cf: move-object v0, v11 +0067b4: 01c1 |00d0: move v1, v12 +0067b6: 0194 |00d1: move v4, v9 +0067b8: 7606 7e00 0000 |00d2: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +0067be: 6200 3600 |00d5: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +0067c2: 4400 0008 |00d7: aget v0, v0, v8 +0067c6: b5e0 |00d9: and-int/2addr v0, v14 +0067c8: 3800 1200 |00da: if-eqz v0, 00ec // +0012 +0067cc: 6200 3600 |00dc: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +0067d0: 4402 0008 |00de: aget v2, v0, v8 +0067d4: 6200 3600 |00e0: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +0067d8: 4400 0008 |00e2: aget v0, v0, v8 +0067dc: 9605 0300 |00e4: or-int v5, v3, v0 +0067e0: 07b0 |00e6: move-object v0, v11 +0067e2: 01c1 |00e7: move v1, v12 +0067e4: 0194 |00e8: move v4, v9 +0067e6: 7606 7e00 0000 |00e9: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +0067ec: 0170 |00ec: move v0, v7 +0067ee: 2900 35ff |00ed: goto/16 0022 // -00cb +0067f2: 52b0 4000 |00ef: iget v0, v11, Lcom/google/android/checkers/a;.g:I // field@0040 +0067f6: 3900 5400 |00f1: if-nez v0, 0145 // +0054 +0067fa: 52b0 3f00 |00f3: iget v0, v11, Lcom/google/android/checkers/a;.f:I // field@003f +0067fe: e001 0e04 |00f5: shl-int/lit8 v1, v14, #int 4 // #04 +006802: 1402 0707 0707 |00f7: const v2, #float 0.000000 // #07070707 +006808: b5e2 |00fa: and-int/2addr v2, v14 +00680a: e002 0205 |00fb: shl-int/lit8 v2, v2, #int 5 // #05 +00680e: b621 |00fd: or-int/2addr v1, v2 +006810: 9502 0e04 |00fe: and-int v2, v14, v4 +006814: e002 0203 |0100: shl-int/lit8 v2, v2, #int 3 // #03 +006818: b621 |0102: or-int/2addr v1, v2 +00681a: b510 |0103: and-int/2addr v0, v1 +00681c: 3800 20ff |0104: if-eqz v0, 0024 // -00e0 +006820: 7110 9f00 0000 |0106: invoke-static {v0}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006826: 0a08 |0109: move-result v8 +006828: 9803 0608 |010a: shl-int v3, v6, v8 +00682c: 9707 0003 |010c: xor-int v7, v0, v3 +006830: 52b0 4000 |010e: iget v0, v11, Lcom/google/android/checkers/a;.g:I // field@0040 +006834: b530 |0110: and-int/2addr v0, v3 +006836: 3900 5a00 |0111: if-nez v0, 016b // +005a +00683a: 6200 3000 |0113: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +00683e: 4400 0008 |0115: aget v0, v0, v8 +006842: b5e0 |0117: and-int/2addr v0, v14 +006844: 3800 1300 |0118: if-eqz v0, 012b // +0013 +006848: 6200 3000 |011a: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +00684c: 4402 0008 |011c: aget v2, v0, v8 +006850: 1304 0004 |011e: const/16 v4, #int 1024 // #400 +006854: 6200 3000 |0120: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +006858: 4400 0008 |0122: aget v0, v0, v8 +00685c: 9605 0300 |0124: or-int v5, v3, v0 +006860: 07b0 |0126: move-object v0, v11 +006862: 01c1 |0127: move v1, v12 +006864: 7606 7e00 0000 |0128: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +00686a: 6200 3200 |012b: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +00686e: 4400 0008 |012d: aget v0, v0, v8 +006872: b5e0 |012f: and-int/2addr v0, v14 +006874: 3800 9700 |0130: if-eqz v0, 01c7 // +0097 +006878: 6200 
3200 |0132: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +00687c: 4402 0008 |0134: aget v2, v0, v8 +006880: 1304 0004 |0136: const/16 v4, #int 1024 // #400 +006884: 6200 3200 |0138: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +006888: 4400 0008 |013a: aget v0, v0, v8 +00688c: 9605 0300 |013c: or-int v5, v3, v0 +006890: 07b0 |013e: move-object v0, v11 +006892: 01c1 |013f: move v1, v12 +006894: 7606 7e00 0000 |0140: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +00689a: 0170 |0143: move v0, v7 +00689c: 28c0 |0144: goto 0104 // -0040 +00689e: 52b0 4000 |0145: iget v0, v11, Lcom/google/android/checkers/a;.g:I // field@0040 +0068a2: 52b1 3f00 |0147: iget v1, v11, Lcom/google/android/checkers/a;.f:I // field@003f +0068a6: b610 |0149: or-int/2addr v0, v1 +0068a8: e001 0e04 |014a: shl-int/lit8 v1, v14, #int 4 // #04 +0068ac: 1402 0707 0707 |014c: const v2, #float 0.000000 // #07070707 +0068b2: b5e2 |014f: and-int/2addr v2, v14 +0068b4: e002 0205 |0150: shl-int/lit8 v2, v2, #int 5 // #05 +0068b8: b621 |0152: or-int/2addr v1, v2 +0068ba: 9502 0e04 |0153: and-int v2, v14, v4 +0068be: e002 0203 |0155: shl-int/lit8 v2, v2, #int 3 // #03 +0068c2: b621 |0157: or-int/2addr v1, v2 +0068c4: b510 |0158: and-int/2addr v0, v1 +0068c6: 52b1 4000 |0159: iget v1, v11, Lcom/google/android/checkers/a;.g:I // field@0040 +0068ca: e202 0e04 |015b: ushr-int/lit8 v2, v14, #int 4 // #04 +0068ce: b5e3 |015d: and-int/2addr v3, v14 +0068d0: e203 0305 |015e: ushr-int/lit8 v3, v3, #int 5 // #05 +0068d4: b632 |0160: or-int/2addr v2, v3 +0068d6: 1403 0007 0707 |0161: const v3, #float 0.000000 // #07070700 +0068dc: b5e3 |0164: and-int/2addr v3, v14 +0068de: e203 0303 |0165: ushr-int/lit8 v3, v3, #int 3 // #03 +0068e2: b632 |0167: or-int/2addr v2, v3 +0068e4: b521 |0168: and-int/2addr v1, v2 +0068e6: b610 |0169: or-int/2addr v0, v1 +0068e8: 289a |016a: goto 0104 // -0066 +0068ea: 6200 3000 |016b: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +0068ee: 4400 0008 |016d: aget v0, v0, v8 +0068f2: b5e0 |016f: and-int/2addr v0, v14 +0068f4: 3800 1200 |0170: if-eqz v0, 0182 // +0012 +0068f8: 6200 3000 |0172: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +0068fc: 4402 0008 |0174: aget v2, v0, v8 +006900: 6200 3000 |0176: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +006904: 4400 0008 |0178: aget v0, v0, v8 +006908: 9605 0300 |017a: or-int v5, v3, v0 +00690c: 07b0 |017c: move-object v0, v11 +00690e: 01c1 |017d: move v1, v12 +006910: 01a4 |017e: move v4, v10 +006912: 7606 7e00 0000 |017f: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006918: 6200 3200 |0182: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +00691c: 4400 0008 |0184: aget v0, v0, v8 +006920: b5e0 |0186: and-int/2addr v0, v14 +006922: 3800 1200 |0187: if-eqz v0, 0199 // +0012 +006926: 6200 3200 |0189: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +00692a: 4402 0008 |018b: aget v2, v0, v8 +00692e: 6200 3200 |018d: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +006932: 4400 0008 |018f: aget v0, v0, v8 +006936: 9605 0300 |0191: or-int v5, v3, v0 +00693a: 07b0 |0193: move-object v0, v11 +00693c: 01c1 |0194: move v1, v12 +00693e: 01a4 |0195: move v4, v10 +006940: 7606 7e00 0000 |0196: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006946: 6200 
3400 |0199: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +00694a: 4400 0008 |019b: aget v0, v0, v8 +00694e: b5e0 |019d: and-int/2addr v0, v14 +006950: 3800 1200 |019e: if-eqz v0, 01b0 // +0012 +006954: 6200 3400 |01a0: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +006958: 4402 0008 |01a2: aget v2, v0, v8 +00695c: 6200 3400 |01a4: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +006960: 4400 0008 |01a6: aget v0, v0, v8 +006964: 9605 0300 |01a8: or-int v5, v3, v0 +006968: 07b0 |01aa: move-object v0, v11 +00696a: 01c1 |01ab: move v1, v12 +00696c: 01a4 |01ac: move v4, v10 +00696e: 7606 7e00 0000 |01ad: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006974: 6200 3600 |01b0: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +006978: 4400 0008 |01b2: aget v0, v0, v8 +00697c: b5e0 |01b4: and-int/2addr v0, v14 +00697e: 3800 1200 |01b5: if-eqz v0, 01c7 // +0012 +006982: 6200 3600 |01b7: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +006986: 4402 0008 |01b9: aget v2, v0, v8 +00698a: 6200 3600 |01bb: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +00698e: 4400 0008 |01bd: aget v0, v0, v8 +006992: 9605 0300 |01bf: or-int v5, v3, v0 +006996: 07b0 |01c1: move-object v0, v11 +006998: 01c1 |01c2: move v1, v12 +00699a: 01a4 |01c3: move v4, v10 +00699c: 7606 7e00 0000 |01c4: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +0069a2: 0170 |01c7: move v0, v7 +0069a4: 2900 3cff |01c8: goto/16 0104 // -00c4 +0069a8: 1200 |01ca: const/4 v0, #int 0 // #0 +0069aa: 2900 5efe |01cb: goto/16 0029 // -01a2 + catches : (none) + positions : + locals : + + #19 : (in Lcom/google/android/checkers/a;) + name : 'c' + type : '(ZIIZ)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 8 + ins : 4 + outs : 1 + insns size : 54 16-bit code units +0069b0: |[0069b0] com.google.android.checkers.a.c:(ZIIZ)I +0069c0: 7110 9f00 0500 |0000: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0069c6: 0a00 |0003: move-result v0 +0069c8: d801 05ff |0004: add-int/lit8 v1, v5, #int -1 // #ff +0069cc: b551 |0006: and-int/2addr v1, v5 +0069ce: 7110 9f00 0100 |0007: invoke-static {v1}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0069d4: 0a02 |000a: move-result v2 +0069d6: 7110 9f00 0600 |000b: invoke-static {v6}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +0069dc: 0a01 |000e: move-result v1 +0069de: 3807 0800 |000f: if-eqz v7, 0017 // +0008 +0069e2: d900 001f |0011: rsub-int/lit8 v0, v0, #int 31 // #1f +0069e6: d902 021f |0013: rsub-int/lit8 v2, v2, #int 31 // #1f +0069ea: d901 011f |0015: rsub-int/lit8 v1, v1, #int 31 // #1f +0069ee: 3520 1100 |0017: if-ge v0, v2, 0028 // +0011 +0069f2: 6203 3800 |0019: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +0069f6: 4402 0302 |001b: aget v2, v3, v2 +0069fa: b020 |001d: add-int/2addr v0, v2 +0069fc: 3804 1000 |001e: if-eqz v4, 002e // +0010 +006a00: 6202 6500 |0020: sget-object v2, Lcom/google/android/checkers/g;.m:[B // field@0065 +006a04: da00 0020 |0022: mul-int/lit8 v0, v0, #int 32 // #20 +006a08: b010 |0024: add-int/2addr v0, v1 +006a0a: 4800 0200 |0025: aget-byte v0, v2, v0 +006a0e: 0f00 |0027: return v0 +006a10: 6203 3800 |0028: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +006a14: 4400 0300 |002a: aget v0, v3, v0 +006a18: b020 |002c: add-int/2addr 
v0, v2 +006a1a: 28f1 |002d: goto 001e // -000f +006a1c: 6202 6600 |002e: sget-object v2, Lcom/google/android/checkers/g;.n:[B // field@0066 +006a20: da00 0020 |0030: mul-int/lit8 v0, v0, #int 32 // #20 +006a24: b010 |0032: add-int/2addr v0, v1 +006a26: 4800 0200 |0033: aget-byte v0, v2, v0 +006a2a: 28f2 |0035: goto 0027 // -000e + catches : (none) + positions : + locals : + + #20 : (in Lcom/google/android/checkers/a;) + name : 'c' + type : '()V' + access : 0x000a (PRIVATE STATIC) + code - + registers : 2 + ins : 0 + outs : 2 + insns size : 8 16-bit code units +006a2c: |[006a2c] com.google.android.checkers.a.c:()V +006a3c: 1600 f401 |0000: const-wide/16 v0, #int 500 // #1f4 +006a40: 7120 ae00 1000 |0002: invoke-static {v0, v1}, Ljava/lang/Thread;.sleep:(J)V // method@00ae +006a46: 0e00 |0005: return-void +006a48: 0d00 |0006: move-exception v0 +006a4a: 28fe |0007: goto 0005 // -0002 + catches : 1 + 0x0002 - 0x0005 + Ljava/lang/InterruptedException; -> 0x0006 + positions : + locals : + + #21 : (in Lcom/google/android/checkers/a;) + name : 'c' + type : '(IIIIIIII)V' + access : 0x0012 (PRIVATE FINAL) + code - + registers : 18 + ins : 9 + outs : 9 + insns size : 203 16-bit code units +006a58: |[006a58] com.google.android.checkers.a.c:(IIIIIIII)V +006a68: 1210 |0000: const/4 v0, #int 1 // #1 +006a6a: 6201 3000 |0001: sget-object v1, Lcom/google/android/checkers/a;.D:[I // field@0030 +006a6e: 4401 010d |0003: aget v1, v1, v13 +006a72: b5c1 |0005: and-int/2addr v1, v12 +006a74: 3801 2a00 |0006: if-eqz v1, 0030 // +002a +006a78: 6201 3100 |0008: sget-object v1, Lcom/google/android/checkers/a;.E:[I // field@0031 +006a7c: 4401 010d |000a: aget v1, v1, v13 +006a80: b5b1 |000c: and-int/2addr v1, v11 +006a82: 3801 2300 |000d: if-eqz v1, 0030 // +0023 +006a86: 6200 3000 |000f: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +006a8a: 4400 000d |0011: aget v0, v0, v13 +006a8e: 9703 0c00 |0013: xor-int v3, v12, v0 +006a92: d804 0df7 |0015: add-int/lit8 v4, v13, #int -9 // #f7 +006a96: 6200 3100 |0017: sget-object v0, Lcom/google/android/checkers/a;.E:[I // field@0031 +006a9a: 4405 000d |0019: aget v5, v0, v13 +006a9e: 6200 3000 |001b: sget-object v0, Lcom/google/android/checkers/a;.D:[I // field@0030 +006aa2: 4400 000d |001d: aget v0, v0, v13 +006aa6: 9606 0f00 |001f: or-int v6, v15, v0 +006aaa: d807 1001 |0021: add-int/lit8 v7, v16, #int 1 // #01 +006aae: 6200 3100 |0023: sget-object v0, Lcom/google/android/checkers/a;.E:[I // field@0031 +006ab2: 4400 000d |0025: aget v0, v0, v13 +006ab6: 9608 1100 |0027: or-int v8, v17, v0 +006aba: 0790 |0029: move-object v0, v9 +006abc: 01a1 |002a: move v1, v10 +006abe: 01b2 |002b: move v2, v11 +006ac0: 7609 8b00 0000 |002c: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +006ac6: 1200 |002f: const/4 v0, #int 0 // #0 +006ac8: 6201 3200 |0030: sget-object v1, Lcom/google/android/checkers/a;.F:[I // field@0032 +006acc: 4401 010d |0032: aget v1, v1, v13 +006ad0: b5c1 |0034: and-int/2addr v1, v12 +006ad2: 3801 2a00 |0035: if-eqz v1, 005f // +002a +006ad6: 6201 3300 |0037: sget-object v1, Lcom/google/android/checkers/a;.G:[I // field@0033 +006ada: 4401 010d |0039: aget v1, v1, v13 +006ade: b5b1 |003b: and-int/2addr v1, v11 +006ae0: 3801 2300 |003c: if-eqz v1, 005f // +0023 +006ae4: 6200 3200 |003e: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +006ae8: 4400 000d |0040: aget v0, v0, v13 +006aec: 9703 0c00 |0042: xor-int v3, v12, v0 +006af0: d804 0df9 |0044: 
add-int/lit8 v4, v13, #int -7 // #f9 +006af4: 6200 3300 |0046: sget-object v0, Lcom/google/android/checkers/a;.G:[I // field@0033 +006af8: 4405 000d |0048: aget v5, v0, v13 +006afc: 6200 3200 |004a: sget-object v0, Lcom/google/android/checkers/a;.F:[I // field@0032 +006b00: 4400 000d |004c: aget v0, v0, v13 +006b04: 9606 0f00 |004e: or-int v6, v15, v0 +006b08: d807 1001 |0050: add-int/lit8 v7, v16, #int 1 // #01 +006b0c: 6200 3300 |0052: sget-object v0, Lcom/google/android/checkers/a;.G:[I // field@0033 +006b10: 4400 000d |0054: aget v0, v0, v13 +006b14: 9608 1100 |0056: or-int v8, v17, v0 +006b18: 0790 |0058: move-object v0, v9 +006b1a: 01a1 |0059: move v1, v10 +006b1c: 01b2 |005a: move v2, v11 +006b1e: 7609 8b00 0000 |005b: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +006b24: 1200 |005e: const/4 v0, #int 0 // #0 +006b26: 6201 3400 |005f: sget-object v1, Lcom/google/android/checkers/a;.H:[I // field@0034 +006b2a: 4401 010d |0061: aget v1, v1, v13 +006b2e: b5c1 |0063: and-int/2addr v1, v12 +006b30: 3801 2a00 |0064: if-eqz v1, 008e // +002a +006b34: 6201 3500 |0066: sget-object v1, Lcom/google/android/checkers/a;.I:[I // field@0035 +006b38: 4401 010d |0068: aget v1, v1, v13 +006b3c: b5b1 |006a: and-int/2addr v1, v11 +006b3e: 3801 2300 |006b: if-eqz v1, 008e // +0023 +006b42: 6200 3400 |006d: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +006b46: 4400 000d |006f: aget v0, v0, v13 +006b4a: 9703 0c00 |0071: xor-int v3, v12, v0 +006b4e: d804 0d07 |0073: add-int/lit8 v4, v13, #int 7 // #07 +006b52: 6200 3500 |0075: sget-object v0, Lcom/google/android/checkers/a;.I:[I // field@0035 +006b56: 4405 000d |0077: aget v5, v0, v13 +006b5a: 6200 3400 |0079: sget-object v0, Lcom/google/android/checkers/a;.H:[I // field@0034 +006b5e: 4400 000d |007b: aget v0, v0, v13 +006b62: 9606 0f00 |007d: or-int v6, v15, v0 +006b66: d807 1001 |007f: add-int/lit8 v7, v16, #int 1 // #01 +006b6a: 6200 3500 |0081: sget-object v0, Lcom/google/android/checkers/a;.I:[I // field@0035 +006b6e: 4400 000d |0083: aget v0, v0, v13 +006b72: 9608 1100 |0085: or-int v8, v17, v0 +006b76: 0790 |0087: move-object v0, v9 +006b78: 01a1 |0088: move v1, v10 +006b7a: 01b2 |0089: move v2, v11 +006b7c: 7609 8b00 0000 |008a: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +006b82: 1200 |008d: const/4 v0, #int 0 // #0 +006b84: 6201 3600 |008e: sget-object v1, Lcom/google/android/checkers/a;.J:[I // field@0036 +006b88: 4401 010d |0090: aget v1, v1, v13 +006b8c: b5c1 |0092: and-int/2addr v1, v12 +006b8e: 3801 2a00 |0093: if-eqz v1, 00bd // +002a +006b92: 6201 3700 |0095: sget-object v1, Lcom/google/android/checkers/a;.K:[I // field@0037 +006b96: 4401 010d |0097: aget v1, v1, v13 +006b9a: b5b1 |0099: and-int/2addr v1, v11 +006b9c: 3801 2300 |009a: if-eqz v1, 00bd // +0023 +006ba0: 6200 3600 |009c: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +006ba4: 4400 000d |009e: aget v0, v0, v13 +006ba8: 9703 0c00 |00a0: xor-int v3, v12, v0 +006bac: d804 0d09 |00a2: add-int/lit8 v4, v13, #int 9 // #09 +006bb0: 6200 3700 |00a4: sget-object v0, Lcom/google/android/checkers/a;.K:[I // field@0037 +006bb4: 4405 000d |00a6: aget v5, v0, v13 +006bb8: 6200 3600 |00a8: sget-object v0, Lcom/google/android/checkers/a;.J:[I // field@0036 +006bbc: 4400 000d |00aa: aget v0, v0, v13 +006bc0: 9606 0f00 |00ac: or-int v6, v15, v0 +006bc4: d807 1001 |00ae: add-int/lit8 v7, v16, #int 1 // #01 
+006bc8: 6200 3700 |00b0: sget-object v0, Lcom/google/android/checkers/a;.K:[I // field@0037 +006bcc: 4400 000d |00b2: aget v0, v0, v13 +006bd0: 9608 1100 |00b4: or-int v8, v17, v0 +006bd4: 0790 |00b6: move-object v0, v9 +006bd6: 01a1 |00b7: move v1, v10 +006bd8: 01b2 |00b8: move v2, v11 +006bda: 7609 8b00 0000 |00b9: invoke-direct/range {v0, v1, v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.c:(IIIIIIII)V // method@008b +006be0: 1200 |00bc: const/4 v0, #int 0 // #0 +006be2: 3800 0d00 |00bd: if-eqz v0, 00ca // +000d +006be6: 0790 |00bf: move-object v0, v9 +006be8: 01a1 |00c0: move v1, v10 +006bea: 01e2 |00c1: move v2, v14 +006bec: 01f3 |00c2: move v3, v15 +006bee: 0204 1000 |00c3: move/from16 v4, v16 +006bf2: 0205 1100 |00c5: move/from16 v5, v17 +006bf6: 7606 7e00 0000 |00c7: invoke-direct/range {v0, v1, v2, v3, v4, v5}, Lcom/google/android/checkers/a;.a:(IIIII)V // method@007e +006bfc: 0e00 |00ca: return-void + catches : (none) + positions : + locals : + + #22 : (in Lcom/google/android/checkers/a;) + name : 'd' + type : '(ZIIZ)I' + access : 0x000a (PRIVATE STATIC) + code - + registers : 8 + ins : 4 + outs : 1 + insns size : 56 16-bit code units +006c00: |[006c00] com.google.android.checkers.a.d:(ZIIZ)I +006c10: 7110 9f00 0500 |0000: invoke-static {v5}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006c16: 0a00 |0003: move-result v0 +006c18: d801 05ff |0004: add-int/lit8 v1, v5, #int -1 // #ff +006c1c: b551 |0006: and-int/2addr v1, v5 +006c1e: 7110 9f00 0100 |0007: invoke-static {v1}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006c24: 0a02 |000a: move-result v2 +006c26: 7110 9f00 0600 |000b: invoke-static {v6}, Ljava/lang/Integer;.numberOfTrailingZeros:(I)I // method@009f +006c2c: 0a01 |000e: move-result v1 +006c2e: 3807 0800 |000f: if-eqz v7, 0017 // +0008 +006c32: d900 001f |0011: rsub-int/lit8 v0, v0, #int 31 // #1f +006c36: d902 021f |0013: rsub-int/lit8 v2, v2, #int 31 // #1f +006c3a: d901 011f |0015: rsub-int/lit8 v1, v1, #int 31 // #1f +006c3e: d801 01fc |0017: add-int/lit8 v1, v1, #int -4 // #fc +006c42: 3520 1100 |0019: if-ge v0, v2, 002a // +0011 +006c46: 6203 3800 |001b: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +006c4a: 4402 0302 |001d: aget v2, v3, v2 +006c4e: b020 |001f: add-int/2addr v0, v2 +006c50: 3804 1000 |0020: if-eqz v4, 0030 // +0010 +006c54: 6202 6700 |0022: sget-object v2, Lcom/google/android/checkers/g;.o:[B // field@0067 +006c58: da00 001c |0024: mul-int/lit8 v0, v0, #int 28 // #1c +006c5c: b010 |0026: add-int/2addr v0, v1 +006c5e: 4800 0200 |0027: aget-byte v0, v2, v0 +006c62: 0f00 |0029: return v0 +006c64: 6203 3800 |002a: sget-object v3, Lcom/google/android/checkers/a;.L:[I // field@0038 +006c68: 4400 0300 |002c: aget v0, v3, v0 +006c6c: b020 |002e: add-int/2addr v0, v2 +006c6e: 28f1 |002f: goto 0020 // -000f +006c70: 6202 6800 |0030: sget-object v2, Lcom/google/android/checkers/g;.p:[B // field@0068 +006c74: da00 001c |0032: mul-int/lit8 v0, v0, #int 28 // #1c +006c78: b010 |0034: add-int/2addr v0, v1 +006c7a: 4800 0200 |0035: aget-byte v0, v2, v0 +006c7e: 28f2 |0037: goto 0029 // -000e + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(ZZ)I' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 4 + ins : 3 + outs : 3 + insns size : 8 16-bit code units +006c80: |[006c80] com.google.android.checkers.a.a:(ZZ)I +006c90: 5c13 4c00 |0000: iput-boolean v3, v1, Lcom/google/android/checkers/a;.s:Z // 
field@004c +006c94: 1200 |0002: const/4 v0, #int 0 // #0 +006c96: 7030 7500 0102 |0003: invoke-direct {v1, v0, v2}, Lcom/google/android/checkers/a;.a:(IZ)I // method@0075 +006c9c: 0a00 |0006: move-result v0 +006c9e: 0f00 |0007: return v0 + catches : (none) + positions : + locals : + + #1 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '()V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 4 + ins : 1 + outs : 3 + insns size : 45 16-bit code units +006ca0: |[006ca0] com.google.android.checkers.a.a:()V +006cb0: 1302 0c00 |0000: const/16 v2, #int 12 // #c +006cb4: 1201 |0002: const/4 v1, #int 0 // #0 +006cb6: 1300 ff0f |0003: const/16 v0, #int 4095 // #fff +006cba: 5930 3d00 |0005: iput v0, v3, Lcom/google/android/checkers/a;.d:I // field@003d +006cbe: 5931 3e00 |0007: iput v1, v3, Lcom/google/android/checkers/a;.e:I // field@003e +006cc2: 1500 f0ff |0009: const/high16 v0, #int -1048576 // #fff0 +006cc6: 5930 3f00 |000b: iput v0, v3, Lcom/google/android/checkers/a;.f:I // field@003f +006cca: 5931 4000 |000d: iput v1, v3, Lcom/google/android/checkers/a;.g:I // field@0040 +006cce: 5932 4f00 |000f: iput v2, v3, Lcom/google/android/checkers/a;.v:I // field@004f +006cd2: 5932 5000 |0011: iput v2, v3, Lcom/google/android/checkers/a;.w:I // field@0050 +006cd6: 7020 7600 1300 |0013: invoke-direct {v3, v1}, Lcom/google/android/checkers/a;.a:(Z)I // method@0076 +006cdc: 0a00 |0016: move-result v0 +006cde: 5930 5100 |0017: iput v0, v3, Lcom/google/android/checkers/a;.x:I // field@0051 +006ce2: 7030 7500 1301 |0019: invoke-direct {v3, v1, v1}, Lcom/google/android/checkers/a;.a:(IZ)I // method@0075 +006ce8: 5530 2e00 |001c: iget-boolean v0, v3, Lcom/google/android/checkers/a;.B:Z // field@002e +006cec: 3800 0700 |001e: if-eqz v0, 0025 // +0007 +006cf0: 0110 |0020: move v0, v1 +006cf2: 1502 1000 |0021: const/high16 v2, #int 1048576 // #10 +006cf6: 3420 0300 |0023: if-lt v0, v2, 0026 // +0003 +006cfa: 0e00 |0025: return-void +006cfc: 5432 5200 |0026: iget-object v2, v3, Lcom/google/android/checkers/a;.y:[I // field@0052 +006d00: 4b01 0200 |0028: aput v1, v2, v0 +006d04: d800 0001 |002a: add-int/lit8 v0, v0, #int 1 // #01 +006d08: 28f5 |002c: goto 0021 // -000b + catches : (none) + positions : + locals : + + #2 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(I)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 2 + ins : 2 + outs : 2 + insns size : 4 16-bit code units +006d0c: |[006d0c] com.google.android.checkers.a.a:(I)V +006d1c: 7020 8500 1000 |0000: invoke-direct {v0, v1}, Lcom/google/android/checkers/a;.b:(I)V // method@0085 +006d22: 0e00 |0003: return-void + catches : (none) + positions : + locals : + + #3 : (in Lcom/google/android/checkers/a;) + name : 'a' + type : '(IIIIZ)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 8 + ins : 6 + outs : 2 + insns size : 37 16-bit code units +006d24: |[006d24] com.google.android.checkers.a.a:(IIIIZ)V +006d34: 5923 3d00 |0000: iput v3, v2, Lcom/google/android/checkers/a;.d:I // field@003d +006d38: 5924 3e00 |0002: iput v4, v2, Lcom/google/android/checkers/a;.e:I // field@003e +006d3c: 5925 3f00 |0004: iput v5, v2, Lcom/google/android/checkers/a;.f:I // field@003f +006d40: 5926 4000 |0006: iput v6, v2, Lcom/google/android/checkers/a;.g:I // field@0040 +006d44: 5220 3d00 |0008: iget v0, v2, Lcom/google/android/checkers/a;.d:I // field@003d +006d48: 5221 3e00 |000a: iget v1, v2, Lcom/google/android/checkers/a;.e:I // field@003e +006d4c: b610 |000c: or-int/2addr v0, v1 +006d4e: 7110 9e00 0000 |000d: invoke-static 
{v0}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +006d54: 0a00 |0010: move-result v0 +006d56: 5920 4f00 |0011: iput v0, v2, Lcom/google/android/checkers/a;.v:I // field@004f +006d5a: 5220 3f00 |0013: iget v0, v2, Lcom/google/android/checkers/a;.f:I // field@003f +006d5e: 5221 4000 |0015: iget v1, v2, Lcom/google/android/checkers/a;.g:I // field@0040 +006d62: b610 |0017: or-int/2addr v0, v1 +006d64: 7110 9e00 0000 |0018: invoke-static {v0}, Ljava/lang/Integer;.bitCount:(I)I // method@009e +006d6a: 0a00 |001b: move-result v0 +006d6c: 5920 5000 |001c: iput v0, v2, Lcom/google/android/checkers/a;.w:I // field@0050 +006d70: 7020 7600 7200 |001e: invoke-direct {v2, v7}, Lcom/google/android/checkers/a;.a:(Z)I // method@0076 +006d76: 0a00 |0021: move-result v0 +006d78: 5920 5100 |0022: iput v0, v2, Lcom/google/android/checkers/a;.x:I // field@0051 +006d7c: 0e00 |0024: return-void + catches : (none) + positions : + locals : + + #4 : (in Lcom/google/android/checkers/a;) + name : 'b' + type : '(ZZ)V' + access : 0x20011 (PUBLIC FINAL DECLARED_SYNCHRONIZED) + code - + registers : 4 + ins : 3 + outs : 1 + insns size : 16 16-bit code units +006d80: |[006d80] com.google.android.checkers.a.b:(ZZ)V +006d90: 1d01 |0000: monitor-enter v1 +006d92: 5c12 4d00 |0001: iput-boolean v2, v1, Lcom/google/android/checkers/a;.t:Z // field@004d +006d96: 5c13 4c00 |0003: iput-boolean v3, v1, Lcom/google/android/checkers/a;.s:Z // field@004c +006d9a: 1210 |0005: const/4 v0, #int 1 // #1 +006d9c: 5c10 4400 |0006: iput-boolean v0, v1, Lcom/google/android/checkers/a;.k:Z // field@0044 +006da0: 6e10 a100 0100 |0008: invoke-virtual {v1}, Ljava/lang/Object;.notify:()V // method@00a1 +006da6: 1e01 |000b: monitor-exit v1 +006da8: 0e00 |000c: return-void +006daa: 0d00 |000d: move-exception v0 +006dac: 1e01 |000e: monitor-exit v1 +006dae: 2700 |000f: throw v0 + catches : 1 + 0x0001 - 0x000b + <any> -> 0x000d + positions : + locals : + + #5 : (in Lcom/google/android/checkers/a;) + name : 'run' + type : '()V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 24 + ins : 1 + outs : 7 + insns size : 526 16-bit code units +006dbc: |[006dbc] com.google.android.checkers.a.run:()V +006dcc: 7601 8400 1700 |0000: invoke-direct/range {v23}, Lcom/google/android/checkers/a;.b:()V // method@0084 +006dd2: 0800 1700 |0003: move-object/from16 v0, v23 +006dd6: 5202 3c00 |0005: iget v2, v0, Lcom/google/android/checkers/a;.c:I // field@003c +006dda: 1213 |0007: const/4 v3, #int 1 // #1 +006ddc: 3332 1100 |0008: if-ne v2, v3, 0019 // +0011 +006de0: 7100 8a00 0000 |000a: invoke-static {}, Lcom/google/android/checkers/a;.c:()V // method@008a +006de6: 0800 1700 |000d: move-object/from16 v0, v23 +006dea: 5402 4300 |000f: iget-object v2, v0, Lcom/google/android/checkers/a;.j:Lcom/google/android/checkers/CheckersView; // field@0043 +006dee: 1203 |0011: const/4 v3, #int 0 // #0 +006df0: 1204 |0012: const/4 v4, #int 0 // #0 +006df2: 12f5 |0013: const/4 v5, #int -1 // #ff +006df4: 1216 |0014: const/4 v6, #int 1 // #1 +006df6: 6e56 5000 3254 |0015: invoke-virtual {v2, v3, v4, v5, v6}, Lcom/google/android/checkers/CheckersView;.a:(IIII)V // method@0050 +006dfc: 28e8 |0018: goto 0000 // -0018 +006dfe: 0800 1700 |0019: move-object/from16 v0, v23 +006e02: 5202 4100 |001b: iget v2, v0, Lcom/google/android/checkers/a;.h:I // field@0041 +006e06: 3902 1c00 |001d: if-nez v2, 0039 // +001c +006e0a: 7100 8a00 0000 |001f: invoke-static {}, Lcom/google/android/checkers/a;.c:()V // method@008a +006e10: 0800 1700 |0022: move-object/from16 v0, v23 +006e14: 5402 4200 
|0024: iget-object v2, v0, Lcom/google/android/checkers/a;.i:Ljava/util/Random; // field@0042 +006e18: 0800 1700 |0026: move-object/from16 v0, v23 +006e1c: 5203 3c00 |0028: iget v3, v0, Lcom/google/android/checkers/a;.c:I // field@003c +006e20: 6e20 b000 3200 |002a: invoke-virtual {v2, v3}, Ljava/util/Random;.nextInt:(I)I // method@00b0 +006e26: 0a02 |002d: move-result v2 +006e28: 0800 1700 |002e: move-object/from16 v0, v23 +006e2c: 5403 4300 |0030: iget-object v3, v0, Lcom/google/android/checkers/a;.j:Lcom/google/android/checkers/CheckersView; // field@0043 +006e30: 1204 |0032: const/4 v4, #int 0 // #0 +006e32: 1205 |0033: const/4 v5, #int 0 // #0 +006e34: 1216 |0034: const/4 v6, #int 1 // #1 +006e36: 6e56 5000 2354 |0035: invoke-virtual {v3, v2, v4, v5, v6}, Lcom/google/android/checkers/CheckersView;.a:(IIII)V // method@0050 +006e3c: 28c8 |0038: goto 0000 // -0038 +006e3e: 0800 1700 |0039: move-object/from16 v0, v23 +006e42: 5202 3d00 |003b: iget v2, v0, Lcom/google/android/checkers/a;.d:I // field@003d +006e46: 1303 ff0f |003d: const/16 v3, #int 4095 // #fff +006e4a: 3332 2700 |003f: if-ne v2, v3, 0066 // +0027 +006e4e: 7100 8a00 0000 |0041: invoke-static {}, Lcom/google/android/checkers/a;.c:()V // method@008a +006e54: 0800 1700 |0044: move-object/from16 v0, v23 +006e58: 5403 4200 |0046: iget-object v3, v0, Lcom/google/android/checkers/a;.i:Ljava/util/Random; // field@0042 +006e5c: 0800 1700 |0048: move-object/from16 v0, v23 +006e60: 5204 3c00 |004a: iget v4, v0, Lcom/google/android/checkers/a;.c:I // field@003c +006e64: 0800 1700 |004c: move-object/from16 v0, v23 +006e68: 5502 4d00 |004e: iget-boolean v2, v0, Lcom/google/android/checkers/a;.t:Z // field@004d +006e6c: 3802 1400 |0050: if-eqz v2, 0064 // +0014 +006e70: 1212 |0052: const/4 v2, #int 1 // #1 +006e72: 9102 0402 |0053: sub-int v2, v4, v2 +006e76: 6e20 b000 2300 |0055: invoke-virtual {v3, v2}, Ljava/util/Random;.nextInt:(I)I // method@00b0 +006e7c: 0a02 |0058: move-result v2 +006e7e: 0800 1700 |0059: move-object/from16 v0, v23 +006e82: 5403 4300 |005b: iget-object v3, v0, Lcom/google/android/checkers/a;.j:Lcom/google/android/checkers/CheckersView; // field@0043 +006e86: 1204 |005d: const/4 v4, #int 0 // #0 +006e88: 12e5 |005e: const/4 v5, #int -2 // #fe +006e8a: 1216 |005f: const/4 v6, #int 1 // #1 +006e8c: 6e56 5000 2354 |0060: invoke-virtual {v3, v2, v4, v5, v6}, Lcom/google/android/checkers/CheckersView;.a:(IIII)V // method@0050 +006e92: 289d |0063: goto 0000 // -0063 +006e94: 1202 |0064: const/4 v2, #int 0 // #0 +006e96: 28ee |0065: goto 0053 // -0012 +006e98: 7100 ab00 0000 |0066: invoke-static {}, Ljava/lang/System;.currentTimeMillis:()J // method@00ab +006e9e: 0b02 |0069: move-result-wide v2 +006ea0: 0800 1700 |006a: move-object/from16 v0, v23 +006ea4: 5a02 4500 |006c: iput-wide v2, v0, Lcom/google/android/checkers/a;.l:J // field@0045 +006ea8: 1202 |006e: const/4 v2, #int 0 // #0 +006eaa: 0800 1700 |006f: move-object/from16 v0, v23 +006eae: 5902 4700 |0071: iput v2, v0, Lcom/google/android/checkers/a;.n:I // field@0047 +006eb2: 1202 |0073: const/4 v2, #int 0 // #0 +006eb4: 0800 1700 |0074: move-object/from16 v0, v23 +006eb8: 5c02 4600 |0076: iput-boolean v2, v0, Lcom/google/android/checkers/a;.m:Z // field@0046 +006ebc: 0800 1700 |0078: move-object/from16 v0, v23 +006ec0: 5502 4c00 |007a: iget-boolean v2, v0, Lcom/google/android/checkers/a;.s:Z // field@004c +006ec4: 3902 5100 |007c: if-nez v2, 00cd // +0051 +006ec8: 6302 6a00 |007e: sget-boolean v2, Lcom/google/android/checkers/g;.r:Z // field@006a +006ecc: 3802 4d00 
|0080: if-eqz v2, 00cd // +004d +006ed0: 1212 |0082: const/4 v2, #int 1 // #1 +006ed2: 0800 1700 |0083: move-object/from16 v0, v23 +006ed6: 5c02 4e00 |0085: iput-boolean v2, v0, Lcom/google/android/checkers/a;.u:Z // field@004e +006eda: 0800 1700 |0087: move-object/from16 v0, v23 +006ede: 520e 3c00 |0089: iget v14, v0, Lcom/google/android/checkers/a;.c:I // field@003c +006ee2: 120d |008b: const/4 v13, #int 0 // #0 +006ee4: 120b |008c: const/4 v11, #int 0 // #0 +006ee6: 120a |008d: const/4 v10, #int 0 // #0 +006ee8: 1212 |008e: const/4 v2, #int 1 // #1 +006eea: 0800 1700 |008f: move-object/from16 v0, v23 +006eee: 5902 2f00 |0091: iput v2, v0, Lcom/google/android/checkers/a;.C:I // field@002f +006ef2: 0800 1700 |0093: move-object/from16 v0, v23 +006ef6: 520f 3d00 |0095: iget v15, v0, Lcom/google/android/checkers/a;.d:I // field@003d +006efa: 0800 1700 |0097: move-object/from16 v0, v23 +006efe: 5200 3e00 |0099: iget v0, v0, Lcom/google/android/checkers/a;.e:I // field@003e +006f02: 0210 0000 |009b: move/from16 v16, v0 +006f06: 0800 1700 |009d: move-object/from16 v0, v23 +006f0a: 5200 3f00 |009f: iget v0, v0, Lcom/google/android/checkers/a;.f:I // field@003f +006f0e: 0211 0000 |00a1: move/from16 v17, v0 +006f12: 0800 1700 |00a3: move-object/from16 v0, v23 +006f16: 5200 4000 |00a5: iget v0, v0, Lcom/google/android/checkers/a;.g:I // field@0040 +006f1a: 0212 0000 |00a7: move/from16 v18, v0 +006f1e: 0800 1700 |00a9: move-object/from16 v0, v23 +006f22: 5200 4f00 |00ab: iget v0, v0, Lcom/google/android/checkers/a;.v:I // field@004f +006f26: 0213 0000 |00ad: move/from16 v19, v0 +006f2a: 0800 1700 |00af: move-object/from16 v0, v23 +006f2e: 5200 5000 |00b1: iget v0, v0, Lcom/google/android/checkers/a;.w:I // field@0050 +006f32: 0214 0000 |00b3: move/from16 v20, v0 +006f36: 0800 1700 |00b5: move-object/from16 v0, v23 +006f3a: 5200 5100 |00b7: iget v0, v0, Lcom/google/android/checkers/a;.x:I // field@0051 +006f3e: 0215 0000 |00b9: move/from16 v21, v0 +006f42: 1216 |00bb: const/4 v6, #int 1 // #1 +006f44: 1302 4000 |00bc: const/16 v2, #int 64 // #40 +006f48: 3726 1100 |00be: if-le v6, v2, 00cf // +0011 +006f4c: 0800 1700 |00c0: move-object/from16 v0, v23 +006f50: 5402 4300 |00c2: iget-object v2, v0, Lcom/google/android/checkers/a;.j:Lcom/google/android/checkers/CheckersView; // field@0043 +006f54: 0800 1700 |00c4: move-object/from16 v0, v23 +006f58: 5203 2f00 |00c6: iget v3, v0, Lcom/google/android/checkers/a;.C:I // field@002f +006f5c: 6e53 5000 d2ab |00c8: invoke-virtual {v2, v13, v11, v10, v3}, Lcom/google/android/checkers/CheckersView;.a:(IIII)V // method@0050 +006f62: 2900 35ff |00cb: goto/16 0000 // -00cb +006f66: 1202 |00cd: const/4 v2, #int 0 // #0 +006f68: 28b5 |00ce: goto 0083 // -004b +006f6a: 1309 0180 |00cf: const/16 v9, #int -32767 // #8001 +006f6e: 120c |00d1: const/4 v12, #int 0 // #0 +006f70: 34ec 1500 |00d2: if-lt v12, v14, 00e7 // +0015 +006f74: 1302 0083 |00d4: const/16 v2, #int -32000 // #8300 +006f78: 3729 eaff |00d6: if-le v9, v2, 00c0 // -0016 +006f7c: 1302 007d |00d8: const/16 v2, #int 32000 // #7d00 +006f80: 3529 e6ff |00da: if-ge v9, v2, 00c0 // -001a +006f84: 1202 |00dc: const/4 v2, #int 0 // #0 +006f86: 0125 |00dd: move v5, v2 +006f88: 01d2 |00de: move v2, v13 +006f8a: d803 0eff |00df: add-int/lit8 v3, v14, #int -1 // #ff +006f8e: 3435 6000 |00e1: if-lt v5, v3, 0141 // +0060 +006f92: d806 0601 |00e3: add-int/lit8 v6, v6, #int 1 // #01 +006f96: 012d |00e5: move v13, v2 +006f98: 28d6 |00e6: goto 00bc // -002a +006f9a: 0800 1700 |00e7: move-object/from16 v0, v23 +006f9e: 7020 
8500 c000 |00e9: invoke-direct {v0, v12}, Lcom/google/android/checkers/a;.b:(I)V // method@0085 +006fa4: 1303 0180 |00ec: const/16 v3, #int -32767 // #8001 +006fa8: 7b94 |00ee: neg-int v4, v9 +006faa: 1215 |00ef: const/4 v5, #int 1 // #1 +006fac: 1207 |00f0: const/4 v7, #int 0 // #0 +006fae: 0800 1700 |00f1: move-object/from16 v0, v23 +006fb2: 5502 4d00 |00f3: iget-boolean v2, v0, Lcom/google/android/checkers/a;.t:Z // field@004d +006fb6: 3802 4a00 |00f5: if-eqz v2, 013f // +004a +006fba: 1208 |00f7: const/4 v8, #int 0 // #0 +006fbc: 0802 1700 |00f8: move-object/from16 v2, v23 +006fc0: 7607 7400 0200 |00fa: invoke-direct/range {v2, v3, v4, v5, v6, v7, v8}, Lcom/google/android/checkers/a;.a:(IIIIIZ)I // method@0074 +006fc6: 0a02 |00fd: move-result v2 +006fc8: 7b22 |00fe: neg-int v2, v2 +006fca: 0800 1700 |00ff: move-object/from16 v0, v23 +006fce: 5403 4a00 |0101: iget-object v3, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +006fd2: 4b02 030c |0103: aput v2, v3, v12 +006fd6: 0800 1700 |0105: move-object/from16 v0, v23 +006fda: 590f 3d00 |0107: iput v15, v0, Lcom/google/android/checkers/a;.d:I // field@003d +006fde: 0200 1000 |0109: move/from16 v0, v16 +006fe2: 0801 1700 |010b: move-object/from16 v1, v23 +006fe6: 5910 3e00 |010d: iput v0, v1, Lcom/google/android/checkers/a;.e:I // field@003e +006fea: 0200 1100 |010f: move/from16 v0, v17 +006fee: 0801 1700 |0111: move-object/from16 v1, v23 +006ff2: 5910 3f00 |0113: iput v0, v1, Lcom/google/android/checkers/a;.f:I // field@003f +006ff6: 0200 1200 |0115: move/from16 v0, v18 +006ffa: 0801 1700 |0117: move-object/from16 v1, v23 +006ffe: 5910 4000 |0119: iput v0, v1, Lcom/google/android/checkers/a;.g:I // field@0040 +007002: 0200 1300 |011b: move/from16 v0, v19 +007006: 0801 1700 |011d: move-object/from16 v1, v23 +00700a: 5910 4f00 |011f: iput v0, v1, Lcom/google/android/checkers/a;.v:I // field@004f +00700e: 0200 1400 |0121: move/from16 v0, v20 +007012: 0801 1700 |0123: move-object/from16 v1, v23 +007016: 5910 5000 |0125: iput v0, v1, Lcom/google/android/checkers/a;.w:I // field@0050 +00701a: 0200 1500 |0127: move/from16 v0, v21 +00701e: 0801 1700 |0129: move-object/from16 v1, v23 +007022: 5910 5100 |012b: iput v0, v1, Lcom/google/android/checkers/a;.x:I // field@0051 +007026: 0800 1700 |012d: move-object/from16 v0, v23 +00702a: 5503 4600 |012f: iget-boolean v3, v0, Lcom/google/android/checkers/a;.m:Z // field@0046 +00702e: 3903 8fff |0131: if-nez v3, 00c0 // -0071 +007032: 3792 d500 |0133: if-le v2, v9, 0208 // +00d5 +007036: 0163 |0135: move v3, v6 +007038: 0124 |0136: move v4, v2 +00703a: 01c5 |0137: move v5, v12 +00703c: d80c 0c01 |0138: add-int/lit8 v12, v12, #int 1 // #01 +007040: 0129 |013a: move v9, v2 +007042: 013a |013b: move v10, v3 +007044: 014b |013c: move v11, v4 +007046: 015d |013d: move v13, v5 +007048: 2894 |013e: goto 00d2 // -006c +00704a: 1218 |013f: const/4 v8, #int 1 // #1 +00704c: 28b8 |0140: goto 00f8 // -0048 +00704e: 1213 |0141: const/4 v3, #int 1 // #1 +007050: d804 0eff |0142: add-int/lit8 v4, v14, #int -1 // #ff +007054: 3445 0800 |0144: if-lt v5, v4, 014c // +0008 +007058: 3903 9dff |0146: if-nez v3, 00e3 // -0063 +00705c: d803 0501 |0148: add-int/lit8 v3, v5, #int 1 // #01 +007060: 0135 |014a: move v5, v3 +007062: 2894 |014b: goto 00df // -006c +007064: 0800 1700 |014c: move-object/from16 v0, v23 +007068: 5407 4a00 |014e: iget-object v7, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +00706c: 4407 0704 |0150: aget v7, v7, v4 +007070: 0800 1700 |0152: move-object/from16 v0, v23 +007074: 5408 4a00 
|0154: iget-object v8, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +007078: d809 04ff |0156: add-int/lit8 v9, v4, #int -1 // #ff +00707c: 4408 0809 |0158: aget v8, v8, v9 +007080: 3787 a800 |015a: if-le v7, v8, 0202 // +00a8 +007084: 0800 1700 |015c: move-object/from16 v0, v23 +007088: 5403 4a00 |015e: iget-object v3, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +00708c: 4403 0304 |0160: aget v3, v3, v4 +007090: 0800 1700 |0162: move-object/from16 v0, v23 +007094: 5407 4a00 |0164: iget-object v7, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +007098: 0800 1700 |0166: move-object/from16 v0, v23 +00709c: 5408 4a00 |0168: iget-object v8, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +0070a0: d809 04ff |016a: add-int/lit8 v9, v4, #int -1 // #ff +0070a4: 4408 0809 |016c: aget v8, v8, v9 +0070a8: 4b08 0704 |016e: aput v8, v7, v4 +0070ac: 0800 1700 |0170: move-object/from16 v0, v23 +0070b0: 5407 4a00 |0172: iget-object v7, v0, Lcom/google/android/checkers/a;.q:[I // field@004a +0070b4: d808 04ff |0174: add-int/lit8 v8, v4, #int -1 // #ff +0070b8: 4b03 0708 |0176: aput v3, v7, v8 +0070bc: 0800 1700 |0178: move-object/from16 v0, v23 +0070c0: 5403 4800 |017a: iget-object v3, v0, Lcom/google/android/checkers/a;.o:[I // field@0048 +0070c4: 4403 0304 |017c: aget v3, v3, v4 +0070c8: 0800 1700 |017e: move-object/from16 v0, v23 +0070cc: 5407 4800 |0180: iget-object v7, v0, Lcom/google/android/checkers/a;.o:[I // field@0048 +0070d0: 0800 1700 |0182: move-object/from16 v0, v23 +0070d4: 5408 4800 |0184: iget-object v8, v0, Lcom/google/android/checkers/a;.o:[I // field@0048 +0070d8: d809 04ff |0186: add-int/lit8 v9, v4, #int -1 // #ff +0070dc: 4408 0809 |0188: aget v8, v8, v9 +0070e0: 4b08 0704 |018a: aput v8, v7, v4 +0070e4: 0800 1700 |018c: move-object/from16 v0, v23 +0070e8: 5407 4800 |018e: iget-object v7, v0, Lcom/google/android/checkers/a;.o:[I // field@0048 +0070ec: d808 04ff |0190: add-int/lit8 v8, v4, #int -1 // #ff +0070f0: 4b03 0708 |0192: aput v3, v7, v8 +0070f4: 0800 1700 |0194: move-object/from16 v0, v23 +0070f8: 5403 3a00 |0196: iget-object v3, v0, Lcom/google/android/checkers/a;.a:[I // field@003a +0070fc: 4403 0304 |0198: aget v3, v3, v4 +007100: 0800 1700 |019a: move-object/from16 v0, v23 +007104: 5407 3a00 |019c: iget-object v7, v0, Lcom/google/android/checkers/a;.a:[I // field@003a +007108: 0800 1700 |019e: move-object/from16 v0, v23 +00710c: 5408 3a00 |01a0: iget-object v8, v0, Lcom/google/android/checkers/a;.a:[I // field@003a +007110: d809 04ff |01a2: add-int/lit8 v9, v4, #int -1 // #ff +007114: 4408 0809 |01a4: aget v8, v8, v9 +007118: 4b08 0704 |01a6: aput v8, v7, v4 +00711c: 0800 1700 |01a8: move-object/from16 v0, v23 +007120: 5407 3a00 |01aa: iget-object v7, v0, Lcom/google/android/checkers/a;.a:[I // field@003a +007124: d808 04ff |01ac: add-int/lit8 v8, v4, #int -1 // #ff +007128: 4b03 0708 |01ae: aput v3, v7, v8 +00712c: 0800 1700 |01b0: move-object/from16 v0, v23 +007130: 5403 4900 |01b2: iget-object v3, v0, Lcom/google/android/checkers/a;.p:[I // field@0049 +007134: 4403 0304 |01b4: aget v3, v3, v4 +007138: 0800 1700 |01b6: move-object/from16 v0, v23 +00713c: 5407 4900 |01b8: iget-object v7, v0, Lcom/google/android/checkers/a;.p:[I // field@0049 +007140: 0800 1700 |01ba: move-object/from16 v0, v23 +007144: 5408 4900 |01bc: iget-object v8, v0, Lcom/google/android/checkers/a;.p:[I // field@0049 +007148: d809 04ff |01be: add-int/lit8 v9, v4, #int -1 // #ff +00714c: 4408 0809 |01c0: aget v8, v8, v9 +007150: 4b08 0704 |01c2: aput v8, v7, v4 
+007154: 0800 1700 |01c4: move-object/from16 v0, v23 +007158: 5407 4900 |01c6: iget-object v7, v0, Lcom/google/android/checkers/a;.p:[I // field@0049 +00715c: d808 04ff |01c8: add-int/lit8 v8, v4, #int -1 // #ff +007160: 4b03 0708 |01ca: aput v3, v7, v8 +007164: 0800 1700 |01cc: move-object/from16 v0, v23 +007168: 5403 3b00 |01ce: iget-object v3, v0, Lcom/google/android/checkers/a;.b:[I // field@003b +00716c: 4403 0304 |01d0: aget v3, v3, v4 +007170: 0800 1700 |01d2: move-object/from16 v0, v23 +007174: 5407 3b00 |01d4: iget-object v7, v0, Lcom/google/android/checkers/a;.b:[I // field@003b +007178: 0800 1700 |01d6: move-object/from16 v0, v23 +00717c: 5408 3b00 |01d8: iget-object v8, v0, Lcom/google/android/checkers/a;.b:[I // field@003b +007180: d809 04ff |01da: add-int/lit8 v9, v4, #int -1 // #ff +007184: 4408 0809 |01dc: aget v8, v8, v9 +007188: 4b08 0704 |01de: aput v8, v7, v4 +00718c: 0800 1700 |01e0: move-object/from16 v0, v23 +007190: 5407 3b00 |01e2: iget-object v7, v0, Lcom/google/android/checkers/a;.b:[I // field@003b +007194: d808 04ff |01e4: add-int/lit8 v8, v4, #int -1 // #ff +007198: 4b03 0708 |01e6: aput v3, v7, v8 +00719c: 3342 1300 |01e8: if-ne v2, v4, 01fb // +0013 +0071a0: d802 02ff |01ea: add-int/lit8 v2, v2, #int -1 // #ff +0071a4: 1203 |01ec: const/4 v3, #int 0 // #0 +0071a6: 0216 0300 |01ed: move/from16 v22, v3 +0071aa: 0123 |01ef: move v3, v2 +0071ac: 0202 1600 |01f0: move/from16 v2, v22 +0071b0: d804 04ff |01f2: add-int/lit8 v4, v4, #int -1 // #ff +0071b4: 0216 0200 |01f4: move/from16 v22, v2 +0071b8: 0132 |01f6: move v2, v3 +0071ba: 0203 1600 |01f7: move/from16 v3, v22 +0071be: 2900 4bff |01f9: goto/16 0144 // -00b5 +0071c2: d803 04ff |01fb: add-int/lit8 v3, v4, #int -1 // #ff +0071c6: 3332 efff |01fd: if-ne v2, v3, 01ec // -0011 +0071ca: d802 0201 |01ff: add-int/lit8 v2, v2, #int 1 // #01 +0071ce: 28eb |0201: goto 01ec // -0015 +0071d0: 0216 0300 |0202: move/from16 v22, v3 +0071d4: 0123 |0204: move v3, v2 +0071d6: 0202 1600 |0205: move/from16 v2, v22 +0071da: 28eb |0207: goto 01f2 // -0015 +0071dc: 0192 |0208: move v2, v9 +0071de: 01a3 |0209: move v3, v10 +0071e0: 01b4 |020a: move v4, v11 +0071e2: 01d5 |020b: move v5, v13 +0071e4: 2900 2cff |020c: goto/16 0138 // -00d4 + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #3 header: +class_idx : 33 +access_flags : 16 (0x0010) +superclass_idx : 46 +interfaces_off : 29520 (0x007350) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 35082 (0x00890a) +static_fields_size : 0 +instance_fields_size: 1 +direct_methods_size : 1 +virtual_methods_size: 1 + +Class #3 - + Class descriptor : 'Lcom/google/android/checkers/b;' + Access flags : 0x0010 (FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 'Landroid/content/DialogInterface$OnClickListener;' + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/b;) + name : 'a' + type : 'Lcom/google/android/checkers/CheckersView;' + access : 0x1010 (FINAL SYNTHETIC) + Direct methods - + #0 : (in Lcom/google/android/checkers/b;) + name : '<init>' + type : '(Lcom/google/android/checkers/CheckersView;)V' + access : 0x10000 (CONSTRUCTOR) + code - + registers : 2 + ins : 2 + outs : 1 + insns size : 6 16-bit code units +0071e8: |[0071e8] com.google.android.checkers.b.<init>:(Lcom/google/android/checkers/CheckersView;)V +0071f8: 5b01 5400 |0000: iput-object v1, v0, Lcom/google/android/checkers/b;.a:Lcom/google/android/checkers/CheckersView; // field@0054 +0071fc: 7010 a000 0000 |0002: invoke-direct 
{v0}, Ljava/lang/Object;.<init>:()V // method@00a0 +007202: 0e00 |0005: return-void + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/b;) + name : 'onClick' + type : '(Landroid/content/DialogInterface;I)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 4 + ins : 3 + outs : 1 + insns size : 14 16-bit code units +007204: |[007204] com.google.android.checkers.b.onClick:(Landroid/content/DialogInterface;I)V +007214: 5410 5400 |0000: iget-object v0, v1, Lcom/google/android/checkers/b;.a:Lcom/google/android/checkers/CheckersView; // field@0054 +007218: 7110 5900 0000 |0002: invoke-static {v0}, Lcom/google/android/checkers/CheckersView;.a:(Lcom/google/android/checkers/CheckersView;)Z // method@0059 +00721e: 0a00 |0005: move-result v0 +007220: 3800 0700 |0006: if-eqz v0, 000d // +0007 +007224: 5410 5400 |0008: iget-object v0, v1, Lcom/google/android/checkers/b;.a:Lcom/google/android/checkers/CheckersView; // field@0054 +007228: 6e10 6d00 0000 |000a: invoke-virtual {v0}, Lcom/google/android/checkers/CheckersView;.postInvalidate:()V // method@006d +00722e: 0e00 |000d: return-void + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #4 header: +class_idx : 34 +access_flags : 16 (0x0010) +superclass_idx : 46 +interfaces_off : 29520 (0x007350) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 35103 (0x00891f) +static_fields_size : 0 +instance_fields_size: 1 +direct_methods_size : 1 +virtual_methods_size: 1 + +Class #4 - + Class descriptor : 'Lcom/google/android/checkers/c;' + Access flags : 0x0010 (FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 'Landroid/content/DialogInterface$OnClickListener;' + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/c;) + name : 'a' + type : 'Lcom/google/android/checkers/CheckersView;' + access : 0x1010 (FINAL SYNTHETIC) + Direct methods - + #0 : (in Lcom/google/android/checkers/c;) + name : '<init>' + type : '(Lcom/google/android/checkers/CheckersView;)V' + access : 0x10000 (CONSTRUCTOR) + code - + registers : 2 + ins : 2 + outs : 1 + insns size : 6 16-bit code units +007230: |[007230] com.google.android.checkers.c.<init>:(Lcom/google/android/checkers/CheckersView;)V +007240: 5b01 5500 |0000: iput-object v1, v0, Lcom/google/android/checkers/c;.a:Lcom/google/android/checkers/CheckersView; // field@0055 +007244: 7010 a000 0000 |0002: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@00a0 +00724a: 0e00 |0005: return-void + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/c;) + name : 'onClick' + type : '(Landroid/content/DialogInterface;I)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 3 + ins : 3 + outs : 0 + insns size : 1 16-bit code units +00724c: |[00724c] com.google.android.checkers.c.onClick:(Landroid/content/DialogInterface;I)V +00725c: 0e00 |0000: return-void + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #5 header: +class_idx : 35 +access_flags : 16 (0x0010) +superclass_idx : 46 +interfaces_off : 29520 (0x007350) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 35124 (0x008934) +static_fields_size : 0 +instance_fields_size: 1 +direct_methods_size : 1 +virtual_methods_size: 1 + +Class #5 - + Class descriptor : 'Lcom/google/android/checkers/d;' + Access flags : 0x0010 (FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 
'Landroid/content/DialogInterface$OnClickListener;' + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/d;) + name : 'a' + type : 'Lcom/google/android/checkers/CheckersView;' + access : 0x1010 (FINAL SYNTHETIC) + Direct methods - + #0 : (in Lcom/google/android/checkers/d;) + name : '<init>' + type : '(Lcom/google/android/checkers/CheckersView;)V' + access : 0x10000 (CONSTRUCTOR) + code - + registers : 2 + ins : 2 + outs : 1 + insns size : 6 16-bit code units +007260: |[007260] com.google.android.checkers.d.<init>:(Lcom/google/android/checkers/CheckersView;)V +007270: 5b01 5600 |0000: iput-object v1, v0, Lcom/google/android/checkers/d;.a:Lcom/google/android/checkers/CheckersView; // field@0056 +007274: 7010 a000 0000 |0002: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@00a0 +00727a: 0e00 |0005: return-void + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/d;) + name : 'onClick' + type : '(Landroid/content/DialogInterface;I)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 3 + ins : 3 + outs : 0 + insns size : 1 16-bit code units +00727c: |[00727c] com.google.android.checkers.d.onClick:(Landroid/content/DialogInterface;I)V +00728c: 0e00 |0000: return-void + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #6 header: +class_idx : 36 +access_flags : 16 (0x0010) +superclass_idx : 46 +interfaces_off : 29520 (0x007350) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 35145 (0x008949) +static_fields_size : 0 +instance_fields_size: 1 +direct_methods_size : 1 +virtual_methods_size: 1 + +Class #6 - + Class descriptor : 'Lcom/google/android/checkers/e;' + Access flags : 0x0010 (FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 'Landroid/content/DialogInterface$OnClickListener;' + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/e;) + name : 'a' + type : 'Lcom/google/android/checkers/CheckersView;' + access : 0x1010 (FINAL SYNTHETIC) + Direct methods - + #0 : (in Lcom/google/android/checkers/e;) + name : '<init>' + type : '(Lcom/google/android/checkers/CheckersView;)V' + access : 0x10000 (CONSTRUCTOR) + code - + registers : 2 + ins : 2 + outs : 1 + insns size : 6 16-bit code units +007290: |[007290] com.google.android.checkers.e.<init>:(Lcom/google/android/checkers/CheckersView;)V +0072a0: 5b01 5700 |0000: iput-object v1, v0, Lcom/google/android/checkers/e;.a:Lcom/google/android/checkers/CheckersView; // field@0057 +0072a4: 7010 a000 0000 |0002: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@00a0 +0072aa: 0e00 |0005: return-void + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/e;) + name : 'onClick' + type : '(Landroid/content/DialogInterface;I)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 5 + ins : 3 + outs : 2 + insns size : 7 16-bit code units +0072ac: |[0072ac] com.google.android.checkers.e.onClick:(Landroid/content/DialogInterface;I)V +0072bc: 5420 5700 |0000: iget-object v0, v2, Lcom/google/android/checkers/e;.a:Lcom/google/android/checkers/CheckersView; // field@0057 +0072c0: 1211 |0002: const/4 v1, #int 1 // #1 +0072c2: 6e20 6800 1000 |0003: invoke-virtual {v0, v1}, Lcom/google/android/checkers/CheckersView;.e:(Z)Z // method@0068 +0072c8: 0e00 |0006: return-void + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #7 header: +class_idx : 37 +access_flags : 16 (0x0010) 
+superclass_idx : 46 +interfaces_off : 29520 (0x007350) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 35166 (0x00895e) +static_fields_size : 0 +instance_fields_size: 1 +direct_methods_size : 1 +virtual_methods_size: 1 + +Class #7 - + Class descriptor : 'Lcom/google/android/checkers/f;' + Access flags : 0x0010 (FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + #0 : 'Landroid/content/DialogInterface$OnClickListener;' + Static fields - + Instance fields - + #0 : (in Lcom/google/android/checkers/f;) + name : 'a' + type : 'Lcom/google/android/checkers/CheckersView;' + access : 0x1010 (FINAL SYNTHETIC) + Direct methods - + #0 : (in Lcom/google/android/checkers/f;) + name : '<init>' + type : '(Lcom/google/android/checkers/CheckersView;)V' + access : 0x10000 (CONSTRUCTOR) + code - + registers : 2 + ins : 2 + outs : 1 + insns size : 6 16-bit code units +0072cc: |[0072cc] com.google.android.checkers.f.<init>:(Lcom/google/android/checkers/CheckersView;)V +0072dc: 5b01 5800 |0000: iput-object v1, v0, Lcom/google/android/checkers/f;.a:Lcom/google/android/checkers/CheckersView; // field@0058 +0072e0: 7010 a000 0000 |0002: invoke-direct {v0}, Ljava/lang/Object;.<init>:()V // method@00a0 +0072e6: 0e00 |0005: return-void + catches : (none) + positions : + locals : + + Virtual methods - + #0 : (in Lcom/google/android/checkers/f;) + name : 'onClick' + type : '(Landroid/content/DialogInterface;I)V' + access : 0x0011 (PUBLIC FINAL) + code - + registers : 4 + ins : 3 + outs : 2 + insns size : 6 16-bit code units +0072e8: |[0072e8] com.google.android.checkers.f.onClick:(Landroid/content/DialogInterface;I)V +0072f8: 5410 5800 |0000: iget-object v0, v1, Lcom/google/android/checkers/f;.a:Lcom/google/android/checkers/CheckersView; // field@0058 +0072fc: 7120 5600 3000 |0002: invoke-static {v0, v3}, Lcom/google/android/checkers/CheckersView;.a:(Lcom/google/android/checkers/CheckersView;I)V // method@0056 +007302: 0e00 |0005: return-void + catches : (none) + positions : + locals : + + source_file_idx : -1 (unknown) + +Class #8 header: +class_idx : 38 +access_flags : 17 (0x0011) +superclass_idx : 46 +interfaces_off : 0 (0x000000) +source_file_idx : -1 +annotations_off : 0 (0x000000) +class_data_off : 35187 (0x008973) +static_fields_size : 19 +instance_fields_size: 0 +direct_methods_size : 1 +virtual_methods_size: 0 + +Class #8 - + Class descriptor : 'Lcom/google/android/checkers/g;' + Access flags : 0x0011 (PUBLIC FINAL) + Superclass : 'Ljava/lang/Object;' + Interfaces - + Static fields - + #0 : (in Lcom/google/android/checkers/g;) + name : 'a' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #1 : (in Lcom/google/android/checkers/g;) + name : 'b' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #2 : (in Lcom/google/android/checkers/g;) + name : 'c' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #3 : (in Lcom/google/android/checkers/g;) + name : 'd' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #4 : (in Lcom/google/android/checkers/g;) + name : 'e' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #5 : (in Lcom/google/android/checkers/g;) + name : 'f' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #6 : (in Lcom/google/android/checkers/g;) + name : 'g' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #7 : (in Lcom/google/android/checkers/g;) + name : 'h' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #8 : (in Lcom/google/android/checkers/g;) + name : 'i' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #9 : (in Lcom/google/android/checkers/g;) + name : 'j' + type : 
'[B' + access : 0x0009 (PUBLIC STATIC) + #10 : (in Lcom/google/android/checkers/g;) + name : 'k' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #11 : (in Lcom/google/android/checkers/g;) + name : 'l' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #12 : (in Lcom/google/android/checkers/g;) + name : 'm' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #13 : (in Lcom/google/android/checkers/g;) + name : 'n' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #14 : (in Lcom/google/android/checkers/g;) + name : 'o' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #15 : (in Lcom/google/android/checkers/g;) + name : 'p' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #16 : (in Lcom/google/android/checkers/g;) + name : 'q' + type : '[B' + access : 0x0009 (PUBLIC STATIC) + #17 : (in Lcom/google/android/checkers/g;) + name : 'r' + type : 'Z' + access : 0x0009 (PUBLIC STATIC) + #18 : (in Lcom/google/android/checkers/g;) + name : 's' + type : 'Ljava/io/BufferedInputStream;' + access : 0x0008 (STATIC) + Instance fields - + Direct methods - + #0 : (in Lcom/google/android/checkers/g;) + name : 'a' + type : '([B)Z' + access : 0x0008 (STATIC) + code - + registers : 5 + ins : 1 + outs : 2 + insns size : 29 16-bit code units +007304: |[007304] com.google.android.checkers.g.a:([B)Z +007314: 2141 |0000: array-length v1, v4 +007316: 1200 |0001: const/4 v0, #int 0 // #0 +007318: 3410 0400 |0002: if-lt v0, v1, 0006 // +0004 +00731c: 1210 |0004: const/4 v0, #int 1 // #1 +00731e: 0f00 |0005: return v0 +007320: 6202 6b00 |0006: sget-object v2, Lcom/google/android/checkers/g;.s:Ljava/io/BufferedInputStream; // field@006b +007324: 6e10 9c00 0200 |0008: invoke-virtual {v2}, Ljava/io/BufferedInputStream;.read:()I // method@009c +00732a: 0a02 |000b: move-result v2 +00732c: 12f3 |000c: const/4 v3, #int -1 // #ff +00732e: 3332 0a00 |000d: if-ne v2, v3, 0017 // +000a +007332: 2200 2b00 |000f: new-instance v0, Ljava/lang/Exception; // type@002b +007336: 1a01 3401 |0011: const-string v1, "tb eof" // string@0134 +00733a: 7020 9d00 1000 |0013: invoke-direct {v0, v1}, Ljava/lang/Exception;.<init>:(Ljava/lang/String;)V // method@009d +007340: 2700 |0016: throw v0 +007342: 8d22 |0017: int-to-byte v2, v2 +007344: 4f02 0400 |0018: aput-byte v2, v4, v0 +007348: d800 0001 |001a: add-int/lit8 v0, v0, #int 1 // #01 +00734c: 28e6 |001c: goto 0002 // -001a + catches : (none) + positions : + locals : + + Virtual methods - + source_file_idx : -1 (unknown) + diff --git a/test/dexdump/checkers.xml b/test/dexdump/checkers.xml new file mode 100755 index 0000000000..232254fd2a --- /dev/null +++ b/test/dexdump/checkers.xml @@ -0,0 +1,672 @@ +<api> +<package name="com.google.android.checkers" +> +<class name="Checkers" + extends="android.app.Activity" + abstract="false" + static="false" + final="false" + visibility="public" +> +<constructor name="Checkers" + type="com.google.android.checkers.Checkers" + static="false" + final="false" + visibility="public" +> +</constructor> +<method name="onConfigurationChanged" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.content.res.Configuration"> +</parameter> +</method> +<method name="onCreate" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.os.Bundle"> +</parameter> +</method> +<method name="onCreateOptionsMenu" + return="boolean" + abstract="false" + 
native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.view.Menu"> +</parameter> +</method> +<method name="onKeyDown" + return="boolean" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="int"> +</parameter> +<parameter name="arg1" type="android.view.KeyEvent"> +</parameter> +</method> +<method name="onOptionsItemSelected" + return="boolean" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.view.MenuItem"> +</parameter> +</method> +<method name="onPause" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +</method> +<method name="onStop" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +</method> +<method name="onTrackballEvent" + return="boolean" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.view.MotionEvent"> +</parameter> +</method> +</class> +<class name="CheckersView" + extends="android.view.View" + abstract="false" + static="false" + final="false" + visibility="public" +> +<constructor name="CheckersView" + type="com.google.android.checkers.CheckersView" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.content.Context"> +</parameter> +<parameter name="arg1" type="android.content.SharedPreferences"> +</parameter> +</constructor> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +</method> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="float"> +</parameter> +<parameter name="arg1" type="float"> +</parameter> +</method> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="int"> +</parameter> +<parameter name="arg1" type="int"> +</parameter> +<parameter name="arg2" type="int"> +</parameter> +<parameter name="arg3" type="int"> +</parameter> +</method> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="android.content.SharedPreferences.Editor"> +</parameter> +</method> +<method name="a" + return="boolean" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="int"> +</parameter> +</method> +<method name="a" + return="boolean" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="boolean"> +</parameter> +</method> +<method name="b" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +</method> +<method name="b" + return="boolean" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter 
name="arg0" type="boolean"> +</parameter> +</method> +<method name="c" + return="int" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +</method> +<method name="c" + return="boolean" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="boolean"> +</parameter> +</method> +<method name="d" + return="boolean" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="boolean"> +</parameter> +</method> +<method name="draw" + return="void" + abstract="false" + native="false" + synchronized="true" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.graphics.Canvas"> +</parameter> +</method> +<method name="e" + return="boolean" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="boolean"> +</parameter> +</method> +<method name="onSizeChanged" + return="void" + abstract="false" + native="false" + synchronized="true" + static="false" + final="false" + visibility="protected" +> +<parameter name="arg0" type="int"> +</parameter> +<parameter name="arg1" type="int"> +</parameter> +<parameter name="arg2" type="int"> +</parameter> +<parameter name="arg3" type="int"> +</parameter> +</method> +<method name="onTouchEvent" + return="boolean" + abstract="false" + native="false" + synchronized="false" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="android.view.MotionEvent"> +</parameter> +</method> +<method name="setLevel" + return="void" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="int"> +</parameter> +</method> +</class> +<class name="a" + extends="java.lang.Thread" + abstract="false" + static="false" + final="true" + visibility="public" +> +<field name="a" + type="int[]" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="b" + type="int[]" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="c" + type="int" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="d" + type="int" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="e" + type="int" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="f" + type="int" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="g" + type="int" + transient="false" + volatile="false" + static="false" + final="false" + visibility="public" +> +</field> +<field name="h" + type="int" + transient="false" + volatile="true" + static="false" + final="false" + visibility="public" +> +</field> +<constructor name="a" + type="com.google.android.checkers.a" + static="false" + final="false" + visibility="public" +> +<parameter name="arg0" type="com.google.android.checkers.CheckersView"> +</parameter> +</constructor> +<method name="a" + return="int" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +<parameter 
name="arg0" type="boolean"> +</parameter> +<parameter name="arg1" type="boolean"> +</parameter> +</method> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +</method> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="int"> +</parameter> +</method> +<method name="a" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="int"> +</parameter> +<parameter name="arg1" type="int"> +</parameter> +<parameter name="arg2" type="int"> +</parameter> +<parameter name="arg3" type="int"> +</parameter> +<parameter name="arg4" type="boolean"> +</parameter> +</method> +<method name="b" + return="void" + abstract="false" + native="false" + synchronized="true" + static="false" + final="true" + visibility="public" +> +<parameter name="arg0" type="boolean"> +</parameter> +<parameter name="arg1" type="boolean"> +</parameter> +</method> +<method name="run" + return="void" + abstract="false" + native="false" + synchronized="false" + static="false" + final="true" + visibility="public" +> +</method> +</class> +<class name="g" + extends="java.lang.Object" + abstract="false" + static="false" + final="true" + visibility="public" +> +<field name="a" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="b" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="c" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="d" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="e" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="f" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="g" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="h" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="i" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="j" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="k" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="l" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="m" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="n" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="o" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field 
name="p" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="q" + type="byte[]" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +<field name="r" + type="boolean" + transient="false" + volatile="false" + static="true" + final="false" + visibility="public" +> +</field> +</class> +</package> +</api> diff --git a/test/dexdump/run-all-tests b/test/dexdump/run-all-tests new file mode 100755 index 0000000000..d9f1e9649d --- /dev/null +++ b/test/dexdump/run-all-tests @@ -0,0 +1,103 @@ +#!/bin/bash +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set up prog to be the path of this script, including following symlinks, +# and set up progdir to be the fully-qualified pathname of its directory. +prog="$0" +while [ -h "${prog}" ]; do + newProg=`/bin/ls -ld "${prog}"` + newProg=`expr "${newProg}" : ".* -> \(.*\)$"` + if expr "x${newProg}" : 'x/' >/dev/null; then + prog="${newProg}" + else + progdir=`dirname "${prog}"` + prog="${progdir}/${newProg}" + fi +done +oldwd=`pwd` +progdir=`dirname "${prog}"` +cd "${progdir}" +progdir=`pwd` +prog="${progdir}"/`basename "${prog}"` + +# Set up a temp directory for output. +tmpdir=/tmp/test-$$ +mkdir ${tmpdir} + +# Set up dexdump binary and flags to test. +DEXD="${ANDROID_HOST_OUT}/bin/dexdump2" +DEXDFLAGS1="-dfh" +DEXDFLAGS2="-l xml" + +# Set up dexlist binary and flags to test. +DEXL="${ANDROID_HOST_OUT}/bin/dexlist2" +DEXLFLAGS="" + +# Run the tests. +passed=0 +failed=0 +for i in *.dex; do + echo $i + basenm=`basename "${i}" .dex` + txtfile=${basenm}.txt + xmlfile=${basenm}.xml + lstfile=${basenm}.lst + gentxtfile=${tmpdir}/${txtfile} + genxmlfile=${tmpdir}/${xmlfile} + genlstfile=${tmpdir}/${lstfile} + ${DEXD} ${DEXDFLAGS1} ${i} > ${gentxtfile} + cmp ${txtfile} ${gentxtfile} + if [ "$?" = "0" ]; then + ((passed += 1)) + else + ((failed += 1)) + echo failed: ${i} + fi + ${DEXD} ${DEXDFLAGS2} ${i} > ${genxmlfile} + cmp ${xmlfile} ${genxmlfile} + if [ "$?" = "0" ]; then + ((passed += 1)) + else + ((failed += 1)) + echo failed: ${i} + fi + ${DEXL} ${DEXLFLAGS} ${i} > ${genlstfile} + cmp ${lstfile} ${genlstfile} + if [ "$?" = "0" ]; then + ((passed += 1)) + else + ((failed += 1)) + echo failed: ${i} + fi +done + +# Report results. +echo +echo "passed: ${passed} test(s)" +echo "failed: ${failed} test(s)" +echo + +# Clean up, cd back to original dir. +rm -rf ${tmpdir} +cd ${oldwd} + +# Return status. 
+if [ "${failed}" != "0" ]; then + echo failed + exit 1 +fi +exit 0 + diff --git a/test/etc/default-build b/test/etc/default-build index 92954a98cc..c281bca3f5 100755 --- a/test/etc/default-build +++ b/test/etc/default-build @@ -39,24 +39,57 @@ if [ -e classes.dex ]; then exit 0 fi -if [ -d src ]; then - mkdir classes - ${JAVAC} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'` -fi - -if [ -d src2 ]; then - mkdir -p classes - ${JAVAC} -d classes `find src2 -name '*.java'` -fi - if ! [ -d src ] && ! [ -d src2 ]; then # No src directory? Then forget about trying to run dx. SKIP_DX_MERGER="true" fi -if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then - ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \ - --dump-width=1000 ${DX_FLAGS} classes +if [ -d src-multidex ]; then + # Jack does not support this configuration unless we specify how to partition the DEX file + # with a .jpp file. + USE_JACK="false" +fi + +if [ ${USE_JACK} = "true" ]; then + # Jack toolchain + if [ -d src ]; then + ${JACK} --output-jack src.jack src + imported_jack_files="--import src.jack" + fi + + if [ -d src2 ]; then + ${JACK} --output-jack src2.jack src2 + imported_jack_files="--import src2.jack ${imported_jack_files}" + fi + + # Compile jack files into a DEX file. We set jack.import.type.policy=keep-first to consider + # class definitions from src2 first. + ${JACK} ${imported_jack_files} -D jack.import.type.policy=keep-first --output-dex . +else + # Legacy toolchain with javac+dx + if [ -d src ]; then + mkdir classes + ${JAVAC} -implicit:none -classpath src-multidex -d classes `find src -name '*.java'` + fi + + if [ -d src-multidex ]; then + mkdir classes2 + ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'` + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \ + --dump-width=1000 ${DX_FLAGS} classes2 + fi + fi + + if [ -d src2 ]; then + mkdir -p classes + ${JAVAC} -d classes `find src2 -name '*.java'` + fi + + if [ ${NEED_DEX} = "true" -a ${SKIP_DX_MERGER} = "false" ]; then + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \ + --dump-width=1000 ${DX_FLAGS} classes + fi fi if [ -d smali ]; then @@ -72,30 +105,34 @@ if [ -d smali ]; then fi if [ -d src-ex ]; then - mkdir classes-ex - ${JAVAC} -d classes-ex -cp classes `find src-ex -name '*.java'` - if [ ${NEED_DEX} = "true" ]; then - ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \ - --dump-width=1000 ${DX_FLAGS} classes-ex - - # quick shuffle so that the stored name is "classes.dex" - mv classes.dex classes-1.dex - mv classes-ex.dex classes.dex - zip $TEST_NAME-ex.jar classes.dex - mv classes.dex classes-ex.dex - mv classes-1.dex classes.dex + if [ ${USE_JACK} = "true" ]; then + # Rename previous "classes.dex" so it is not overwritten. + mv classes.dex classes-1.dex + #TODO find another way to append src.jack to the jack classpath + ${JACK}:src.jack --output-dex . src-ex + zip $TEST_NAME-ex.jar classes.dex + # Restore previous "classes.dex" so it can be zipped. 
+ mv classes-1.dex classes.dex + else + mkdir classes-ex + ${JAVAC} -d classes-ex -cp classes `find src-ex -name '*.java'` + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes-ex.dex \ + --dump-width=1000 ${DX_FLAGS} classes-ex + + # quick shuffle so that the stored name is "classes.dex" + mv classes.dex classes-1.dex + mv classes-ex.dex classes.dex + zip $TEST_NAME-ex.jar classes.dex + mv classes.dex classes-ex.dex + mv classes-1.dex classes.dex + fi fi fi # Create a single jar with two dex files for multidex. if [ -d src-multidex ]; then - mkdir classes2 - ${JAVAC} -implicit:none -classpath src -d classes2 `find src-multidex -name '*.java'` - if [ ${NEED_DEX} = "true" ]; then - ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex \ - --dump-width=1000 ${DX_FLAGS} classes2 - zip $TEST_NAME.jar classes.dex classes2.dex - fi + zip $TEST_NAME.jar classes.dex classes2.dex elif [ ${NEED_DEX} = "true" ]; then zip $TEST_NAME.jar classes.dex fi diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index cf6be83d6e..a1af5774f3 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -42,10 +42,11 @@ TIME_OUT="y" TIME_OUT_VALUE=10 USE_GDB="n" USE_JVM="n" -VERIFY="y" +VERIFY="y" # y=yes,n=no,s=softfail ZYGOTE="" DEX_VERIFY="" USE_DEX2OAT_AND_PATCHOAT="y" +INSTRUCTION_SET_FEATURES="" while true; do if [ "x$1" = "x--quiet" ]; then @@ -149,6 +150,9 @@ while true; do elif [ "x$1" = "x--no-verify" ]; then VERIFY="n" shift + elif [ "x$1" = "x--verify-soft-fail" ]; then + VERIFY="s" + shift elif [ "x$1" = "x--no-optimize" ]; then OPTIMIZE="n" shift @@ -156,6 +160,10 @@ while true; do shift ANDROID_ROOT="$1" shift + elif [ "x$1" = "x--instruction-set-features" ]; then + shift + INSTRUCTION_SET_FEATURES="$1" + shift elif [ "x$1" = "x--" ]; then shift break @@ -201,7 +209,11 @@ if [ "$ZYGOTE" = "" ]; then if [ "$VERIFY" = "y" ]; then JVM_VERIFY_ARG="-Xverify:all" msg "Performing verification" - else + elif [ "$VERIFY" = "s" ]; then + JVM_VERIFY_ARG="-Xverify:all" + DEX_VERIFY="-Xverify:softfail" + msg "Forcing verification to be soft fail" + else # VERIFY = "n" DEX_VERIFY="-Xverify:none" JVM_VERIFY_ARG="-Xverify:none" msg "Skipping verification" @@ -263,7 +275,10 @@ if [ "$INTERPRETER" = "y" ]; then INT_OPTS="-Xint" if [ "$VERIFY" = "y" ] ; then COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only" - else + elif [ "$VERIFY" = "s" ]; then + COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-at-runtime" + DEX_VERIFY="${DEX_VERIFY} -Xverify:softfail" + else # VERIFY = "n" COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none" DEX_VERIFY="${DEX_VERIFY} -Xverify:none" fi @@ -320,6 +335,25 @@ if [ "$PREBUILD" = "y" ]; then --dex-file=$DEX_LOCATION/$TEST_NAME.jar \ --oat-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g") \ --instruction-set=$ISA" + if [ "x$INSTRUCTION_SET_FEATURES" != "x" ] ; then + dex2oat_cmdline="${dex2oat_cmdline} --instruction-set-features=${INSTRUCTION_SET_FEATURES}" + fi + + # Add in a timeout. This is important for testing the compilation/verification time of + # pathological cases. + # Note: as we don't know how capable targets (e.g., the emulator) are, only do this on the + # host for now. We should try to improve this. + # The current value is rather arbitrary. run-tests should compile quickly. + if [ "$HOST" != "n" ]; then + # Use SIGRTMIN+2 to try to dump threads. 
+ # Use -k 1m to SIGKILL it a minute later if it hasn't ended. + dex2oat_cmdline="timeout -k 1m -s SIGRTMIN+2 1m ${dex2oat_cmdline}" + fi +fi + +DALVIKVM_ISA_FEATURES_ARGS="" +if [ "x$INSTRUCTION_SET_FEATURES" != "x" ] ; then + DALVIKVM_ISA_FEATURES_ARGS="-Xcompiler-option --instruction-set-features=${INSTRUCTION_SET_FEATURES}" fi dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \ @@ -329,6 +363,7 @@ dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \ -XXlib:$LIB \ $PATCHOAT \ $DEX2OAT \ + $DALVIKVM_ISA_FEATURES_ARGS \ $ZYGOTE \ $JNI_OPTS \ $INT_OPTS \ @@ -458,7 +493,9 @@ else # When running under gdb, we cannot do piping and grepping... $cmdline "$@" else - $cmdline "$@" 2>&1 + trap 'kill -INT -$pid' INT + $cmdline "$@" 2>&1 & pid=$! + wait $pid # Add extra detail if time out is enabled. if [ ${PIPESTATUS[0]} = 124 ] && [ "$TIME_OUT" = "y" ]; then echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2 diff --git a/test/run-test b/test/run-test index ffa25eb5ab..84c818b444 100755 --- a/test/run-test +++ b/test/run-test @@ -46,6 +46,7 @@ export JAVAC="javac -g" export RUN="${progdir}/etc/run-test-jar" export DEX_LOCATION=/data/run-test/${test_dir} export NEED_DEX="true" +export USE_JACK="false" # If dx was not set by the environment variable, assume it is in the path. if [ -z "$DX" ]; then @@ -67,6 +68,46 @@ if [ -z "$DXMERGER" ]; then export DXMERGER="dexmerger" fi +# If jack was not set by the environment variable, assume it is in the path. +if [ -z "$JACK" ]; then + export JACK="jack" +fi + +# If the tree is compiled with Jack, build test with Jack by default. +if [ "$ANDROID_COMPILE_WITH_JACK" = "true" ]; then + USE_JACK="true" +fi + +# ANDROID_BUILD_TOP is not set in a build environment. +if [ -z "$ANDROID_BUILD_TOP" ]; then + export ANDROID_BUILD_TOP=$oldwd +fi + +# If JACK_VM_COMMAND is not set, assume it launches the prebuilt jack-launcher. +if [ -z "$JACK_VM_COMMAND" ]; then + if [ ! -z "$TMPDIR" ]; then + jack_temp_dir="-Djava.io.tmpdir=$TMPDIR" + fi + export JACK_VM_COMMAND="java -Dfile.encoding=UTF-8 -Xms2560m -XX:+TieredCompilation $jack_temp_dir -jar $ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack-launcher.jar" +fi + +# If JACK_CLASSPATH is not set, assume it only contains core-libart. +if [ -z "$JACK_CLASSPATH" ]; then + export JACK_CLASSPATH="$ANDROID_BUILD_TOP/out/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack" +fi + +# If JACK_JAR is not set, assume it is located in the prebuilts directory. +if [ -z "$JACK_JAR" ]; then + export JACK_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack.jar" +fi + +# If JILL_JAR is not set, assume it is located in the prebuilts directory. 
+if [ -z "$JILL_JAR" ]; then + export JILL_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jill.jar" +fi + +export JACK="$JACK -g -cp $JACK_CLASSPATH" +export JILL="java -jar $JILL_JAR" info="info.txt" build="build" @@ -76,6 +117,7 @@ check_cmd="check" output="output.txt" build_output="build-output.txt" cfg_output="graph.cfg" +strace_output="strace-output.txt" lib="libartd.so" run_args="--quiet" build_args="" @@ -96,6 +138,7 @@ trace_stream="false" basic_verify="false" gc_verify="false" gc_stress="false" +strace="false" always_clean="no" never_clean="no" have_dex2oat="yes" @@ -116,6 +159,7 @@ while true; do runtime="jvm" prebuild_mode="no" NEED_DEX="false" + USE_JACK="false" run_args="${run_args} --jvm" shift elif [ "x$1" = "x-O" ]; then @@ -193,6 +237,10 @@ while true; do run_args="${run_args} --gdb" dev_mode="yes" shift + elif [ "x$1" = "x--strace" ]; then + strace="yes" + run_args="${run_args} --invoke-with strace --invoke-with -o --invoke-with $tmp_dir/$strace_output" + shift elif [ "x$1" = "x--zygote" ]; then run_args="${run_args} --zygote" shift @@ -214,6 +262,10 @@ while true; do elif [ "x$1" = "x--no-verify" ]; then run_args="${run_args} --no-verify" shift + elif [ "x$1" = "x--verify-soft-fail" ]; then + run_args="${run_args} --verify-soft-fail" + image_suffix="-interp-ac" + shift elif [ "x$1" = "x--no-optimize" ]; then run_args="${run_args} --no-optimize" shift @@ -237,6 +289,12 @@ while true; do elif [ "x$1" = "x--build-only" ]; then build_only="yes" shift + elif [ "x$1" = "x--build-with-javac-dx" ]; then + USE_JACK="false" + shift + elif [ "x$1" = "x--build-with-jack" ]; then + USE_JACK="true" + shift elif [ "x$1" = "x--output-path" ]; then shift tmp_dir=$1 @@ -281,6 +339,10 @@ while true; do elif [ "x$1" = "x--dex2oat-swap" ]; then run_args="${run_args} --dex2oat-swap" shift + elif [ "x$1" = "x--instruction-set-features" ]; then + shift + run_args="${run_args} --instruction-set-features $1" + shift elif expr "x$1" : "x--" >/dev/null 2>&1; then echo "unknown $0 option: $1" 1>&2 usage="yes" @@ -369,10 +431,7 @@ if [ "$runtime" = "dalvik" ]; then fi elif [ "$runtime" = "art" ]; then if [ "$target_mode" = "no" ]; then - # ANDROID_BUILD_TOP and ANDROID_HOST_OUT are not set in a build environment. - if [ -z "$ANDROID_BUILD_TOP" ]; then - export ANDROID_BUILD_TOP=$oldwd - fi + # ANDROID_HOST_OUT is not set in a build environment. if [ -z "$ANDROID_HOST_OUT" ]; then export ANDROID_HOST_OUT=$ANDROID_BUILD_TOP/out/host/linux-x86 fi @@ -462,11 +521,16 @@ if [ "$usage" = "yes" ]; then echo " --debuggable Whether to compile Java code for a debugger." echo " --gdb Run under gdb; incompatible with some tests." echo " --build-only Build test files only (off by default)." + echo " --build-with-javac-dx Build test files with javac and dx (on by default)." + echo " --build-with-jack Build test files with jack and jill (off by default)." echo " --interpreter Enable interpreter only mode (off by default)." echo " --jit Enable jit (off by default)." echo " --optimizing Enable optimizing compiler (default)." echo " --quick Use Quick compiler (off by default)." echo " --no-verify Turn off verification (on by default)." + echo " --verify-soft-fail Force soft fail verification (off by default)." + echo " Verification is enabled if neither --no-verify" + echo " nor --verify-soft-fail is specified." echo " --no-optimize Turn off optimization (on by default)." echo " --no-precise Turn off precise GC (on by default)." echo " --zygote Spawn the process from the Zygote." 
@@ -462,11 +521,16 @@ if [ "$usage" = "yes" ]; then
         echo "    --debuggable          Whether to compile Java code for a debugger."
         echo "    --gdb                 Run under gdb; incompatible with some tests."
         echo "    --build-only          Build test files only (off by default)."
+        echo "    --build-with-javac-dx Build test files with javac and dx (on by default)."
+        echo "    --build-with-jack     Build test files with jack and jill (off by default)."
         echo "    --interpreter         Enable interpreter only mode (off by default)."
         echo "    --jit                 Enable jit (off by default)."
         echo "    --optimizing          Enable optimizing compiler (default)."
         echo "    --quick               Use Quick compiler (off by default)."
         echo "    --no-verify           Turn off verification (on by default)."
+        echo "    --verify-soft-fail    Force soft fail verification (off by default)."
+        echo "                          Verification is enabled if neither --no-verify"
+        echo "                          nor --verify-soft-fail is specified."
         echo "    --no-optimize         Turn off optimization (on by default)."
         echo "    --no-precise          Turn off precise GC (on by default)."
         echo "    --zygote              Spawn the process from the Zygote."
@@ -496,6 +560,8 @@ if [ "$usage" = "yes" ]; then
         echo "    --never-clean         Keep the test files even if the test succeeds."
         echo "    --android-root [path] The path on target for the android root. (/system by default)."
         echo "    --dex2oat-swap        Use a dex2oat swap file."
+        echo "    --instruction-set-features [string]"
+        echo "                          Set instruction-set-features for compilation."
     ) 1>&2
     exit 1
 fi
@@ -556,22 +622,49 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
   # if Checker is not invoked and the test only runs the program.
   build_args="${build_args} --dx-option --no-optimize"
 
-  if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then
-    run_checker="yes"
-    run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
-                          -Xcompiler-option -j1"
+  # Jack does not necessarily generate the same DEX output as dx. Because these tests depend
+  # on a particular DEX output, keep building them with dx for now (b/19467889).
+  USE_JACK="false"
+
+  if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$debuggable" = "no" ]; then
+    # In no-prebuild mode, the compiler is only invoked if both dex2oat and
+    # patchoat are available. Disable Checker otherwise (b/22552692).
+    if [ "$prebuild_mode" = "yes" ] || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
+      run_checker="yes"
+      if [ "$target_mode" = "no" ]; then
+        cfg_output_dir="$tmp_dir"
+        checker_arch_option=
+      else
+        cfg_output_dir="$DEX_LOCATION"
+        checker_arch_option="--arch=${target_arch_name^^}"
+      fi
+      run_args="${run_args} -Xcompiler-option --dump-cfg=$cfg_output_dir/$cfg_output \
+                            -Xcompiler-option -j1"
+    fi
   fi
 fi
 
 # To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB.
-file_size_limit=2048
+build_file_size_limit=2048
+run_file_size_limit=2048
 if echo "$test_dir" | grep 089; then
-  file_size_limit=5120
+  build_file_size_limit=5120
+  run_file_size_limit=5120
 elif echo "$test_dir" | grep 083; then
-  file_size_limit=5120
+  build_file_size_limit=5120
+  run_file_size_limit=5120
 fi
-if ! ulimit -S "$file_size_limit"; then
-  echo "ulimit file size setting failed"
+if [ "$run_checker" = "yes" -a "$target_mode" = "yes" ]; then
+  # We will need to `adb pull` the .cfg output from the target onto the host to
+  # run checker on it. This file can be big.
+  build_file_size_limit=16384
+  run_file_size_limit=16384
+fi
+if [ ${USE_JACK} = "false" ]; then
+  # Only set a ulimit when building with dx; Jack can generate big temp files.
+  if ! ulimit -S "$build_file_size_limit"; then
+    echo "ulimit file size setting failed"
+  fi
 fi
 
 good="no"
@@ -582,13 +675,19 @@ if [ "$dev_mode" = "yes" ]; then
     build_exit="$?"
     echo "build exit status: $build_exit" 1>&2
     if [ "$build_exit" = '0' ]; then
+        if ! ulimit -S "$run_file_size_limit"; then
+            echo "ulimit file size setting failed"
+        fi
         echo "${test_dir}: running..." 1>&2
         "./${run}" $run_args "$@" 2>&1
         run_exit="$?"
         if [ "$run_exit" = "0" ]; then
             if [ "$run_checker" = "yes" ]; then
-                "$checker" "$cfg_output" "$tmp_dir" 2>&1
+                if [ "$target_mode" = "yes" ]; then
+                    adb pull $cfg_output_dir/$cfg_output &> /dev/null
+                fi
+                "$checker" $checker_arch_option "$cfg_output" "$tmp_dir" 2>&1
                 checker_exit="$?"
                 if [ "$checker_exit" = "0" ]; then
                     good="yes"
@@ -604,10 +703,16 @@ elif [ "$update_mode" = "yes" ]; then
     "./${build}" $build_args >"$build_output" 2>&1
     build_exit="$?"
     if [ "$build_exit" = '0' ]; then
+        if ! ulimit -S "$run_file_size_limit"; then
+            echo "ulimit file size setting failed"
+        fi
         echo "${test_dir}: running..." 1>&2
         "./${run}" $run_args "$@" >"$output" 2>&1
         if [ "$run_checker" = "yes" ]; then
-            "$checker" -q "$cfg_output" "$tmp_dir" >> "$output" 2>&1
+            if [ "$target_mode" = "yes" ]; then
+                adb pull $cfg_output_dir/$cfg_output &> /dev/null
+            fi
+            "$checker" -q $checker_arch_option "$cfg_output" "$tmp_dir" >> "$output" 2>&1
         fi
         sed -e 's/[[:cntrl:]]$//g' < "$output" >"${td_expected}"
         good="yes"
@@ -635,6 +740,9 @@ else
     "./${build}" $build_args >"$build_output" 2>&1
     build_exit="$?"
     if [ "$build_exit" = '0' ]; then
+        if ! ulimit -S "$run_file_size_limit"; then
+            echo "ulimit file size setting failed"
+        fi
         echo "${test_dir}: running..." 1>&2
         "./${run}" $run_args "$@" >"$output" 2>&1
         run_exit="$?"
@@ -642,7 +750,10 @@
         echo "run exit status: $run_exit" 1>&2
         good_run="no"
     elif [ "$run_checker" = "yes" ]; then
-        "$checker" -q "$cfg_output" "$tmp_dir" >> "$output" 2>&1
+        if [ "$target_mode" = "yes" ]; then
+            adb pull $cfg_output_dir/$cfg_output &> /dev/null
+        fi
+        "$checker" -q $checker_arch_option "$cfg_output" "$tmp_dir" >> "$output" 2>&1
         checker_exit="$?"
         if [ "$checker_exit" != "0" ]; then
             echo "checker exit status: $checker_exit" 1>&2
@@ -684,6 +795,11 @@ fi
     echo '#################### diffs'
     diff --strip-trailing-cr -u "$expected" "$output" | tail -n 2000
     echo '####################'
+    if [ "$strace" = "yes" ]; then
+        echo '#################### strace output'
+        tail -n 2000 "$tmp_dir/$strace_output"
+        echo '####################'
+    fi
     echo ' '
 fi
diff --git a/tools/checker/README b/tools/checker/README
index 858a773768..259691e500 100644
--- a/tools/checker/README
+++ b/tools/checker/README
@@ -52,3 +52,11 @@ Example:
 The engine will attempt to match the check lines against the output of the
 group named on the first line. Together they verify that the CFG after
 constant folding returns an integer constant with value either 11 or 22.
+
+A group of check lines can be made architecture-specific by inserting '-<arch>'
+after the 'CHECK-START' keyword. The previous example can be updated to run for
+arm64 only with:
+
+  // CHECK-START-ARM64: int MyClass.MyMethod() constant_folding (after)
+  // CHECK: <<ID:i\d+>> IntConstant {{11|22}}
+  // CHECK: Return [<<ID>>]
diff --git a/tools/checker/checker.py b/tools/checker/checker.py
index ed630e3d12..bc5e17da6a 100755
--- a/tools/checker/checker.py
+++ b/tools/checker/checker.py
@@ -17,6 +17,7 @@
 import argparse
 import os
 
+from common.archs import archs_list
 from common.logger import Logger
 from file_format.c1visualizer.parser import ParseC1visualizerStream
 from file_format.checker.parser import ParseCheckerStream
@@ -34,6 +35,8 @@ def ParseArguments():
                       help="print a list of all passes found in the tested file")
   parser.add_argument("--dump-pass", dest="dump_pass", metavar="PASS",
                       help="print a compiler pass dump")
+  parser.add_argument("--arch", dest="arch", choices=archs_list,
+                      help="Run the tests for the specified target architecture.")
   parser.add_argument("-q", "--quiet", action="store_true",
                       help="print only errors")
   return parser.parse_args()
@@ -62,7 +65,7 @@ def DumpPass(outputFilename, passName):
 def FindCheckerFiles(path):
   """ Returns a list of files to scan for check annotations in the given path.
       Path to a file is returned as a single-element list, directories are
-      recursively traversed and all '.java' files returned.
+      recursively traversed and all '.java' and '.smali' files returned.
   """
   if not path:
     Logger.fail("No source path provided")
@@ -80,13 +83,13 @@ def FindCheckerFiles(path):
     Logger.fail("Source path \"" + path + "\" not found")
 
 
-def RunTests(checkPrefix, checkPath, outputFilename):
+def RunTests(checkPrefix, checkPath, outputFilename, targetArch):
   c1File = ParseC1visualizerStream(os.path.basename(outputFilename), open(outputFilename, "r"))
   for checkFilename in FindCheckerFiles(checkPath):
     checkerFile = ParseCheckerStream(os.path.basename(checkFilename),
                                      checkPrefix,
                                      open(checkFilename, "r"))
-    MatchFiles(checkerFile, c1File)
+    MatchFiles(checkerFile, c1File, targetArch)
 
 
 if __name__ == "__main__":
@@ -100,4 +103,4 @@ if __name__ == "__main__":
   elif args.dump_pass:
     DumpPass(args.tested_file, args.dump_pass)
   else:
-    RunTests(args.check_prefix, args.source_path, args.tested_file)
+    RunTests(args.check_prefix, args.source_path, args.tested_file, args.arch)
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
new file mode 100644
index 0000000000..84bded9281
--- /dev/null
+++ b/tools/checker/common/archs.py
@@ -0,0 +1,15 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+archs_list = ['ARM', 'ARM64', 'MIPS64', 'X86', 'X86_64']
diff --git a/tools/checker/file_format/c1visualizer/parser.py b/tools/checker/file_format/c1visualizer/parser.py
index 335a195883..bdcde9db51 100644
--- a/tools/checker/file_format/c1visualizer/parser.py
+++ b/tools/checker/file_format/c1visualizer/parser.py
@@ -27,10 +27,12 @@ class C1ParserState:
 
 def __parseC1Line(line, lineNo, state, fileName):
   """ This function is invoked on each line of the output file and returns
-      a pair which instructs the parser how the line should be handled. If the
+      a triplet which instructs the parser how the line should be handled. If the
       line is to be included in the current group, it is returned in the first
       value. If the line starts a new output group, the name of the group is
-      returned in the second value.
+      returned in the second value. The third value exists only to keep the
+      function prototype compatible with `SplitStream` and is always `None`
+      here.
   """
   if state.currentState == C1ParserState.StartingCfgBlock:
     # Previous line started a new 'cfg' block which means that this one must
@@ -39,16 +41,16 @@ def __parseC1Line(line, lineNo, state, fileName):
       # Extract the pass name, prepend it with the name of the method and
      # return as the beginning of a new group.
       state.currentState = C1ParserState.InsideCfgBlock
-      return (None, state.lastMethodName + " " + line.split("\"")[1])
+      return (None, state.lastMethodName + " " + line.split("\"")[1], None)
     else:
       Logger.fail("Expected output group name", fileName, lineNo)
 
   elif state.currentState == C1ParserState.InsideCfgBlock:
     if line == "end_cfg":
       state.currentState = C1ParserState.OutsideBlock
-      return (None, None)
+      return (None, None, None)
     else:
-      return (line, None)
+      return (line, None, None)
 
   elif state.currentState == C1ParserState.InsideCompilationBlock:
     # Search for the method's name. Format: method "<name>"
@@ -59,7 +61,7 @@ def __parseC1Line(line, lineNo, state, fileName):
       state.lastMethodName = methodName
     elif line == "end_compilation":
       state.currentState = C1ParserState.OutsideBlock
-      return (None, None)
+      return (None, None, None)
 
   else:
     assert state.currentState == C1ParserState.OutsideBlock
@@ -69,10 +71,10 @@ def __parseC1Line(line, lineNo, state, fileName):
       if state.lastMethodName is None:
         Logger.fail("Expected method header", fileName, lineNo)
       state.currentState = C1ParserState.StartingCfgBlock
-      return (None, None)
+      return (None, None, None)
     elif line == "begin_compilation":
       state.currentState = C1ParserState.InsideCompilationBlock
-      return (None, None)
+      return (None, None, None)
     else:
       Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
 
@@ -82,6 +84,7 @@ def ParseC1visualizerStream(fileName, stream):
   fnProcessLine = lambda line, lineNo: __parseC1Line(line, lineNo, state, fileName)
   fnLineOutsideChunk = lambda line, lineNo: \
     Logger.fail("C1visualizer line not inside a group", fileName, lineNo)
-  for passName, passLines, startLineNo in SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+  for passName, passLines, startLineNo, testArch in \
+      SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
     C1visualizerPass(c1File, passName, passLines, startLineNo + 1)
   return c1File
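Every line-processor handed to SplitStream now returns the same three-value shape: (processed line, new chunk name, architecture). __parseC1Line above never starts architecture-specific chunks, so its third value is always None. A toy callback in that shape, for a hypothetical format in which '== name ==' opens an arch-agnostic chunk:

  def process_line(line, line_no):
      # Opens a new chunk: no payload, a chunk name, no architecture.
      if line.startswith("== ") and line.endswith(" =="):
          return None, line.strip("= "), None
      # Plain payload line: belongs to the current chunk.
      return line, None, None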
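One more note on the checker.py hunk further up: FindCheckerFiles now collects '.smali' sources alongside '.java', and only part of its body is visible in the diff. One way such a recursive collection can be written (a sketch under that reading, not the verbatim function):

  import os

  def find_checker_files(path):
      # A single file is returned as-is; directories are walked recursively.
      if os.path.isfile(path):
          return [path]
      found = []
      for root, _, files in os.walk(path):
          for name in files:
              if os.path.splitext(name)[1] in (".java", ".smali"):
                  found.append(os.path.join(root, name))
      return found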
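The checker parser changes coming next implement the new syntax: a 'CHECK-START' line either carries no suffix or one of the known architecture names, and __processLine simply probes each possibility through __extractLine. The probe, reduced to a self-contained runnable sketch (regex simplified from the hunk; archs_list as added in common/archs.py above):

  import re

  archs_list = ['ARM', 'ARM64', 'MIPS64', 'X86', 'X86_64']

  def start_line_arch(line):
      # Probe the arch-agnostic form first, then every known suffix.
      for arch in [None] + archs_list:
          suffix = "-%s" % arch if arch is not None else ""
          m = re.match(r"\s*(///|##)\s*CHECK-START%s:\s*(.*)" % suffix, line)
          if m:
              return m.group(2), arch
      return None, None

  print(start_line_arch("/// CHECK-START-ARM64: int MyClass.MyMethod() constant_folding (after)"))
  # -> ('int MyClass.MyMethod() constant_folding (after)', 'ARM64')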
diff --git a/tools/checker/file_format/checker/parser.py b/tools/checker/file_format/checker/parser.py
index 33735cbea0..001f72a225 100644
--- a/tools/checker/file_format/checker/parser.py
+++ b/tools/checker/file_format/checker/parser.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from common.archs import archs_list
 from common.logger import Logger
 from file_format.common import SplitStream
 from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
@@ -21,17 +22,18 @@ import re
 def __isCheckerLine(line):
   return line.startswith("///") or line.startswith("##")
 
-def __extractLine(prefix, line):
+def __extractLine(prefix, line, arch = None):
   """ Attempts to parse a check line. The regex searches for a comment symbol
       followed by the CHECK keyword, given attribute and a colon at the very
       beginning of the line. Whitespaces are ignored.
   """
   rIgnoreWhitespace = r"\s*"
   rCommentSymbols = [r"///", r"##"]
+  arch_specifier = r"-%s" % arch if arch is not None else r""
   regexPrefix = rIgnoreWhitespace + \
                 r"(" + r"|".join(rCommentSymbols) + r")" + \
                 rIgnoreWhitespace + \
-                prefix + r":"
+                prefix + arch_specifier + r":"
 
   # The 'match' function succeeds only if the pattern is matched at the
   # beginning of the line.
@@ -42,41 +44,44 @@ def __extractLine(prefix, line):
   return None
 
 def __processLine(line, lineNo, prefix, fileName):
-  """ This function is invoked on each line of the check file and returns a pair
+  """ This function is invoked on each line of the check file and returns a triplet
       which instructs the parser how the line should be handled. If the line is
       to be included in the current check group, it is returned in the first
       value. If the line starts a new check group, the name of the group is
-      returned in the second value.
+      returned in the second value. The third value is the architecture named
+      by an architecture-specific suffix, or None if there is no such suffix.
   """
   if not __isCheckerLine(line):
-    return None, None
+    return None, None, None
 
   # Lines beginning with 'CHECK-START' start a new test case.
-  startLine = __extractLine(prefix + "-START", line)
-  if startLine is not None:
-    return None, startLine
+  # We currently only consider the architecture suffix in "CHECK-START" lines.
+  for arch in [None] + archs_list:
+    startLine = __extractLine(prefix + "-START", line, arch)
+    if startLine is not None:
+      return None, startLine, arch
 
   # Lines starting only with 'CHECK' are matched in order.
   plainLine = __extractLine(prefix, line)
   if plainLine is not None:
-    return (plainLine, TestAssertion.Variant.InOrder, lineNo), None
+    return (plainLine, TestAssertion.Variant.InOrder, lineNo), None, None
 
   # 'CHECK-NEXT' lines are in-order but must match the very next line.
   nextLine = __extractLine(prefix + "-NEXT", line)
   if nextLine is not None:
-    return (nextLine, TestAssertion.Variant.NextLine, lineNo), None
+    return (nextLine, TestAssertion.Variant.NextLine, lineNo), None, None
 
   # 'CHECK-DAG' lines are no-order assertions.
   dagLine = __extractLine(prefix + "-DAG", line)
   if dagLine is not None:
-    return (dagLine, TestAssertion.Variant.DAG, lineNo), None
+    return (dagLine, TestAssertion.Variant.DAG, lineNo), None, None
 
   # 'CHECK-NOT' lines are no-order negative assertions.
   notLine = __extractLine(prefix + "-NOT", line)
   if notLine is not None:
-    return (notLine, TestAssertion.Variant.Not, lineNo), None
+    return (notLine, TestAssertion.Variant.Not, lineNo), None, None
 
-  Logger.fail("Checker assertion could not be parsed", fileName, lineNo)
+  Logger.fail("Checker assertion could not be parsed: '" + line + "'", fileName, lineNo)
 
 def __isMatchAtStart(match):
   """ Tests if the given Match occurred at the beginning of the line. """
@@ -146,8 +151,9 @@ def ParseCheckerStream(fileName, prefix, stream):
   fnProcessLine = lambda line, lineNo: __processLine(line, lineNo, prefix, fileName)
   fnLineOutsideChunk = lambda line, lineNo: \
     Logger.fail("Checker line not inside a group", fileName, lineNo)
-  for caseName, caseLines, startLineNo in SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
-    testCase = TestCase(checkerFile, caseName, startLineNo)
+  for caseName, caseLines, startLineNo, testArch in \
+      SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
+    testCase = TestCase(checkerFile, caseName, startLineNo, testArch)
     for caseLine in caseLines:
       ParseCheckerAssertion(testCase, caseLine[0], caseLine[1], caseLine[2])
   return checkerFile
diff --git a/tools/checker/file_format/checker/struct.py b/tools/checker/file_format/checker/struct.py
index 6a541428df..2b2e4429e7 100644
--- a/tools/checker/file_format/checker/struct.py
+++ b/tools/checker/file_format/checker/struct.py
@@ -26,6 +26,9 @@ class CheckerFile(PrintableMixin):
   def addTestCase(self, new_test_case):
     self.testCases.append(new_test_case)
 
+  def testCasesForArch(self, targetArch):
+    return [t for t in self.testCases if t.testArch == targetArch]
+
   def __eq__(self, other):
     return isinstance(other, self.__class__) \
        and self.testCases == other.testCases
@@ -33,13 +36,14 @@ class TestCase(PrintableMixin):
 
-  def __init__(self, parent, name, startLineNo):
+  def __init__(self, parent, name, startLineNo, testArch = None):
     assert isinstance(parent, CheckerFile)
 
     self.parent = parent
     self.name = name
     self.assertions = []
     self.startLineNo = startLineNo
+    self.testArch = testArch
 
     if not self.name:
       Logger.fail("Test case does not have a name", self.fileName, self.startLineNo)
diff --git a/tools/checker/file_format/checker/test.py b/tools/checker/file_format/checker/test.py
index ff24cc1239..36ed4b1592 100644
--- a/tools/checker/file_format/checker/test.py
+++ b/tools/checker/file_format/checker/test.py
@@ -14,6 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from common.archs import archs_list
 from common.testing import ToUnicode
 from file_format.checker.parser import ParseCheckerStream
 from file_format.checker.struct import CheckerFile, TestCase, TestAssertion, RegexExpression
@@ -280,3 +281,51 @@ class CheckerParser_FileLayoutTest(unittest.TestCase):
         /// CHECK-START: Example Group
         /// CHECK-NEXT: bar
       """)
+
+
+class CheckerParser_ArchTests(unittest.TestCase):
+
+  noarch_block = """
+      /// CHECK-START: Group
+      /// CHECK:       foo
+      /// CHECK-NEXT:  bar
+      /// CHECK-NOT:   baz
+      /// CHECK-DAG:   yoyo
+    """
+
+  arch_block = """
+      /// CHECK-START-{test_arch}: Group
+      /// CHECK:       foo
+      /// CHECK-NEXT:  bar
+      /// CHECK-NOT:   baz
+      /// CHECK-DAG:   yoyo
+    """
+
+  def test_NonArchTests(self):
+    for arch in [None] + archs_list:
+      checkerFile = ParseCheckerStream("<test-file>",
+                                       "CHECK",
+                                       io.StringIO(ToUnicode(self.noarch_block)))
+      self.assertEqual(len(checkerFile.testCases), 1)
+      self.assertEqual(len(checkerFile.testCases[0].assertions), 4)
+
+  def test_IgnoreNonTargetArch(self):
+    for targetArch in archs_list:
+      for testArch in [a for a in archs_list if a != targetArch]:
+        checkerText = self.arch_block.format(test_arch = testArch)
+        checkerFile = ParseCheckerStream("<test-file>",
+                                         "CHECK",
+                                         io.StringIO(ToUnicode(checkerText)))
+        self.assertEqual(len(checkerFile.testCases), 1)
+        self.assertEqual(len(checkerFile.testCasesForArch(testArch)), 1)
+        self.assertEqual(len(checkerFile.testCasesForArch(targetArch)), 0)
+
+  def test_Arch(self):
+    for arch in archs_list:
+      checkerText = self.arch_block.format(test_arch = arch)
+      checkerFile = ParseCheckerStream("<test-file>",
+                                       "CHECK",
+                                       io.StringIO(ToUnicode(checkerText)))
+      self.assertEqual(len(checkerFile.testCases), 1)
+      self.assertEqual(len(checkerFile.testCasesForArch(arch)), 1)
+      self.assertEqual(len(checkerFile.testCases[0].assertions), 4)
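The tests above show the intended end-to-end behavior: an architecture-suffixed group becomes a TestCase whose testArch is set, and testCasesForArch selects it. A quick interactive sketch of the same round trip (run from tools/checker so the imports resolve; ToUnicode as used by the unit tests):

  import io
  from common.testing import ToUnicode
  from file_format.checker.parser import ParseCheckerStream

  text = """
  /// CHECK-START-ARM64: int MyClass.MyMethod() constant_folding (after)
  /// CHECK: IntConstant
  """
  checker_file = ParseCheckerStream("<sketch>", "CHECK", io.StringIO(ToUnicode(text)))
  print(len(checker_file.testCasesForArch("ARM64")))  # expected: 1
  print(len(checker_file.testCasesForArch(None)))     # expected: 0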
diff --git a/tools/checker/file_format/common.py b/tools/checker/file_format/common.py
index f91fdeb9cc..4931550355 100644
--- a/tools/checker/file_format/common.py
+++ b/tools/checker/file_format/common.py
@@ -18,8 +18,9 @@ def SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
 
   Arguments:
    - fnProcessLine: Called on each line with the text and line number. Must
-     return a pair, name of the chunk started on this line and data extracted
-     from this line (or None in both cases).
+     return a triplet: the data extracted from this line, the name of the
+     chunk started on this line, and the architecture the test applies to
+     (None, in the last case, means all architectures should run the test).
    - fnLineOutsideChunk: Called on attempt to attach data prior to creating
      a chunk.
   """
@@ -36,9 +37,11 @@ def SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
     # Let the child class process the line and return information about it.
     # The _processLine method can modify the content of the line (or delete it
     # entirely) and specify whether it starts a new group.
-    processedLine, newChunkName = fnProcessLine(line, lineNo)
+    processedLine, newChunkName, testArch = fnProcessLine(line, lineNo)
+    # Currently, only a full chunk can be specified as architecture-specific.
+    assert testArch is None or newChunkName is not None
     if newChunkName is not None:
-      currentChunk = (newChunkName, [], lineNo)
+      currentChunk = (newChunkName, [], lineNo, testArch)
       allChunks.append(currentChunk)
     if processedLine is not None:
       if currentChunk is not None:
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index b22211ab56..42ca7df439 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -150,8 +150,10 @@ def MatchTestCase(testCase, c1Pass):
       matchFrom = match.scope.end + 1
       variables = match.variables
 
-def MatchFiles(checkerFile, c1File):
+def MatchFiles(checkerFile, c1File, targetArch):
   for testCase in checkerFile.testCases:
+    if testCase.testArch not in [None, targetArch]:
+      continue
     # TODO: Currently does not handle multiple occurrences of the same group
     # name, e.g. when a pass is run multiple times. It will always try to
     # match a check group against the first output group of the same name.
diff --git a/tools/checker/run_unit_tests.py b/tools/checker/run_unit_tests.py
index 01708dbd27..2f5b1feaa6 100755
--- a/tools/checker/run_unit_tests.py
+++ b/tools/checker/run_unit_tests.py
@@ -18,7 +18,8 @@ from common.logger import Logger
 from file_format.c1visualizer.test import C1visualizerParser_Test
 from file_format.checker.test import CheckerParser_PrefixTest, \
                                      CheckerParser_RegexExpressionTest, \
-                                     CheckerParser_FileLayoutTest
+                                     CheckerParser_FileLayoutTest, \
+                                     CheckerParser_ArchTests
 from match.test import MatchLines_Test, \
                        MatchFiles_Test
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 682a27bd8a..7ada1896e8 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -137,5 +137,25 @@
   result: EXEC_FAILED,
   bug:22106064,
   name: "libcore.java.lang.OldThreadGroupTest#test_enumerateLThreadArrayLZtest_enumerateLThreadArrayLZ"
+},
+{
+  description: "test_xattr fails on arm64 on the buildbots only: needs investigation",
+  result: EXEC_FAILED,
+  modes: [device],
+  names: ["libcore.io.OsTest#test_xattr"],
+  bug: 22258911
+},
+{
+  description: "fails on L builds: needs investigation",
+  result: EXEC_FAILED,
+  modes: [device],
+  names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
+},
+{
+  description: "TimeZoneTest.testAllDisplayNames times out, needs investigation",
+  result: EXEC_TIMEOUT,
+  modes: [device],
+  names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
+  bug: 22786792
 }
 ]
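SplitStream above now yields four-value chunks: name, lines, starting line number, and architecture. Feeding the toy triplet-shaped callback from the earlier sketch through the real helper shows what consumers unpack (runnable from tools/checker; exact line numbering and whitespace handling are SplitStream's):

  import io
  from file_format.common import SplitStream

  def process_line(line, line_no):
      if line.startswith("== ") and line.endswith(" =="):
          return None, line.strip("= "), None
      return line, None, None

  def outside_chunk(line, line_no):
      raise AssertionError("line %d outside any chunk" % line_no)

  stream = io.StringIO(u"== first ==\npayload\n")
  for name, lines, start_line_no, arch in SplitStream(stream, process_line, outside_chunk):
      print(name, lines, start_line_no, arch)  # expected: first ['payload'] 1 None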
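The match/file.py hunk above is the runtime half of the feature: a test case whose testArch is set gets skipped unless it names the --arch target. The selection rule, isolated (the same membership test as in the hunk):

  def runs_on(test_arch, target_arch):
      # Mirrors `testCase.testArch not in [None, targetArch]` in MatchFiles.
      return test_arch in [None, target_arch]

  assert runs_on(None, "ARM64")        # arch-agnostic cases always run
  assert runs_on("ARM64", "ARM64")     # matches the requested target
  assert not runs_on("ARM64", "X86")   # skipped on any other target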
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 7135dbaf55..116a611a80 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -33,7 +33,7 @@ art_debugee="sh /data/local/tmp/system/bin/art"
 # We use Quick's image on target because optimizing's image is not compiled debuggable.
 image="-Ximage:/data/art-test/core.art"
 args=$@
-debuggee_args="-Xcompiler-option --compiler-backend=Optimizing -Xcompiler-option --debuggable"
+debuggee_args="-Xcompiler-option --debuggable"
 device_dir="--device-dir=/data/local/tmp"
 # We use the art script on target to ensure the runner and the debuggee share the same
 # image.
@@ -92,6 +92,5 @@ vogar $vm_command \
       --vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
       --vm-arg -Djpda.settings.debuggeeJavaPath="\"$art_debugee $image $debuggee_args\"" \
       --classpath $test_jar \
-      --vm-arg -Xcompiler-option --vm-arg --compiler-backend=Optimizing \
       --vm-arg -Xcompiler-option --vm-arg --debuggable \
       org.apache.harmony.jpda.tests.share.AllTests
diff --git a/tools/symbolize.sh b/tools/symbolize.sh
index 7365a9bb3d..f5686e6d0f 100755
--- a/tools/symbolize.sh
+++ b/tools/symbolize.sh
@@ -22,6 +22,7 @@ INTERACTIVE="no"
 if [ "x$1" = "x--interactive" ] ; then
   INTERACTIVE="yes"
+  shift
 fi
 
 # Pull the file from the device and symbolize it.
@@ -57,4 +58,15 @@ function all() {
   done
 }
 
-all
+if [ "x$1" = "x" ] ; then
+  # No further arguments, iterate over all oat files on device.
+  all
+else
+  # Take the parameters as a list of paths on device.
+  while (($#)); do
+    DIR=$(dirname $1)
+    NAME=$(basename $1)
+    one $DIR $NAME
+    shift
+  done
+fi
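Finally, symbolize.sh now also accepts explicit device paths, splitting each argument into the directory and file name that `one` expects. The same split, sketched in Python (the path is only an example):

  import os.path

  path = "/data/dalvik-cache/arm64/system@framework@boot.oat"
  print(os.path.dirname(path), os.path.basename(path))  # what `one $DIR $NAME` receives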