diff options
348 files changed, 10874 insertions, 7582 deletions
diff --git a/Android.bp b/Android.bp index f7e909d079..b9f1db5f46 100644 --- a/Android.bp +++ b/Android.bp @@ -20,6 +20,7 @@ art_static_dependencies = [ subdirs = [ "benchmark", "build", + "cmdline", "compiler", "dalvikvm", "dex2oat", @@ -33,6 +34,7 @@ subdirs = [ "profman", "runtime", "sigchainlib", + "test", "tools/cpp-define-generator", "tools/dmtracedump", ] diff --git a/Android.mk b/Android.mk index d43118cf97..3740ed8ea2 100644 --- a/Android.mk +++ b/Android.mk @@ -328,8 +328,6 @@ test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) endif -endif # art_test_bother - # Valgrind. .PHONY: valgrind-test-art-target valgrind-test-art-target: valgrind-test-art-target-gtest @@ -343,6 +341,8 @@ valgrind-test-art-target32: valgrind-test-art-target-gtest32 valgrind-test-art-target64: valgrind-test-art-target-gtest64 $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) +endif # art_test_bother + ######################################################################## # oat-target and oat-target-sync rules @@ -434,6 +434,16 @@ build-art-host: $(HOST_OUT_EXECUTABLES)/art $(ART_HOST_DEPENDENCIES) $(HOST_CO build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS) ######################################################################## +# Phony target for only building what go/lem requires on target. +.PHONY: build-art-target-golem +build-art-target-golem: dex2oat dalvikvm patchoat linker \ + $(TARGET_OUT)/etc/public.libraries.txt \ + $(ART_TARGET_DEX_DEPENDENCIES) \ + $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES) \ + $(TARGET_CORE_IMG_OUT_BASE).art \ + $(TARGET_CORE_IMG_OUT_BASE)-interpreter.art + +######################################################################## # Rules for building all dependencies for tests. 
.PHONY: build-art-host-tests diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk index 00d29b9ea8..e568ce283e 100644 --- a/build/Android.common_path.mk +++ b/build/Android.common_path.mk @@ -35,13 +35,6 @@ ART_TARGET_NATIVETEST_OUT := $(TARGET_OUT_DATA_NATIVE_TESTS)/art ART_TARGET_TEST_DIR := /data/art-test ART_TARGET_TEST_OUT := $(TARGET_OUT_DATA)/art-test -# Directory used for temporary test files on the host. -ifneq ($(TMPDIR),) -ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID) -else -ART_HOST_TEST_DIR := /tmp/$(USER)/test-art-$(shell echo $$PPID) -endif - # core.oat location on the device. TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$(DEX2OAT_TARGET_ARCH)/core.oat ifdef TARGET_2ND_ARCH diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk index 93e310e0eb..8124ca311d 100644 --- a/build/Android.common_test.mk +++ b/build/Android.common_test.mk @@ -19,6 +19,13 @@ ART_ANDROID_COMMON_TEST_MK = true include art/build/Android.common_path.mk +# Directory used for temporary test files on the host. +ifneq ($(TMPDIR),) +ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID) +else +ART_HOST_TEST_DIR := /tmp/$(USER)/test-art-$(shell echo $$PPID) +endif + # We need to set a define for the nativetest dir so that common_runtime_test will know the right # path. (The problem is being a 32b test on 64b device, which is still located in nativetest64). ART_TARGET_CFLAGS += -DART_TARGET_NATIVETEST_DIR=${ART_TARGET_NATIVETEST_DIR} @@ -60,8 +67,8 @@ ART_TEST_OPTIMIZING ?= true # Do you want to test the optimizing compiler with graph coloring register allocation? ART_TEST_OPTIMIZING_GRAPH_COLOR ?= $(ART_TEST_FULL) -# Do we want to test a PIC-compiled core image? -ART_TEST_PIC_IMAGE ?= $(ART_TEST_FULL) +# Do we want to test a non-PIC-compiled core image? +ART_TEST_NPIC_IMAGE ?= $(ART_TEST_FULL) # Do we want to test PIC-compiled tests ("apps")? 
ART_TEST_PIC_TEST ?= $(ART_TEST_FULL) diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 76ee06e453..c70f005878 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -65,6 +65,16 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) cp $< $@ $(call dexpreopt-remove-classes.dex,$@) +ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali)) +ART_TEST_HOST_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +$(ART_TEST_HOST_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^) + +$(ART_TEST_TARGET_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali + $(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^) + # Dex file dependencies for each gtest. ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested @@ -88,18 +98,19 @@ ART_GTEST_profile_compilation_info_test_DEX_DEPS := ProfileTestMultiDex ART_GTEST_stub_test_DEX_DEPS := AllFields ART_GTEST_transaction_test_DEX_DEPS := Transaction ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup +ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps # The elf writer test has dependencies on core.oat. 
-ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32) -ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32) +ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_optimizing_no-pic_64) $(HOST_CORE_IMAGE_optimizing_no-pic_32) +ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_optimizing_no-pic_64) $(TARGET_CORE_IMAGE_optimizing_no-pic_32) ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \ - $(HOST_CORE_IMAGE_default_no-pic_64) \ - $(HOST_CORE_IMAGE_default_no-pic_32) \ + $(HOST_CORE_IMAGE_optimizing_pic_64) \ + $(HOST_CORE_IMAGE_optimizing_pic_32) \ $(HOST_OUT_EXECUTABLES)/patchoatd ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \ - $(TARGET_CORE_IMAGE_default_no-pic_64) \ - $(TARGET_CORE_IMAGE_default_no-pic_32) \ + $(TARGET_CORE_IMAGE_optimizing_pic_64) \ + $(TARGET_CORE_IMAGE_optimizing_pic_32) \ $(TARGET_OUT_EXECUTABLES)/patchoatd ART_GTEST_oat_file_assistant_test_HOST_DEPS := \ @@ -114,62 +125,63 @@ ART_GTEST_dex2oat_test_TARGET_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) # TODO: document why this is needed. -ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32) +ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_optimizing_no-pic_64) $(HOST_CORE_IMAGE_optimizing_no-pic_32) # The dexdump test requires an image and the dexdump utility. 
# TODO: rename into dexdump when migration completes ART_GTEST_dexdump_test_HOST_DEPS := \ - $(HOST_CORE_IMAGE_default_no-pic_64) \ - $(HOST_CORE_IMAGE_default_no-pic_32) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_64) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_32) \ $(HOST_OUT_EXECUTABLES)/dexdump2 ART_GTEST_dexdump_test_TARGET_DEPS := \ - $(TARGET_CORE_IMAGE_default_no-pic_64) \ - $(TARGET_CORE_IMAGE_default_no-pic_32) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \ dexdump2 # The dexlayout test requires an image and the dexlayout utility. # TODO: rename into dexdump when migration completes ART_GTEST_dexlayout_test_HOST_DEPS := \ - $(HOST_CORE_IMAGE_default_no-pic_64) \ - $(HOST_CORE_IMAGE_default_no-pic_32) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_64) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_32) \ $(HOST_OUT_EXECUTABLES)/dexlayout \ $(HOST_OUT_EXECUTABLES)/dexdump2 ART_GTEST_dexlayout_test_TARGET_DEPS := \ - $(TARGET_CORE_IMAGE_default_no-pic_64) \ - $(TARGET_CORE_IMAGE_default_no-pic_32) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \ dexlayout \ dexdump2 # The dexlist test requires an image and the dexlist utility. ART_GTEST_dexlist_test_HOST_DEPS := \ - $(HOST_CORE_IMAGE_default_no-pic_64) \ - $(HOST_CORE_IMAGE_default_no-pic_32) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_64) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_32) \ $(HOST_OUT_EXECUTABLES)/dexlist ART_GTEST_dexlist_test_TARGET_DEPS := \ - $(TARGET_CORE_IMAGE_default_no-pic_64) \ - $(TARGET_CORE_IMAGE_default_no-pic_32) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \ dexlist # The imgdiag test has dependencies on core.oat since it needs to load it during the test. # For the host, also add the installed tool (in the base size, that should suffice). For the # target, just the module is fine, the sync will happen late enough. 
ART_GTEST_imgdiag_test_HOST_DEPS := \ - $(HOST_CORE_IMAGE_default_no-pic_64) \ - $(HOST_CORE_IMAGE_default_no-pic_32) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_64) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_32) \ $(HOST_OUT_EXECUTABLES)/imgdiagd ART_GTEST_imgdiag_test_TARGET_DEPS := \ - $(TARGET_CORE_IMAGE_default_no-pic_64) \ - $(TARGET_CORE_IMAGE_default_no-pic_32) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \ imgdiagd # Oatdump test requires an image and oatfile to dump. ART_GTEST_oatdump_test_HOST_DEPS := \ - $(HOST_CORE_IMAGE_default_no-pic_64) \ - $(HOST_CORE_IMAGE_default_no-pic_32) \ - $(HOST_OUT_EXECUTABLES)/oatdumpd + $(HOST_CORE_IMAGE_optimizing_no-pic_64) \ + $(HOST_CORE_IMAGE_optimizing_no-pic_32) \ + $(HOST_OUT_EXECUTABLES)/oatdumpd \ + $(HOST_OUT_EXECUTABLES)/oatdumpds ART_GTEST_oatdump_test_TARGET_DEPS := \ - $(TARGET_CORE_IMAGE_default_no-pic_64) \ - $(TARGET_CORE_IMAGE_default_no-pic_32) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_64) \ + $(TARGET_CORE_IMAGE_optimizing_no-pic_32) \ oatdump # Profile assistant tests requires profman utility. @@ -181,275 +193,38 @@ ART_GTEST_profile_assistant_test_TARGET_DEPS := \ # The path for which all the source files are relative, not actually the current directory. 
LOCAL_PATH := art -RUNTIME_GTEST_COMMON_SRC_FILES := \ - cmdline/cmdline_parser_test.cc \ - dexdump/dexdump_test.cc \ - dexlayout/dexlayout_test.cc \ - dexlist/dexlist_test.cc \ - dex2oat/dex2oat_test.cc \ - imgdiag/imgdiag_test.cc \ - oatdump/oatdump_test.cc \ - profman/profile_assistant_test.cc \ - runtime/arch/arch_test.cc \ - runtime/arch/instruction_set_test.cc \ - runtime/arch/instruction_set_features_test.cc \ - runtime/arch/memcmp16_test.cc \ - runtime/arch/stub_test.cc \ - runtime/arch/arm/instruction_set_features_arm_test.cc \ - runtime/arch/arm64/instruction_set_features_arm64_test.cc \ - runtime/arch/mips/instruction_set_features_mips_test.cc \ - runtime/arch/mips64/instruction_set_features_mips64_test.cc \ - runtime/arch/x86/instruction_set_features_x86_test.cc \ - runtime/arch/x86_64/instruction_set_features_x86_64_test.cc \ - runtime/barrier_test.cc \ - runtime/base/arena_allocator_test.cc \ - runtime/base/bit_field_test.cc \ - runtime/base/bit_utils_test.cc \ - runtime/base/bit_vector_test.cc \ - runtime/base/hash_set_test.cc \ - runtime/base/hex_dump_test.cc \ - runtime/base/histogram_test.cc \ - runtime/base/mutex_test.cc \ - runtime/base/scoped_flock_test.cc \ - runtime/base/stringprintf_test.cc \ - runtime/base/time_utils_test.cc \ - runtime/base/timing_logger_test.cc \ - runtime/base/variant_map_test.cc \ - runtime/base/unix_file/fd_file_test.cc \ - runtime/class_linker_test.cc \ - runtime/compiler_filter_test.cc \ - runtime/dex_file_test.cc \ - runtime/dex_file_verifier_test.cc \ - runtime/dex_instruction_test.cc \ - runtime/dex_instruction_visitor_test.cc \ - runtime/dex_method_iterator_test.cc \ - runtime/entrypoints/math_entrypoints_test.cc \ - runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc \ - runtime/entrypoints_order_test.cc \ - runtime/gc/accounting/card_table_test.cc \ - runtime/gc/accounting/mod_union_table_test.cc \ - runtime/gc/accounting/space_bitmap_test.cc \ - runtime/gc/collector/immune_spaces_test.cc \ - 
runtime/gc/heap_test.cc \ - runtime/gc/reference_queue_test.cc \ - runtime/gc/space/dlmalloc_space_static_test.cc \ - runtime/gc/space/dlmalloc_space_random_test.cc \ - runtime/gc/space/large_object_space_test.cc \ - runtime/gc/space/rosalloc_space_static_test.cc \ - runtime/gc/space/rosalloc_space_random_test.cc \ - runtime/gc/space/space_create_test.cc \ - runtime/gc/system_weak_test.cc \ - runtime/gc/task_processor_test.cc \ - runtime/gtest_test.cc \ - runtime/handle_scope_test.cc \ - runtime/indenter_test.cc \ - runtime/indirect_reference_table_test.cc \ - runtime/instrumentation_test.cc \ - runtime/intern_table_test.cc \ - runtime/interpreter/safe_math_test.cc \ - runtime/interpreter/unstarted_runtime_test.cc \ - runtime/java_vm_ext_test.cc \ - runtime/jit/profile_compilation_info_test.cc \ - runtime/leb128_test.cc \ - runtime/mem_map_test.cc \ - runtime/memory_region_test.cc \ - runtime/mirror/dex_cache_test.cc \ - runtime/mirror/object_test.cc \ - runtime/monitor_pool_test.cc \ - runtime/monitor_test.cc \ - runtime/oat_file_test.cc \ - runtime/oat_file_assistant_test.cc \ - runtime/parsed_options_test.cc \ - runtime/prebuilt_tools_test.cc \ - runtime/reference_table_test.cc \ - runtime/thread_pool_test.cc \ - runtime/transaction_test.cc \ - runtime/type_lookup_table_test.cc \ - runtime/utf_test.cc \ - runtime/utils_test.cc \ - runtime/verifier/method_verifier_test.cc \ - runtime/verifier/reg_type_test.cc \ - runtime/zip_archive_test.cc - -COMPILER_GTEST_COMMON_SRC_FILES := \ - runtime/jni_internal_test.cc \ - runtime/proxy_test.cc \ - runtime/reflection_test.cc \ - compiler/compiled_method_test.cc \ - compiler/debug/dwarf/dwarf_test.cc \ - compiler/driver/compiled_method_storage_test.cc \ - compiler/driver/compiler_driver_test.cc \ - compiler/elf_writer_test.cc \ - compiler/exception_test.cc \ - compiler/image_test.cc \ - compiler/jni/jni_compiler_test.cc \ - compiler/linker/multi_oat_relative_patcher_test.cc \ - compiler/linker/output_stream_test.cc \ - 
compiler/oat_test.cc \ - compiler/optimizing/bounds_check_elimination_test.cc \ - compiler/optimizing/dominator_test.cc \ - compiler/optimizing/find_loops_test.cc \ - compiler/optimizing/graph_checker_test.cc \ - compiler/optimizing/graph_test.cc \ - compiler/optimizing/gvn_test.cc \ - compiler/optimizing/induction_var_analysis_test.cc \ - compiler/optimizing/induction_var_range_test.cc \ - compiler/optimizing/licm_test.cc \ - compiler/optimizing/live_interval_test.cc \ - compiler/optimizing/nodes_test.cc \ - compiler/optimizing/parallel_move_test.cc \ - compiler/optimizing/pretty_printer_test.cc \ - compiler/optimizing/reference_type_propagation_test.cc \ - compiler/optimizing/side_effects_test.cc \ - compiler/optimizing/ssa_test.cc \ - compiler/optimizing/stack_map_test.cc \ - compiler/optimizing/suspend_check_test.cc \ - compiler/utils/dedupe_set_test.cc \ - compiler/utils/intrusive_forward_list_test.cc \ - compiler/utils/string_reference_test.cc \ - compiler/utils/swap_space_test.cc \ - compiler/utils/test_dex_file_builder_test.cc \ - compiler/utils/transform_array_ref_test.cc \ - compiler/utils/transform_iterator_test.cc \ - -COMPILER_GTEST_COMMON_SRC_FILES_all := \ - compiler/jni/jni_cfi_test.cc \ - compiler/optimizing/codegen_test.cc \ - compiler/optimizing/optimizing_cfi_test.cc \ - -COMPILER_GTEST_COMMON_SRC_FILES_arm := \ - compiler/linker/arm/relative_patcher_thumb2_test.cc \ - compiler/utils/arm/managed_register_arm_test.cc \ - -COMPILER_GTEST_COMMON_SRC_FILES_arm64 := \ - compiler/linker/arm64/relative_patcher_arm64_test.cc \ - compiler/utils/arm64/managed_register_arm64_test.cc \ - -COMPILER_GTEST_COMMON_SRC_FILES_mips := \ - compiler/linker/mips/relative_patcher_mips_test.cc \ - compiler/linker/mips/relative_patcher_mips32r6_test.cc \ - -COMPILER_GTEST_COMMON_SRC_FILES_mips64 := \ - -COMPILER_GTEST_COMMON_SRC_FILES_x86 := \ - compiler/linker/x86/relative_patcher_x86_test.cc \ - compiler/utils/x86/managed_register_x86_test.cc \ - -# These tests are 
testing architecture-independent functionality, but happen -# to use x86 codegen as part of the test. -COMPILER_GTEST_COMMON_SRC_FILES_x86 += \ - compiler/optimizing/constant_folding_test.cc \ - compiler/optimizing/dead_code_elimination_test.cc \ - compiler/optimizing/linearize_test.cc \ - compiler/optimizing/live_ranges_test.cc \ - compiler/optimizing/liveness_test.cc \ - compiler/optimizing/register_allocator_test.cc \ - -COMPILER_GTEST_COMMON_SRC_FILES_x86_64 := \ - compiler/linker/x86_64/relative_patcher_x86_64_test.cc \ - -RUNTIME_GTEST_TARGET_SRC_FILES := \ - $(RUNTIME_GTEST_COMMON_SRC_FILES) - -RUNTIME_GTEST_HOST_SRC_FILES := \ - $(RUNTIME_GTEST_COMMON_SRC_FILES) - -COMPILER_GTEST_TARGET_SRC_FILES := \ - $(COMPILER_GTEST_COMMON_SRC_FILES) - -COMPILER_GTEST_TARGET_SRC_FILES_all := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_all) \ - -COMPILER_GTEST_TARGET_SRC_FILES_arm := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_arm) \ - -COMPILER_GTEST_TARGET_SRC_FILES_arm64 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_arm64) \ - -COMPILER_GTEST_TARGET_SRC_FILES_mips := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \ - -COMPILER_GTEST_TARGET_SRC_FILES_mips64 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \ - -COMPILER_GTEST_TARGET_SRC_FILES_x86 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \ - -COMPILER_GTEST_TARGET_SRC_FILES_x86_64 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \ - -$(foreach arch,$(ART_TARGET_CODEGEN_ARCHS),$(eval COMPILER_GTEST_TARGET_SRC_FILES += $$(COMPILER_GTEST_TARGET_SRC_FILES_$(arch)))) -COMPILER_GTEST_TARGET_SRC_FILES += $(COMPILER_GTEST_TARGET_SRC_FILES_all) - -COMPILER_GTEST_HOST_SRC_FILES := \ - $(COMPILER_GTEST_COMMON_SRC_FILES) \ - -COMPILER_GTEST_HOST_SRC_FILES_all := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_all) \ - -COMPILER_GTEST_HOST_SRC_FILES_arm := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_arm) \ - compiler/utils/arm/assembler_thumb2_test.cc \ - compiler/utils/assembler_thumb_test.cc \ - -COMPILER_GTEST_HOST_SRC_FILES_arm64 := \ - 
$(COMPILER_GTEST_COMMON_SRC_FILES_arm64) \ - -COMPILER_GTEST_HOST_SRC_FILES_mips := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \ - compiler/utils/mips/assembler_mips_test.cc \ - compiler/utils/mips/assembler_mips32r6_test.cc \ - -COMPILER_GTEST_HOST_SRC_FILES_mips64 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \ - compiler/utils/mips64/assembler_mips64_test.cc \ +ART_TEST_MODULES := \ + art_cmdline_tests \ + art_compiler_tests \ + art_compiler_host_tests \ + art_dex2oat_tests \ + art_dexdump_tests \ + art_dexlayout_tests \ + art_dexlist_tests \ + art_imgdiag_tests \ + art_oatdump_tests \ + art_profman_tests \ + art_runtime_tests \ + art_runtime_compiler_tests \ + +ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\ + $(ART_TEST_LIST_device_$(TARGET_ARCH)_$(m))) -COMPILER_GTEST_HOST_SRC_FILES_x86 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \ - compiler/utils/x86/assembler_x86_test.cc \ +ifdef TARGET_2ND_ARCH +2ND_ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\ + $(ART_TEST_LIST_device_$(2ND_TARGET_ARCH)_$(m))) +endif -COMPILER_GTEST_HOST_SRC_FILES_x86_64 := \ - $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \ - compiler/utils/x86_64/assembler_x86_64_test.cc +ART_HOST_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\ + $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_$(m))) -$(foreach arch,$(ART_HOST_CODEGEN_ARCHS),$(eval COMPILER_GTEST_HOST_SRC_FILES += $$(COMPILER_GTEST_HOST_SRC_FILES_$(arch)))) -COMPILER_GTEST_HOST_SRC_FILES += $(COMPILER_GTEST_HOST_SRC_FILES_all) +ifneq ($(HOST_PREFER_32_BIT),true) +2ND_ART_HOST_GTEST_FILES += $(foreach m,$(ART_TEST_MODULES),\ + $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_$(m))) +endif ART_TEST_CFLAGS := -include $(CLEAR_VARS) -LOCAL_MODULE := libart-gtest -LOCAL_MODULE_TAGS := optional -LOCAL_CPP_EXTENSION := cc -LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc -LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/cmdline art/compiler -LOCAL_SHARED_LIBRARIES := libartd libartd-compiler 
libdl -LOCAL_WHOLE_STATIC_LIBRARIES += libgtest -LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk -LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk -$(eval LOCAL_CLANG := $(ART_TARGET_CLANG)) -$(eval $(call set-target-local-cflags-vars,debug)) -LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue -include $(BUILD_SHARED_LIBRARY) - -include $(CLEAR_VARS) -LOCAL_MODULE := libart-gtest -LOCAL_MODULE_TAGS := optional -LOCAL_CPP_EXTENSION := cc -LOCAL_CFLAGS := $(ART_HOST_CFLAGS) -LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) -LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc -LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/cmdline art/compiler -LOCAL_SHARED_LIBRARIES := libartd libartd-compiler -LOCAL_WHOLE_STATIC_LIBRARIES := libgtest -LOCAL_LDLIBS += -ldl -lpthread -LOCAL_MULTILIB := both -LOCAL_CLANG := $(ART_HOST_CLANG) -LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue -LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk -LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk -include $(BUILD_HOST_SHARED_LIBRARY) - # Variables holding collections of gtest pre-requisits used to run a number of gtests. ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := @@ -494,63 +269,70 @@ include $(BUILD_PREBUILT) # Define a make rule for a target device gtest. # $(1): gtest name - the name of the test we're building such as leb128_test. -# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. -# $(3): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/ +# $(2): path relative to $OUT to the test binary +# $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. 
+# $(4): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/ define define-art-gtest-rule-target - gtest_rule := test-art-target-gtest-$(1)$$($(2)ART_PHONY_TEST_TARGET_SUFFIX) + gtest_rule := test-art-target-gtest-$(1)$$($(3)ART_PHONY_TEST_TARGET_SUFFIX) + gtest_exe := $(OUT_DIR)/$(2) + gtest_target_exe := $$(patsubst $(PRODUCT_OUT)/%,/%,$$(gtest_exe)) # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test # to ensure files are pushed to the device. TEST_ART_TARGET_SYNC_DEPS += \ $$(ART_GTEST_$(1)_TARGET_DEPS) \ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \ - $$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \ - $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \ - $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \ + $$(gtest_exe) \ + $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \ + $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \ $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \ $$(ART_TARGET_TEST_OUT)/valgrind-target-suppressions.txt +$$(gtest_rule) valgrind-$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe) + .PHONY: $$(gtest_rule) $$(gtest_rule): test-art-target-sync - $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID - $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID - $(hide) adb shell chmod 755 $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) + $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID + $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID + $(hide) adb shell chmod 755 $$(PRIVATE_TARGET_EXE) $(hide) $$(call ART_TEST_SKIP,$$@) && \ - (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ - $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \ - && (adb pull 
$(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \ + (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ + $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \ + && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \ && $$(call ART_TEST_PASSED,$$@)) \ || $$(call ART_TEST_FAILED,$$@)) $(hide) rm -f /tmp/$$@-$$$$PPID - ART_TEST_TARGET_GTEST$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule) + ART_TEST_TARGET_GTEST$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule) ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule) ART_TEST_TARGET_GTEST_$(1)_RULES += $$(gtest_rule) .PHONY: valgrind-$$(gtest_rule) valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-sync - $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID - $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID - $(hide) adb shell chmod 755 $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) + $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID + $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID + $(hide) adb shell chmod 755 $$(PRIVATE_TARGET_EXE) $(hide) $$(call ART_TEST_SKIP,$$@) && \ - (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ + (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ valgrind --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \ --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \ --num-callers=50 \ - $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \ - && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \ + $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \ + && (adb pull 
$(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \ && $$(call ART_TEST_PASSED,$$@)) \ || $$(call ART_TEST_FAILED,$$@)) $(hide) rm -f /tmp/$$@-$$$$PPID - ART_TEST_TARGET_VALGRIND_GTEST$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += valgrind-$$(gtest_rule) + ART_TEST_TARGET_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += valgrind-$$(gtest_rule) ART_TEST_TARGET_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule) ART_TEST_TARGET_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule) # Clear locally defined variables. valgrind_gtest_rule := gtest_rule := + gtest_exe := + gtest_target_exe := endef # define-art-gtest-rule-target ART_VALGRIND_DEPENDENCIES := \ @@ -565,14 +347,15 @@ ART_VALGRIND_DEPENDENCIES := \ # Define make rules for a host gtests. # $(1): gtest name - the name of the test we're building such as leb128_test. -# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. +# $(2): path relative to $OUT to the test binary +# $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. define define-art-gtest-rule-host - gtest_rule := test-art-host-gtest-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX) - gtest_exe := $$(HOST_OUT_EXECUTABLES)/$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX) + gtest_rule := test-art-host-gtest-$(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX) + gtest_exe := $(OUT_DIR)/$(2) # Dependencies for all host gtests. 
gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \ - $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \ - $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \ + $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \ + $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \ $$(gtest_exe) \ $$(ART_GTEST_$(1)_HOST_DEPS) \ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) @@ -584,7 +367,7 @@ $$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \ || $$(call ART_TEST_FAILED,$$@) - ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule) + ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule) ART_TEST_HOST_GTEST_RULES += $$(gtest_rule) ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule) @@ -598,7 +381,7 @@ valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIE $$< && \ $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@) - ART_TEST_HOST_VALGRIND_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule) + ART_TEST_HOST_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule) ART_TEST_HOST_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule) ART_TEST_HOST_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule) @@ -610,74 +393,64 @@ valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIE endef # define-art-gtest-rule-host # Define the rules to build and run host and target gtests. 
-# $(1): target or host -# $(2): file name -# $(3): extra C includes -# $(4): extra shared libraries -define define-art-gtest - ifneq ($(1),target) - ifneq ($(1),host) - $$(error expected target or host for argument 1, received $(1)) - endif - endif - - art_target_or_host := $(1) - art_gtest_filename := $(2) - art_gtest_extra_c_includes := $(3) - art_gtest_extra_shared_libraries := $(4) +# $(1): file name +# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. +define define-art-gtest-target + art_gtest_filename := $(1) include $$(CLEAR_VARS) art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename))) - LOCAL_MODULE := $$(art_gtest_name) - ifeq ($$(art_target_or_host),target) - LOCAL_MODULE_TAGS := tests - endif - LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION) - LOCAL_SRC_FILES := $$(art_gtest_filename) - LOCAL_C_INCLUDES += $$(ART_C_INCLUDES) art/runtime art/cmdline $$(art_gtest_extra_c_includes) - LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest libartd-disassembler - LOCAL_WHOLE_STATIC_LIBRARIES += libsigchain - - LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk - LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk - - # Mac OS linker doesn't understand --export-dynamic. - ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host) - # Allow jni_compiler_test to find Java_MyClassNatives_bar within itself using dlopen(NULL, ...). 
- LOCAL_LDFLAGS := -Wl,--export-dynamic -Wl,-u,Java_MyClassNatives_bar -Wl,-u,Java_MyClassNatives_sbar - endif - LOCAL_CFLAGS := $$(ART_TEST_CFLAGS) - ifeq ($$(art_target_or_host),target) - $$(eval LOCAL_CLANG := $$(ART_TARGET_CLANG)) - $$(eval $$(call set-target-local-cflags-vars,debug)) - LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixld-arm libvixld-arm64 - LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32) - LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64) - LOCAL_MULTILIB := both - LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue - include $$(BUILD_EXECUTABLE) - library_path := - 2nd_library_path := - ifneq ($$(ART_TEST_ANDROID_ROOT),) - ifdef TARGET_2ND_ARCH - 2nd_library_path := $$(ART_TEST_ANDROID_ROOT)/lib + library_path := + 2ND_library_path := + ifneq ($$(ART_TEST_ANDROID_ROOT),) + ifdef TARGET_2ND_ARCH + 2ND_library_path := $$(ART_TEST_ANDROID_ROOT)/lib + library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 + else + ifneq ($(filter %64,$(TARGET_ARCH)),) library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 else - ifneq ($(filter %64,$(TARGET_ARCH)),) - library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 - else - library_path := $$(ART_TEST_ANDROID_ROOT)/lib - endif + library_path := $$(ART_TEST_ANDROID_ROOT)/lib endif endif + endif + ifndef ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES := - ifdef TARGET_2ND_ARCH - $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),2ND_,$$(2nd_library_path))) - endif - $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),,$$(library_path))) + endif + $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),$$(art_gtest_filename),$(2),$$($(2)library_path))) + + # Clear locally defined variables. 
+ art_gtest_filename := + art_gtest_name := + library_path := + 2ND_library_path := +endef # define-art-gtest-target + +# $(1): file name +# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. +define define-art-gtest-host + art_gtest_filename := $(1) + + include $$(CLEAR_VARS) + art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename))) + ifndef ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES + ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := + ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := + endif + $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),$$(art_gtest_filename),$(2))) + + # Clear locally defined variables. + art_gtest_filename := + art_gtest_name := +endef # define-art-gtest-host + +# Define the rules to build and run gtests for both archs on target. +# $(1): test name +define define-art-gtest-target-both + art_gtest_name := $(1) # A rule to run the different architecture versions of the gtest. .PHONY: test-art-target-gtest-$$(art_gtest_name) @@ -688,30 +461,17 @@ test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_ valgrind-test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) - # Clear locally defined variables. 
- ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := - ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES := - else # host - LOCAL_CLANG := $$(ART_HOST_CLANG) - LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS) - LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS) $$(ART_HOST_DEBUG_ASFLAGS) - LOCAL_SHARED_LIBRARIES += libicuuc libicui18n libnativehelper libziparchive libz-host libvixld-arm libvixld-arm64 - LOCAL_LDLIBS := -lpthread -ldl - LOCAL_IS_HOST_MODULE := true - LOCAL_MULTILIB := both - LOCAL_MODULE_STEM_32 := $$(art_gtest_name)32 - LOCAL_MODULE_STEM_64 := $$(art_gtest_name)64 - LOCAL_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue - include $$(BUILD_HOST_EXECUTABLE) + # Clear now unused variables. + ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := + ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES := + art_gtest_name := +endef # define-art-gtest-target-both - ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := - ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := - ifneq ($$(HOST_PREFER_32_BIT),true) - $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),2ND_)) - endif - $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),)) +# Define the rules to build and run gtests for both archs on host. +# $(1): test name +define define-art-gtest-host-both + art_gtest_name := $(1) - # Rules to run the different architecture versions of the gtest. .PHONY: test-art-host-gtest-$$(art_gtest_name) test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) @@ -720,36 +480,33 @@ test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name valgrind-test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) - # Clear locally defined variables. 
- ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := - ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := - endif # host_or_target - - # Clear locally defined variables. - art_target_or_host := - art_gtest_filename := - art_gtest_extra_c_includes := - art_gtest_extra_shared_libraries := + # Clear now unused variables. + ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := + ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := art_gtest_name := - library_path := - 2nd_library_path := -endef # define-art-gtest - +endef # define-art-gtest-host-both ifeq ($(ART_BUILD_TARGET),true) - $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace))) - $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) + $(foreach file,$(ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target,$(file),))) + ifdef TARGET_2ND_ARCH + $(foreach file,$(2ND_ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target,$(file),2ND_))) + endif + # Rules to run the different architecture versions of the gtest. + $(foreach file,$(ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target-both,$$(notdir $$(basename $$(file)))))) endif ifeq ($(ART_BUILD_HOST),true) - $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,libbacktrace))) - $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) + $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),))) + ifneq ($(HOST_PREFER_32_BIT),true) + $(foreach file,$(2ND_ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),2ND_))) + endif + # Rules to run the different architecture versions of the gtest. 
+ $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host-both,$$(notdir $$(basename $$(file)))))) endif # Used outside the art project to get a list of the current tests RUNTIME_TARGET_GTEST_MAKE_TARGETS := -$(foreach file, $(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(basename $$(file))))) +$(foreach file, $(ART_TARGET_GTEST_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(basename $$(file))))) COMPILER_TARGET_GTEST_MAKE_TARGETS := -$(foreach file, $(COMPILER_GTEST_TARGET_SRC_FILES), $(eval COMPILER_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(basename $$(file))))) # Define all the combinations of host/target, valgrind and suffix such as: # test-art-host-gtest or valgrind-test-art-host-gtest64 @@ -849,11 +606,15 @@ ART_GTEST_reflection_test_DEX_DEPS := ART_GTEST_stub_test_DEX_DEPS := ART_GTEST_transaction_test_DEX_DEPS := ART_GTEST_dex2oat_environment_tests_DEX_DEPS := +ART_GTEST_verifier_deps_test_DEX_DEPS := ART_VALGRIND_DEPENDENCIES := ART_VALGRIND_TARGET_DEPENDENCIES := $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=)) $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=)) ART_TEST_HOST_GTEST_MainStripped_DEX := ART_TEST_TARGET_GTEST_MainStripped_DEX := +ART_TEST_GTEST_VerifierDeps_SRC := +ART_TEST_HOST_GTEST_VerifierDeps_DEX := +ART_TEST_TARGET_GTEST_VerifierDeps_DEX := GTEST_DEX_DIRECTORIES := LOCAL_PATH := diff --git a/build/Android.oat.mk b/build/Android.oat.mk index 884f698cd9..c4887e61ff 100644 --- a/build/Android.oat.mk +++ b/build/Android.oat.mk @@ -37,7 +37,7 @@ else endif # Use dex2oat debug version for better error reporting -# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks. +# $(1): compiler - optimizing, interpreter or interpreter-access-checks. # $(2): pic/no-pic # $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds. # $(4): wrapper, e.g., valgrind. 
@@ -53,13 +53,9 @@ define create-core-oat-host-rules core_pic_infix := core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY) - ifeq ($(1),default) - core_compile_options += --compiler-backend=Quick - endif ifeq ($(1),optimizing) core_compile_options += --compiler-backend=Optimizing core_dex2oat_dependency := $(DEX2OAT) - core_infix := -optimizing endif ifeq ($(1),interpreter) core_compile_options += --compiler-filter=interpret-only @@ -69,24 +65,16 @@ define create-core-oat-host-rules core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail core_infix := -interp-ac endif - ifeq ($(1),jit) - core_compile_options += --compiler-filter=verify-at-runtime - core_infix := -jit - endif - ifeq ($(1),default) - # Default has no infix, no compile options. - endif - ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),) + ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),) #Technically this test is not precise, but hopefully good enough. - $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing) + $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing) endif ifeq ($(2),pic) core_compile_options += --compile-pic - core_pic_infix := -pic endif ifeq ($(2),no-pic) - # No change for non-pic + core_pic_infix := -npic endif ifneq ($(filter-out pic no-pic,$(2)),) # Technically this test is not precise, but hopefully good enough. @@ -148,7 +136,7 @@ $$(core_oat_name): $$(core_image_name) core_pic_infix := endef # create-core-oat-host-rules -# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks. +# $(1): compiler - optimizing, interpreter or interpreter-access-checks. # $(2): wrapper. # $(3): dex2oat suffix. # $(4): multi-image. 
@@ -162,24 +150,18 @@ define create-core-oat-host-rule-combination endif endef -$(eval $(call create-core-oat-host-rule-combination,default,,,false)) $(eval $(call create-core-oat-host-rule-combination,optimizing,,,false)) $(eval $(call create-core-oat-host-rule-combination,interpreter,,,false)) $(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false)) -$(eval $(call create-core-oat-host-rule-combination,jit,,,false)) -$(eval $(call create-core-oat-host-rule-combination,default,,,true)) $(eval $(call create-core-oat-host-rule-combination,optimizing,,,true)) $(eval $(call create-core-oat-host-rule-combination,interpreter,,,true)) $(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true)) -$(eval $(call create-core-oat-host-rule-combination,jit,,,true)) valgrindHOST_CORE_IMG_OUTS := valgrindHOST_CORE_OAT_OUTS := -$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32,false)) $(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false)) $(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false)) $(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false)) -$(eval $(call create-core-oat-host-rule-combination,jit,valgrind,32,false)) valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS) @@ -193,15 +175,11 @@ define create-core-oat-target-rules core_pic_infix := core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY) - ifeq ($(1),default) - core_compile_options += --compiler-backend=Quick - endif ifeq ($(1),optimizing) core_compile_options += --compiler-backend=Optimizing # With the optimizing compiler, we want to rerun dex2oat whenever there is # a dex2oat change to catch regressions early. 
core_dex2oat_dependency := $(DEX2OAT) - core_infix := -optimizing endif ifeq ($(1),interpreter) core_compile_options += --compiler-filter=interpret-only @@ -211,24 +189,16 @@ define create-core-oat-target-rules core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail core_infix := -interp-ac endif - ifeq ($(1),jit) - core_compile_options += --compiler-filter=verify-at-runtime - core_infix := -jit - endif - ifeq ($(1),default) - # Default has no infix, no compile options. - endif - ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),) + ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),) # Technically this test is not precise, but hopefully good enough. - $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing) + $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing) endif ifeq ($(2),pic) core_compile_options += --compile-pic - core_pic_infix := -pic endif ifeq ($(2),no-pic) - # No change for non-pic + core_pic_infix := -npic endif ifneq ($(filter-out pic no-pic,$(2)),) #Technically this test is not precise, but hopefully good enough. @@ -283,7 +253,7 @@ $$(core_oat_name): $$(core_image_name) core_pic_infix := endef # create-core-oat-target-rules -# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks. +# $(1): compiler - optimizing, interpreter or interpreter-access-checks. # $(2): wrapper. # $(3): dex2oat suffix. 
define create-core-oat-target-rule-combination @@ -296,19 +266,15 @@ define create-core-oat-target-rule-combination endif endef -$(eval $(call create-core-oat-target-rule-combination,default,,)) $(eval $(call create-core-oat-target-rule-combination,optimizing,,)) $(eval $(call create-core-oat-target-rule-combination,interpreter,,)) $(eval $(call create-core-oat-target-rule-combination,interp-ac,,)) -$(eval $(call create-core-oat-target-rule-combination,jit,,)) valgrindTARGET_CORE_IMG_OUTS := valgrindTARGET_CORE_OAT_OUTS := -$(eval $(call create-core-oat-target-rule-combination,default,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32)) -$(eval $(call create-core-oat-target-rule-combination,jit,valgrind,32)) valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS) diff --git a/build/art.go b/build/art.go index f694505fb4..ba5521a9ae 100644 --- a/build/art.go +++ b/build/art.go @@ -19,6 +19,7 @@ import ( "android/soong/android" "android/soong/cc" "fmt" + "sync" "github.com/google/blueprint" ) @@ -71,12 +72,6 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) { cflags = append(cflags, "-fstack-protector") } - // Are additional statically-linked ART host binaries - // (dex2oats, oatdumps, etc.) getting built? 
- if envTrue(ctx, "ART_BUILD_HOST_STATIC") { - cflags = append(cflags, "-DART_BUILD_HOST_STATIC=1") - } - return cflags, asflags } @@ -108,6 +103,11 @@ func deviceFlags(ctx android.BaseContext) []string { func hostFlags(ctx android.BaseContext) []string { var cflags []string hostFrameSizeLimit := 1736 + if len(ctx.AConfig().SanitizeHost()) > 0 { + // art/test/137-cfi/cfi.cc + // error: stack frame size of 1944 bytes in function 'Java_Main_unwindInProcess' + hostFrameSizeLimit = 6400 + } cflags = append(cflags, fmt.Sprintf("-Wframe-larger-than=%d", hostFrameSizeLimit), fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", hostFrameSizeLimit), @@ -122,7 +122,7 @@ func hostFlags(ctx android.BaseContext) []string { return cflags } -func (a *artGlobalDefaults) CustomizeProperties(ctx android.CustomizePropertiesContext) { +func globalDefaults(ctx android.LoadHookContext) { type props struct { Target struct { Android struct { @@ -143,9 +143,7 @@ func (a *artGlobalDefaults) CustomizeProperties(ctx android.CustomizePropertiesC ctx.AppendProperties(p) } -type artGlobalDefaults struct{} - -func (a *artCustomLinkerCustomizer) CustomizeProperties(ctx android.CustomizePropertiesContext) { +func customLinker(ctx android.LoadHookContext) { linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "") if linker != "" { type props struct { @@ -158,9 +156,7 @@ func (a *artCustomLinkerCustomizer) CustomizeProperties(ctx android.CustomizePro } } -type artCustomLinkerCustomizer struct{} - -func (a *artPrefer32BitCustomizer) CustomizeProperties(ctx android.CustomizePropertiesContext) { +func prefer32Bit(ctx android.LoadHookContext) { if envTrue(ctx, "HOST_PREFER_32_BIT") { type props struct { Target struct { @@ -176,28 +172,53 @@ func (a *artPrefer32BitCustomizer) CustomizeProperties(ctx android.CustomizeProp } } -type artPrefer32BitCustomizer struct{} +func testMap(config android.Config) map[string][]string { + return config.Once("artTests", func() interface{} { + return make(map[string][]string) + 
}).(map[string][]string) +} + +func testInstall(ctx android.InstallHookContext) { + testMap := testMap(ctx.AConfig()) + + var name string + if ctx.Host() { + name = "host_" + } else { + name = "device_" + } + name += ctx.Arch().ArchType.String() + "_" + ctx.ModuleName() + + artTestMutex.Lock() + defer artTestMutex.Unlock() + + tests := testMap[name] + tests = append(tests, ctx.Path().RelPathString()) + testMap[name] = tests +} + +var artTestMutex sync.Mutex func init() { soong.RegisterModuleType("art_cc_library", artLibrary) soong.RegisterModuleType("art_cc_binary", artBinary) soong.RegisterModuleType("art_cc_test", artTest) + soong.RegisterModuleType("art_cc_test_library", artTestLibrary) soong.RegisterModuleType("art_cc_defaults", artDefaultsFactory) soong.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory) } func artGlobalDefaultsFactory() (blueprint.Module, []interface{}) { - c := &artGlobalDefaults{} module, props := artDefaultsFactory() - android.AddCustomizer(module.(android.Module), c) + android.AddLoadHook(module, globalDefaults) return module, props } func artDefaultsFactory() (blueprint.Module, []interface{}) { - c := &codegenCustomizer{} - module, props := cc.DefaultsFactory(&c.codegenProperties) - android.AddCustomizer(module.(android.Module), c) + c := &codegenProperties{} + module, props := cc.DefaultsFactory(c) + android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, true) }) return module, props } @@ -206,9 +227,7 @@ func artLibrary() (blueprint.Module, []interface{}) { library, _ := cc.NewLibrary(android.HostAndDeviceSupported, true, true) module, props := library.Init() - c := &codegenCustomizer{} - android.AddCustomizer(library, c) - props = append(props, &c.codegenProperties) + props = installCodegenCustomizer(module, props, true) return module, props } @@ -217,8 +236,8 @@ func artBinary() (blueprint.Module, []interface{}) { binary, _ := cc.NewBinary(android.HostAndDeviceSupported) module, props := 
binary.Init() - android.AddCustomizer(binary, &artCustomLinkerCustomizer{}) - android.AddCustomizer(binary, &artPrefer32BitCustomizer{}) + android.AddLoadHook(module, customLinker) + android.AddLoadHook(module, prefer32Bit) return module, props } @@ -226,8 +245,22 @@ func artTest() (blueprint.Module, []interface{}) { test := cc.NewTest(android.HostAndDeviceSupported) module, props := test.Init() - android.AddCustomizer(test, &artCustomLinkerCustomizer{}) - android.AddCustomizer(test, &artPrefer32BitCustomizer{}) + props = installCodegenCustomizer(module, props, false) + + android.AddLoadHook(module, customLinker) + android.AddLoadHook(module, prefer32Bit) + android.AddInstallHook(module, testInstall) + return module, props +} + +func artTestLibrary() (blueprint.Module, []interface{}) { + test := cc.NewTestLibrary(android.HostAndDeviceSupported) + module, props := test.Init() + + props = installCodegenCustomizer(module, props, false) + + android.AddLoadHook(module, prefer32Bit) + android.AddInstallHook(module, testInstall) return module, props } diff --git a/build/codegen.go b/build/codegen.go index fde9420039..ba6f2142c9 100644 --- a/build/codegen.go +++ b/build/codegen.go @@ -22,11 +22,11 @@ import ( "android/soong/android" "sort" "strings" -) -func (a *codegenCustomizer) CustomizeProperties(ctx android.CustomizePropertiesContext) { - c := &a.codegenProperties.Codegen + "github.com/google/blueprint" +) +func codegen(ctx android.LoadHookContext, c *codegenProperties, library bool) { var hostArches, deviceArches []string e := envDefault(ctx, "ART_HOST_CODEGEN_ARCHS", "") @@ -43,54 +43,77 @@ func (a *codegenCustomizer) CustomizeProperties(ctx android.CustomizePropertiesC deviceArches = strings.Split(e, " ") } - type props struct { - Target struct { - Android *codegenArchProperties - Host *codegenArchProperties + addCodegenArchProperties := func(host bool, archName string) { + type props struct { + Target struct { + Android *CodegenCommonArchProperties + Host 
*CodegenCommonArchProperties + } } - } - addCodegenArchProperties := func(p *props, hod **codegenArchProperties, arch string) { - switch arch { + type libraryProps struct { + Target struct { + Android *CodegenLibraryArchProperties + Host *CodegenLibraryArchProperties + } + } + + var arch *codegenArchProperties + switch archName { case "arm": - *hod = &c.Arm + arch = &c.Codegen.Arm case "arm64": - *hod = &c.Arm64 + arch = &c.Codegen.Arm64 case "mips": - *hod = &c.Mips + arch = &c.Codegen.Mips case "mips64": - *hod = &c.Mips64 + arch = &c.Codegen.Mips64 case "x86": - *hod = &c.X86 + arch = &c.Codegen.X86 case "x86_64": - *hod = &c.X86_64 + arch = &c.Codegen.X86_64 default: - ctx.ModuleErrorf("Unknown codegen architecture %q", arch) + ctx.ModuleErrorf("Unknown codegen architecture %q", archName) return } + + p := &props{} + l := &libraryProps{} + if host { + p.Target.Host = &arch.CodegenCommonArchProperties + l.Target.Host = &arch.CodegenLibraryArchProperties + } else { + p.Target.Android = &arch.CodegenCommonArchProperties + l.Target.Android = &arch.CodegenLibraryArchProperties + } + ctx.AppendProperties(p) + if library { + ctx.AppendProperties(l) + } } - for _, a := range deviceArches { - p := &props{} - addCodegenArchProperties(p, &p.Target.Android, a) + for _, arch := range deviceArches { + addCodegenArchProperties(false, arch) if ctx.Failed() { return } } - for _, a := range hostArches { - p := &props{} - addCodegenArchProperties(p, &p.Target.Host, a) + for _, arch := range hostArches { + addCodegenArchProperties(true, arch) if ctx.Failed() { return } } } -type codegenArchProperties struct { +type CodegenCommonArchProperties struct { Srcs []string Cflags []string +} + +type CodegenLibraryArchProperties struct { Static struct { Whole_static_libs []string } @@ -99,6 +122,11 @@ type codegenArchProperties struct { } } +type codegenArchProperties struct { + CodegenCommonArchProperties + CodegenLibraryArchProperties +} + type codegenProperties struct { Codegen struct { 
Arm, Arm64, Mips, Mips64, X86, X86_64 codegenArchProperties @@ -106,10 +134,11 @@ type codegenProperties struct { } type codegenCustomizer struct { + library bool codegenProperties codegenProperties } -func defaultDeviceCodegenArches(ctx android.CustomizePropertiesContext) []string { +func defaultDeviceCodegenArches(ctx android.LoadHookContext) []string { arches := make(map[string]bool) for _, a := range ctx.DeviceConfig().Arches() { s := a.ArchType.String() @@ -129,3 +158,11 @@ func defaultDeviceCodegenArches(ctx android.CustomizePropertiesContext) []string sort.Strings(ret) return ret } + +func installCodegenCustomizer(module blueprint.Module, props []interface{}, library bool) []interface{} { + c := &codegenProperties{} + android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, library) }) + props = append(props, c) + + return props +} diff --git a/build/makevars.go b/build/makevars.go index 5655c55a6a..1faa0f6f36 100644 --- a/build/makevars.go +++ b/build/makevars.go @@ -14,7 +14,12 @@ package art -import "android/soong/android" +import ( + "sort" + "strings" + + "android/soong/android" +) var ( pctx = android.NewPackageContext("android/soong/art") @@ -27,4 +32,16 @@ func init() { func makeVarsProvider(ctx android.MakeVarsContext) { ctx.Strict("LIBART_IMG_HOST_BASE_ADDRESS", ctx.Config().LibartImgHostBaseAddress()) ctx.Strict("LIBART_IMG_TARGET_BASE_ADDRESS", ctx.Config().LibartImgDeviceBaseAddress()) + + testMap := testMap(ctx.Config()) + var testNames []string + for name := range testMap { + testNames = append(testNames, name) + } + + sort.Strings(testNames) + + for _, name := range testNames { + ctx.Strict("ART_TEST_LIST_"+name, strings.Join(testMap[name], " ")) + } } diff --git a/cmdline/Android.bp b/cmdline/Android.bp new file mode 100644 index 0000000000..c811cbdd33 --- /dev/null +++ b/cmdline/Android.bp @@ -0,0 +1,23 @@ +// +// Copyright (C) 2016 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 
(the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +art_cc_test { + name: "art_cmdline_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["cmdline_parser_test.cc"], +} diff --git a/compiler/Android.bp b/compiler/Android.bp index 969747505b..09c53b6889 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -280,3 +280,153 @@ art_cc_library { }, shared_libs: ["libartd"], } + +art_cc_library { + name: "libart-compiler-gtest", + defaults: ["libart-gtest-defaults"], + srcs: ["common_compiler_test.cc"], + shared_libs: [ + "libartd-compiler", + "libart-runtime-gtest", + ], +} + +art_cc_test { + name: "art_compiler_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: [ + "compiled_method_test.cc", + "debug/dwarf/dwarf_test.cc", + "driver/compiled_method_storage_test.cc", + "driver/compiler_driver_test.cc", + "elf_writer_test.cc", + "exception_test.cc", + "image_test.cc", + "jni/jni_compiler_test.cc", + "linker/multi_oat_relative_patcher_test.cc", + "linker/output_stream_test.cc", + "oat_test.cc", + "optimizing/bounds_check_elimination_test.cc", + "optimizing/dominator_test.cc", + "optimizing/find_loops_test.cc", + "optimizing/graph_checker_test.cc", + "optimizing/graph_test.cc", + "optimizing/gvn_test.cc", + "optimizing/induction_var_analysis_test.cc", + "optimizing/induction_var_range_test.cc", + "optimizing/licm_test.cc", + "optimizing/live_interval_test.cc", + "optimizing/nodes_test.cc", + "optimizing/parallel_move_test.cc", + "optimizing/pretty_printer_test.cc", + 
"optimizing/reference_type_propagation_test.cc", + "optimizing/side_effects_test.cc", + "optimizing/ssa_test.cc", + "optimizing/stack_map_test.cc", + "optimizing/suspend_check_test.cc", + "utils/dedupe_set_test.cc", + "utils/intrusive_forward_list_test.cc", + "utils/string_reference_test.cc", + "utils/swap_space_test.cc", + "utils/test_dex_file_builder_test.cc", + + "jni/jni_cfi_test.cc", + "optimizing/codegen_test.cc", + "optimizing/optimizing_cfi_test.cc", + ], + + codegen: { + arm: { + srcs: [ + "linker/arm/relative_patcher_thumb2_test.cc", + "utils/arm/managed_register_arm_test.cc", + ], + }, + arm64: { + srcs: [ + "linker/arm64/relative_patcher_arm64_test.cc", + "utils/arm64/managed_register_arm64_test.cc", + ], + }, + mips: { + srcs: [ + "linker/mips/relative_patcher_mips_test.cc", + "linker/mips/relative_patcher_mips32r6_test.cc", + ], + }, + x86: { + srcs: [ + "linker/x86/relative_patcher_x86_test.cc", + "utils/x86/managed_register_x86_test.cc", + + // These tests are testing architecture-independent + // functionality, but happen to use x86 codegen as part of the + // test. 
+ "optimizing/constant_folding_test.cc", + "optimizing/dead_code_elimination_test.cc", + "optimizing/linearize_test.cc", + "optimizing/live_ranges_test.cc", + "optimizing/liveness_test.cc", + "optimizing/register_allocator_test.cc", + ], + }, + x86_64: { + srcs: [ + "linker/x86_64/relative_patcher_x86_64_test.cc", + ], + }, + }, + + shared_libs: [ + "libartd-compiler", + "libvixld-arm", + "libvixld-arm64", + + "libbacktrace", + "libnativeloader", + ], +} + +art_cc_test { + name: "art_compiler_host_tests", + device_supported: false, + defaults: [ + "art_gtest_defaults", + ], + codegen: { + arm: { + srcs: [ + "utils/arm/assembler_thumb2_test.cc", + "utils/assembler_thumb_test.cc", + ], + }, + mips: { + srcs: [ + "utils/mips/assembler_mips_test.cc", + "utils/mips/assembler_mips32r6_test.cc", + ], + }, + mips64: { + srcs: [ + "utils/mips64/assembler_mips64_test.cc", + ], + }, + x86: { + srcs: [ + "utils/x86/assembler_x86_test.cc", + ], + }, + x86_64: { + srcs: [ + "utils/x86_64/assembler_x86_64_test.cc", + ], + }, + }, + shared_libs: [ + "libartd-compiler", + "libvixld-arm", + "libvixld-arm64", + ], +} diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h index 2a81804f64..1a87448e80 100644 --- a/compiler/compiled_method.h +++ b/compiler/compiled_method.h @@ -23,10 +23,10 @@ #include <vector> #include "arch/instruction_set.h" +#include "base/array_ref.h" #include "base/bit_utils.h" #include "base/length_prefixed_array.h" #include "method_reference.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/compiler.h b/compiler/compiler.h index ed42958a76..9a69456b5a 100644 --- a/compiler/compiler.h +++ b/compiler/compiler.h @@ -25,10 +25,14 @@ namespace art { namespace jit { class JitCodeCache; } +namespace mirror { + class DexCache; +} class ArtMethod; class CompilerDriver; class CompiledMethod; +template<class T> class Handle; class OatWriter; class Compiler { diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h 
index 146d9fddf5..28f108423e 100644 --- a/compiler/debug/dwarf/headers.h +++ b/compiler/debug/dwarf/headers.h @@ -19,13 +19,13 @@ #include <cstdint> +#include "base/array_ref.h" #include "debug/dwarf/debug_frame_opcode_writer.h" #include "debug/dwarf/debug_info_entry_writer.h" #include "debug/dwarf/debug_line_opcode_writer.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/register.h" #include "debug/dwarf/writer.h" -#include "utils/array_ref.h" namespace art { namespace dwarf { diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc index 5bfdd16083..d1c10a9246 100644 --- a/compiler/debug/elf_debug_writer.cc +++ b/compiler/debug/elf_debug_writer.cc @@ -18,6 +18,7 @@ #include <vector> +#include "base/array_ref.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/elf_compilation_unit.h" #include "debug/elf_debug_frame_writer.h" @@ -29,7 +30,6 @@ #include "debug/method_debug_info.h" #include "elf_builder.h" #include "linker/vector_output_stream.h" -#include "utils/array_ref.h" namespace art { namespace debug { diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h index b0542c7ac6..07f7229827 100644 --- a/compiler/debug/elf_debug_writer.h +++ b/compiler/debug/elf_debug_writer.h @@ -19,11 +19,11 @@ #include <vector> +#include "base/array_ref.h" #include "base/macros.h" #include "base/mutex.h" #include "debug/dwarf/dwarf_constants.h" #include "elf_builder.h" -#include "utils/array_ref.h" namespace art { class OatHeader; diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 3ce786e008..c902d289e9 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -90,7 +90,7 @@ class DexCompiler { // Compiles a virtual method invocation into a quick virtual method invocation. // The method index is replaced by the vtable index where the corresponding - // AbstractMethod can be found. 
Therefore, this does not involve any resolution + // Executable can be found. Therefore, this does not involve any resolution // at runtime. // Since the method index is encoded with 16 bits, we can replace it only if the // vtable index can be encoded with 16 bits too. diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h index 1f696863b6..824194c7bd 100644 --- a/compiler/dex/quick_compiler_callbacks.h +++ b/compiler/dex/quick_compiler_callbacks.h @@ -29,8 +29,10 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks { QuickCompilerCallbacks(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map, CompilerCallbacks::CallbackMode mode) - : CompilerCallbacks(mode), verification_results_(verification_results), - method_inliner_map_(method_inliner_map) { + : CompilerCallbacks(mode), + verification_results_(verification_results), + method_inliner_map_(method_inliner_map), + verifier_deps_(nullptr) { CHECK(verification_results != nullptr); CHECK(method_inliner_map != nullptr); } @@ -47,9 +49,18 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks { return true; } + verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { + return verifier_deps_; + } + + void SetVerifierDeps(verifier::VerifierDeps* deps) { + verifier_deps_ = deps; + } + private: VerificationResults* const verification_results_; DexFileToMethodInlinerMap* const method_inliner_map_; + verifier::VerifierDeps* verifier_deps_; }; } // namespace art diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc index 4bcd59ac90..e19fb7b300 100644 --- a/compiler/dex/verified_method.cc +++ b/compiler/dex/verified_method.cc @@ -231,7 +231,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi inst->VRegA_21c())); const verifier::RegType& cast_type = method_verifier->ResolveCheckedClass(inst->VRegB_21c()); - is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type); + 
is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier); } else { const verifier::RegType& array_type(line->GetRegisterType(method_verifier, inst->VRegB_23x())); @@ -243,7 +243,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi inst->VRegA_23x())); const verifier::RegType& component_type = method_verifier->GetRegTypeCache() ->GetComponentType(array_type, method_verifier->GetClassLoader()); - is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type); + is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type, method_verifier); } } if (is_safe_cast) { diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h index 8674abf815..124b5a6e25 100644 --- a/compiler/driver/compiled_method_storage.h +++ b/compiler/driver/compiled_method_storage.h @@ -20,9 +20,9 @@ #include <iosfwd> #include <memory> +#include "base/array_ref.h" #include "base/length_prefixed_array.h" #include "base/macros.h" -#include "utils/array_ref.h" #include "utils/dedupe_set.h" #include "utils/swap_space.h" diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 53e068edf2..a149c07beb 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -26,6 +26,7 @@ #include "art_field-inl.h" #include "art_method-inl.h" +#include "base/array_ref.h" #include "base/bit_vector.h" #include "base/enums.h" #include "base/stl_util.h" @@ -67,7 +68,6 @@ #include "thread_pool.h" #include "trampolines/trampoline_compiler.h" #include "transaction.h" -#include "utils/array_ref.h" #include "utils/dex_cache_arrays_layout-inl.h" #include "utils/swap_space.h" #include "verifier/method_verifier.h" @@ -2474,7 +2474,7 @@ class InitializeClassVisitor : public CompilationVisitor { // mode which prevents the GC from visiting objects modified during the transaction. // Ensure GC is not run so don't access freed objects when aborting transaction. 
- ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end"); + ScopedAssertNoThreadSuspension ants("Transaction end"); runtime->ExitTransactionMode(); if (!success) { diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index fbc1edd0ea..ee21efa854 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -24,6 +24,7 @@ #include "arch/instruction_set.h" #include "base/arena_allocator.h" +#include "base/array_ref.h" #include "base/bit_utils.h" #include "base/mutex.h" #include "base/timing_logger.h" @@ -39,7 +40,6 @@ #include "runtime.h" #include "safe_map.h" #include "thread_pool.h" -#include "utils/array_ref.h" #include "utils/dex_cache_arrays_layout.h" namespace art { diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h index 7f2e1931d0..02831c9dc7 100644 --- a/compiler/elf_builder.h +++ b/compiler/elf_builder.h @@ -21,13 +21,13 @@ #include "arch/instruction_set.h" #include "arch/mips/instruction_set_features_mips.h" +#include "base/array_ref.h" #include "base/bit_utils.h" #include "base/casts.h" #include "base/unix_file/fd_file.h" #include "elf_utils.h" #include "leb128.h" #include "linker/error_delaying_output_stream.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h index c9ea0083d5..f8f91029d4 100644 --- a/compiler/elf_writer.h +++ b/compiler/elf_writer.h @@ -22,10 +22,10 @@ #include <string> #include <vector> +#include "base/array_ref.h" #include "base/macros.h" #include "base/mutex.h" #include "os.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 7634510457..cdb57a98ad 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -48,12 +48,12 @@ #include "intern_table.h" #include "linear_alloc.h" #include "lock_word.h" -#include "mirror/abstract_method.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include 
"mirror/class_loader.h" #include "mirror/dex_cache.h" #include "mirror/dex_cache-inl.h" +#include "mirror/executable.h" #include "mirror/method.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -868,7 +868,7 @@ void ImageWriter::PruneNonImageClasses() { // Clear references to removed classes from the DexCaches. ArtMethod* resolution_method = runtime->GetResolutionMethod(); - ScopedAssertNoThreadSuspension sa(self, __FUNCTION__); + ScopedAssertNoThreadSuspension sa(__FUNCTION__); ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable ReaderMutexLock mu2(self, *class_linker->DexLock()); for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) { @@ -1989,14 +1989,10 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) { } else { if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) { // Need to go update the ArtMethod. - auto* dest = down_cast<mirror::AbstractMethod*>(copy); - auto* src = down_cast<mirror::AbstractMethod*>(orig); + auto* dest = down_cast<mirror::Executable*>(copy); + auto* src = down_cast<mirror::Executable*>(orig); ArtMethod* src_method = src->GetArtMethod(); - auto it = native_object_relocations_.find(src_method); - CHECK(it != native_object_relocations_.end()) - << "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method); - dest->SetArtMethod( - reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset)); + dest->SetArtMethod(GetImageMethodAddress(src_method)); } else if (!klass->IsArrayClass()) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) { diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h index 3d89146250..f541d8fa19 100644 --- a/compiler/jni/quick/calling_convention.h +++ b/compiler/jni/quick/calling_convention.h @@ -18,11 +18,11 @@ #define 
ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_ #include "base/arena_object.h" +#include "base/array_ref.h" #include "base/enums.h" #include "handle_scope.h" #include "primitive.h" #include "thread.h" -#include "utils/array_ref.h" #include "utils/managed_register.h" namespace art { diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h index 48ad1059b0..a4a80185dc 100644 --- a/compiler/linker/arm64/relative_patcher_arm64.h +++ b/compiler/linker/arm64/relative_patcher_arm64.h @@ -17,8 +17,8 @@ #ifndef ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_ #define ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_ +#include "base/array_ref.h" #include "linker/arm/relative_patcher_arm_base.h" -#include "utils/array_ref.h" namespace art { namespace linker { diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h index a22b9f2c2d..15e955b2c6 100644 --- a/compiler/linker/relative_patcher.h +++ b/compiler/linker/relative_patcher.h @@ -21,9 +21,9 @@ #include "arch/instruction_set.h" #include "arch/instruction_set_features.h" +#include "base/array_ref.h" #include "base/macros.h" #include "method_reference.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index d21f33e46f..304b31ca84 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -19,6 +19,7 @@ #include "arch/instruction_set.h" #include "arch/instruction_set_features.h" +#include "base/array_ref.h" #include "base/macros.h" #include "compiled_method.h" #include "dex/quick/dex_file_to_method_inliner_map.h" @@ -31,7 +32,6 @@ #include "method_reference.h" #include "oat.h" #include "oat_quick_method_header.h" -#include "utils/array_ref.h" #include "vector_output_stream.h" namespace art { diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 78e9ca91b7..24d102d4c0 100644 --- a/compiler/oat_test.cc 
+++ b/compiler/oat_test.cc @@ -453,7 +453,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { EXPECT_EQ(72U, sizeof(OatHeader)); EXPECT_EQ(4U, sizeof(OatMethodOffsets)); EXPECT_EQ(20U, sizeof(OatQuickMethodHeader)); - EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)), + EXPECT_EQ(163 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)), sizeof(QuickEntryPoints)); } diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 43e01d54a6..d629c0c887 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -259,7 +259,16 @@ class OatWriter::OatDexFile { // Data to write to a separate section. dchecked_vector<uint32_t> class_offsets_; + void InitTypeLookupTable(const DexFile& dex_file, uint8_t* storage) const { + lookup_table_.reset(TypeLookupTable::Create(dex_file, storage)); + } + + TypeLookupTable* GetTypeLookupTable() const { + return lookup_table_.get(); + } + private: + mutable std::unique_ptr<TypeLookupTable> lookup_table_; size_t GetClassOffsetsRawSize() const { return class_offsets_.size() * sizeof(class_offsets_[0]); } @@ -994,7 +1003,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { out_(out), file_offset_(file_offset), soa_(Thread::Current()), - no_thread_suspension_(soa_.Self(), "OatWriter patching"), + no_thread_suspension_("OatWriter patching"), class_linker_(Runtime::Current()->GetClassLinker()), dex_cache_(nullptr) { patched_code_.reserve(16 * KB); @@ -1036,7 +1045,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); // No thread suspension since dex_cache_ that may get invalidated if that occurs. - ScopedAssertNoThreadSuspension tsc(Thread::Current(), __FUNCTION__); + ScopedAssertNoThreadSuspension tsc(__FUNCTION__); if (compiled_method != nullptr) { // ie. 
not an abstract method size_t file_offset = file_offset_; OutputStream* out = out_; @@ -2285,9 +2294,9 @@ bool OatWriter::WriteTypeLookupTables( } // Create the lookup table. When `nullptr` is given as the storage buffer, - // TypeLookupTable allocates its own and DexFile takes ownership. - opened_dex_files[i]->CreateTypeLookupTable(/* storage */ nullptr); - TypeLookupTable* table = opened_dex_files[i]->GetTypeLookupTable(); + // TypeLookupTable allocates its own and OatDexFile takes ownership. + oat_dex_file->InitTypeLookupTable(*opened_dex_files[i], /* storage */ nullptr); + TypeLookupTable* table = oat_dex_file->GetTypeLookupTable(); // Type tables are required to be 4 byte aligned. size_t initial_offset = oat_size_; diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index 77525f1a32..dd7d699eee 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -21,6 +21,7 @@ #include <cstddef> #include <memory> +#include "base/array_ref.h" #include "base/dchecked_vector.h" #include "linker/relative_patcher.h" // For linker::RelativePatcherTargetProvider. #include "mem_map.h" @@ -29,7 +30,6 @@ #include "oat.h" #include "os.h" #include "safe_map.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc index 8aefd9ea1f..994d394a2a 100644 --- a/compiler/optimizing/bounds_check_elimination.cc +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -887,7 +887,7 @@ class BCEVisitor : public HGraphVisitor { bool needs_finite_test = false; bool needs_taken_test = false; if (DynamicBCESeemsProfitable(loop, bounds_check->GetBlock()) && - induction_range_.CanGenerateCode( + induction_range_.CanGenerateRange( bounds_check, index, &needs_finite_test, &needs_taken_test) && CanHandleInfiniteLoop(loop, index, needs_finite_test) && // Do this test last, since it may generate code. 
@@ -1403,10 +1403,10 @@ class BCEVisitor : public HGraphVisitor { // whether code generation on the original and, thus, related bounds check was possible. // It handles either loop invariants (lower is not set) or unit strides. if (other_c == max_c) { - induction_range_.GenerateRangeCode( + induction_range_.GenerateRange( other_bounds_check, other_index, GetGraph(), block, &max_lower, &max_upper); } else if (other_c == min_c && base != nullptr) { - induction_range_.GenerateRangeCode( + induction_range_.GenerateRange( other_bounds_check, other_index, GetGraph(), block, &min_lower, &min_upper); } ReplaceInstruction(other_bounds_check, other_index); @@ -1699,11 +1699,8 @@ class BCEVisitor : public HGraphVisitor { // Insert the taken-test to see if the loop body is entered. If the // loop isn't entered at all, it jumps around the deoptimization block. if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); // placeholder - HInstruction* condition = nullptr; - induction_range_.GenerateTakenTest(header->GetLastInstruction(), - GetGraph(), - if_block, - &condition); + HInstruction* condition = induction_range_.GenerateTakenTest( + header->GetLastInstruction(), GetGraph(), if_block); DCHECK(condition != nullptr); if_block->RemoveInstruction(if_block->GetLastInstruction()); if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition)); diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 1444931b9c..cf633df496 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -1090,13 +1090,6 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo } } -bool CodeGenerator::IsImplicitNullCheckAllowed(HNullCheck* null_check) const { - return compiler_options_.GetImplicitNullChecks() && - // Null checks which might throw into a catch block need to save live - // registers and therefore cannot be done implicitly. 
- !null_check->CanThrowIntoCatchBlock(); -} - bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) { HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves(); @@ -1105,6 +1098,10 @@ bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) { } void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) { + if (!compiler_options_.GetImplicitNullChecks()) { + return; + } + // If we are from a static path don't record the pc as we can't throw NPE. // NB: having the checks here makes the code much less verbose in the arch // specific code generators. @@ -1123,16 +1120,35 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) { // and needs to record the pc. if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) { HNullCheck* null_check = first_prev_not_move->AsNullCheck(); - if (IsImplicitNullCheckAllowed(null_check)) { - // TODO: The parallel moves modify the environment. Their changes need to be - // reverted otherwise the stack maps at the throw point will not be correct. - RecordPcInfo(null_check, null_check->GetDexPc()); - } + // TODO: The parallel moves modify the environment. Their changes need to be + // reverted otherwise the stack maps at the throw point will not be correct. + RecordPcInfo(null_check, null_check->GetDexPc()); + } +} + +LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction, + RegisterSet caller_saves) { + // Note: Using kNoCall allows the method to be treated as leaf (and eliminate the + // HSuspendCheck from entry block). However, it will still get a valid stack frame + // because the HNullCheck needs an environment. + LocationSummary::CallKind call_kind = LocationSummary::kNoCall; + // When throwing from a try block, we may need to retrieve dalvik registers from + // physical registers and we also need to set up stack mask for GC. This is + // implicitly achieved by passing kCallOnSlowPath to the LocationSummary. 
+ bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock(); + if (can_throw_into_catch_block) { + call_kind = LocationSummary::kCallOnSlowPath; } + LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) { + locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers. + } + DCHECK(!instruction->HasUses()); + return locations; } void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) { - if (IsImplicitNullCheckAllowed(instruction)) { + if (compiler_options_.GetImplicitNullChecks()) { MaybeRecordStat(kImplicitNullCheckGenerated); GenerateImplicitNullCheck(instruction); } else { @@ -1172,37 +1188,51 @@ void CodeGenerator::EmitParallelMoves(Location from1, GetMoveResolver()->EmitNativeCode(¶llel_move); } -void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) { +void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + SlowPathCode* slow_path) { // Ensure that the call kind indication given to the register allocator is - // coherent with the runtime call generated, and that the GC side effect is - // set when required. + // coherent with the runtime call generated. 
if (slow_path == nullptr) { DCHECK(instruction->GetLocations()->WillCall()) << "instruction->DebugName()=" << instruction->DebugName(); - DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC())) - << "instruction->DebugName()=" << instruction->DebugName() - << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString(); } else { DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal()) << "instruction->DebugName()=" << instruction->DebugName() << " slow_path->GetDescription()=" << slow_path->GetDescription(); - DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) || - // When (non-Baker) read barriers are enabled, some instructions - // use a slow path to emit a read barrier, which does not trigger - // GC. - (kEmitCompilerReadBarrier && - !kUseBakerReadBarrier && - (instruction->IsInstanceFieldGet() || - instruction->IsStaticFieldGet() || - instruction->IsArrayGet() || - instruction->IsLoadClass() || - instruction->IsLoadString() || - instruction->IsInstanceOf() || - instruction->IsCheckCast() || - (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified())))) - << "instruction->DebugName()=" << instruction->DebugName() - << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString() - << " slow_path->GetDescription()=" << slow_path->GetDescription(); + } + + // Check that the GC side effect is set when required. 
+ // TODO: Reverse EntrypointCanTriggerGC + if (EntrypointCanTriggerGC(entrypoint)) { + if (slow_path == nullptr) { + DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC())) + << "instruction->DebugName()=" << instruction->DebugName() + << " instruction->GetSideEffects().ToString()=" + << instruction->GetSideEffects().ToString(); + } else { + DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) || + // When (non-Baker) read barriers are enabled, some instructions + // use a slow path to emit a read barrier, which does not trigger + // GC. + (kEmitCompilerReadBarrier && + !kUseBakerReadBarrier && + (instruction->IsInstanceFieldGet() || + instruction->IsStaticFieldGet() || + instruction->IsArrayGet() || + instruction->IsLoadClass() || + instruction->IsLoadString() || + instruction->IsInstanceOf() || + instruction->IsCheckCast() || + (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified())))) + << "instruction->DebugName()=" << instruction->DebugName() + << " instruction->GetSideEffects().ToString()=" + << instruction->GetSideEffects().ToString() + << " slow_path->GetDescription()=" << slow_path->GetDescription(); + } + } else { + // The GC side effect is not required for the instruction. But the instruction might still have + // it, for example if it calls other entrypoints requiring it. } // Check the coherency of leaf information. 
@@ -1252,7 +1282,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo } const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); - for (size_t i : LowToHighBits(fp_spills)) { + for (uint32_t i : LowToHighBits(fp_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); saved_fpu_stack_offsets_[i] = stack_offset; @@ -1271,7 +1301,7 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* } const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); - for (size_t i : LowToHighBits(fp_spills)) { + for (uint32_t i : LowToHighBits(fp_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i); diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index 0c60a98139..c0c798d862 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -313,6 +313,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { bool CanMoveNullCheckToUser(HNullCheck* null_check); void MaybeRecordImplicitNullCheck(HInstruction* instruction); + LocationSummary* CreateThrowingSlowPathLocations( + HInstruction* instruction, RegisterSet caller_saves = RegisterSet::Empty()); void GenerateNullCheck(HNullCheck* null_check); virtual void GenerateImplicitNullCheck(HNullCheck* null_check) = 0; virtual void GenerateExplicitNullCheck(HNullCheck* null_check) = 0; @@ -322,12 +324,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { // TODO: Replace with a catch-entering instruction that records the environment. 
void RecordCatchBlockInfo(); - // Returns true if implicit null checks are allowed in the compiler options - // and if the null check is not inside a try block. We currently cannot do - // implicit null checks in that case because we need the NullCheckSlowPath to - // save live registers, which may be needed by the runtime to set catch phis. - bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const; - // TODO: Avoid creating the `std::unique_ptr` here. void AddSlowPath(SlowPathCode* slow_path) { slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path)); @@ -409,7 +405,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { // Perfoms checks pertaining to an InvokeRuntime call. - void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path); + void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint, + HInstruction* instruction, + SlowPathCode* slow_path); // Perfoms checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call. static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction, @@ -582,6 +580,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { core_spill_mask_(0), fpu_spill_mask_(0), first_register_slot_in_slow_path_(0), + allocated_registers_(RegisterSet::Empty()), blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers, kArenaAllocCodeGenerator)), blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers, @@ -713,6 +712,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { bool is_leaf_; // Whether an instruction in the graph accesses the current method. + // TODO: Rename: this actually indicates that some instruction in the method + // needs the environment including a valid stack frame. 
bool requires_current_method_; friend class OptimizingCFITest; diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 3cc2598f8f..a052873afd 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -63,9 +63,188 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7; #define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value() -class NullCheckSlowPathARM : public SlowPathCode { +static constexpr int kRegListThreshold = 4; + +// SaveLiveRegisters and RestoreLiveRegisters from SlowPathCodeARM operate on sets of S registers, +// for each live D registers they treat two corresponding S registers as live ones. +// +// Two following functions (SaveContiguousSRegisterList, RestoreContiguousSRegisterList) build +// from a list of contiguous S registers a list of contiguous D registers (processing first/last +// S registers corner cases) and save/restore this new list treating them as D registers. +// - decreasing code size +// - avoiding hazards on Cortex-A57, when a pair of S registers for an actual live D register is +// restored and then used in regular non SlowPath code as D register. +// +// For the following example (v means the S register is live): +// D names: | D0 | D1 | D2 | D4 | ... +// S names: | S0 | S1 | S2 | S3 | S4 | S5 | S6 | S7 | ... +// Live? | | v | v | v | v | v | v | | ... +// +// S1 and S6 will be saved/restored independently; D registers list (D1, D2) will be processed +// as D registers. 
+static size_t SaveContiguousSRegisterList(size_t first, + size_t last, + CodeGenerator* codegen, + size_t stack_offset) { + DCHECK_LE(first, last); + if ((first == last) && (first == 0)) { + stack_offset += codegen->SaveFloatingPointRegister(stack_offset, first); + return stack_offset; + } + if (first % 2 == 1) { + stack_offset += codegen->SaveFloatingPointRegister(stack_offset, first++); + } + + bool save_last = false; + if (last % 2 == 0) { + save_last = true; + --last; + } + + if (first < last) { + DRegister d_reg = static_cast<DRegister>(first / 2); + DCHECK_EQ((last - first + 1) % 2, 0u); + size_t number_of_d_regs = (last - first + 1) / 2; + + if (number_of_d_regs == 1) { + __ StoreDToOffset(d_reg, SP, stack_offset); + } else if (number_of_d_regs > 1) { + __ add(IP, SP, ShifterOperand(stack_offset)); + __ vstmiad(IP, d_reg, number_of_d_regs); + } + stack_offset += number_of_d_regs * kArmWordSize * 2; + } + + if (save_last) { + stack_offset += codegen->SaveFloatingPointRegister(stack_offset, last + 1); + } + + return stack_offset; +} + +static size_t RestoreContiguousSRegisterList(size_t first, + size_t last, + CodeGenerator* codegen, + size_t stack_offset) { + DCHECK_LE(first, last); + if ((first == last) && (first == 0)) { + stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, first); + return stack_offset; + } + if (first % 2 == 1) { + stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, first++); + } + + bool restore_last = false; + if (last % 2 == 0) { + restore_last = true; + --last; + } + + if (first < last) { + DRegister d_reg = static_cast<DRegister>(first / 2); + DCHECK_EQ((last - first + 1) % 2, 0u); + size_t number_of_d_regs = (last - first + 1) / 2; + if (number_of_d_regs == 1) { + __ LoadDFromOffset(d_reg, SP, stack_offset); + } else if (number_of_d_regs > 1) { + __ add(IP, SP, ShifterOperand(stack_offset)); + __ vldmiad(IP, d_reg, number_of_d_regs); + } + stack_offset += number_of_d_regs * kArmWordSize * 2; + } + + 
if (restore_last) { + stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, last + 1); + } + + return stack_offset; +} + +void SlowPathCodeARM::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + size_t orig_offset = stack_offset; + + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + // If the register holds an object, update the stack mask. + if (locations->RegisterContainsObject(i)) { + locations->SetStackBit(stack_offset / kVRegSize); + } + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_core_stack_offsets_[i] = stack_offset; + stack_offset += kArmWordSize; + } + + int reg_num = POPCOUNT(core_spills); + if (reg_num != 0) { + if (reg_num > kRegListThreshold) { + __ StoreList(RegList(core_spills), orig_offset); + } else { + stack_offset = orig_offset; + for (uint32_t i : LowToHighBits(core_spills)) { + stack_offset += codegen->SaveCoreRegister(stack_offset, i); + } + } + } + + uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + orig_offset = stack_offset; + for (uint32_t i : LowToHighBits(fp_spills)) { + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_fpu_stack_offsets_[i] = stack_offset; + stack_offset += kArmWordSize; + } + + stack_offset = orig_offset; + while (fp_spills != 0u) { + uint32_t begin = CTZ(fp_spills); + uint32_t tmp = fp_spills + (1u << begin); + fp_spills &= tmp; // Clear the contiguous range of 1s. + uint32_t end = (tmp == 0u) ? 32u : CTZ(tmp); // CTZ(0) is undefined. 
+ stack_offset = SaveContiguousSRegisterList(begin, end - 1, codegen, stack_offset); + } + DCHECK_LE(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); +} + +void SlowPathCodeARM::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { + size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); + size_t orig_offset = stack_offset; + + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + stack_offset += kArmWordSize; + } + + int reg_num = POPCOUNT(core_spills); + if (reg_num != 0) { + if (reg_num > kRegListThreshold) { + __ LoadList(RegList(core_spills), orig_offset); + } else { + stack_offset = orig_offset; + for (uint32_t i : LowToHighBits(core_spills)) { + stack_offset += codegen->RestoreCoreRegister(stack_offset, i); + } + } + } + + uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + while (fp_spills != 0u) { + uint32_t begin = CTZ(fp_spills); + uint32_t tmp = fp_spills + (1u << begin); + fp_spills &= tmp; // Clear the contiguous range of 1s. + uint32_t end = (tmp == 0u) ? 32u : CTZ(tmp); // CTZ(0) is undefined. 
+ stack_offset = RestoreContiguousSRegisterList(begin, end - 1, codegen, stack_offset); + } + DCHECK_LE(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); +} + +class NullCheckSlowPathARM : public SlowPathCodeARM { public: - explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCode(instruction) {} + explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCodeARM(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); @@ -89,17 +268,13 @@ class NullCheckSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM); }; -class DivZeroCheckSlowPathARM : public SlowPathCode { +class DivZeroCheckSlowPathARM : public SlowPathCodeARM { public: - explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCode(instruction) {} + explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCodeARM(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); __ Bind(GetEntryLabel()); - if (instruction_->CanThrowIntoCatchBlock()) { - // Live registers will be restored in the catch block if caught. 
- SaveLiveRegisters(codegen, instruction_->GetLocations()); - } arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } @@ -112,10 +287,10 @@ class DivZeroCheckSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM); }; -class SuspendCheckSlowPathARM : public SlowPathCode { +class SuspendCheckSlowPathARM : public SlowPathCodeARM { public: SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor) - : SlowPathCode(instruction), successor_(successor) {} + : SlowPathCodeARM(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); @@ -150,10 +325,10 @@ class SuspendCheckSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM); }; -class BoundsCheckSlowPathARM : public SlowPathCode { +class BoundsCheckSlowPathARM : public SlowPathCodeARM { public: explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction) - : SlowPathCode(instruction) {} + : SlowPathCodeARM(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); @@ -190,13 +365,13 @@ class BoundsCheckSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM); }; -class LoadClassSlowPathARM : public SlowPathCode { +class LoadClassSlowPathARM : public SlowPathCodeARM { public: LoadClassSlowPathARM(HLoadClass* cls, HInstruction* at, uint32_t dex_pc, bool do_clinit) - : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { + : SlowPathCodeARM(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { DCHECK(at->IsLoadClass() || at->IsClinitCheck()); } @@ -247,10 +422,10 @@ class LoadClassSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM); }; -class TypeCheckSlowPathARM : 
public SlowPathCode { +class TypeCheckSlowPathARM : public SlowPathCodeARM { public: TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal) - : SlowPathCode(instruction), is_fatal_(is_fatal) {} + : SlowPathCodeARM(instruction), is_fatal_(is_fatal) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -307,10 +482,10 @@ class TypeCheckSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM); }; -class DeoptimizationSlowPathARM : public SlowPathCode { +class DeoptimizationSlowPathARM : public SlowPathCodeARM { public: explicit DeoptimizationSlowPathARM(HDeoptimize* instruction) - : SlowPathCode(instruction) {} + : SlowPathCodeARM(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); @@ -325,9 +500,9 @@ class DeoptimizationSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM); }; -class ArraySetSlowPathARM : public SlowPathCode { +class ArraySetSlowPathARM : public SlowPathCodeARM { public: - explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCode(instruction) {} + explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCodeARM(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -367,10 +542,10 @@ class ArraySetSlowPathARM : public SlowPathCode { }; // Slow path marking an object during a read barrier. 
-class ReadBarrierMarkSlowPathARM : public SlowPathCode { +class ReadBarrierMarkSlowPathARM : public SlowPathCodeARM { public: ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location obj) - : SlowPathCode(instruction), obj_(obj) { + : SlowPathCodeARM(instruction), obj_(obj) { DCHECK(kEmitCompilerReadBarrier); } @@ -434,7 +609,7 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode { }; // Slow path generating a read barrier for a heap reference. -class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { +class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCodeARM { public: ReadBarrierForHeapReferenceSlowPathARM(HInstruction* instruction, Location out, @@ -442,7 +617,7 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { Location obj, uint32_t offset, Location index) - : SlowPathCode(instruction), + : SlowPathCodeARM(instruction), out_(out), ref_(ref), obj_(obj), @@ -614,10 +789,10 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { }; // Slow path generating a read barrier for a GC root. 
-class ReadBarrierForRootSlowPathARM : public SlowPathCode { +class ReadBarrierForRootSlowPathARM : public SlowPathCodeARM { public: ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root) - : SlowPathCode(instruction), out_(out), root_(root) { + : SlowPathCodeARM(instruction), out_(out), root_(root) { DCHECK(kEmitCompilerReadBarrier); } @@ -1177,7 +1352,7 @@ void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { - ValidateInvokeRuntime(instruction, slow_path); + ValidateInvokeRuntime(entrypoint, instruction, slow_path); GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value()); if (EntrypointRequiresStackMap(entrypoint)) { RecordPcInfo(instruction, dex_pc, slow_path); @@ -1502,14 +1677,14 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) { void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } } void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) { - SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize); + SlowPathCodeARM* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize); GenerateTestAndBranch(deoptimize, /* condition_input_index */ 0, slow_path->GetEntryLabel(), @@ -3085,18 +3260,12 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) { } void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction); + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction); codegen_->AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -3931,7 +4100,7 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); @@ -4251,14 +4420,8 @@ void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet( } void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void CodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) { @@ -4272,7 +4435,7 @@ void CodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) { } void CodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) { - SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction); + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction); AddSlowPath(slow_path); LocationSummary* locations = instruction->GetLocations(); @@ -4410,7 +4573,7 @@ void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); @@ -4690,7 +4853,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); Label done; - SlowPathCode* slow_path = nullptr; + SlowPathCodeARM* slow_path = nullptr; if (may_need_runtime_call_for_type_check) { slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM(instruction); @@ -4895,20 +5058,18 @@ void InstructionCodeGeneratorARM::VisitIntermediateAddress(HIntermediateAddress* } void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) { LocationSummary* locations = instruction->GetLocations(); - SlowPathCode* slow_path = + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction); codegen_->AddSlowPath(slow_path); @@ -4947,7 +5108,7 @@ void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction) void 
LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) { @@ -5269,7 +5430,7 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } HLoadClass::LoadKind load_kind = cls->GetLoadKind(); @@ -5377,7 +5538,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { if (generate_null_check || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); - SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck()); codegen_->AddSlowPath(slow_path); if (generate_null_check) { @@ -5402,7 +5563,7 @@ void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) { void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) { // We assume the class is not null. 
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( check->GetLoadClass(), check, check->GetDexPc(), true); codegen_->AddSlowPath(slow_path); GenerateClassInitializationCheck(slow_path, @@ -5410,7 +5571,7 @@ void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) { } void InstructionCodeGeneratorARM::GenerateClassInitializationCheck( - SlowPathCode* slow_path, Register class_reg) { + SlowPathCodeARM* slow_path, Register class_reg) { __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value()); __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized)); __ b(slow_path->GetEntryLabel(), LT); @@ -5573,7 +5734,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -5603,7 +5764,7 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); Label done, zero; - SlowPathCode* slow_path = nullptr; + SlowPathCodeARM* slow_path = nullptr; // Return 0 if `obj` is null. // avoid null check if we know obj is not null. 
@@ -5795,7 +5956,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { type_check_kind == TypeCheckKind::kClassHierarchyCheck || type_check_kind == TypeCheckKind::kArrayObjectCheck) && !instruction->CanThrowIntoCatchBlock(); - SlowPathCode* type_check_slow_path = + SlowPathCodeARM* type_check_slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction, is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); @@ -6289,7 +6450,7 @@ void InstructionCodeGeneratorARM::GenerateGcRootFieldLoad(HInstruction* instruct "have different sizes."); // Slow path marking the GC root `root`. - SlowPathCode* slow_path = + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, root); codegen_->AddSlowPath(slow_path); @@ -6430,7 +6591,7 @@ void CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i __ MaybeUnpoisonHeapReference(ref_reg); // Slow path marking the object `ref` when it is gray. - SlowPathCode* slow_path = + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref); AddSlowPath(slow_path); @@ -6466,7 +6627,7 @@ void CodeGeneratorARM::GenerateReadBarrierSlow(HInstruction* instruction, // not used by the artReadBarrierSlow entry point. // // TODO: Unpoison `ref` when it is used by artReadBarrierSlow. - SlowPathCode* slow_path = new (GetGraph()->GetArena()) + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ReadBarrierForHeapReferenceSlowPathARM(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); @@ -6501,7 +6662,7 @@ void CodeGeneratorARM::GenerateReadBarrierForRootSlow(HInstruction* instruction, // // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. 
- SlowPathCode* slow_path = + SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM(instruction, out, root); AddSlowPath(slow_path); diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index ce9d7e6056..424a1a1455 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -50,6 +50,18 @@ static constexpr SRegister kRuntimeParameterFpuRegisters[] = { S0, S1, S2, S3 }; static constexpr size_t kRuntimeParameterFpuRegistersLength = arraysize(kRuntimeParameterFpuRegisters); +class SlowPathCodeARM : public SlowPathCode { + public: + explicit SlowPathCodeARM(HInstruction* instruction) : SlowPathCode(instruction) {} + + void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) FINAL; + void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) FINAL; + + private: + DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM); +}; + + class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> { public: InvokeRuntimeCallingConvention() @@ -216,7 +228,7 @@ class InstructionCodeGeneratorARM : public InstructionCodeGenerator { // is the block to branch to if the suspend check is not needed, and after // the suspend call. void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor); - void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg); + void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg); void GenerateAndConst(Register out, Register first, uint32_t value); void GenerateOrrConst(Register out, Register first, uint32_t value); void GenerateEorConst(Register out, Register first, uint32_t value); @@ -556,10 +568,10 @@ class CodeGeneratorARM : public CodeGenerator { // artReadBarrierForRootSlow. 
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root); - void GenerateNop(); + void GenerateNop() OVERRIDE; - void GenerateImplicitNullCheck(HNullCheck* instruction); - void GenerateExplicitNullCheck(HNullCheck* instruction); + void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; private: Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp); diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 179bf76f5b..a29e9f3e80 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -139,18 +139,18 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type retur // Calculate memory accessing operand for save/restore live registers. static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen, - RegisterSet* register_set, + LocationSummary* locations, int64_t spill_offset, bool is_save) { - DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(), + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills, codegen->GetNumberOfCoreRegisters(), - register_set->GetFloatingPointRegisters(), + fp_spills, codegen->GetNumberOfFloatingPointRegisters())); - CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, - register_set->GetCoreRegisters() & (~callee_saved_core_registers.GetList())); - CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize, - register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.GetList())); + CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills); + CPURegList fp_list = CPURegList(CPURegister::kFPRegister, 
kDRegSize, fp_spills); MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler(); UseScratchRegisterScope temps(masm); @@ -184,38 +184,35 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen, } void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { - RegisterSet* register_set = locations->GetLiveRegisters(); size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); - for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) { - if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) { - // If the register holds an object, update the stack mask. - if (locations->RegisterContainsObject(i)) { - locations->SetStackBit(stack_offset / kVRegSize); - } - DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); - DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); - saved_core_stack_offsets_[i] = stack_offset; - stack_offset += kXRegSizeInBytes; + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + for (uint32_t i : LowToHighBits(core_spills)) { + // If the register holds an object, update the stack mask. 
+ if (locations->RegisterContainsObject(i)) { + locations->SetStackBit(stack_offset / kVRegSize); } + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_core_stack_offsets_[i] = stack_offset; + stack_offset += kXRegSizeInBytes; } - for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) { - if (!codegen->IsFloatingPointCalleeSaveRegister(i) && - register_set->ContainsFloatingPointRegister(i)) { - DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); - DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); - saved_fpu_stack_offsets_[i] = stack_offset; - stack_offset += kDRegSizeInBytes; - } + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + for (uint32_t i : LowToHighBits(fp_spills)) { + DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); + DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); + saved_fpu_stack_offsets_[i] = stack_offset; + stack_offset += kDRegSizeInBytes; } - SaveRestoreLiveRegistersHelper(codegen, register_set, + SaveRestoreLiveRegistersHelper(codegen, + locations, codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */); } void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { - RegisterSet* register_set = locations->GetLiveRegisters(); - SaveRestoreLiveRegistersHelper(codegen, register_set, + SaveRestoreLiveRegistersHelper(codegen, + locations, codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */); } @@ -261,10 +258,6 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen); __ Bind(GetEntryLabel()); - if (instruction_->CanThrowIntoCatchBlock()) { - // Live registers will be restored in the catch block if caught. 
- SaveLiveRegisters(codegen, instruction_->GetLocations()); - } arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } @@ -448,7 +441,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { } const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; } - bool IsFatal() const { return is_fatal_; } + bool IsFatal() const OVERRIDE { return is_fatal_; } private: const bool is_fatal_; @@ -1452,7 +1445,7 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { - ValidateInvokeRuntime(instruction, slow_path); + ValidateInvokeRuntime(entrypoint, instruction, slow_path); GenerateInvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value()); if (EntrypointRequiresStackMap(entrypoint)) { RecordPcInfo(instruction, dex_pc, slow_path); @@ -1608,7 +1601,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::RequiresRegister()); if (Primitive::IsFloatingPointType(instruction->GetType())) { @@ -2036,7 +2029,7 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); @@ -2306,15 +2299,13 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) { } void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode())); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1).GetCode())); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction)); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) { @@ -2685,14 +2676,8 @@ void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) { } void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) { @@ -2924,7 +2909,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) { void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } @@ -3077,7 +3062,7 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); @@ -3944,7 +3929,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. 
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } HLoadClass::LoadKind load_kind = cls->GetLoadKind(); @@ -4384,14 +4369,8 @@ void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) { } void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) { @@ -4677,7 +4656,7 @@ void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet( void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) { diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index f0d79106dc..f1dc7eecb5 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -644,10 +644,10 @@ class CodeGeneratorARM64 : public CodeGenerator { // artReadBarrierForRootSlow. 
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root); - void GenerateNop(); + void GenerateNop() OVERRIDE; - void GenerateImplicitNullCheck(HNullCheck* instruction); - void GenerateExplicitNullCheck(HNullCheck* instruction); + void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; private: using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>; diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index f07f8a0d91..2211ea3846 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -194,10 +194,6 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen); __ Bind(GetEntryLabel()); - if (instruction_->CanThrowIntoCatchBlock()) { - // Live registers will be restored in the catch block if caught. - SaveLiveRegisters(codegen, instruction_->GetLocations()); - } mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } @@ -905,7 +901,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) { } else { DCHECK(destination.IsStackSlot()) << "Cannot move " << c->DebugName() << " to " << destination; - __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP); + __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP); } } else if (c->IsLongConstant()) { // Move 64 bit constant. 
@@ -917,7 +913,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) { } else { DCHECK(destination.IsDoubleStackSlot()) << "Cannot move " << c->DebugName() << " to " << destination; - __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP); + __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP); } } else if (c->IsFloatConstant()) { // Move 32 bit float constant. @@ -927,7 +923,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) { } else { DCHECK(destination.IsStackSlot()) << "Cannot move " << c->DebugName() << " to " << destination; - __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP); + __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP); } } else { // Move 64 bit double constant. @@ -939,7 +935,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) { } else { DCHECK(destination.IsDoubleStackSlot()) << "Cannot move " << c->DebugName() << " to " << destination; - __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP); + __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP); } } } @@ -1224,7 +1220,7 @@ void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { - ValidateInvokeRuntime(instruction, slow_path); + ValidateInvokeRuntime(entrypoint, instruction, slow_path); bool reordering = __ SetReorder(false); __ LoadFromOffset(kLoadWord, T9, TR, GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value()); __ Jalr(T9); @@ -1960,6 +1956,25 @@ void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) { codegen_->MaybeRecordImplicitNullCheck(instruction); } +Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) { + return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern()) + ? 
Location::ConstantLocation(instruction->AsConstant()) + : Location::RequiresRegister(); +} + +Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) { + // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register. + // We can store a non-zero float or double constant without first loading it into the FPU, + // but we should only prefer this if the constant has a single use. + if (instruction->IsConstant() && + (instruction->AsConstant()->IsZeroBitPattern() || + instruction->GetUses().HasExactlyOneElement())) { + return Location::ConstantLocation(instruction->AsConstant()); + // Otherwise fall through and require an FPU register for the constant. + } + return Location::RequiresFpuRegister(); +} + void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) { bool needs_runtime_call = instruction->NeedsTypeCheck(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( @@ -1974,9 +1989,9 @@ void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) { - locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2))); } else { - locations->SetInAt(2, Location::RequiresRegister()); + locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2))); } } } @@ -1985,24 +2000,29 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { LocationSummary* locations = instruction->GetLocations(); Register obj = locations->InAt(0).AsRegister<Register>(); Location index = locations->InAt(1); + Location value_location = locations->InAt(2); Primitive::Type value_type = instruction->GetComponentType(); bool needs_runtime_call = locations->WillCall(); bool needs_write_barrier = 
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); auto null_checker = GetImplicitNullChecker(instruction); + Register base_reg = index.IsConstant() ? obj : TMP; switch (value_type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); - Register value = locations->InAt(2).AsRegister<Register>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset; - __ StoreToOffset(kStoreByte, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1; } else { - __ Addu(TMP, obj, index.AsRegister<Register>()); - __ StoreToOffset(kStoreByte, value, TMP, data_offset, null_checker); + __ Addu(base_reg, obj, index.AsRegister<Register>()); + } + if (value_location.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker); + } else { + Register value = value_location.AsRegister<Register>(); + __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker); } break; } @@ -2010,15 +2030,18 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimShort: case Primitive::kPrimChar: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); - Register value = locations->InAt(2).AsRegister<Register>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset; - __ StoreToOffset(kStoreHalfword, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2; } else { - __ Sll(TMP, index.AsRegister<Register>(), TIMES_2); - __ Addu(TMP, obj, TMP); - __ StoreToOffset(kStoreHalfword, value, TMP, data_offset, null_checker); + __ Sll(base_reg, 
index.AsRegister<Register>(), TIMES_2); + __ Addu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker); + } else { + Register value = value_location.AsRegister<Register>(); + __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker); } break; } @@ -2027,20 +2050,23 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimNot: { if (!needs_runtime_call) { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Register value = locations->InAt(2).AsRegister<Register>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ StoreToOffset(kStoreWord, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; } else { - DCHECK(index.IsRegister()) << index; - __ Sll(TMP, index.AsRegister<Register>(), TIMES_4); - __ Addu(TMP, obj, TMP); - __ StoreToOffset(kStoreWord, value, TMP, data_offset, null_checker); + __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4); + __ Addu(base_reg, obj, base_reg); } - if (needs_write_barrier) { - DCHECK_EQ(value_type, Primitive::kPrimNot); - codegen_->MarkGCCard(obj, value); + if (value_location.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker); + DCHECK(!needs_write_barrier); + } else { + Register value = value_location.AsRegister<Register>(); + __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker); + if (needs_write_barrier) { + DCHECK_EQ(value_type, Primitive::kPrimNot); + codegen_->MarkGCCard(obj, value); + } } } else { DCHECK_EQ(value_type, Primitive::kPrimNot); @@ -2052,47 +2078,54 @@ void 
InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimLong: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); - Register value = locations->InAt(2).AsRegisterPairLow<Register>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ StoreToOffset(kStoreDoubleword, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; } else { - __ Sll(TMP, index.AsRegister<Register>(), TIMES_8); - __ Addu(TMP, obj, TMP); - __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker); + __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8); + __ Addu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker); + } else { + Register value = value_location.AsRegisterPairLow<Register>(); + __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker); } break; } case Primitive::kPrimFloat: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); - FRegister value = locations->InAt(2).AsFpuRegister<FRegister>(); - DCHECK(locations->InAt(2).IsFpuRegister()); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ StoreSToOffset(value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; } else { - __ Sll(TMP, index.AsRegister<Register>(), TIMES_4); - __ Addu(TMP, obj, TMP); - __ StoreSToOffset(value, TMP, data_offset, null_checker); + __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4); + __ Addu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int32_t value = 
CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker); + } else { + FRegister value = value_location.AsFpuRegister<FRegister>(); + __ StoreSToOffset(value, base_reg, data_offset, null_checker); } break; } case Primitive::kPrimDouble: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); - FRegister value = locations->InAt(2).AsFpuRegister<FRegister>(); - DCHECK(locations->InAt(2).IsFpuRegister()); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ StoreDToOffset(value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; } else { - __ Sll(TMP, index.AsRegister<Register>(), TIMES_8); - __ Addu(TMP, obj, TMP); - __ StoreDToOffset(value, TMP, data_offset, null_checker); + __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8); + __ Addu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker); + } else { + FRegister value = value_location.AsFpuRegister<FRegister>(); + __ StoreDToOffset(value, base_reg, data_offset, null_checker); } break; } @@ -2104,15 +2137,13 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) { } void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) { @@ -2627,14 +2658,8 @@ void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) { } void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) { @@ -3688,7 +3713,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) { void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. 
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } @@ -3888,9 +3913,9 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field } } else { if (Primitive::IsFloatingPointType(field_type)) { - locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1))); } else { - locations->SetInAt(1, Location::RequiresRegister()); + locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1))); } } } @@ -3901,6 +3926,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, Primitive::Type type = field_info.GetFieldType(); LocationSummary* locations = instruction->GetLocations(); Register obj = locations->InAt(0).AsRegister<Register>(); + Location value_location = locations->InAt(1); StoreOperandType store_type = kStoreByte; bool is_volatile = field_info.IsVolatile(); uint32_t offset = field_info.GetFieldOffset().Uint32Value(); @@ -3941,24 +3967,24 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, codegen_->RecordPcInfo(instruction, instruction->GetDexPc()); if (type == Primitive::kPrimDouble) { // Pass FP parameters in core registers. 
- Location in = locations->InAt(1); - if (in.IsFpuRegister()) { - __ Mfc1(locations->GetTemp(1).AsRegister<Register>(), in.AsFpuRegister<FRegister>()); + if (value_location.IsFpuRegister()) { + __ Mfc1(locations->GetTemp(1).AsRegister<Register>(), + value_location.AsFpuRegister<FRegister>()); __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(), - in.AsFpuRegister<FRegister>()); - } else if (in.IsDoubleStackSlot()) { + value_location.AsFpuRegister<FRegister>()); + } else if (value_location.IsDoubleStackSlot()) { __ LoadFromOffset(kLoadWord, locations->GetTemp(1).AsRegister<Register>(), SP, - in.GetStackIndex()); + value_location.GetStackIndex()); __ LoadFromOffset(kLoadWord, locations->GetTemp(2).AsRegister<Register>(), SP, - in.GetStackIndex() + 4); + value_location.GetStackIndex() + 4); } else { - DCHECK(in.IsConstant()); - DCHECK(in.GetConstant()->IsDoubleConstant()); - int64_t value = bit_cast<int64_t, double>(in.GetConstant()->AsDoubleConstant()->GetValue()); + DCHECK(value_location.IsConstant()); + DCHECK(value_location.GetConstant()->IsDoubleConstant()); + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); __ LoadConst64(locations->GetTemp(2).AsRegister<Register>(), locations->GetTemp(1).AsRegister<Register>(), value); @@ -3967,19 +3993,19 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc); CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>(); } else { - if (!Primitive::IsFloatingPointType(type)) { + if (value_location.IsConstant()) { + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker); + } else if (!Primitive::IsFloatingPointType(type)) { Register src; if (type == Primitive::kPrimLong) { - DCHECK(locations->InAt(1).IsRegisterPair()); - src = locations->InAt(1).AsRegisterPairLow<Register>(); + src = 
value_location.AsRegisterPairLow<Register>(); } else { - DCHECK(locations->InAt(1).IsRegister()); - src = locations->InAt(1).AsRegister<Register>(); + src = value_location.AsRegister<Register>(); } __ StoreToOffset(store_type, src, obj, offset, null_checker); } else { - DCHECK(locations->InAt(1).IsFpuRegister()); - FRegister src = locations->InAt(1).AsFpuRegister<FRegister>(); + FRegister src = value_location.AsFpuRegister<FRegister>(); if (type == Primitive::kPrimFloat) { __ StoreSToOffset(src, obj, offset, null_checker); } else { @@ -3990,8 +4016,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction, // TODO: memory barriers? if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) { - DCHECK(locations->InAt(1).IsRegister()); - Register src = locations->InAt(1).AsRegister<Register>(); + Register src = value_location.AsRegister<Register>(); codegen_->MarkGCCard(obj, src); } @@ -5075,14 +5100,8 @@ void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) { } void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) { @@ -5376,7 +5395,7 @@ void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet( void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) { @@ -5824,13 +5843,11 @@ void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) { locations->SetInAt(0, Location::RequiresRegister()); } -void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) { - int32_t lower_bound = switch_instr->GetStartValue(); - int32_t num_entries = switch_instr->GetNumEntries(); - LocationSummary* locations = switch_instr->GetLocations(); - Register value_reg = locations->InAt(0).AsRegister<Register>(); - HBasicBlock* default_block = switch_instr->GetDefaultBlock(); - +void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg, + int32_t lower_bound, + uint32_t num_entries, + HBasicBlock* switch_block, + HBasicBlock* default_block) { // Create a set of compare/jumps. 
Register temp_reg = TMP; __ Addiu32(temp_reg, value_reg, -lower_bound); @@ -5839,7 +5856,7 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr // this case, index >= num_entries must be true. So that we can save one branch instruction. __ Bltz(temp_reg, codegen_->GetLabelOf(default_block)); - const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors(); + const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors(); // Jump to successors[0] if value == lower_bound. __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0])); int32_t last_index = 0; @@ -5857,11 +5874,107 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr } // And the default for any other value. - if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) { + if (!codegen_->GoesToNextBlock(switch_block, default_block)) { __ B(codegen_->GetLabelOf(default_block)); } } +void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg, + Register constant_area, + int32_t lower_bound, + uint32_t num_entries, + HBasicBlock* switch_block, + HBasicBlock* default_block) { + // Create a jump table. + std::vector<MipsLabel*> labels(num_entries); + const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors(); + for (uint32_t i = 0; i < num_entries; i++) { + labels[i] = codegen_->GetLabelOf(successors[i]); + } + JumpTable* table = __ CreateJumpTable(std::move(labels)); + + // Is the value in range? + __ Addiu32(TMP, value_reg, -lower_bound); + if (IsInt<16>(static_cast<int32_t>(num_entries))) { + __ Sltiu(AT, TMP, num_entries); + __ Beqz(AT, codegen_->GetLabelOf(default_block)); + } else { + __ LoadConst32(AT, num_entries); + __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block)); + } + + // We are in the range of the table. + // Load the target address from the jump table, indexing by the value. 
+ __ LoadLabelAddress(AT, constant_area, table->GetLabel()); + __ Sll(TMP, TMP, 2); + __ Addu(TMP, TMP, AT); + __ Lw(TMP, TMP, 0); + // Compute the absolute target address by adding the table start address + // (the table contains offsets to targets relative to its start). + __ Addu(TMP, TMP, AT); + // And jump. + __ Jr(TMP); + __ NopIfNoReordering(); +} + +void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) { + int32_t lower_bound = switch_instr->GetStartValue(); + uint32_t num_entries = switch_instr->GetNumEntries(); + LocationSummary* locations = switch_instr->GetLocations(); + Register value_reg = locations->InAt(0).AsRegister<Register>(); + HBasicBlock* switch_block = switch_instr->GetBlock(); + HBasicBlock* default_block = switch_instr->GetDefaultBlock(); + + if (codegen_->GetInstructionSetFeatures().IsR6() && + num_entries > kPackedSwitchJumpTableThreshold) { + // R6 uses PC-relative addressing to access the jump table. + // R2, OTOH, requires an HMipsComputeBaseMethodAddress input to access + // the jump table and it is implemented by changing HPackedSwitch to + // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress. + // See VisitMipsPackedSwitch() for the table-based implementation on R2. + GenTableBasedPackedSwitch(value_reg, + ZERO, + lower_bound, + num_entries, + switch_block, + default_block); + } else { + GenPackedSwitchWithCompares(value_reg, + lower_bound, + num_entries, + switch_block, + default_block); + } +} + +void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + // Constant area pointer (HMipsComputeBaseMethodAddress). 
+ locations->SetInAt(1, Location::RequiresRegister()); +} + +void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) { + int32_t lower_bound = switch_instr->GetStartValue(); + uint32_t num_entries = switch_instr->GetNumEntries(); + LocationSummary* locations = switch_instr->GetLocations(); + Register value_reg = locations->InAt(0).AsRegister<Register>(); + Register constant_area = locations->InAt(1).AsRegister<Register>(); + HBasicBlock* switch_block = switch_instr->GetBlock(); + HBasicBlock* default_block = switch_instr->GetDefaultBlock(); + + // This is an R2-only path. HPackedSwitch has been changed to + // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress + // required to address the jump table relative to PC. + GenTableBasedPackedSwitch(value_reg, + constant_area, + lower_bound, + num_entries, + switch_block, + default_block); +} + void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress( HMipsComputeBaseMethodAddress* insn) { LocationSummary* locations = diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h index 003998129e..553a7e6674 100644 --- a/compiler/optimizing/code_generator_mips.h +++ b/compiler/optimizing/code_generator_mips.h @@ -191,6 +191,8 @@ class LocationsBuilderMIPS : public HGraphVisitor { void HandleShift(HBinaryOperation* operation); void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info); void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + Location RegisterOrZeroConstant(HInstruction* instruction); + Location FpuRegisterOrConstantForStore(HInstruction* instruction); InvokeDexCallingConventionVisitorMIPS parameter_visitor_; @@ -218,6 +220,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator { MipsAssembler* GetAssembler() const { return assembler_; } + // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit + // instructions for N cases. 
+ // Table-based packed switch generates approx. 11 32-bit instructions + // and N 32-bit data words for N cases. + // At N = 6 they come out as 18 and 17 32-bit words respectively. + // We switch to the table-based method starting with 7 cases. + static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6; + private: void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg); void GenerateMemoryBarrier(MemBarrierKind kind); @@ -262,6 +272,17 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator { void GenerateDivRemIntegral(HBinaryOperation* instruction); void HandleGoto(HInstruction* got, HBasicBlock* successor); auto GetImplicitNullChecker(HInstruction* instruction); + void GenPackedSwitchWithCompares(Register value_reg, + int32_t lower_bound, + uint32_t num_entries, + HBasicBlock* switch_block, + HBasicBlock* default_block); + void GenTableBasedPackedSwitch(Register value_reg, + Register constant_area, + int32_t lower_bound, + uint32_t num_entries, + HBasicBlock* switch_block, + HBasicBlock* default_block); MipsAssembler* const assembler_; CodeGeneratorMIPS* const codegen_; @@ -310,10 +331,10 @@ class CodeGeneratorMIPS : public CodeGenerator { void SetupBlockedRegisters() const OVERRIDE; - size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id); - size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id); - size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id); - size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id); + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; void ClobberRA() { clobbered_ra_ = true; } @@ -344,7 +365,7 @@ class CodeGeneratorMIPS : public CodeGenerator { void MoveLocation(Location dst, 
Location src, Primitive::Type dst_type) OVERRIDE; - void MoveConstant(Location destination, int32_t value); + void MoveConstant(Location destination, int32_t value) OVERRIDE; void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; @@ -356,7 +377,7 @@ class CodeGeneratorMIPS : public CodeGenerator { ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; } - bool NeedsTwoRegisters(Primitive::Type type) const { + bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE { return type == Primitive::kPrimLong; } @@ -384,9 +405,9 @@ class CodeGeneratorMIPS : public CodeGenerator { UNIMPLEMENTED(FATAL) << "Not implemented on MIPS"; } - void GenerateNop(); - void GenerateImplicitNullCheck(HNullCheck* instruction); - void GenerateExplicitNullCheck(HNullCheck* instruction); + void GenerateNop() OVERRIDE; + void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; // The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays // and boot image strings. The only difference is the interpretation of the offset_or_index. diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 664d498b18..5039fad708 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -150,10 +150,6 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen); __ Bind(GetEntryLabel()); - if (instruction_->CanThrowIntoCatchBlock()) { - // Live registers will be restored in the catch block if caught. 
- SaveLiveRegisters(codegen, instruction_->GetLocations()); - } mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } @@ -946,7 +942,7 @@ void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { - ValidateInvokeRuntime(instruction, slow_path); + ValidateInvokeRuntime(entrypoint, instruction, slow_path); // TODO: anything related to T9/GP/GOT/PIC/.so's? __ LoadFromOffset(kLoadDoubleword, T9, @@ -1558,15 +1554,13 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { } void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) { @@ -2110,14 +2104,8 @@ void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) { } void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) { @@ -2630,7 +2618,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) { void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::RequiresRegister()); } @@ -3461,14 +3449,8 @@ void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) { } void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::RequiresRegister()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) { @@ -3748,7 +3730,7 @@ void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet( void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) { diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h index 3910530eb5..2dd409a224 100644 --- a/compiler/optimizing/code_generator_mips64.h +++ b/compiler/optimizing/code_generator_mips64.h @@ -285,10 +285,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator { void SetupBlockedRegisters() const OVERRIDE; - size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id); - size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id); - size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id); - size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id); + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; + size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; void 
DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE; void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE; @@ -327,7 +327,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator { ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; } - bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; } + bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; } // Check if the desired_string_load_kind is supported. If it is, return it, // otherwise return a fall-back kind that should be used instead. @@ -353,9 +353,9 @@ class CodeGeneratorMIPS64 : public CodeGenerator { UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64"; } - void GenerateNop(); - void GenerateImplicitNullCheck(HNullCheck* instruction); - void GenerateExplicitNullCheck(HNullCheck* instruction); + void GenerateNop() OVERRIDE; + void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; private: // Labels for each block that will be compiled. diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index e18b366411..cc9fe832f1 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -84,10 +84,6 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen); __ Bind(GetEntryLabel()); - if (instruction_->CanThrowIntoCatchBlock()) { - // Live registers will be restored in the catch block if caught. 
- SaveLiveRegisters(codegen, instruction_->GetLocations()); - } x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } @@ -754,7 +750,7 @@ void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { - ValidateInvokeRuntime(instruction, slow_path); + ValidateInvokeRuntime(entrypoint, instruction, slow_path); GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value()); if (EntrypointRequiresStackMap(entrypoint)) { RecordPcInfo(instruction, dex_pc, slow_path); @@ -1069,15 +1065,11 @@ void CodeGeneratorX86::Move64(Location destination, Location source) { __ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); - int64_t value; - if (constant->IsLongConstant()) { - value = constant->AsLongConstant()->GetValue(); - } else { - DCHECK(constant->IsDoubleConstant()); - value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue()); - } + DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant()); + int64_t value = GetInt64ValueOf(constant); __ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value))); - __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value))); + __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), + Immediate(High32Bits(value))); } else { DCHECK(source.IsDoubleStackSlot()) << source; EmitParallelMoves( @@ -1427,14 +1419,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio Location lhs = condition->GetLocations()->InAt(0); Location rhs = condition->GetLocations()->InAt(1); // LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition). 
- if (rhs.IsRegister()) { - __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>()); - } else if (rhs.IsConstant()) { - int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); - codegen_->Compare32BitValue(lhs.AsRegister<Register>(), constant); - } else { - __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex())); - } + codegen_->GenerateIntCompare(lhs, rhs); if (true_target == nullptr) { __ j(X86Condition(condition->GetOppositeCondition()), false_target); } else { @@ -1469,7 +1454,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) { void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::Any()); } @@ -1528,18 +1513,6 @@ void LocationsBuilderX86::VisitSelect(HSelect* select) { locations->SetOut(Location::SameAsFirstInput()); } -void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) { - Register lhs_reg = lhs.AsRegister<Register>(); - if (rhs.IsConstant()) { - int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); - Compare32BitValue(lhs_reg, value); - } else if (rhs.IsStackSlot()) { - assembler_.cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex())); - } else { - assembler_.cmpl(lhs_reg, rhs.AsRegister<Register>()); - } -} - void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) { LocationSummary* locations = select->GetLocations(); DCHECK(locations->InAt(0).Equals(locations->Out())); @@ -3571,10 +3544,7 @@ void InstructionCodeGeneratorX86::VisitRem(HRem* rem) { } void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { - LocationSummary::CallKind call_kind = 
instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); switch (instruction->GetType()) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: @@ -3594,9 +3564,6 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { default: LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType(); } - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) { @@ -3621,7 +3588,7 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) } else { DCHECK(value.IsConstant()) << value; if (value.GetConstant()->AsIntConstant()->GetValue() == 0) { - __ jmp(slow_path->GetEntryLabel()); + __ jmp(slow_path->GetEntryLabel()); } } break; @@ -4540,7 +4507,7 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::RequiresRegister()); @@ -4950,17 +4917,11 @@ void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet( } void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); - Location loc = codegen_->IsImplicitNullCheckAllowed(instruction) + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); + Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks() ? Location::RequiresRegister() : Location::Any(); locations->SetInAt(0, loc); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) { @@ -5007,7 +4968,7 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); @@ -5039,56 +5000,31 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { switch (type) { case Primitive::kPrimBoolean: { Register out = out_loc.AsRegister<Register>(); - if (index.IsConstant()) { - __ movzxb(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset)); - } else { - __ movzxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset)); - } + __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } case Primitive::kPrimByte: { Register out = out_loc.AsRegister<Register>(); - if (index.IsConstant()) { - __ movsxb(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset)); - } else { - __ movsxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset)); - } + __ movsxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } case Primitive::kPrimShort: { Register out = out_loc.AsRegister<Register>(); - if (index.IsConstant()) { - __ movsxw(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset)); - } else { - __ movsxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset)); - } + __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset)); break; } case Primitive::kPrimChar: { Register out = out_loc.AsRegister<Register>(); - if (index.IsConstant()) { - __ movzxw(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset)); - } else { - __ movzxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset)); - } + __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset)); break; } case Primitive::kPrimInt: { Register out = out_loc.AsRegister<Register>(); - if (index.IsConstant()) { - __ movl(out, 
Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset)); - } else { - __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset)); - } + __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } @@ -5105,21 +5041,16 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true); } else { Register out = out_loc.AsRegister<Register>(); + __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). if (index.IsConstant()) { uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ movl(out, Address(obj, offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); - // If read barriers are enabled, emit read barriers other than - // Baker's using a slow path (and also unpoison the loaded - // reference, if heap poisoning is enabled). codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset); } else { - __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); - // If read barriers are enabled, emit read barriers other than - // Baker's using a slow path (and also unpoison the loaded - // reference, if heap poisoning is enabled). 
codegen_->MaybeGenerateReadBarrierSlow( instruction, out_loc, out_loc, obj_loc, data_offset, index); } @@ -5129,40 +5060,23 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { case Primitive::kPrimLong: { DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>()); - if (index.IsConstant()) { - size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ movl(out_loc.AsRegisterPairLow<Register>(), Address(obj, offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); - __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize)); - } else { - __ movl(out_loc.AsRegisterPairLow<Register>(), - Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); - __ movl(out_loc.AsRegisterPairHigh<Register>(), - Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize)); - } + __ movl(out_loc.AsRegisterPairLow<Register>(), + CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + __ movl(out_loc.AsRegisterPairHigh<Register>(), + CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset + kX86WordSize)); break; } case Primitive::kPrimFloat: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); - if (index.IsConstant()) { - __ movss(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset)); - } else { - __ movss(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset)); - } + __ movss(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } case Primitive::kPrimDouble: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); - if (index.IsConstant()) { - __ movsd(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset)); - } else { - __ movsd(out, Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset)); - } + 
__ movsd(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset)); break; } @@ -5233,9 +5147,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset) - : Address(array, index.AsRegister<Register>(), TIMES_1, offset); + Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_1, offset); if (value.IsRegister()) { __ movb(address, value.AsRegister<ByteRegister>()); } else { @@ -5248,9 +5160,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimShort: case Primitive::kPrimChar: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset) - : Address(array, index.AsRegister<Register>(), TIMES_2, offset); + Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_2, offset); if (value.IsRegister()) { __ movw(address, value.AsRegister<Register>()); } else { @@ -5262,9 +5172,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimNot: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset) - : Address(array, index.AsRegister<Register>(), TIMES_4, offset); + Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset); if (!value.IsRegister()) { // Just setting null. 
@@ -5360,9 +5268,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimInt: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset) - : Address(array, index.AsRegister<Register>(), TIMES_4, offset); + Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset); if (value.IsRegister()) { __ movl(address, value.AsRegister<Register>()); } else { @@ -5376,44 +5282,27 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimLong: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); - if (index.IsConstant()) { - size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - if (value.IsRegisterPair()) { - __ movl(Address(array, offset), value.AsRegisterPairLow<Register>()); - codegen_->MaybeRecordImplicitNullCheck(instruction); - __ movl(Address(array, offset + kX86WordSize), value.AsRegisterPairHigh<Register>()); - } else { - DCHECK(value.IsConstant()); - int64_t val = value.GetConstant()->AsLongConstant()->GetValue(); - __ movl(Address(array, offset), Immediate(Low32Bits(val))); - codegen_->MaybeRecordImplicitNullCheck(instruction); - __ movl(Address(array, offset + kX86WordSize), Immediate(High32Bits(val))); - } + if (value.IsRegisterPair()) { + __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset), + value.AsRegisterPairLow<Register>()); + codegen_->MaybeRecordImplicitNullCheck(instruction); + __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize), + value.AsRegisterPairHigh<Register>()); } else { - if (value.IsRegisterPair()) { - __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset), - value.AsRegisterPairLow<Register>()); - 
codegen_->MaybeRecordImplicitNullCheck(instruction); - __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize), - value.AsRegisterPairHigh<Register>()); - } else { - DCHECK(value.IsConstant()); - int64_t val = value.GetConstant()->AsLongConstant()->GetValue(); - __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset), - Immediate(Low32Bits(val))); - codegen_->MaybeRecordImplicitNullCheck(instruction); - __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize), - Immediate(High32Bits(val))); - } + DCHECK(value.IsConstant()); + int64_t val = value.GetConstant()->AsLongConstant()->GetValue(); + __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset), + Immediate(Low32Bits(val))); + codegen_->MaybeRecordImplicitNullCheck(instruction); + __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize), + Immediate(High32Bits(val))); } break; } case Primitive::kPrimFloat: { uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset) - : Address(array, index.AsRegister<Register>(), TIMES_4, offset); + Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset); if (value.IsFpuRegister()) { __ movss(address, value.AsFpuRegister<XmmRegister>()); } else { @@ -5427,17 +5316,13 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimDouble: { uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); - Address address = index.IsConstant() - ? 
Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset) - : Address(array, index.AsRegister<Register>(), TIMES_8, offset); + Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset); if (value.IsFpuRegister()) { __ movsd(address, value.AsFpuRegister<XmmRegister>()); } else { DCHECK(value.IsConstant()); - Address address_hi = index.IsConstant() ? - Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + - offset + kX86WordSize) : - Address(array, index.AsRegister<Register>(), TIMES_8, offset + kX86WordSize); + Address address_hi = + CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset + kX86WordSize); int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue()); __ movl(address, Immediate(Low32Bits(v))); codegen_->MaybeRecordImplicitNullCheck(instruction); @@ -5474,18 +5359,16 @@ void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) { } void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? 
LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); HInstruction* length = instruction->InputAt(1); if (!length->IsEmittedAtUseSite()) { locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); } - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) { @@ -5531,13 +5414,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) { } codegen_->MaybeRecordImplicitNullCheck(array_length); } else { - Register length = length_loc.AsRegister<Register>(); - if (index_loc.IsConstant()) { - int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant()); - __ cmpl(length, Immediate(value)); - } else { - __ cmpl(length, index_loc.AsRegister<Register>()); - } + codegen_->GenerateIntCompare(length_loc, index_loc); } codegen_->AddSlowPath(slow_path); __ j(kBelowEqual, slow_path->GetEntryLabel()); @@ -5555,7 +5432,7 @@ void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction) void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. 
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) { @@ -5913,7 +5790,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } HLoadClass::LoadKind load_kind = cls->GetLoadKind(); @@ -6210,7 +6087,7 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); @@ -6915,9 +6792,7 @@ void CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instr "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ ref = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - Address src = index.IsConstant() ? 
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) : - Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset); + Address src = CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset); GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check); } @@ -7398,6 +7273,27 @@ void CodeGeneratorX86::Compare32BitValue(Register dest, int32_t value) { } } +void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) { + Register lhs_reg = lhs.AsRegister<Register>(); + if (rhs.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); + Compare32BitValue(lhs_reg, value); + } else if (rhs.IsStackSlot()) { + __ cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex())); + } else { + __ cmpl(lhs_reg, rhs.AsRegister<Register>()); + } +} + +Address CodeGeneratorX86::ArrayAddress(Register obj, + Location index, + ScaleFactor scale, + uint32_t data_offset) { + return index.IsConstant() ? + Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) : + Address(obj, index.AsRegister<Register>(), scale, data_offset); +} + Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value) { diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index e2250981bb..5866e65d88 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -427,8 +427,6 @@ class CodeGeneratorX86 : public CodeGenerator { Register value, bool value_can_be_null); - void GenerateIntCompare(Location lhs, Location rhs); - void GenerateMemoryBarrier(MemBarrierKind kind); Label* GetLabelOf(HBasicBlock* block) const { @@ -474,6 +472,15 @@ class CodeGeneratorX86 : public CodeGenerator { // Compare a register with a 32-bit value in the most efficient manner. void Compare32BitValue(Register dest, int32_t value); + // Compare int values. 
Supports only register locations for `lhs`. + void GenerateIntCompare(Location lhs, Location rhs); + + // Construct address for array access. + static Address ArrayAddress(Register obj, + Location index, + ScaleFactor scale, + uint32_t data_offset); + Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value); void Finalize(CodeAllocator* allocator) OVERRIDE; @@ -561,9 +568,9 @@ class CodeGeneratorX86 : public CodeGenerator { } } - void GenerateNop(); - void GenerateImplicitNullCheck(HNullCheck* instruction); - void GenerateExplicitNullCheck(HNullCheck* instruction); + void GenerateNop() OVERRIDE; + void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; // When we don't know the proper offset for the value, we use kDummy32BitOffset. // The correct value will be inserted when processing Assembler fixups. diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 15307fe50c..1d87bf6198 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -88,10 +88,6 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen); __ Bind(GetEntryLabel()); - if (instruction_->CanThrowIntoCatchBlock()) { - // Live registers will be restored in the catch block if caught. 
- SaveLiveRegisters(codegen, instruction_->GetLocations()); - } x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes<kQuickThrowDivZero, void, void>(); } @@ -981,7 +977,7 @@ void CodeGeneratorX86_64::InvokeRuntime(QuickEntrypointEnum entrypoint, HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path) { - ValidateInvokeRuntime(instruction, slow_path); + ValidateInvokeRuntime(entrypoint, instruction, slow_path); GenerateInvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value()); if (EntrypointRequiresStackMap(entrypoint)) { RecordPcInfo(instruction, dex_pc, slow_path); @@ -1204,13 +1200,8 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) { source.AsFpuRegister<XmmRegister>()); } else if (source.IsConstant()) { HConstant* constant = source.GetConstant(); - int64_t value; - if (constant->IsDoubleConstant()) { - value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue()); - } else { - DCHECK(constant->IsLongConstant()); - value = constant->AsLongConstant()->GetValue(); - } + DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant()); + int64_t value = GetInt64ValueOf(constant); Store64BitValueToStack(destination, value); } else { DCHECK(source.IsDoubleStackSlot()); @@ -1309,31 +1300,11 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition) case Primitive::kPrimShort: case Primitive::kPrimInt: case Primitive::kPrimNot: { - CpuRegister left_reg = left.AsRegister<CpuRegister>(); - if (right.IsConstant()) { - int32_t value = CodeGenerator::GetInt32ValueOf(right.GetConstant()); - if (value == 0) { - __ testl(left_reg, left_reg); - } else { - __ cmpl(left_reg, Immediate(value)); - } - } else if (right.IsStackSlot()) { - __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); - } else { - __ cmpl(left_reg, right.AsRegister<CpuRegister>()); - } + codegen_->GenerateIntCompare(left, right); break; } 
case Primitive::kPrimLong: { - CpuRegister left_reg = left.AsRegister<CpuRegister>(); - if (right.IsConstant()) { - int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); - codegen_->Compare64BitValue(left_reg, value); - } else if (right.IsDoubleStackSlot()) { - __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); - } else { - __ cmpq(left_reg, right.AsRegister<CpuRegister>()); - } + codegen_->GenerateLongCompare(left, right); break; } case Primitive::kPrimFloat: { @@ -1488,15 +1459,7 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc Location lhs = condition->GetLocations()->InAt(0); Location rhs = condition->GetLocations()->InAt(1); - if (rhs.IsRegister()) { - __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>()); - } else if (rhs.IsConstant()) { - int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); - codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant); - } else { - __ cmpl(lhs.AsRegister<CpuRegister>(), - Address(CpuRegister(RSP), rhs.GetStackIndex())); - } + codegen_->GenerateIntCompare(lhs, rhs); if (true_target == nullptr) { __ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target); } else { @@ -1531,7 +1494,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) { void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) { locations->SetInAt(0, Location::Any()); } @@ -1696,28 +1659,14 @@ void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) { // Clear output register: setcc only sets the low byte. 
__ xorl(reg, reg); - if (rhs.IsRegister()) { - __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>()); - } else if (rhs.IsConstant()) { - int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); - codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant); - } else { - __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex())); - } + codegen_->GenerateIntCompare(lhs, rhs); __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); return; case Primitive::kPrimLong: // Clear output register: setcc only sets the low byte. __ xorl(reg, reg); - if (rhs.IsRegister()) { - __ cmpq(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>()); - } else if (rhs.IsConstant()) { - int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue(); - codegen_->Compare64BitValue(lhs.AsRegister<CpuRegister>(), value); - } else { - __ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex())); - } + codegen_->GenerateLongCompare(lhs, rhs); __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg); return; case Primitive::kPrimFloat: { @@ -1885,27 +1834,11 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) { case Primitive::kPrimShort: case Primitive::kPrimChar: case Primitive::kPrimInt: { - CpuRegister left_reg = left.AsRegister<CpuRegister>(); - if (right.IsConstant()) { - int32_t value = right.GetConstant()->AsIntConstant()->GetValue(); - codegen_->Compare32BitValue(left_reg, value); - } else if (right.IsStackSlot()) { - __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); - } else { - __ cmpl(left_reg, right.AsRegister<CpuRegister>()); - } + codegen_->GenerateIntCompare(left, right); break; } case Primitive::kPrimLong: { - CpuRegister left_reg = left.AsRegister<CpuRegister>(); - if (right.IsConstant()) { - int64_t value = right.GetConstant()->AsLongConstant()->GetValue(); - codegen_->Compare64BitValue(left_reg, value); - } else if 
(right.IsDoubleStackSlot()) { - __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex())); - } else { - __ cmpq(left_reg, right.AsRegister<CpuRegister>()); - } + codegen_->GenerateLongCompare(left, right); break; } case Primitive::kPrimFloat: { @@ -3681,14 +3614,8 @@ void InstructionCodeGeneratorX86_64::VisitRem(HRem* rem) { } void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); locations->SetInAt(0, Location::Any()); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) { @@ -3714,7 +3641,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio } else { DCHECK(value.IsConstant()) << value; if (value.GetConstant()->AsIntConstant()->GetValue() == 0) { - __ jmp(slow_path->GetEntryLabel()); + __ jmp(slow_path->GetEntryLabel()); } } break; @@ -3729,7 +3656,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio } else { DCHECK(value.IsConstant()) << value; if (value.GetConstant()->AsLongConstant()->GetValue() == 0) { - __ jmp(slow_path->GetEntryLabel()); + __ jmp(slow_path->GetEntryLabel()); } } break; @@ -4084,7 +4011,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); if (Primitive::IsFloatingPointType(instruction->GetType())) { @@ -4459,17 +4386,11 @@ void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet( } void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); - Location loc = codegen_->IsImplicitNullCheckAllowed(instruction) + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction); + Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks() ? Location::RequiresRegister() : Location::Any(); locations->SetInAt(0, loc); - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) { @@ -4516,7 +4437,7 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); @@ -4544,56 +4465,31 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { switch (type) { case Primitive::kPrimBoolean: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); - if (index.IsConstant()) { - __ movzxb(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset)); - } else { - __ movzxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset)); - } + __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } case Primitive::kPrimByte: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); - if (index.IsConstant()) { - __ movsxb(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset)); - } else { - __ movsxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset)); - } + __ movsxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset)); break; } case Primitive::kPrimShort: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); - if (index.IsConstant()) { - __ movsxw(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset)); - } else { - __ movsxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset)); - } + __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset)); break; } case Primitive::kPrimChar: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); - if (index.IsConstant()) { - __ movzxw(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset)); - } else { - __ movzxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset)); - } + __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset)); break; } case Primitive::kPrimInt: { CpuRegister out = 
out_loc.AsRegister<CpuRegister>(); - if (index.IsConstant()) { - __ movl(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset)); - } else { - __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset)); - } + __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } @@ -4610,21 +4506,16 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true); } else { CpuRegister out = out_loc.AsRegister<CpuRegister>(); + __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset)); + codegen_->MaybeRecordImplicitNullCheck(instruction); + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). if (index.IsConstant()) { uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ movl(out, Address(obj, offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); - // If read barriers are enabled, emit read barriers other than - // Baker's using a slow path (and also unpoison the loaded - // reference, if heap poisoning is enabled). codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset); } else { - __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset)); - codegen_->MaybeRecordImplicitNullCheck(instruction); - // If read barriers are enabled, emit read barriers other than - // Baker's using a slow path (and also unpoison the loaded - // reference, if heap poisoning is enabled). 
codegen_->MaybeGenerateReadBarrierSlow( instruction, out_loc, out_loc, obj_loc, data_offset, index); } @@ -4634,34 +4525,19 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { case Primitive::kPrimLong: { CpuRegister out = out_loc.AsRegister<CpuRegister>(); - if (index.IsConstant()) { - __ movq(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset)); - } else { - __ movq(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset)); - } + __ movq(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset)); break; } case Primitive::kPrimFloat: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); - if (index.IsConstant()) { - __ movss(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset)); - } else { - __ movss(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset)); - } + __ movss(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset)); break; } case Primitive::kPrimDouble: { XmmRegister out = out_loc.AsFpuRegister<XmmRegister>(); - if (index.IsConstant()) { - __ movsd(out, Address(obj, - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset)); - } else { - __ movsd(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset)); - } + __ movsd(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset)); break; } @@ -4724,9 +4600,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); - Address address = index.IsConstant() - ? 
Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_1, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_1, offset); if (value.IsRegister()) { __ movb(address, value.AsRegister<CpuRegister>()); } else { @@ -4739,9 +4613,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimShort: case Primitive::kPrimChar: { uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_2, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_2, offset); if (value.IsRegister()) { __ movw(address, value.AsRegister<CpuRegister>()); } else { @@ -4754,9 +4626,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimNot: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset); if (!value.IsRegister()) { // Just setting null. @@ -4852,9 +4722,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimInt: { uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Address address = index.IsConstant() - ? 
Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset); if (value.IsRegister()) { __ movl(address, value.AsRegister<CpuRegister>()); } else { @@ -4868,18 +4736,14 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimLong: { uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset); if (value.IsRegister()) { __ movq(address, value.AsRegister<CpuRegister>()); codegen_->MaybeRecordImplicitNullCheck(instruction); } else { int64_t v = value.GetConstant()->AsLongConstant()->GetValue(); - Address address_high = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + - offset + sizeof(int32_t)) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t)); + Address address_high = + CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t)); codegen_->MoveInt64ToAddress(address, address_high, v, instruction); } break; @@ -4887,15 +4751,12 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimFloat: { uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); - Address address = index.IsConstant() - ? 
Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset); if (value.IsFpuRegister()) { __ movss(address, value.AsFpuRegister<XmmRegister>()); } else { DCHECK(value.IsConstant()); - int32_t v = - bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue()); + int32_t v = bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue()); __ movl(address, Immediate(v)); } codegen_->MaybeRecordImplicitNullCheck(instruction); @@ -4904,19 +4765,15 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimDouble: { uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); - Address address = index.IsConstant() - ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset); + Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset); if (value.IsFpuRegister()) { __ movsd(address, value.AsFpuRegister<XmmRegister>()); codegen_->MaybeRecordImplicitNullCheck(instruction); } else { int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue()); - Address address_high = index.IsConstant() - ? 
Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + - offset + sizeof(int32_t)) - : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t)); + Address address_high = + CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t)); codegen_->MoveInt64ToAddress(address, address_high, v, instruction); } break; @@ -4951,18 +4808,16 @@ void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) } void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) { - LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock() - ? LocationSummary::kCallOnSlowPath - : LocationSummary::kNoCall; - LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); + RegisterSet caller_saves = RegisterSet::Empty(); + InvokeRuntimeCallingConvention calling_convention; + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves); locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0))); HInstruction* length = instruction->InputAt(1); if (!length->IsEmittedAtUseSite()) { locations->SetInAt(1, Location::RegisterOrConstant(length)); } - if (instruction->HasUses()) { - locations->SetOut(Location::SameAsFirstInput()); - } } void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) { @@ -5007,13 +4862,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) } codegen_->MaybeRecordImplicitNullCheck(array_length); } else { - CpuRegister length = length_loc.AsRegister<CpuRegister>(); - if (index_loc.IsConstant()) { - int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant()); - __ cmpl(length, Immediate(value)); - } else { - __ cmpl(length, 
index_loc.AsRegister<CpuRegister>()); - } + codegen_->GenerateIntCompare(length_loc, index_loc); } codegen_->AddSlowPath(slow_path); __ j(kBelowEqual, slow_path->GetEntryLabel()); @@ -5051,7 +4900,7 @@ void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instructio void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath); - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) { @@ -5352,7 +5201,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } HLoadClass::LoadKind load_kind = cls->GetLoadKind(); @@ -5627,7 +5476,7 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); if (baker_read_barrier_slow_path) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. 
} locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::Any()); @@ -6367,9 +6216,7 @@ void CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ ref = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - Address src = index.IsConstant() ? - Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) : - Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset); + Address src = CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset); GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check); } @@ -6674,6 +6521,39 @@ void CodeGeneratorX86_64::Compare64BitValue(CpuRegister dest, int64_t value) { } } +void CodeGeneratorX86_64::GenerateIntCompare(Location lhs, Location rhs) { + CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>(); + if (rhs.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant()); + Compare32BitValue(lhs_reg, value); + } else if (rhs.IsStackSlot()) { + __ cmpl(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex())); + } else { + __ cmpl(lhs_reg, rhs.AsRegister<CpuRegister>()); + } +} + +void CodeGeneratorX86_64::GenerateLongCompare(Location lhs, Location rhs) { + CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>(); + if (rhs.IsConstant()) { + int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue(); + Compare64BitValue(lhs_reg, value); + } else if (rhs.IsDoubleStackSlot()) { + __ cmpq(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex())); + } else { + __ cmpq(lhs_reg, rhs.AsRegister<CpuRegister>()); + } +} + +Address CodeGeneratorX86_64::ArrayAddress(CpuRegister obj, + Location index, + ScaleFactor scale, + uint32_t data_offset) { + return index.IsConstant() ? 
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) : + Address(obj, index.AsRegister<CpuRegister>(), scale, data_offset); +} + void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) { DCHECK(dest.IsDoubleStackSlot()); if (IsInt<32>(value)) { diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index d93908343d..7108676b8e 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -510,6 +510,18 @@ class CodeGeneratorX86_64 : public CodeGenerator { void Compare32BitValue(CpuRegister dest, int32_t value); void Compare64BitValue(CpuRegister dest, int64_t value); + // Compare int values. Supports only register locations for `lhs`. + void GenerateIntCompare(Location lhs, Location rhs); + + // Compare long values. Supports only register locations for `lhs`. + void GenerateLongCompare(Location lhs, Location rhs); + + // Construct address for array access. + static Address ArrayAddress(CpuRegister obj, + Location index, + ScaleFactor scale, + uint32_t data_offset); + Address LiteralCaseTable(HPackedSwitch* switch_instr); // Store a 64 bit value into a DoubleStackSlot in the most efficient manner. @@ -533,9 +545,9 @@ class CodeGeneratorX86_64 : public CodeGenerator { } } - void GenerateNop(); - void GenerateImplicitNullCheck(HNullCheck* instruction); - void GenerateExplicitNullCheck(HNullCheck* instruction); + void GenerateNop() OVERRIDE; + void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE; + void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE; // When we don't know the proper offset for the value, we use kDummy32BitOffset. // We will fix this up in the linker later to have the right value. 
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index e8d6bae1b5..f19faa324c 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -1039,17 +1039,7 @@ TEST_F(CodegenTest, ComparisonsInt) { } TEST_F(CodegenTest, ComparisonsLong) { - // TODO: make MIPS work for long - if (kRuntimeISA == kMips || kRuntimeISA == kMips64) { - return; - } - for (CodegenTargetConfig target_config : GetTargetConfigs()) { - if ((target_config.GetInstructionSet() == kMips) || - (target_config.GetInstructionSet() == kMips64)) { - continue; - } - for (int64_t i = -1; i <= 1; i++) { for (int64_t j = -1; j <= 1; j++) { for (int cond = kCondFirst; cond <= kCondLast; cond++) { diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h index eda0971ecc..776a483d43 100644 --- a/compiler/optimizing/common_arm64.h +++ b/compiler/optimizing/common_arm64.h @@ -273,9 +273,9 @@ inline Location ARM64EncodableConstantOrRegister(HInstruction* constant, // only SP/WSP and ZXR/WZR codes are different between art and vixl. // Note: This function is only used for debug checks. inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers, - size_t num_core, - uint32_t art_fpu_registers, - size_t num_fpu) { + size_t num_core, + uint32_t art_fpu_registers, + size_t num_fpu) { // The register masks won't work if the number of register is larger than 32. 
DCHECK_GE(sizeof(art_core_registers) * 8, num_core); DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu); diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc index e1bde7c737..aa3f26809a 100644 --- a/compiler/optimizing/dead_code_elimination.cc +++ b/compiler/optimizing/dead_code_elimination.cc @@ -16,7 +16,7 @@ #include "dead_code_elimination.h" -#include "utils/array_ref.h" +#include "base/array_ref.h" #include "base/bit_vector-inl.h" #include "ssa_phi_elimination.h" diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc index 129c2a94b5..c501ccf80f 100644 --- a/compiler/optimizing/induction_var_analysis.cc +++ b/compiler/optimizing/induction_var_analysis.cc @@ -714,10 +714,12 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop, case kCondGE: op = kGE; break; default: LOG(FATAL) << "CONDITION UNREACHABLE"; } + // Associate trip count with control instruction, rather than the condition (even + // though it's its use) since former provides a convenient use-free placeholder. 
+ HInstruction* control = loop->GetHeader()->GetLastInstruction(); InductionInfo* taken_test = CreateInvariantOp(op, lower_expr, upper_expr); - AssignInfo(loop, - loop->GetHeader()->GetLastInstruction(), - CreateTripCount(tcKind, trip_count, taken_test, type)); + DCHECK(control->IsIf()); + AssignInfo(loop, control, CreateTripCount(tcKind, trip_count, taken_test, type)); } bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr, diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc index 580d24b74b..292bc4e06e 100644 --- a/compiler/optimizing/induction_var_analysis_test.cc +++ b/compiler/optimizing/induction_var_analysis_test.cc @@ -157,6 +157,13 @@ class InductionVarAnalysisTest : public CommonCompilerTest { iva_->LookupInfo(loop_body_[d]->GetLoopInformation(), instruction)); } + // Returns induction information of the trip-count of loop at depth d. + std::string GetTripCount(int d) { + HInstruction* control = loop_header_[d]->GetLastInstruction(); + DCHECK(control->IsIf()); + return GetInductionInfo(control, d); + } + // Returns true if instructions have identical induction. bool HaveSameInduction(HInstruction* instruction1, HInstruction* instruction2) { return HInductionVarAnalysis::InductionEqual( @@ -239,8 +246,7 @@ TEST_F(InductionVarAnalysisTest, FindBasicInduction) { EXPECT_FALSE(HaveSameInduction(store->InputAt(1), increment_[0])); // Trip-count. - EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", - GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, FindDerivedInduction) { @@ -579,8 +585,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) { } EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[d], d).c_str()); // Trip-count. 
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", - GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str()); + EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(d).c_str()); } } @@ -607,8 +612,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) { EXPECT_FALSE(HaveSameInduction(store1->InputAt(1), store2->InputAt(1))); // Trip-count. - EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", - GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, ByteLoopControl1) { @@ -626,8 +630,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl1) { EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str()); // Trip-count. - EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))", - GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, ByteLoopControl2) { @@ -645,7 +648,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl2) { EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str()); // Trip-count undefined. - EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, ShortLoopControl1) { @@ -664,8 +667,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl1) { EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort", GetInductionInfo(increment_[0], 0).c_str()); // Trip-count. 
- EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))", - GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, ShortLoopControl2) { @@ -684,7 +686,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl2) { EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort", GetInductionInfo(increment_[0], 0).c_str()); // Trip-count undefined. - EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, CharLoopControl1) { @@ -701,8 +703,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl1) { EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str()); // Trip-count. - EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))", - GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))", GetTripCount(0).c_str()); } TEST_F(InductionVarAnalysisTest, CharLoopControl2) { @@ -719,7 +720,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl2) { EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str()); // Trip-count undefined. - EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str()); + EXPECT_STREQ("", GetTripCount(0).c_str()); } } // namespace art diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc index 5e587e0810..cd8b7c7960 100644 --- a/compiler/optimizing/induction_var_range.cc +++ b/compiler/optimizing/induction_var_range.cc @@ -106,6 +106,12 @@ static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) { return instruction; } +/** Helper method to obtain loop's control instruction. 
*/ +static HInstruction* GetLoopControl(HLoopInformation* loop) { + DCHECK(loop != nullptr); + return loop->GetHeader()->GetLastInstruction(); +} + // // Public class methods. // @@ -143,42 +149,134 @@ bool InductionVarRange::GetInductionRange(HInstruction* context, // Find range. chase_hint_ = chase_hint; bool in_body = context->GetBlock() != loop->GetHeader(); + int64_t stride_value = 0; *min_val = GetVal(info, trip, in_body, /* is_min */ true); *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false)); - *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip); + *needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip); return true; } -bool InductionVarRange::CanGenerateCode(HInstruction* context, - HInstruction* instruction, - /*out*/bool* needs_finite_test, - /*out*/bool* needs_taken_test) { +bool InductionVarRange::CanGenerateRange(HInstruction* context, + HInstruction* instruction, + /*out*/bool* needs_finite_test, + /*out*/bool* needs_taken_test) { + bool is_last_value = false; + int64_t stride_value = 0; return GenerateCode(context, instruction, - nullptr, nullptr, nullptr, nullptr, nullptr, // nothing generated yet + is_last_value, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, // nothing generated yet + &stride_value, needs_finite_test, - needs_taken_test); -} - -void InductionVarRange::GenerateRangeCode(HInstruction* context, - HInstruction* instruction, - HGraph* graph, - HBasicBlock* block, - /*out*/HInstruction** lower, - /*out*/HInstruction** upper) { + needs_taken_test) + && (stride_value == -1 || + stride_value == 0 || + stride_value == 1); // avoid wrap-around anomalies. 
+} + +void InductionVarRange::GenerateRange(HInstruction* context, + HInstruction* instruction, + HGraph* graph, + HBasicBlock* block, + /*out*/HInstruction** lower, + /*out*/HInstruction** upper) { + bool is_last_value = false; + int64_t stride_value = 0; bool b1, b2; // unused - if (!GenerateCode(context, instruction, graph, block, lower, upper, nullptr, &b1, &b2)) { - LOG(FATAL) << "Failed precondition: GenerateCode()"; - } -} - -void InductionVarRange::GenerateTakenTest(HInstruction* context, - HGraph* graph, - HBasicBlock* block, - /*out*/HInstruction** taken_test) { + if (!GenerateCode(context, + instruction, + is_last_value, + graph, + block, + lower, + upper, + nullptr, + &stride_value, + &b1, + &b2)) { + LOG(FATAL) << "Failed precondition: CanGenerateRange()"; + } +} + +HInstruction* InductionVarRange::GenerateTakenTest(HInstruction* context, + HGraph* graph, + HBasicBlock* block) { + HInstruction* taken_test = nullptr; + bool is_last_value = false; + int64_t stride_value = 0; + bool b1, b2; // unused + if (!GenerateCode(context, + context, + is_last_value, + graph, + block, + nullptr, + nullptr, + &taken_test, + &stride_value, + &b1, + &b2)) { + LOG(FATAL) << "Failed precondition: CanGenerateRange()"; + } + return taken_test; +} + +bool InductionVarRange::CanGenerateLastValue(HInstruction* instruction) { + bool is_last_value = true; + int64_t stride_value = 0; + bool needs_finite_test = false; + bool needs_taken_test = false; + return GenerateCode(instruction, + instruction, + is_last_value, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, // nothing generated yet + &stride_value, + &needs_finite_test, + &needs_taken_test) + && !needs_finite_test && !needs_taken_test; +} + +HInstruction* InductionVarRange::GenerateLastValue(HInstruction* instruction, + HGraph* graph, + HBasicBlock* block) { + HInstruction* last_value = nullptr; + bool is_last_value = true; + int64_t stride_value = 0; bool b1, b2; // unused - if (!GenerateCode(context, context, 
graph, block, nullptr, nullptr, taken_test, &b1, &b2)) { - LOG(FATAL) << "Failed precondition: GenerateCode()"; + if (!GenerateCode(instruction, + instruction, + is_last_value, + graph, + block, + &last_value, + &last_value, + nullptr, + &stride_value, + &b1, + &b2)) { + LOG(FATAL) << "Failed precondition: CanGenerateLastValue()"; + } + return last_value; +} + +void InductionVarRange::Replace(HInstruction* instruction, + HInstruction* fetch, + HInstruction* replacement) { + for (HLoopInformation* lp = instruction->GetBlock()->GetLoopInformation(); // closest enveloping loop + lp != nullptr; + lp = lp->GetPreHeader()->GetLoopInformation()) { + // Update instruction's information. + ReplaceInduction(induction_analysis_->LookupInfo(lp, instruction), fetch, replacement); + // Update loop's trip-count information. + ReplaceInduction(induction_analysis_->LookupInfo(lp, GetLoopControl(lp)), fetch, replacement); } } @@ -221,13 +319,13 @@ bool InductionVarRange::HasInductionInfo( /*out*/ HLoopInformation** loop, /*out*/ HInductionVarAnalysis::InductionInfo** info, /*out*/ HInductionVarAnalysis::InductionInfo** trip) const { - HLoopInformation* l = context->GetBlock()->GetLoopInformation(); // closest enveloping loop - if (l != nullptr) { - HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(l, instruction); + HLoopInformation* lp = context->GetBlock()->GetLoopInformation(); // closest enveloping loop + if (lp != nullptr) { + HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(lp, instruction); if (i != nullptr) { - *loop = l; + *loop = lp; *info = i; - *trip = induction_analysis_->LookupInfo(l, l->GetHeader()->GetLastInstruction()); + *trip = induction_analysis_->LookupInfo(lp, GetLoopControl(lp)); return true; } } @@ -260,12 +358,13 @@ bool InductionVarRange::HasFetchInLoop(HInductionVarAnalysis::InductionInfo* inf return false; } -bool InductionVarRange::NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const { +bool 
InductionVarRange::NeedsTripCount(HInductionVarAnalysis::InductionInfo* info, + int64_t* stride_value) const { if (info != nullptr) { if (info->induction_class == HInductionVarAnalysis::kLinear) { - return true; + return IsConstant(info->op_a, kExact, stride_value); } else if (info->induction_class == HInductionVarAnalysis::kWrapAround) { - return NeedsTripCount(info->op_b); + return NeedsTripCount(info->op_b, stride_value); } } return false; @@ -618,11 +717,13 @@ InductionVarRange::Value InductionVarRange::MergeVal(Value v1, Value v2, bool is bool InductionVarRange::GenerateCode(HInstruction* context, HInstruction* instruction, + bool is_last_value, HGraph* graph, HBasicBlock* block, /*out*/HInstruction** lower, /*out*/HInstruction** upper, /*out*/HInstruction** taken_test, + /*out*/int64_t* stride_value, /*out*/bool* needs_finite_test, /*out*/bool* needs_taken_test) const { HLoopInformation* loop = nullptr; @@ -637,8 +738,19 @@ bool InductionVarRange::GenerateCode(HInstruction* context, // code does not use the trip-count explicitly (since there could be an implicit relation // between e.g. an invariant subscript and a not-taken condition). bool in_body = context->GetBlock() != loop->GetHeader(); - *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip); + *stride_value = 0; + *needs_finite_test = NeedsTripCount(info, stride_value) && IsUnsafeTripCount(trip); *needs_taken_test = IsBodyTripCount(trip); + // Handle last value request. + if (is_last_value) { + if (info->induction_class != HInductionVarAnalysis::kLinear) { + return false; + } else if (*stride_value > 0) { + lower = nullptr; + } else { + upper = nullptr; + } + } // Code generation for taken test: generate the code when requested or otherwise analyze // if code generation is feasible when taken test is needed. 
if (taken_test != nullptr) { @@ -666,6 +778,10 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, bool in_body, bool is_min) const { if (info != nullptr) { + // If during codegen, the result is not needed (nullptr), simply return success. + if (graph != nullptr && result == nullptr) { + return true; + } // Verify type safety. Primitive::Type type = Primitive::kPrimInt; if (info->type != type) { @@ -757,25 +873,29 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, } break; case HInductionVarAnalysis::kLinear: { - // Linear induction a * i + b, for normalized 0 <= i < TC. Restrict to unit stride only - // to avoid arithmetic wrap-around situations that are hard to guard against. + // Linear induction a * i + b, for normalized 0 <= i < TC. For ranges, this should + // be restricted to a unit stride to avoid arithmetic wrap-around situations that + // are harder to guard against. For a last value, requesting min/max based on any + // stride yields right value. int64_t stride_value = 0; if (IsConstant(info->op_a, kExact, &stride_value)) { - if (stride_value == 1 || stride_value == -1) { - const bool is_min_a = stride_value == 1 ? is_min : !is_min; - if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) && - GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) { - if (graph != nullptr) { - HInstruction* oper; - if (stride_value == 1) { - oper = new (graph->GetArena()) HAdd(type, opa, opb); - } else { - oper = new (graph->GetArena()) HSub(type, opb, opa); - } - *result = Insert(block, oper); + const bool is_min_a = stride_value >= 0 ? 
is_min : !is_min; + if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) && + GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) { + if (graph != nullptr) { + HInstruction* oper; + if (stride_value == 1) { + oper = new (graph->GetArena()) HAdd(type, opa, opb); + } else if (stride_value == -1) { + oper = new (graph->GetArena()) HSub(type, opb, opa); + } else { + HInstruction* mul = new (graph->GetArena()) HMul( + type, graph->GetIntConstant(stride_value), opa); + oper = new (graph->GetArena()) HAdd(type, Insert(block, mul), opb); } - return true; + *result = Insert(block, oper); } + return true; } } break; @@ -800,4 +920,18 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info, return false; } +void InductionVarRange::ReplaceInduction(HInductionVarAnalysis::InductionInfo* info, + HInstruction* fetch, + HInstruction* replacement) { + if (info != nullptr) { + if (info->induction_class == HInductionVarAnalysis::kInvariant && + info->operation == HInductionVarAnalysis::kFetch && + info->fetch == fetch) { + info->fetch = replacement; + } + ReplaceInduction(info->op_a, fetch, replacement); + ReplaceInduction(info->op_b, fetch, replacement); + } +} + } // namespace art diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h index 00aaa167f8..63850b34b8 100644 --- a/compiler/optimizing/induction_var_range.h +++ b/compiler/optimizing/induction_var_range.h @@ -76,10 +76,10 @@ class InductionVarRange { * and need_taken test flags denote if an additional finite-test and/or taken-test * are needed to protect the range evaluation inside its loop. 
*/ - bool CanGenerateCode(HInstruction* context, - HInstruction* instruction, - /*out*/ bool* needs_finite_test, - /*out*/ bool* needs_taken_test); + bool CanGenerateRange(HInstruction* context, + HInstruction* instruction, + /*out*/ bool* needs_finite_test, + /*out*/ bool* needs_taken_test); /** * Generates the actual code in the HIR for the lower and upper bound expressions on the @@ -94,25 +94,42 @@ class InductionVarRange { * lower: add x, 0 * upper: add x, 5 * - * Precondition: CanGenerateCode() returns true. + * Precondition: CanGenerateRange() returns true. */ - void GenerateRangeCode(HInstruction* context, - HInstruction* instruction, - HGraph* graph, - HBasicBlock* block, - /*out*/ HInstruction** lower, - /*out*/ HInstruction** upper); + void GenerateRange(HInstruction* context, + HInstruction* instruction, + HGraph* graph, + HBasicBlock* block, + /*out*/ HInstruction** lower, + /*out*/ HInstruction** upper); /** * Generates explicit taken-test for the loop in the given context. Code is generated in - * given block and graph. The taken-test is returned in parameter test. + * given block and graph. Returns generated taken-test. * - * Precondition: CanGenerateCode() returns true and needs_taken_test is set. + * Precondition: CanGenerateRange() returns true and needs_taken_test is set. */ - void GenerateTakenTest(HInstruction* context, - HGraph* graph, - HBasicBlock* block, - /*out*/ HInstruction** taken_test); + HInstruction* GenerateTakenTest(HInstruction* context, HGraph* graph, HBasicBlock* block); + + /** + * Returns true if induction analysis is able to generate code for last value of + * the given instruction inside the closest enveloping loop. + */ + bool CanGenerateLastValue(HInstruction* instruction); + + /** + * Generates last value of the given instruction in the closest enveloping loop. + * Code is generated in given block and graph. Returns generated last value. + * + * Precondition: CanGenerateLastValue() returns true. 
+ */ + HInstruction* GenerateLastValue(HInstruction* instruction, HGraph* graph, HBasicBlock* block); + + /** + * Updates all matching fetches with the given replacement in all induction information + * that is associated with the given instruction. + */ + void Replace(HInstruction* instruction, HInstruction* fetch, HInstruction* replacement); private: /* @@ -140,7 +157,8 @@ class InductionVarRange { /*out*/ HInductionVarAnalysis::InductionInfo** trip) const; bool HasFetchInLoop(HInductionVarAnalysis::InductionInfo* info) const; - bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const; + bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info, + /*out*/ int64_t* stride_value) const; bool IsBodyTripCount(HInductionVarAnalysis::InductionInfo* trip) const; bool IsUnsafeTripCount(HInductionVarAnalysis::InductionInfo* trip) const; bool IsWellBehavedTripCount(HInductionVarAnalysis::InductionInfo* trip) const; @@ -186,17 +204,19 @@ class InductionVarRange { Value MergeVal(Value v1, Value v2, bool is_min) const; /** - * Generates code for lower/upper/taken-test in the HIR. Returns true on success. - * With values nullptr, the method can be used to determine if code generation + * Generates code for lower/upper/taken-test or last value in the HIR. Returns true on + * success. With values nullptr, the method can be used to determine if code generation * would be successful without generating actual code yet. 
*/ bool GenerateCode(HInstruction* context, HInstruction* instruction, + bool is_last_val, HGraph* graph, HBasicBlock* block, /*out*/ HInstruction** lower, /*out*/ HInstruction** upper, /*out*/ HInstruction** taken_test, + /*out*/ int64_t* stride_value, /*out*/ bool* needs_finite_test, /*out*/ bool* needs_taken_test) const; @@ -208,6 +228,10 @@ class InductionVarRange { bool in_body, bool is_min) const; + void ReplaceInduction(HInductionVarAnalysis::InductionInfo* info, + HInstruction* fetch, + HInstruction* replacement); + /** Results of prior induction variable analysis. */ HInductionVarAnalysis* induction_analysis_; diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc index 4ea170f659..8bbdd4acb7 100644 --- a/compiler/optimizing/induction_var_range_test.cc +++ b/compiler/optimizing/induction_var_range_test.cc @@ -75,34 +75,34 @@ class InductionVarRangeTest : public CommonCompilerTest { // Control flow. loop_preheader_ = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(loop_preheader_); - HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_); - graph_->AddBlock(loop_header); - HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_); - graph_->AddBlock(loop_body); + loop_header_ = new (&allocator_) HBasicBlock(graph_); + graph_->AddBlock(loop_header_); + loop_body_ = new (&allocator_) HBasicBlock(graph_); + graph_->AddBlock(loop_body_); HBasicBlock* return_block = new (&allocator_) HBasicBlock(graph_); graph_->AddBlock(return_block); entry_block_->AddSuccessor(loop_preheader_); - loop_preheader_->AddSuccessor(loop_header); - loop_header->AddSuccessor(loop_body); - loop_header->AddSuccessor(return_block); - loop_body->AddSuccessor(loop_header); + loop_preheader_->AddSuccessor(loop_header_); + loop_header_->AddSuccessor(loop_body_); + loop_header_->AddSuccessor(return_block); + loop_body_->AddSuccessor(loop_header_); return_block->AddSuccessor(exit_block_); // Instructions. 
loop_preheader_->AddInstruction(new (&allocator_) HGoto()); HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt); - loop_header->AddPhi(phi); + loop_header_->AddPhi(phi); phi->AddInput(graph_->GetIntConstant(lower)); // i = l if (stride > 0) { condition_ = new (&allocator_) HLessThan(phi, upper); // i < u } else { condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u } - loop_header->AddInstruction(condition_); - loop_header->AddInstruction(new (&allocator_) HIf(condition_)); + loop_header_->AddInstruction(condition_); + loop_header_->AddInstruction(new (&allocator_) HIf(condition_)); increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, phi, graph_->GetIntConstant(stride)); - loop_body->AddInstruction(increment_); // i += s + loop_body_->AddInstruction(increment_); // i += s phi->AddInput(increment_); - loop_body->AddInstruction(new (&allocator_) HGoto()); + loop_body_->AddInstruction(new (&allocator_) HGoto()); return_block->AddInstruction(new (&allocator_) HReturnVoid()); exit_block_->AddInstruction(new (&allocator_) HExit()); } @@ -192,7 +192,8 @@ class InductionVarRangeTest : public CommonCompilerTest { // bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) { - return range_.NeedsTripCount(info); + int64_t s = 0; + return range_.NeedsTripCount(info, &s); } bool IsBodyTripCount(HInductionVarAnalysis::InductionInfo* trip) { @@ -251,6 +252,8 @@ class InductionVarRangeTest : public CommonCompilerTest { HBasicBlock* entry_block_; HBasicBlock* exit_block_; HBasicBlock* loop_preheader_; + HBasicBlock* loop_header_; + HBasicBlock* loop_body_; HInductionVarAnalysis* iva_; InductionVarRange range_; @@ -600,15 +603,19 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) { Value v1, v2; bool needs_finite_test = true; + bool needs_taken_test = true; + + HInstruction* phi = condition_->InputAt(0); + HInstruction* exit = exit_block_->GetLastInstruction(); // In context of header: known. 
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(0), v1); ExpectEqual(Value(1000), v2); // In context of loop-body: known. - range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(0), v1); ExpectEqual(Value(999), v2); @@ -616,6 +623,20 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) { EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(1), v1); ExpectEqual(Value(1000), v2); + + // Induction vs. no-induction. + EXPECT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test)); + EXPECT_TRUE(range_.CanGenerateLastValue(phi)); + EXPECT_FALSE(range_.CanGenerateRange(exit, exit, &needs_finite_test, &needs_taken_test)); + EXPECT_FALSE(range_.CanGenerateLastValue(exit)); + + // Last value (unsimplified). + HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_); + ASSERT_TRUE(last->IsAdd()); + ASSERT_TRUE(last->InputAt(0)->IsIntConstant()); + EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue()); + ASSERT_TRUE(last->InputAt(1)->IsIntConstant()); + EXPECT_EQ(0, last->InputAt(1)->AsIntConstant()->GetValue()); } TEST_F(InductionVarRangeTest, ConstantTripCountDown) { @@ -624,15 +645,19 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) { Value v1, v2; bool needs_finite_test = true; + bool needs_taken_test = true; + + HInstruction* phi = condition_->InputAt(0); + HInstruction* exit = exit_block_->GetLastInstruction(); // In context of header: known. 
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(0), v1); ExpectEqual(Value(1000), v2); // In context of loop-body: known. - range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(1), v1); ExpectEqual(Value(1000), v2); @@ -640,6 +665,25 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) { EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(0), v1); ExpectEqual(Value(999), v2); + + // Induction vs. no-induction. + EXPECT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test)); + EXPECT_TRUE(range_.CanGenerateLastValue(phi)); + EXPECT_FALSE(range_.CanGenerateRange(exit, exit, &needs_finite_test, &needs_taken_test)); + EXPECT_FALSE(range_.CanGenerateLastValue(exit)); + + // Last value (unsimplified). + HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_); + ASSERT_TRUE(last->IsSub()); + ASSERT_TRUE(last->InputAt(0)->IsIntConstant()); + EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue()); + ASSERT_TRUE(last->InputAt(1)->IsNeg()); + last = last->InputAt(1)->InputAt(0); + ASSERT_TRUE(last->IsSub()); + ASSERT_TRUE(last->InputAt(0)->IsIntConstant()); + EXPECT_EQ(0, last->InputAt(0)->AsIntConstant()->GetValue()); + ASSERT_TRUE(last->InputAt(1)->IsIntConstant()); + EXPECT_EQ(1000, last->InputAt(1)->AsIntConstant()->GetValue()); } TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { @@ -650,14 +694,16 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { bool needs_finite_test = true; bool needs_taken_test = true; + HInstruction* phi = condition_->InputAt(0); + // In context of header: upper unknown. 
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(0), v1); ExpectEqual(Value(), v2); // In context of loop-body: known. - range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(0), v1); ExpectEqual(Value(x_, 1, -1), v2); @@ -668,19 +714,15 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { HInstruction* lower = nullptr; HInstruction* upper = nullptr; - HInstruction* taken = nullptr; // Can generate code in context of loop-body only. - EXPECT_FALSE(range_.CanGenerateCode( - condition_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test)); - ASSERT_TRUE(range_.CanGenerateCode( - increment_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test)); + EXPECT_FALSE(range_.CanGenerateRange(condition_, phi, &needs_finite_test, &needs_taken_test)); + ASSERT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test)); EXPECT_FALSE(needs_finite_test); EXPECT_TRUE(needs_taken_test); - // Generates code. - range_.GenerateRangeCode( - increment_, condition_->InputAt(0), graph_, loop_preheader_, &lower, &upper); + // Generates code (unsimplified). + range_.GenerateRange(increment_, phi, graph_, loop_preheader_, &lower, &upper); // Verify lower is 0+0. ASSERT_TRUE(lower != nullptr); @@ -701,12 +743,19 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) { EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue()); // Verify taken-test is 0<V. 
- range_.GenerateTakenTest(increment_, graph_, loop_preheader_, &taken); + HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_); ASSERT_TRUE(taken != nullptr); ASSERT_TRUE(taken->IsLessThan()); ASSERT_TRUE(taken->InputAt(0)->IsIntConstant()); EXPECT_EQ(0, taken->InputAt(0)->AsIntConstant()->GetValue()); EXPECT_TRUE(taken->InputAt(1)->IsParameterValue()); + + // Replacement. + range_.Replace(loop_header_->GetLastInstruction(), x_, y_); + range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test); + EXPECT_FALSE(needs_finite_test); + ExpectEqual(Value(1), v1); + ExpectEqual(Value(y_, 1, 0), v2); } TEST_F(InductionVarRangeTest, SymbolicTripCountDown) { @@ -717,14 +766,16 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) { bool needs_finite_test = true; bool needs_taken_test = true; + HInstruction* phi = condition_->InputAt(0); + // In context of header: lower unknown. - range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(), v1); ExpectEqual(Value(1000), v2); // In context of loop-body: known. - range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test); + range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test); EXPECT_FALSE(needs_finite_test); ExpectEqual(Value(x_, 1, 1), v1); ExpectEqual(Value(1000), v2); @@ -735,19 +786,15 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) { HInstruction* lower = nullptr; HInstruction* upper = nullptr; - HInstruction* taken = nullptr; // Can generate code in context of loop-body only. 
- EXPECT_FALSE(range_.CanGenerateCode( - condition_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test)); - ASSERT_TRUE(range_.CanGenerateCode( - increment_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test)); + EXPECT_FALSE(range_.CanGenerateRange(condition_, phi, &needs_finite_test, &needs_taken_test)); + ASSERT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test)); EXPECT_FALSE(needs_finite_test); EXPECT_TRUE(needs_taken_test); - // Generates code. - range_.GenerateRangeCode( - increment_, condition_->InputAt(0), graph_, loop_preheader_, &lower, &upper); + // Generates code (unsimplified). + range_.GenerateRange(increment_, phi, graph_, loop_preheader_, &lower, &upper); // Verify lower is 1000-((1000-V)-1). ASSERT_TRUE(lower != nullptr); @@ -773,12 +820,19 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) { EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue()); // Verify taken-test is 1000>V. - range_.GenerateTakenTest(increment_, graph_, loop_preheader_, &taken); + HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_); ASSERT_TRUE(taken != nullptr); ASSERT_TRUE(taken->IsGreaterThan()); ASSERT_TRUE(taken->InputAt(0)->IsIntConstant()); EXPECT_EQ(1000, taken->InputAt(0)->AsIntConstant()->GetValue()); EXPECT_TRUE(taken->InputAt(1)->IsParameterValue()); + + // Replacement. 
+ range_.Replace(loop_header_->GetLastInstruction(), x_, y_); + range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test); + EXPECT_FALSE(needs_finite_test); + ExpectEqual(Value(y_, 1, 0), v1); + ExpectEqual(Value(999), v2); } } // namespace art diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h index 7905104ed4..7fe1067aa9 100644 --- a/compiler/optimizing/instruction_simplifier.h +++ b/compiler/optimizing/instruction_simplifier.h @@ -35,9 +35,9 @@ namespace art { */ class InstructionSimplifier : public HOptimization { public: - InstructionSimplifier(HGraph* graph, - OptimizingCompilerStats* stats = nullptr, - const char* name = kInstructionSimplifierPassName) + explicit InstructionSimplifier(HGraph* graph, + OptimizingCompilerStats* stats = nullptr, + const char* name = kInstructionSimplifierPassName) : HOptimization(graph, name, stats) {} static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier"; diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index 8f7778fe68..04e063c92e 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -259,7 +259,8 @@ bool TryExtractArrayAccessAddress(HInstruction* access, HIntConstant* offset = graph->GetIntConstant(data_offset); HIntermediateAddress* address = new (arena) HIntermediateAddress(array, offset, kNoDexPc); - address->SetReferenceTypeInfo(array->GetReferenceTypeInfo()); + // TODO: Is it ok to not have this on the intermediate address? 
+ // address->SetReferenceTypeInfo(array->GetReferenceTypeInfo()); access->GetBlock()->InsertInstructionBefore(address, access); access->ReplaceInput(address, 0); // Both instructions must depend on GC to prevent any instruction that can diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 67640a1ebf..fd2da1004b 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -657,7 +657,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, LocationSummary::kNoCall, kIntrinsified); if (can_call && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 082076d79b..ce58657bcd 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -895,7 +895,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke LocationSummary::kNoCall, kIntrinsified); if (can_call && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index be8eb51e42..1d153e2e18 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -1857,11 +1857,11 @@ static void GenHighestOneBit(LocationSummary* locations, if (type == Primitive::kPrimLong) { __ Dclz(TMP, in); __ LoadConst64(AT, INT64_C(0x8000000000000000)); - __ Dsrlv(out, AT, TMP); + __ Dsrlv(AT, AT, TMP); } else { __ Clz(TMP, in); __ LoadConst32(AT, 0x80000000); - __ Srlv(out, AT, TMP); + __ Srlv(AT, AT, TMP); } // For either value of "type", when "in" is zero, "out" should also // be zero. Without this extra "and" operation, when "in" is zero, @@ -1869,7 +1869,7 @@ static void GenHighestOneBit(LocationSummary* locations, // the MIPS logical shift operations "dsrlv", and "srlv" don't use // the shift amount (TMP) directly; they use either (TMP % 64) or // (TMP % 32), respectively. - __ And(out, out, in); + __ And(out, AT, in); } // int java.lang.Integer.highestOneBit(int) diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index d17f85ec8d..e61aba05b4 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -1977,7 +1977,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, LocationSummary::kNoCall, kIntrinsified); if (can_call && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::NoLocation()); // Unused receiver. 
locations->SetInAt(1, Location::RequiresRegister()); diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index f8f30d9015..0f31fabbfb 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -2110,7 +2110,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke LocationSummary::kNoCall, kIntrinsified); if (can_call && kUseBakerReadBarrier) { - locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers. + locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers. } locations->SetInAt(0, Location::NoLocation()); // Unused receiver. locations->SetInAt(1, Location::RequiresRegister()); diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc index 1b1b3a79ab..d157509758 100644 --- a/compiler/optimizing/locations.cc +++ b/compiler/optimizing/locations.cc @@ -33,8 +33,8 @@ LocationSummary::LocationSummary(HInstruction* instruction, output_overlaps_(Location::kOutputOverlap), stack_mask_(nullptr), register_mask_(0), - live_registers_(), - custom_slow_path_caller_saves_() { + live_registers_(RegisterSet::Empty()), + custom_slow_path_caller_saves_(RegisterSet::Empty()) { instruction->SetLocations(this); if (NeedsSafepoint()) { diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h index 43840422ca..da27928ef2 100644 --- a/compiler/optimizing/locations.h +++ b/compiler/optimizing/locations.h @@ -420,7 +420,7 @@ std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs); class RegisterSet : public ValueObject { public: - RegisterSet() : core_registers_(0), floating_point_registers_(0) {} + static RegisterSet Empty() { return RegisterSet(); } void Add(Location loc) { if (loc.IsRegister()) { @@ -465,6 +465,8 @@ class RegisterSet : public ValueObject { } private: + RegisterSet() : core_registers_(0), floating_point_registers_(0) {} + uint32_t 
core_registers_; uint32_t floating_point_registers_; }; @@ -488,9 +490,9 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> { kCallOnMainOnly }; - LocationSummary(HInstruction* instruction, - CallKind call_kind = kNoCall, - bool intrinsified = false); + explicit LocationSummary(HInstruction* instruction, + CallKind call_kind = kNoCall, + bool intrinsified = false); void SetInAt(uint32_t at, Location location) { inputs_[at] = location; diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 8f37236ede..9cfa89b7d0 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -460,6 +460,113 @@ GraphAnalysisResult HGraph::AnalyzeLoops() const { return kAnalysisSuccess; } +static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) { + return first_loop == second_loop; +} + +static bool IsLoop(HLoopInformation* info) { + return info != nullptr; +} + +static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) { + return (inner != outer) + && (inner != nullptr) + && (outer != nullptr) + && inner->IsIn(*outer); +} + +// Helper method to update work list for linear order. +static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) { + HLoopInformation* block_loop = block->GetLoopInformation(); + auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position. + for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) { + HBasicBlock* current = *insert_pos; + HLoopInformation* current_loop = current->GetLoopInformation(); + if (InSameLoop(block_loop, current_loop) + || !IsLoop(current_loop) + || IsInnerLoop(current_loop, block_loop)) { + // The block can be processed immediately. + break; + } + } + worklist->insert(insert_pos.base(), block); +} + +// Helper method to validate linear order. 
+static bool IsLinearOrderWellFormed(const HGraph& graph) { + for (HBasicBlock* header : graph.GetBlocks()) { + if (header == nullptr || !header->IsLoopHeader()) { + continue; + } + HLoopInformation* loop = header->GetLoopInformation(); + size_t num_blocks = loop->GetBlocks().NumSetBits(); + size_t found_blocks = 0u; + for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) { + HBasicBlock* current = it.Current(); + if (loop->Contains(*current)) { + found_blocks++; + if (found_blocks == 1u && current != header) { + // First block is not the header. + return false; + } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) { + // Last block is not a back edge. + return false; + } + } else if (found_blocks != 0u && found_blocks != num_blocks) { + // Blocks are not adjacent. + return false; + } + } + DCHECK_EQ(found_blocks, num_blocks); + } + return true; +} + +void HGraph::Linearize() { + // Create a reverse post ordering with the following properties: + // - Blocks in a loop are consecutive, + // - Back-edge is the last block before loop exits. + + // (1): Record the number of forward predecessors for each block. This is to + // ensure the resulting order is reverse post order. We could use the + // current reverse post order in the graph, but it would require making + // order queries to a GrowableArray, which is not the best data structure + // for it. 
+ ArenaVector<uint32_t> forward_predecessors(blocks_.size(), + arena_->Adapter(kArenaAllocSsaLiveness)); + for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { + HBasicBlock* block = it.Current(); + size_t number_of_forward_predecessors = block->GetPredecessors().size(); + if (block->IsLoopHeader()) { + number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges(); + } + forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors; + } + + // (2): Following a worklist approach, first start with the entry block, and + // iterate over the successors. When all non-back edge predecessors of a + // successor block are visited, the successor block is added in the worklist + // following an order that satisfies the requirements to build our linear graph. + linear_order_.reserve(GetReversePostOrder().size()); + ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocSsaLiveness)); + worklist.push_back(GetEntryBlock()); + do { + HBasicBlock* current = worklist.back(); + worklist.pop_back(); + linear_order_.push_back(current); + for (HBasicBlock* successor : current->GetSuccessors()) { + int block_id = successor->GetBlockId(); + size_t number_of_remaining_predecessors = forward_predecessors[block_id]; + if (number_of_remaining_predecessors == 1) { + AddToListForLinearization(&worklist, successor); + } + forward_predecessors[block_id] = number_of_remaining_predecessors - 1; + } + } while (!worklist.empty()); + + DCHECK(HasIrreducibleLoops() || IsLinearOrderWellFormed(*this)); +} + void HLoopInformation::Dump(std::ostream& os) { os << "header: " << header_->GetBlockId() << std::endl; os << "pre header: " << GetPreHeader()->GetBlockId() << std::endl; diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 99d7673467..6d207765e3 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -24,7 +24,9 @@ #include "base/arena_bit_vector.h" #include "base/arena_containers.h" #include 
"base/arena_object.h" +#include "base/array_ref.h" #include "base/stl_util.h" +#include "base/transform_array_ref.h" #include "dex_file.h" #include "entrypoints/quick/quick_entrypoints_enum.h" #include "handle.h" @@ -35,9 +37,7 @@ #include "mirror/class.h" #include "offsets.h" #include "primitive.h" -#include "utils/array_ref.h" #include "utils/intrusive_forward_list.h" -#include "utils/transform_array_ref.h" namespace art { @@ -365,6 +365,13 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { // is a throw-catch loop, i.e. the header is a catch block. GraphAnalysisResult AnalyzeLoops() const; + // Computes the linear order (should be called before using HLinearOrderIterator). + // Linearizes the graph such that: + // (1): a block is always after its dominator, + // (2): blocks of loops are contiguous. + // This creates a natural and efficient ordering when visualizing live ranges. + void Linearize(); + // Iterate over blocks to compute try block membership. Needs reverse post // order and loop information. 
void ComputeTryBlockInformation(); @@ -830,7 +837,7 @@ static constexpr uint32_t kInvalidBlockId = static_cast<uint32_t>(-1); class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> { public: - HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc) + explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc) : graph_(graph), predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)), successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)), @@ -1314,7 +1321,8 @@ class HLoopInformationOutwardIterator : public ValueObject { #else #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \ M(MipsComputeBaseMethodAddress, Instruction) \ - M(MipsDexCacheArraysBase, Instruction) + M(MipsDexCacheArraysBase, Instruction) \ + M(MipsPackedSwitch, Instruction) #endif #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M) @@ -4366,7 +4374,7 @@ class HDiv FINAL : public HBinaryOperation { HInstruction* left, HInstruction* right, uint32_t dex_pc) - : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {} + : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {} template <typename T> T ComputeIntegral(T x, T y) const { @@ -4401,11 +4409,6 @@ class HDiv FINAL : public HBinaryOperation { ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } - static SideEffects SideEffectsForArchRuntimeCalls() { - // The generated code can use a runtime call. 
- return SideEffects::CanTriggerGC(); - } - DECLARE_INSTRUCTION(Div); private: @@ -4418,7 +4421,7 @@ class HRem FINAL : public HBinaryOperation { HInstruction* left, HInstruction* right, uint32_t dex_pc) - : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {} + : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {} template <typename T> T ComputeIntegral(T x, T y) const { @@ -4453,10 +4456,6 @@ class HRem FINAL : public HBinaryOperation { ComputeFP(x->GetValue(), y->GetValue()), GetDexPc()); } - static SideEffects SideEffectsForArchRuntimeCalls() { - return SideEffects::CanTriggerGC(); - } - DECLARE_INSTRUCTION(Rem); private: @@ -4909,9 +4908,7 @@ class HTypeConversion FINAL : public HExpression<1> { public: // Instantiate a type conversion of `input` to `result_type`. HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc) - : HExpression(result_type, - SideEffectsForArchRuntimeCalls(input->GetType(), result_type), - dex_pc) { + : HExpression(result_type, SideEffects::None(), dex_pc) { SetRawInputAt(0, input); // Invariant: We should never generate a conversion to a Boolean value. DCHECK_NE(Primitive::kPrimBoolean, result_type); @@ -4930,18 +4927,6 @@ class HTypeConversion FINAL : public HExpression<1> { // containing the result. If the input cannot be converted, return nullptr. HConstant* TryStaticEvaluation() const; - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type input_type, - Primitive::Type result_type) { - // Some architectures may not require the 'GC' side effects, but at this point - // in the compilation process we do not know what architecture we will - // generate code for, so we must be conservative. 
- if ((Primitive::IsFloatingPointType(input_type) && Primitive::IsIntegralType(result_type)) - || (input_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(result_type))) { - return SideEffects::CanTriggerGC(); - } - return SideEffects::None(); - } - DECLARE_INSTRUCTION(TypeConversion); private: @@ -5023,9 +5008,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> { const DexFile& dex_file, Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) - : HExpression(field_type, - SideEffectsForArchRuntimeCalls(field_type, is_volatile), - dex_pc), + : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc), field_info_(field_offset, field_type, is_volatile, @@ -5056,16 +5039,6 @@ class HInstanceFieldGet FINAL : public HExpression<1> { Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); } bool IsVolatile() const { return field_info_.IsVolatile(); } - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) { - SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile); - - // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper. 
- if (Primitive::Is64BitType(field_type)) { - side_effects.Add(SideEffects::CanTriggerGC()); - } - return side_effects; - } - DECLARE_INSTRUCTION(InstanceFieldGet); private: @@ -5086,8 +5059,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { const DexFile& dex_file, Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) - : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile), - dex_pc), + : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc), field_info_(field_offset, field_type, is_volatile, @@ -5112,16 +5084,6 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); } void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); } - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) { - SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile); - - // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper. 
- if (Primitive::Is64BitType(field_type)) { - side_effects.Add(SideEffects::CanTriggerGC()); - } - return side_effects; - } - DECLARE_INSTRUCTION(InstanceFieldSet); private: @@ -5926,9 +5888,7 @@ class HStaticFieldGet FINAL : public HExpression<1> { const DexFile& dex_file, Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) - : HExpression(field_type, - SideEffectsForArchRuntimeCalls(field_type, is_volatile), - dex_pc), + : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc), field_info_(field_offset, field_type, is_volatile, @@ -5956,16 +5916,6 @@ class HStaticFieldGet FINAL : public HExpression<1> { Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); } bool IsVolatile() const { return field_info_.IsVolatile(); } - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) { - SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile); - - // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper. 
- if (Primitive::Is64BitType(field_type)) { - side_effects.Add(SideEffects::CanTriggerGC()); - } - return side_effects; - } - DECLARE_INSTRUCTION(StaticFieldGet); private: @@ -5986,8 +5936,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { const DexFile& dex_file, Handle<mirror::DexCache> dex_cache, uint32_t dex_pc) - : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile), - dex_pc), + : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc), field_info_(field_offset, field_type, is_volatile, @@ -6009,16 +5958,6 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> { bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); } void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); } - static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) { - SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile); - - // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper. 
- if (Primitive::Is64BitType(field_type)) { - side_effects.Add(SideEffects::CanTriggerGC()); - } - return side_effects; - } - DECLARE_INSTRUCTION(StaticFieldSet); private: @@ -6274,7 +6213,7 @@ class HInstanceOf FINAL : public HExpression<2> { class HBoundType FINAL : public HExpression<1> { public: - HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc) + explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc), upper_bound_(ReferenceTypeInfo::CreateInvalid()) { SetPackedFlag<kFlagUpperCanBeNull>(true); diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h index de77245e17..36431c1fb9 100644 --- a/compiler/optimizing/nodes_mips.h +++ b/compiler/optimizing/nodes_mips.h @@ -66,6 +66,41 @@ class HMipsDexCacheArraysBase : public HExpression<0> { DISALLOW_COPY_AND_ASSIGN(HMipsDexCacheArraysBase); }; +// Mips version of HPackedSwitch that holds a pointer to the base method address. +class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> { + public: + HMipsPackedSwitch(int32_t start_value, + int32_t num_entries, + HInstruction* input, + HMipsComputeBaseMethodAddress* method_base, + uint32_t dex_pc) + : HTemplateInstruction(SideEffects::None(), dex_pc), + start_value_(start_value), + num_entries_(num_entries) { + SetRawInputAt(0, input); + SetRawInputAt(1, method_base); + } + + bool IsControlFlow() const OVERRIDE { return true; } + + int32_t GetStartValue() const { return start_value_; } + + int32_t GetNumEntries() const { return num_entries_; } + + HBasicBlock* GetDefaultBlock() const { + // Last entry is the default block. 
+ return GetBlock()->GetSuccessors()[num_entries_]; + } + + DECLARE_INSTRUCTION(MipsPackedSwitch); + + private: + const int32_t start_value_; + const int32_t num_entries_; + + DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch); +}; + } // namespace art #endif // ART_COMPILER_OPTIMIZING_NODES_MIPS_H_ diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h index 8bd8667f84..814202e97b 100644 --- a/compiler/optimizing/nodes_shared.h +++ b/compiler/optimizing/nodes_shared.h @@ -17,6 +17,11 @@ #ifndef ART_COMPILER_OPTIMIZING_NODES_SHARED_H_ #define ART_COMPILER_OPTIMIZING_NODES_SHARED_H_ +// This `#include` should never be used by compilation, as this file (`nodes_shared.h`) is included +// in `nodes.h`. However it helps editing tools (e.g. YouCompleteMe) by giving them better context +// (defining `HInstruction` and co). +#include "nodes.h" + namespace art { class HMultiplyAccumulate FINAL : public HExpression<3> { @@ -117,10 +122,15 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation { // This instruction computes an intermediate address pointing in the 'middle' of an object. The // result pointer cannot be handled by GC, so extra care is taken to make sure that this value is // never used across anything that can trigger GC. +// The result of this instruction is not a pointer in the sense of `Primitive::kPrimNot`. So we +// represent it by the type `Primitive::kPrimInt`. 
class HIntermediateAddress FINAL : public HExpression<2> { public: HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc) - : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) { + : HExpression(Primitive::kPrimInt, SideEffects::DependsOnGC(), dex_pc) { + DCHECK_EQ(Primitive::ComponentSize(Primitive::kPrimInt), + Primitive::ComponentSize(Primitive::kPrimNot)) + << "kPrimInt and kPrimNot have different sizes."; SetRawInputAt(0, base_address); SetRawInputAt(1, offset); } diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc index c6d297df4f..6006e6cf5d 100644 --- a/compiler/optimizing/pc_relative_fixups_mips.cc +++ b/compiler/optimizing/pc_relative_fixups_mips.cc @@ -92,6 +92,25 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } + void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE { + if (switch_insn->GetNumEntries() <= + InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) { + return; + } + // We need to replace the HPackedSwitch with a HMipsPackedSwitch in order to + // address the constant area. + InitializePCRelativeBasePointer(); + HGraph* graph = GetGraph(); + HBasicBlock* block = switch_insn->GetBlock(); + HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch( + switch_insn->GetStartValue(), + switch_insn->GetNumEntries(), + switch_insn->InputAt(0), + base_, + switch_insn->GetDexPc()); + block->ReplaceAndRemoveInstructionWith(switch_insn, mips_switch); + } + void HandleInvoke(HInvoke* invoke) { // If this is an invoke-static/-direct with PC-relative dex cache array // addressing, we need the PC-relative address base. 
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h index a70ceae076..d48b1a0bb9 100644 --- a/compiler/optimizing/register_allocation_resolver.h +++ b/compiler/optimizing/register_allocation_resolver.h @@ -18,9 +18,9 @@ #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_ #include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/value_object.h" #include "primitive.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index b8e1379ef9..e64c005410 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -157,20 +157,11 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { } void HSharpening::ProcessLoadClass(HLoadClass* load_class) { - if (load_class->NeedsAccessCheck()) { - // We need to call the runtime anyway, so we simply get the class as that call's return value. - return; - } - if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) { - // Loading from the ArtMethod* is the most efficient retrieval. - // TODO: This may not actually be true for all architectures and - // locations of target classes. The additional register pressure - // for using the ArtMethod* should be considered. 
- return; - } - - DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod); + DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod || + load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) + << load_class->GetLoadKind(); DCHECK(!load_class->IsInDexCache()) << "HLoadClass should not be optimized before sharpening."; + DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening."; const DexFile& dex_file = load_class->GetDexFile(); uint32_t type_index = load_class->GetTypeIndex(); @@ -242,13 +233,28 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) { } } } - if (is_in_dex_cache) { - load_class->MarkInDexCache(); - } + if (is_in_boot_image) { load_class->MarkInBootImage(); } + if (load_class->NeedsAccessCheck()) { + // We need to call the runtime anyway, so we simply get the class as that call's return value. + return; + } + + if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) { + // Loading from the ArtMethod* is the most efficient retrieval in code size. + // TODO: This may not actually be true for all architectures and + // locations of target classes. The additional register pressure + // for using the ArtMethod* should be considered. 
+ return; + } + + if (is_in_dex_cache) { + load_class->MarkInDexCache(); + } + HLoadClass::LoadKind load_kind = codegen_->GetSupportedLoadClassKind(desired_load_kind); switch (load_kind) { case HLoadClass::LoadKind::kBootImageLinkTimeAddress: diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index a4d52d7761..9ce34aa80b 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -23,119 +23,11 @@ namespace art { void SsaLivenessAnalysis::Analyze() { - LinearizeGraph(); + graph_->Linearize(); NumberInstructions(); ComputeLiveness(); } -static bool IsLoop(HLoopInformation* info) { - return info != nullptr; -} - -static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) { - return first_loop == second_loop; -} - -static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) { - return (inner != outer) - && (inner != nullptr) - && (outer != nullptr) - && inner->IsIn(*outer); -} - -static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) { - HLoopInformation* block_loop = block->GetLoopInformation(); - auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position. - for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) { - HBasicBlock* current = *insert_pos; - HLoopInformation* current_loop = current->GetLoopInformation(); - if (InSameLoop(block_loop, current_loop) - || !IsLoop(current_loop) - || IsInnerLoop(current_loop, block_loop)) { - // The block can be processed immediately. 
- break; - } - } - worklist->insert(insert_pos.base(), block); -} - -static bool IsLinearOrderWellFormed(const HGraph& graph) { - for (HBasicBlock* header : graph.GetBlocks()) { - if (header == nullptr || !header->IsLoopHeader()) { - continue; - } - - HLoopInformation* loop = header->GetLoopInformation(); - size_t num_blocks = loop->GetBlocks().NumSetBits(); - size_t found_blocks = 0u; - - for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) { - HBasicBlock* current = it.Current(); - if (loop->Contains(*current)) { - found_blocks++; - if (found_blocks == 1u && current != header) { - // First block is not the header. - return false; - } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) { - // Last block is not a back edge. - return false; - } - } else if (found_blocks != 0u && found_blocks != num_blocks) { - // Blocks are not adjacent. - return false; - } - } - DCHECK_EQ(found_blocks, num_blocks); - } - - return true; -} - -void SsaLivenessAnalysis::LinearizeGraph() { - // Create a reverse post ordering with the following properties: - // - Blocks in a loop are consecutive, - // - Back-edge is the last block before loop exits. - - // (1): Record the number of forward predecessors for each block. This is to - // ensure the resulting order is reverse post order. We could use the - // current reverse post order in the graph, but it would require making - // order queries to a GrowableArray, which is not the best data structure - // for it. 
- ArenaVector<uint32_t> forward_predecessors(graph_->GetBlocks().size(), - graph_->GetArena()->Adapter(kArenaAllocSsaLiveness)); - for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { - HBasicBlock* block = it.Current(); - size_t number_of_forward_predecessors = block->GetPredecessors().size(); - if (block->IsLoopHeader()) { - number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges(); - } - forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors; - } - - // (2): Following a worklist approach, first start with the entry block, and - // iterate over the successors. When all non-back edge predecessors of a - // successor block are visited, the successor block is added in the worklist - // following an order that satisfies the requirements to build our linear graph. - graph_->linear_order_.reserve(graph_->GetReversePostOrder().size()); - ArenaVector<HBasicBlock*> worklist(graph_->GetArena()->Adapter(kArenaAllocSsaLiveness)); - worklist.push_back(graph_->GetEntryBlock()); - do { - HBasicBlock* current = worklist.back(); - worklist.pop_back(); - graph_->linear_order_.push_back(current); - for (HBasicBlock* successor : current->GetSuccessors()) { - int block_id = successor->GetBlockId(); - size_t number_of_remaining_predecessors = forward_predecessors[block_id]; - if (number_of_remaining_predecessors == 1) { - AddToListForLinearization(&worklist, successor); - } - forward_predecessors[block_id] = number_of_remaining_predecessors - 1; - } - } while (!worklist.empty()); - - DCHECK(graph_->HasIrreducibleLoops() || IsLinearOrderWellFormed(*graph_)); -} - void SsaLivenessAnalysis::NumberInstructions() { int ssa_index = 0; size_t lifetime_position = 0; diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 0be16118b1..b62bf4e5f9 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -1186,12 +1186,6 @@ class 
SsaLivenessAnalysis : public ValueObject { static constexpr const char* kLivenessPassName = "liveness"; private: - // Linearize the graph so that: - // (1): a block is always after its dominator, - // (2): blocks of loops are contiguous. - // This creates a natural and efficient ordering when visualizing live ranges. - void LinearizeGraph(); - // Give an SSA number to each instruction that defines a value used by another instruction, // and setup the lifetime information of each instruction and block. void NumberInstructions(); diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index 3084e6e2b6..ee5811c3c0 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -246,7 +246,7 @@ class Address : public ValueObject { NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback }; - Address(Register rn, int32_t offset = 0, Mode am = Offset) : rn_(rn), rm_(R0), + explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) : rn_(rn), rm_(R0), offset_(offset), am_(am), is_immed_offset_(true), shift_(LSL) { } @@ -763,6 +763,9 @@ class ArmAssembler : public Assembler { virtual void PushList(RegList regs, Condition cond = AL) = 0; virtual void PopList(RegList regs, Condition cond = AL) = 0; + virtual void StoreList(RegList regs, size_t stack_offset) = 0; + virtual void LoadList(RegList regs, size_t stack_offset) = 0; + virtual void Mov(Register rd, Register rm, Condition cond = AL) = 0; // Convenience shift instructions. 
Use mov instruction with shifter operand diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index ebdfc98554..2269ba2d20 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -2018,6 +2018,45 @@ inline size_t Thumb2Assembler::Fixup::IncreaseSize(Size new_size) { return adjustment; } +bool Thumb2Assembler::Fixup::IsCandidateForEmitEarly() const { + DCHECK(size_ == original_size_); + if (target_ == kUnresolved) { + return false; + } + // GetOffset() does not depend on current_code_size for branches, only for literals. + constexpr uint32_t current_code_size = 0u; + switch (GetSize()) { + case kBranch16Bit: + return IsInt(cond_ != AL ? 9 : 12, GetOffset(current_code_size)); + case kBranch32Bit: + // We don't support conditional branches beyond +-1MiB + // or unconditional branches beyond +-16MiB. + return true; + + case kCbxz16Bit: + return IsUint<7>(GetOffset(current_code_size)); + case kCbxz32Bit: + return IsInt<9>(GetOffset(current_code_size)); + case kCbxz48Bit: + // We don't support conditional branches beyond +-1MiB. 
+ return true; + + case kLiteral1KiB: + case kLiteral4KiB: + case kLiteral64KiB: + case kLiteral1MiB: + case kLiteralFar: + case kLiteralAddr1KiB: + case kLiteralAddr4KiB: + case kLiteralAddr64KiB: + case kLiteralAddrFar: + case kLongOrFPLiteral1KiB: + case kLongOrFPLiteral64KiB: + case kLongOrFPLiteralFar: + return false; + } +} + uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size) { uint32_t old_code_size = current_code_size; switch (GetSize()) { @@ -3333,6 +3372,30 @@ void Thumb2Assembler::PopList(RegList regs, Condition cond) { ldm(IA_W, SP, regs, cond); } +void Thumb2Assembler::StoreList(RegList regs, size_t stack_offset) { + DCHECK_NE(regs, 0u); + DCHECK_EQ(regs & (1u << IP), 0u); + if (IsPowerOfTwo(regs)) { + Register reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs))); + str(reg, Address(SP, stack_offset)); + } else { + add(IP, SP, ShifterOperand(stack_offset)); + stm(IA, IP, regs); + } +} + +void Thumb2Assembler::LoadList(RegList regs, size_t stack_offset) { + DCHECK_NE(regs, 0u); + DCHECK_EQ(regs & (1u << IP), 0u); + if (IsPowerOfTwo(regs)) { + Register reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs))); + ldr(reg, Address(SP, stack_offset)); + } else { + Register lowest_reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs))); + add(lowest_reg, SP, ShifterOperand(stack_offset)); + ldm(IA, lowest_reg, regs); + } +} void Thumb2Assembler::Mov(Register rd, Register rm, Condition cond) { if (cond != AL || rd != rm) { @@ -3343,6 +3406,30 @@ void Thumb2Assembler::Mov(Register rd, Register rm, Condition cond) { void Thumb2Assembler::Bind(Label* label) { BindLabel(label, buffer_.Size()); + + // Try to emit some Fixups now to reduce the memory needed during the branch fixup later. + while (!fixups_.empty() && fixups_.back().IsCandidateForEmitEarly()) { + const Fixup& last_fixup = fixups_.back(); + // Fixups are ordered by location, so the candidate can surely be emitted if it is + // a forward branch. 
If it's a backward branch, it may go over any number of other + // fixups. We could check for any number of emit early candidates but we want this + // heuristics to be quick, so check just one. + uint32_t target = last_fixup.GetTarget(); + if (target < last_fixup.GetLocation() && + fixups_.size() >= 2u && + fixups_[fixups_.size() - 2u].GetLocation() >= target) { + const Fixup& prev_fixup = fixups_[fixups_.size() - 2u]; + if (!prev_fixup.IsCandidateForEmitEarly()) { + break; + } + uint32_t min_target = std::min(target, prev_fixup.GetTarget()); + if (fixups_.size() >= 3u && fixups_[fixups_.size() - 3u].GetLocation() >= min_target) { + break; + } + } + last_fixup.Emit(&buffer_, buffer_.Size()); + fixups_.pop_back(); + } } diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h index 13f3becb6d..1c495aa7a7 100644 --- a/compiler/utils/arm/assembler_thumb2.h +++ b/compiler/utils/arm/assembler_thumb2.h @@ -22,11 +22,11 @@ #include <vector> #include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/logging.h" #include "constants_arm.h" #include "utils/arm/managed_register_arm.h" #include "utils/arm/assembler_arm.h" -#include "utils/array_ref.h" #include "offsets.h" namespace art { @@ -293,6 +293,8 @@ class Thumb2Assembler FINAL : public ArmAssembler { void PushList(RegList regs, Condition cond = AL) OVERRIDE; void PopList(RegList regs, Condition cond = AL) OVERRIDE; + void StoreList(RegList regs, size_t stack_offset) OVERRIDE; + void LoadList(RegList regs, size_t stack_offset) OVERRIDE; void Mov(Register rd, Register rm, Condition cond = AL) OVERRIDE; @@ -573,6 +575,10 @@ class Thumb2Assembler FINAL : public ArmAssembler { return location_; } + uint32_t GetTarget() const { + return target_; + } + uint32_t GetAdjustment() const { return adjustment_; } @@ -592,6 +598,11 @@ class Thumb2Assembler FINAL : public ArmAssembler { target_ = target; } + // Branches with bound targets that are in range can be emitted early. 
+ // However, the caller still needs to check if the branch doesn't go over + // another Fixup that's not ready to be emitted. + bool IsCandidateForEmitEarly() const; + // Check if the current size is OK for current location_, target_ and adjustment_. // If not, increase the size. Return the size increase, 0 if unchanged. // If the target if after this Fixup, also add the difference to adjustment_, diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc index a03dd74657..14d29c4f1a 100644 --- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc +++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc @@ -314,11 +314,21 @@ void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst, CHECK(src.IsCoreRegister()) << src; ___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister()); } else if (dst.IsDRegister()) { - CHECK(src.IsDRegister()) << src; - ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister()); + if (src.IsDRegister()) { + ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister()); + } else { + // VMOV Dn, Rlo, Rhi (Dn = {Rlo, Rhi}) + CHECK(src.IsRegisterPair()) << src; + ___ Vmov(dst.AsVIXLDRegister(), src.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairHigh()); + } } else if (dst.IsSRegister()) { - CHECK(src.IsSRegister()) << src; - ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister()); + if (src.IsSRegister()) { + ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister()); + } else { + // VMOV Sn, Rn (Sn = Rn) + CHECK(src.IsCoreRegister()) << src; + ___ Vmov(dst.AsVIXLSRegister(), src.AsVIXLRegister()); + } } else { CHECK(dst.IsRegisterPair()) << dst; CHECK(src.IsRegisterPair()) << src; diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index b616057e79..314ff8cf7a 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -24,6 +24,7 @@ #include "arm/constants_arm.h" #include "base/arena_allocator.h" #include "base/arena_object.h" +#include "base/array_ref.h" 
#include "base/enums.h" #include "base/logging.h" #include "base/macros.h" @@ -33,7 +34,6 @@ #include "memory_region.h" #include "mips/constants_mips.h" #include "offsets.h" -#include "utils/array_ref.h" #include "x86/constants_x86.h" #include "x86_64/constants_x86_64.h" diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc index 60a891d6a2..4c0979e0b7 100644 --- a/compiler/utils/dedupe_set_test.cc +++ b/compiler/utils/dedupe_set_test.cc @@ -20,10 +20,10 @@ #include <cstdio> #include <vector> +#include "base/array_ref.h" #include "dedupe_set-inl.h" #include "gtest/gtest.h" #include "thread-inl.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/utils/intrusive_forward_list.h b/compiler/utils/intrusive_forward_list.h index ec2c08722c..b5fc2f2456 100644 --- a/compiler/utils/intrusive_forward_list.h +++ b/compiler/utils/intrusive_forward_list.h @@ -59,7 +59,7 @@ class IntrusiveForwardListIterator : public std::iterator<std::forward_iterator_ // Conversion from iterator to const_iterator. template <typename OtherT, typename = typename std::enable_if<std::is_same<T, const OtherT>::value>::type> - IntrusiveForwardListIterator(const IntrusiveForwardListIterator<OtherT, HookTraits>& src) + IntrusiveForwardListIterator(const IntrusiveForwardListIterator<OtherT, HookTraits>& src) // NOLINT, implicit : hook_(src.hook_) { } // Iteration. 
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h index 6f45bd62db..0119ae9bfb 100644 --- a/compiler/utils/jni_macro_assembler.h +++ b/compiler/utils/jni_macro_assembler.h @@ -22,12 +22,12 @@ #include "arch/instruction_set.h" #include "base/arena_allocator.h" #include "base/arena_object.h" +#include "base/array_ref.h" #include "base/enums.h" #include "base/logging.h" #include "base/macros.h" #include "managed_register.h" #include "offsets.h" -#include "utils/array_ref.h" namespace art { diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index 4b580b620f..b972c70eb9 100644 --- a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -230,12 +230,14 @@ void MipsAssembler::FinalizeCode() { DsFsmCommitLabel(); SetReorder(false); EmitLiterals(); + ReserveJumpTableSpace(); PromoteBranches(); } void MipsAssembler::FinalizeInstructions(const MemoryRegion& region) { size_t number_of_delayed_adjust_pcs = cfi().NumberOfDelayedAdvancePCs(); EmitBranches(); + EmitJumpTables(); Assembler::FinalizeInstructions(region); PatchCFI(number_of_delayed_adjust_pcs); } @@ -1724,47 +1726,68 @@ void MipsAssembler::Branch::InitShortOrLong(MipsAssembler::Branch::OffsetBits of type_ = (offset_size <= branch_info_[short_type].offset_size) ? 
short_type : long_type; } -void MipsAssembler::Branch::InitializeType(bool is_call, bool is_literal, bool is_r6) { - CHECK_EQ(is_call && is_literal, false); +void MipsAssembler::Branch::InitializeType(Type initial_type, bool is_r6) { OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_); if (is_r6) { // R6 - if (is_literal) { - CHECK(!IsResolved()); - type_ = kR6Literal; - } else if (is_call) { - InitShortOrLong(offset_size, kR6Call, kR6LongCall); - } else { - switch (condition_) { - case kUncond: - InitShortOrLong(offset_size, kR6UncondBranch, kR6LongUncondBranch); - break; - case kCondEQZ: - case kCondNEZ: - // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions. - type_ = (offset_size <= kOffset23) ? kR6CondBranch : kR6LongCondBranch; - break; - default: - InitShortOrLong(offset_size, kR6CondBranch, kR6LongCondBranch); - break; - } + switch (initial_type) { + case kLabel: + CHECK(!IsResolved()); + type_ = kR6Label; + break; + case kLiteral: + CHECK(!IsResolved()); + type_ = kR6Literal; + break; + case kCall: + InitShortOrLong(offset_size, kR6Call, kR6LongCall); + break; + case kCondBranch: + switch (condition_) { + case kUncond: + InitShortOrLong(offset_size, kR6UncondBranch, kR6LongUncondBranch); + break; + case kCondEQZ: + case kCondNEZ: + // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions. + type_ = (offset_size <= kOffset23) ? 
kR6CondBranch : kR6LongCondBranch; + break; + default: + InitShortOrLong(offset_size, kR6CondBranch, kR6LongCondBranch); + break; + } + break; + default: + LOG(FATAL) << "Unexpected branch type " << initial_type; + UNREACHABLE(); } } else { // R2 - if (is_literal) { - CHECK(!IsResolved()); - type_ = kLiteral; - } else if (is_call) { - InitShortOrLong(offset_size, kCall, kLongCall); - } else { - switch (condition_) { - case kUncond: - InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch); - break; - default: - InitShortOrLong(offset_size, kCondBranch, kLongCondBranch); - break; - } + switch (initial_type) { + case kLabel: + CHECK(!IsResolved()); + type_ = kLabel; + break; + case kLiteral: + CHECK(!IsResolved()); + type_ = kLiteral; + break; + case kCall: + InitShortOrLong(offset_size, kCall, kLongCall); + break; + case kCondBranch: + switch (condition_) { + case kUncond: + InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch); + break; + default: + InitShortOrLong(offset_size, kCondBranch, kLongCondBranch); + break; + } + break; + default: + LOG(FATAL) << "Unexpected branch type " << initial_type; + UNREACHABLE(); } } old_type_ = type_; @@ -1804,7 +1827,7 @@ MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, uint32_t target, bo rhs_reg_(0), condition_(kUncond), delayed_instruction_(kUnfilledDelaySlot) { - InitializeType(is_call, /* is_literal */ false, is_r6); + InitializeType((is_call ? kCall : kCondBranch), is_r6); } MipsAssembler::Branch::Branch(bool is_r6, @@ -1862,10 +1885,14 @@ MipsAssembler::Branch::Branch(bool is_r6, // Branch condition is always true, make the branch unconditional. 
condition_ = kUncond; } - InitializeType(/* is_call */ false, /* is_literal */ false, is_r6); + InitializeType(kCondBranch, is_r6); } -MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, Register dest_reg, Register base_reg) +MipsAssembler::Branch::Branch(bool is_r6, + uint32_t location, + Register dest_reg, + Register base_reg, + Type label_or_literal_type) : old_location_(location), location_(location), target_(kUnresolved), @@ -1879,7 +1906,7 @@ MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, Register dest_reg, } else { CHECK_NE(base_reg, ZERO); } - InitializeType(/* is_call */ false, /* is_literal */ true, is_r6); + InitializeType(label_or_literal_type, is_r6); } MipsAssembler::BranchCondition MipsAssembler::Branch::OppositeCondition( @@ -2007,12 +2034,16 @@ bool MipsAssembler::Branch::IsLong() const { case kUncondBranch: case kCondBranch: case kCall: + // R2 near label. + case kLabel: // R2 near literal. case kLiteral: // R6 short branches. case kR6UncondBranch: case kR6CondBranch: case kR6Call: + // R6 near label. + case kR6Label: // R6 near literal. case kR6Literal: return false; @@ -2020,12 +2051,16 @@ bool MipsAssembler::Branch::IsLong() const { case kLongUncondBranch: case kLongCondBranch: case kLongCall: + // R2 far label. + case kFarLabel: // R2 far literal. case kFarLiteral: // R6 long branches. case kR6LongUncondBranch: case kR6LongCondBranch: case kR6LongCall: + // R6 far label. + case kR6FarLabel: // R6 far literal. case kR6FarLiteral: return true; @@ -2096,6 +2131,10 @@ void MipsAssembler::Branch::PromoteToLong() { case kCall: type_ = kLongCall; break; + // R2 near label. + case kLabel: + type_ = kFarLabel; + break; // R2 near literal. case kLiteral: type_ = kFarLiteral; @@ -2110,6 +2149,10 @@ void MipsAssembler::Branch::PromoteToLong() { case kR6Call: type_ = kR6LongCall; break; + // R6 near label. + case kR6Label: + type_ = kR6FarLabel; + break; // R6 near literal. 
case kR6Literal: type_ = kR6FarLiteral; @@ -2123,6 +2166,8 @@ void MipsAssembler::Branch::PromoteToLong() { uint32_t MipsAssembler::GetBranchLocationOrPcRelBase(const MipsAssembler::Branch* branch) const { switch (branch->GetType()) { + case Branch::kLabel: + case Branch::kFarLabel: case Branch::kLiteral: case Branch::kFarLiteral: return GetLabelLocation(&pc_rel_base_label_); @@ -2132,7 +2177,7 @@ uint32_t MipsAssembler::GetBranchLocationOrPcRelBase(const MipsAssembler::Branch } uint32_t MipsAssembler::Branch::PromoteIfNeeded(uint32_t location, uint32_t max_short_distance) { - // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 literals or + // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 labels/literals or // `this->GetLocation()` for everything else. // If the branch is still unresolved or already long, nothing to do. if (IsLong() || !IsResolved()) { @@ -2170,6 +2215,8 @@ uint32_t MipsAssembler::Branch::GetOffsetLocation() const { uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const { switch (branch->GetType()) { + case Branch::kLabel: + case Branch::kFarLabel: case Branch::kLiteral: case Branch::kFarLiteral: return GetLabelLocation(&pc_rel_base_label_); @@ -2180,7 +2227,7 @@ uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Bra } uint32_t MipsAssembler::Branch::GetOffset(uint32_t location) const { - // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 literals or + // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 labels/literals or // `this->GetOffsetLocation() + branch_info_[this->GetType()].pc_org * sizeof(uint32_t)` // for everything else. 
CHECK(IsResolved()); @@ -2457,6 +2504,13 @@ void MipsAssembler::Call(MipsLabel* label) { FinalizeLabeledBranch(label); } +void MipsAssembler::LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label) { + // Label address loads are treated as pseudo branches since they require very similar handling. + DCHECK(!label->IsBound()); + branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLabel); + FinalizeLabeledBranch(label); +} + Literal* MipsAssembler::NewLiteral(size_t size, const uint8_t* data) { DCHECK(size == 4u || size == 8u) << size; literals_.emplace_back(size, data); @@ -2468,13 +2522,17 @@ void MipsAssembler::LoadLiteral(Register dest_reg, Register base_reg, Literal* l DCHECK_EQ(literal->GetSize(), 4u); MipsLabel* label = literal->GetLabel(); DCHECK(!label->IsBound()); - branches_.emplace_back(IsR6(), - buffer_.Size(), - dest_reg, - base_reg); + branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLiteral); FinalizeLabeledBranch(label); } +JumpTable* MipsAssembler::CreateJumpTable(std::vector<MipsLabel*>&& labels) { + jump_tables_.emplace_back(std::move(labels)); + JumpTable* table = &jump_tables_.back(); + DCHECK(!table->GetLabel()->IsBound()); + return table; +} + void MipsAssembler::EmitLiterals() { if (!literals_.empty()) { // We don't support byte and half-word literals. @@ -2491,6 +2549,60 @@ void MipsAssembler::EmitLiterals() { } } +void MipsAssembler::ReserveJumpTableSpace() { + if (!jump_tables_.empty()) { + for (JumpTable& table : jump_tables_) { + MipsLabel* label = table.GetLabel(); + Bind(label); + + // Bulk ensure capacity, as this may be large. 
+ size_t orig_size = buffer_.Size(); + size_t required_capacity = orig_size + table.GetSize(); + if (required_capacity > buffer_.Capacity()) { + buffer_.ExtendCapacity(required_capacity); + } +#ifndef NDEBUG + buffer_.has_ensured_capacity_ = true; +#endif + + // Fill the space with dummy data as the data is not final + // until the branches have been promoted. And we shouldn't + // be moving uninitialized data during branch promotion. + for (size_t cnt = table.GetData().size(), i = 0; i < cnt; i++) { + buffer_.Emit<uint32_t>(0x1abe1234u); + } + +#ifndef NDEBUG + buffer_.has_ensured_capacity_ = false; +#endif + } + } +} + +void MipsAssembler::EmitJumpTables() { + if (!jump_tables_.empty()) { + CHECK(!overwriting_); + // Switch from appending instructions at the end of the buffer to overwriting + // existing instructions (here, jump tables) in the buffer. + overwriting_ = true; + + for (JumpTable& table : jump_tables_) { + MipsLabel* table_label = table.GetLabel(); + uint32_t start = GetLabelLocation(table_label); + overwrite_location_ = start; + + for (MipsLabel* target : table.GetData()) { + CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u); + // The table will contain target addresses relative to the table start. + uint32_t offset = GetLabelLocation(target) - start; + Emit(offset); + } + } + + overwriting_ = false; + } +} + void MipsAssembler::PromoteBranches() { // Promote short branches to long as necessary. bool changed; @@ -2539,12 +2651,16 @@ const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] = { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kUncondBranch { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kCondBranch { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kCall + // R2 near label. + { 1, 0, 0, MipsAssembler::Branch::kOffset16, 0 }, // kLabel // R2 near literal. { 1, 0, 0, MipsAssembler::Branch::kOffset16, 0 }, // kLiteral // R2 long branches. 
{ 9, 3, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongUncondBranch { 10, 4, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCondBranch { 6, 1, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCall + // R2 far label. + { 3, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kFarLabel // R2 far literal. { 3, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kFarLiteral // R6 short branches. @@ -2552,12 +2668,16 @@ const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] = { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kR6CondBranch // Exception: kOffset23 for beqzc/bnezc. { 1, 0, 1, MipsAssembler::Branch::kOffset28, 2 }, // kR6Call + // R6 near label. + { 1, 0, 0, MipsAssembler::Branch::kOffset21, 2 }, // kR6Label // R6 near literal. { 1, 0, 0, MipsAssembler::Branch::kOffset21, 2 }, // kR6Literal // R6 long branches. { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongUncondBranch { 3, 1, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCondBranch { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCall + // R6 far label. + { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6FarLabel // R6 far literal. { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6FarLiteral }; @@ -2614,6 +2734,12 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) { Emit(delayed_instruction); break; + // R2 near label. + case Branch::kLabel: + DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); + CHECK_EQ(overwrite_location_, branch->GetOffsetLocation()); + Addiu(lhs, rhs, offset); + break; // R2 near literal. case Branch::kLiteral: DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); @@ -2691,6 +2817,14 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) { Nop(); break; + // R2 far label. 
+ case Branch::kFarLabel: + DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); + CHECK_EQ(overwrite_location_, branch->GetOffsetLocation()); + Lui(AT, High16Bits(offset)); + Ori(AT, AT, Low16Bits(offset)); + Addu(lhs, AT, rhs); + break; // R2 far literal. case Branch::kFarLiteral: DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); @@ -2725,6 +2859,12 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) { Balc(offset); break; + // R6 near label. + case Branch::kR6Label: + DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); + CHECK_EQ(overwrite_location_, branch->GetOffsetLocation()); + Addiupc(lhs, offset); + break; // R6 near literal. case Branch::kR6Literal: DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); @@ -2759,6 +2899,14 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) { Jialc(AT, Low16Bits(offset)); break; + // R6 far label. + case Branch::kR6FarLabel: + DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); + offset += (offset & 0x8000) << 1; // Account for sign extension in addiu. + CHECK_EQ(overwrite_location_, branch->GetOffsetLocation()); + Auipc(AT, High16Bits(offset)); + Addiu(lhs, AT, Low16Bits(offset)); + break; // R6 far literal. case Branch::kR6FarLiteral: DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot); diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index d50c439418..e1255f7f23 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -126,6 +126,36 @@ class Literal { DISALLOW_COPY_AND_ASSIGN(Literal); }; +// Jump table: table of labels emitted after the literals. Similar to literals. 
+class JumpTable { + public: + explicit JumpTable(std::vector<MipsLabel*>&& labels) + : label_(), labels_(std::move(labels)) { + } + + uint32_t GetSize() const { + return static_cast<uint32_t>(labels_.size()) * sizeof(uint32_t); + } + + const std::vector<MipsLabel*>& GetData() const { + return labels_; + } + + MipsLabel* GetLabel() { + return &label_; + } + + const MipsLabel* GetLabel() const { + return &label_; + } + + private: + MipsLabel label_; + std::vector<MipsLabel*> labels_; + + DISALLOW_COPY_AND_ASSIGN(JumpTable); +}; + // Slowpath entered when Thread::Current()->_exception is non-null. class MipsExceptionSlowPath { public: @@ -158,6 +188,7 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi ds_fsm_state_(kExpectingLabel), ds_fsm_target_pc_(0), literals_(arena->Adapter(kArenaAllocAssembler)), + jump_tables_(arena->Adapter(kArenaAllocAssembler)), last_position_adjustment_(0), last_old_position_(0), last_branch_id_(0), @@ -465,46 +496,61 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi public: template <typename ImplicitNullChecker = NoImplicitNullChecker> - void StoreConst32ToOffset(int32_t value, - Register base, - int32_t offset, - Register temp, - ImplicitNullChecker null_checker = NoImplicitNullChecker()) { + void StoreConstToOffset(StoreOperandType type, + int64_t value, + Register base, + int32_t offset, + Register temp, + ImplicitNullChecker null_checker = NoImplicitNullChecker()) { + // We permit `base` and `temp` to coincide (however, we check that neither is AT), + // in which case the `base` register may be overwritten in the process. CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base. 
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false); - if (value == 0) { - temp = ZERO; - } else { - LoadConst32(temp, value); - } - Sw(temp, base, offset); - null_checker(); - } - - template <typename ImplicitNullChecker = NoImplicitNullChecker> - void StoreConst64ToOffset(int64_t value, - Register base, - int32_t offset, - Register temp, - ImplicitNullChecker null_checker = NoImplicitNullChecker()) { - CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base. - AdjustBaseAndOffset(base, offset, /* is_doubleword */ true); + AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword)); uint32_t low = Low32Bits(value); uint32_t high = High32Bits(value); + Register reg; + // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp` + // to load and hold the value but we can use AT instead as AT hasn't been used yet. + // Otherwise, `temp` can be used for the value. And if `temp` is the same as the + // original `base` (that is, `base` prior to the adjustment), the original `base` + // register will be overwritten. 
+ if (base == temp) { + temp = AT; + } if (low == 0) { - Sw(ZERO, base, offset); + reg = ZERO; } else { - LoadConst32(temp, low); - Sw(temp, base, offset); + reg = temp; + LoadConst32(reg, low); } - null_checker(); - if (high == 0) { - Sw(ZERO, base, offset + kMipsWordSize); - } else { - if (high != low) { - LoadConst32(temp, high); - } - Sw(temp, base, offset + kMipsWordSize); + switch (type) { + case kStoreByte: + Sb(reg, base, offset); + break; + case kStoreHalfword: + Sh(reg, base, offset); + break; + case kStoreWord: + Sw(reg, base, offset); + break; + case kStoreDoubleword: + Sw(reg, base, offset); + null_checker(); + if (high == 0) { + reg = ZERO; + } else { + reg = temp; + if (high != low) { + LoadConst32(reg, high); + } + } + Sw(reg, base, offset + kMipsWordSize); + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } + if (type != kStoreDoubleword) { + null_checker(); } } @@ -685,6 +731,11 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value)); } + // Load label address using the base register (for R2 only) or using PC-relative loads + // (for R6 only; base_reg must be ZERO). To be used with data labels in the literal / + // jump table area only and not with regular code labels. + void LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label); + // Create a new literal with the given data. Literal* NewLiteral(size_t size, const uint8_t* data); @@ -692,6 +743,12 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi // (for R6 only; base_reg must be ZERO). void LoadLiteral(Register dest_reg, Register base_reg, Literal* literal); + // Create a jump table for the given labels that will be emitted when finalizing. + // When the table is emitted, offsets will be relative to the location of the table. 
+ // The table location is determined by the location of its label (the label precedes + // the table data) and should be loaded using LoadLabelAddress(). + JumpTable* CreateJumpTable(std::vector<MipsLabel*>&& labels); + // // Overridden common assembler high-level functionality. // @@ -935,24 +992,32 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi kUncondBranch, kCondBranch, kCall, + // R2 near label. + kLabel, // R2 near literal. kLiteral, // R2 long branches. kLongUncondBranch, kLongCondBranch, kLongCall, + // R2 far label. + kFarLabel, // R2 far literal. kFarLiteral, // R6 short branches. kR6UncondBranch, kR6CondBranch, kR6Call, + // R6 near label. + kR6Label, // R6 near literal. kR6Literal, // R6 long branches. kR6LongUncondBranch, kR6LongCondBranch, kR6LongCall, + // R6 far label. + kR6FarLabel, // R6 far literal. kR6FarLiteral, }; @@ -1009,8 +1074,12 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi BranchCondition condition, Register lhs_reg, Register rhs_reg); - // Literal. - Branch(bool is_r6, uint32_t location, Register dest_reg, Register base_reg); + // Label address (in literal area) or literal. + Branch(bool is_r6, + uint32_t location, + Register dest_reg, + Register base_reg, + Type label_or_literal_type); // Some conditional branches with lhs = rhs are effectively NOPs, while some // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs. @@ -1105,7 +1174,7 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi private: // Completes branch construction by determining and recording its type. - void InitializeType(bool is_call, bool is_literal, bool is_r6); + void InitializeType(Type initial_type, bool is_r6); // Helper for the above. 
void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type); @@ -1178,6 +1247,8 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi uint32_t GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const; void EmitLiterals(); + void ReserveJumpTableSpace(); + void EmitJumpTables(); void PromoteBranches(); void EmitBranch(Branch* branch); void EmitBranches(); @@ -1227,6 +1298,9 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi // without invalidating pointers and references to existing elements. ArenaDeque<Literal> literals_; + // Jump table list. + ArenaDeque<JumpTable> jump_tables_; + // There's no PC-relative addressing on MIPS32R2. So, in order to access literals relative to PC // we get PC using the NAL instruction. This label marks the position within the assembler buffer // that PC (from NAL) points to. diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc index fabb0962fb..750a94df02 100644 --- a/compiler/utils/mips/assembler_mips32r6_test.cc +++ b/compiler/utils/mips/assembler_mips32r6_test.cc @@ -309,6 +309,12 @@ TEST_F(AssemblerMIPS32r6Test, Lwpc) { DriverStr(RepeatRIb(&mips::MipsAssembler::Lwpc, 19, code), "Lwpc"); } +TEST_F(AssemblerMIPS32r6Test, Addiupc) { + // The comment from the Lwpc() test applies to this Addiupc() test as well. 
+ const char* code = ".set imm, {imm}\naddiupc ${reg}, (imm - ((imm & 0x40000) << 1)) << 2"; + DriverStr(RepeatRIb(&mips::MipsAssembler::Addiupc, 19, code), "Addiupc"); +} + TEST_F(AssemblerMIPS32r6Test, Bitswap) { DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap"); } @@ -635,6 +641,40 @@ TEST_F(AssemblerMIPS32r6Test, StoreDToOffset) { DriverStr(expected, "StoreDToOffset"); } +TEST_F(AssemblerMIPS32r6Test, LoadFarthestNearLabelAddress) { + mips::MipsLabel label; + __ LoadLabelAddress(mips::V0, mips::ZERO, &label); + constexpr size_t kAdduCount = 0x3FFDE; + for (size_t i = 0; i != kAdduCount; ++i) { + __ Addu(mips::ZERO, mips::ZERO, mips::ZERO); + } + __ Bind(&label); + + std::string expected = + "lapc $v0, 1f\n" + + RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") + + "1:\n"; + DriverStr(expected, "LoadFarthestNearLabelAddress"); +} + +TEST_F(AssemblerMIPS32r6Test, LoadNearestFarLabelAddress) { + mips::MipsLabel label; + __ LoadLabelAddress(mips::V0, mips::ZERO, &label); + constexpr size_t kAdduCount = 0x3FFDF; + for (size_t i = 0; i != kAdduCount; ++i) { + __ Addu(mips::ZERO, mips::ZERO, mips::ZERO); + } + __ Bind(&label); + + std::string expected = + "1:\n" + "auipc $at, %hi(2f - 1b)\n" + "addiu $v0, $at, %lo(2f - 1b)\n" + + RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") + + "2:\n"; + DriverStr(expected, "LoadNearestFarLabelAddress"); +} + TEST_F(AssemblerMIPS32r6Test, LoadFarthestNearLiteral) { mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678); __ LoadLiteral(mips::V0, mips::ZERO, literal); @@ -811,8 +851,7 @@ TEST_F(AssemblerMIPS32r6Test, LongBranchReorder) { DriverStr(expected, "LongBeqc"); } -// TODO: MipsAssembler::Addiupc -// MipsAssembler::Bc +// TODO: MipsAssembler::Bc // MipsAssembler::Jic // MipsAssembler::Jialc // MipsAssembler::Bltc diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc index 708bc3d50d..a9abf2f86e 100644 --- 
a/compiler/utils/mips/assembler_mips_test.cc +++ b/compiler/utils/mips/assembler_mips_test.cc @@ -1977,6 +1977,85 @@ TEST_F(AssemblerMIPSTest, StoreDToOffset) { DriverStr(expected, "StoreDToOffset"); } +TEST_F(AssemblerMIPSTest, StoreConstToOffset) { + __ StoreConstToOffset(mips::kStoreByte, 0xFF, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreHalfword, 0xFFFF, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreDoubleword, 0x123456789ABCDEF0, mips::A1, +0, mips::T8); + + __ StoreConstToOffset(mips::kStoreByte, 0, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreHalfword, 0, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreDoubleword, 0, mips::A1, +0, mips::T8); + + __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567812345678, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567800000000, mips::A1, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreDoubleword, 0x0000000012345678, mips::A1, +0, mips::T8); + + __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, +0, mips::T8); + __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0, mips::T8); + + __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, -0xFFF0, mips::T8); + __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0xFFF0, mips::T8); + + __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, -0xFFF0, mips::T8); + __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0xFFF0, mips::T8); + + const char* expected = + "ori $t8, $zero, 0xFF\n" + "sb $t8, 0($a1)\n" + "ori $t8, $zero, 0xFFFF\n" + "sh $t8, 0($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 0($a1)\n" + "lui $t8, 0x9ABC\n" + "ori $t8, $t8, 0xDEF0\n" + "sw $t8, 0($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 4($a1)\n" + + "sb $zero, 0($a1)\n" + "sh $zero, 
0($a1)\n" + "sw $zero, 0($a1)\n" + "sw $zero, 0($a1)\n" + "sw $zero, 4($a1)\n" + + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 0($a1)\n" + "sw $t8, 4($a1)\n" + "sw $zero, 0($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 4($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 0($a1)\n" + "sw $zero, 4($a1)\n" + + "sw $zero, 0($t8)\n" + "lui $at, 0x1234\n" + "ori $at, $at, 0x5678\n" + "sw $at, 0($t8)\n" + + "addiu $at, $a1, -0x7FF8\n" + "sw $zero, -0x7FF8($at)\n" + "addiu $at, $a1, 0x7FF8\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 0x7FF8($at)\n" + + "addiu $at, $t8, -0x7FF8\n" + "sw $zero, -0x7FF8($at)\n" + "addiu $at, $t8, 0x7FF8\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sw $t8, 0x7FF8($at)\n"; + DriverStr(expected, "StoreConstToOffset"); +} + TEST_F(AssemblerMIPSTest, B) { mips::MipsLabel label1, label2; __ B(&label1); @@ -2307,6 +2386,44 @@ TEST_F(AssemblerMIPSTest, LoadConst32) { DriverStr(expected, "LoadConst32"); } +TEST_F(AssemblerMIPSTest, LoadFarthestNearLabelAddress) { + mips::MipsLabel label; + __ BindPcRelBaseLabel(); + __ LoadLabelAddress(mips::V0, mips::V1, &label); + constexpr size_t kAddiuCount = 0x1FDE; + for (size_t i = 0; i != kAddiuCount; ++i) { + __ Addiu(mips::A0, mips::A1, 0); + } + __ Bind(&label); + + std::string expected = + "1:\n" + "addiu $v0, $v1, %lo(2f - 1b)\n" + + RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") + + "2:\n"; + DriverStr(expected, "LoadFarthestNearLabelAddress"); +} + +TEST_F(AssemblerMIPSTest, LoadNearestFarLabelAddress) { + mips::MipsLabel label; + __ BindPcRelBaseLabel(); + __ LoadLabelAddress(mips::V0, mips::V1, &label); + constexpr size_t kAdduCount = 0x1FDF; + for (size_t i = 0; i != kAdduCount; ++i) { + __ Addu(mips::ZERO, mips::ZERO, mips::ZERO); + } + __ Bind(&label); + + std::string expected = + "1:\n" + "lui $at, %hi(2f - 1b)\n" + "ori $at, $at, %lo(2f - 1b)\n" + "addu $v0, $at, $v1\n" + + RepeatInsn(kAdduCount, "addu 
$zero, $zero, $zero\n") + + "2:\n"; + DriverStr(expected, "LoadNearestFarLabelAddress"); +} + TEST_F(AssemblerMIPSTest, LoadFarthestNearLiteral) { mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678); __ BindPcRelBaseLabel(); diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h index bf06675d72..9600907278 100644 --- a/compiler/utils/swap_space.h +++ b/compiler/utils/swap_space.h @@ -114,7 +114,8 @@ class SwapAllocator<void> { explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {} template <typename U> - SwapAllocator(const SwapAllocator<U>& other) : swap_space_(other.swap_space_) {} + SwapAllocator(const SwapAllocator<U>& other) // NOLINT, implicit + : swap_space_(other.swap_space_) {} SwapAllocator(const SwapAllocator& other) = default; SwapAllocator& operator=(const SwapAllocator& other) = default; @@ -149,7 +150,8 @@ class SwapAllocator { explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {} template <typename U> - SwapAllocator(const SwapAllocator<U>& other) : swap_space_(other.swap_space_) {} + SwapAllocator(const SwapAllocator<U>& other) // NOLINT, implicit + : swap_space_(other.swap_space_) {} SwapAllocator(const SwapAllocator& other) = default; SwapAllocator& operator=(const SwapAllocator& other) = default; diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index 9738784d45..114986b3e7 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -20,6 +20,7 @@ #include <vector> #include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/bit_utils.h" #include "base/enums.h" #include "base/macros.h" @@ -27,7 +28,6 @@ #include "globals.h" #include "managed_register_x86.h" #include "offsets.h" -#include "utils/array_ref.h" #include "utils/assembler.h" namespace art { diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h index 3f07ede865..015584cbc1 100644 --- 
a/compiler/utils/x86/jni_macro_assembler_x86.h +++ b/compiler/utils/x86/jni_macro_assembler_x86.h @@ -21,10 +21,10 @@ #include "assembler_x86.h" #include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/enums.h" #include "base/macros.h" #include "offsets.h" -#include "utils/array_ref.h" #include "utils/jni_macro_assembler.h" namespace art { diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index fdd3aa9317..acad86d161 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -20,13 +20,13 @@ #include <vector> #include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/bit_utils.h" #include "base/macros.h" #include "constants_x86_64.h" #include "globals.h" #include "managed_register_x86_64.h" #include "offsets.h" -#include "utils/array_ref.h" #include "utils/assembler.h" #include "utils/jni_macro_assembler.h" diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h index cc4e57c999..9107f3c422 100644 --- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h +++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h @@ -21,10 +21,10 @@ #include "assembler_x86_64.h" #include "base/arena_containers.h" +#include "base/array_ref.h" #include "base/enums.h" #include "base/macros.h" #include "offsets.h" -#include "utils/array_ref.h" #include "utils/assembler.h" #include "utils/jni_macro_assembler.h" diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp index 43df6ae6b1..11c18b0924 100644 --- a/dex2oat/Android.bp +++ b/dex2oat/Android.bp @@ -120,3 +120,11 @@ art_cc_binary { "libvixld-arm64", ] + art_static_dependencies, } + +art_cc_test { + name: "art_dex2oat_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["dex2oat_test.cc"], +} diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 1dd91321c5..d99d2d615e 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -77,6 
+77,7 @@ #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "utils.h" +#include "verifier/verifier_deps.h" #include "well_known_classes.h" #include "zip_archive.h" @@ -1472,6 +1473,12 @@ class Dex2Oat FINAL { dex_files_ = MakeNonOwningPointerVector(opened_dex_files_); + if (!IsBootImage()) { + // Collect verification dependencies when compiling an app. + verifier_deps_.reset(new verifier::VerifierDeps(dex_files_)); + callbacks_->SetVerifierDeps(verifier_deps_.get()); + } + // We had to postpone the swap decision till now, as this is the point when we actually // know about the dex files we're going to use. @@ -2626,6 +2633,9 @@ class Dex2Oat FINAL { std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_; std::unordered_map<const DexFile*, size_t> dex_file_oat_index_map_; + // Collector of verifier dependencies. + std::unique_ptr<verifier::VerifierDeps> verifier_deps_; + // Backing storage. std::vector<std::string> char_backing_storage_; @@ -2782,6 +2792,10 @@ static int dex2oat(int argc, char** argv) { return EXIT_FAILURE; } + // Helps debugging on device. Can be used to determine which dalvikvm instance invoked a dex2oat + // instance. Used by tools/bisection_search/bisection_search.py. 
+ VLOG(compiler) << "Running dex2oat (parent PID = " << getppid() << ")"; + bool result; if (dex2oat->IsImage()) { result = CompileImage(*dex2oat); diff --git a/dexdump/Android.bp b/dexdump/Android.bp index e77f809d4b..64f2299fa5 100644 --- a/dexdump/Android.bp +++ b/dexdump/Android.bp @@ -24,3 +24,11 @@ art_cc_binary { cflags: ["-Wall"], shared_libs: ["libart"], } + +art_cc_test { + name: "art_dexdump_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["dexdump_test.cc"], +} diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc index f716ba8be7..5c032a0bb6 100644 --- a/dexdump/dexdump_main.cc +++ b/dexdump/dexdump_main.cc @@ -28,8 +28,8 @@ #include <string.h> #include <unistd.h> +#include "base/logging.h" #include "mem_map.h" -#include "runtime.h" namespace art { diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc index 9819233a32..d28ca2834e 100644 --- a/dexdump/dexdump_test.cc +++ b/dexdump/dexdump_test.cc @@ -24,8 +24,6 @@ #include "base/stringprintf.h" #include "common_runtime_test.h" #include "runtime/arch/instruction_set.h" -#include "runtime/gc/heap.h" -#include "runtime/gc/space/image_space.h" #include "runtime/os.h" #include "runtime/utils.h" #include "utils.h" diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp index 852f6c27a4..c411572237 100644 --- a/dexlayout/Android.bp +++ b/dexlayout/Android.bp @@ -18,8 +18,17 @@ art_cc_binary { srcs: [ "dexlayout_main.cc", "dexlayout.cc", + "dex_ir.cc", "dex_ir_builder.cc", ], cflags: ["-Wall"], shared_libs: ["libart"], } + +art_cc_test { + name: "art_dexlayout_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["dexlayout_test.cc"], +} diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc new file mode 100644 index 0000000000..aff03cd6ea --- /dev/null +++ b/dexlayout/dex_ir.cc @@ -0,0 +1,487 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Implementation file of the dexlayout utility. + * + * This is a tool to read dex files into an internal representation, + * reorganize the representation, and emit dex files with a better + * file layout. + */ + +#include "dex_ir.h" +#include "dex_ir_builder.h" + +namespace art { +namespace dex_ir { + +static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) { + uint64_t value = 0; + for (uint32_t i = 0; i <= length; i++) { + value |= static_cast<uint64_t>(*(*data)++) << (i * 8); + } + if (sign_extend) { + int shift = (7 - length) * 8; + return (static_cast<int64_t>(value) << shift) >> shift; + } + return value; +} + +static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) { + DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context); + PositionInfoVector& positions = debug_info->GetPositionInfo(); + positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_))); + return false; +} + +static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) { + DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context); + LocalInfoVector& locals = debug_info->GetLocalInfo(); + const char* name = entry.name_ != nullptr ? entry.name_ : "(null)"; + const char* signature = entry.signature_ != nullptr ? 
entry.signature_ : ""; + locals.push_back(std::unique_ptr<LocalInfo>( + new LocalInfo(name, entry.descriptor_, signature, entry.start_address_, + entry.end_address_, entry.reg_))); +} + +EncodedValue* Collections::ReadEncodedValue(const uint8_t** data) { + const uint8_t encoded_value = *(*data)++; + const uint8_t type = encoded_value & 0x1f; + EncodedValue* item = new EncodedValue(type); + ReadEncodedValue(data, type, encoded_value >> 5, item); + return item; +} + +EncodedValue* Collections::ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length) { + EncodedValue* item = new EncodedValue(type); + ReadEncodedValue(data, type, length, item); + return item; +} + +void Collections::ReadEncodedValue( + const uint8_t** data, uint8_t type, uint8_t length, EncodedValue* item) { + switch (type) { + case DexFile::kDexAnnotationByte: + item->SetByte(static_cast<int8_t>(ReadVarWidth(data, length, false))); + break; + case DexFile::kDexAnnotationShort: + item->SetShort(static_cast<int16_t>(ReadVarWidth(data, length, true))); + break; + case DexFile::kDexAnnotationChar: + item->SetChar(static_cast<uint16_t>(ReadVarWidth(data, length, false))); + break; + case DexFile::kDexAnnotationInt: + item->SetInt(static_cast<int32_t>(ReadVarWidth(data, length, true))); + break; + case DexFile::kDexAnnotationLong: + item->SetLong(static_cast<int64_t>(ReadVarWidth(data, length, true))); + break; + case DexFile::kDexAnnotationFloat: { + // Fill on right. + union { + float f; + uint32_t data; + } conv; + conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8; + item->SetFloat(conv.f); + break; + } + case DexFile::kDexAnnotationDouble: { + // Fill on right. 
+ union { + double d; + uint64_t data; + } conv; + conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8; + item->SetDouble(conv.d); + break; + } + case DexFile::kDexAnnotationString: { + const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); + item->SetStringId(GetStringId(string_index)); + break; + } + case DexFile::kDexAnnotationType: { + const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); + item->SetTypeId(GetTypeId(string_index)); + break; + } + case DexFile::kDexAnnotationField: + case DexFile::kDexAnnotationEnum: { + const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); + item->SetFieldId(GetFieldId(field_index)); + break; + } + case DexFile::kDexAnnotationMethod: { + const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); + item->SetMethodId(GetMethodId(method_index)); + break; + } + case DexFile::kDexAnnotationArray: { + EncodedValueVector* values = new EncodedValueVector(); + const uint32_t size = DecodeUnsignedLeb128(data); + // Decode all elements. + for (uint32_t i = 0; i < size; i++) { + values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(data))); + } + item->SetEncodedArray(new EncodedArrayItem(values)); + break; + } + case DexFile::kDexAnnotationAnnotation: { + AnnotationElementVector* elements = new AnnotationElementVector(); + const uint32_t type_idx = DecodeUnsignedLeb128(data); + const uint32_t size = DecodeUnsignedLeb128(data); + // Decode all name=value pairs. 
+ for (uint32_t i = 0; i < size; i++) { + const uint32_t name_index = DecodeUnsignedLeb128(data); + elements->push_back(std::unique_ptr<AnnotationElement>( + new AnnotationElement(GetStringId(name_index), ReadEncodedValue(data)))); + } + item->SetEncodedAnnotation(new EncodedAnnotation(GetTypeId(type_idx), elements)); + break; + } + case DexFile::kDexAnnotationNull: + break; + case DexFile::kDexAnnotationBoolean: + item->SetBoolean(length != 0); + break; + default: + break; + } +} + +void Collections::CreateStringId(const DexFile& dex_file, uint32_t i) { + const DexFile::StringId& disk_string_id = dex_file.GetStringId(i); + StringData* string_data = new StringData(dex_file.GetStringData(disk_string_id)); + string_datas_.AddItem(string_data, disk_string_id.string_data_off_); + + StringId* string_id = new StringId(string_data); + string_ids_.AddIndexedItem(string_id, StringIdsOffset() + i * StringId::ItemSize(), i); +} + +void Collections::CreateTypeId(const DexFile& dex_file, uint32_t i) { + const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(i); + TypeId* type_id = new TypeId(GetStringId(disk_type_id.descriptor_idx_)); + type_ids_.AddIndexedItem(type_id, TypeIdsOffset() + i * TypeId::ItemSize(), i); +} + +void Collections::CreateProtoId(const DexFile& dex_file, uint32_t i) { + const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(i); + const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id); + TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_, true); + + ProtoId* proto_id = new ProtoId(GetStringId(disk_proto_id.shorty_idx_), + GetTypeId(disk_proto_id.return_type_idx_), + parameter_type_list); + proto_ids_.AddIndexedItem(proto_id, ProtoIdsOffset() + i * ProtoId::ItemSize(), i); +} + +void Collections::CreateFieldId(const DexFile& dex_file, uint32_t i) { + const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i); + FieldId* field_id = new FieldId(GetTypeId(disk_field_id.class_idx_), + 
GetTypeId(disk_field_id.type_idx_), + GetStringId(disk_field_id.name_idx_)); + field_ids_.AddIndexedItem(field_id, FieldIdsOffset() + i * FieldId::ItemSize(), i); +} + +void Collections::CreateMethodId(const DexFile& dex_file, uint32_t i) { + const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i); + MethodId* method_id = new MethodId(GetTypeId(disk_method_id.class_idx_), + GetProtoId(disk_method_id.proto_idx_), + GetStringId(disk_method_id.name_idx_)); + method_ids_.AddIndexedItem(method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i); +} + +void Collections::CreateClassDef(const DexFile& dex_file, uint32_t i) { + const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i); + const TypeId* class_type = GetTypeId(disk_class_def.class_idx_); + uint32_t access_flags = disk_class_def.access_flags_; + const TypeId* superclass = GetTypeIdOrNullPtr(disk_class_def.superclass_idx_); + + const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def); + TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_, false); + + const StringId* source_file = GetStringIdOrNullPtr(disk_class_def.source_file_idx_); + // Annotations. + AnnotationsDirectoryItem* annotations = nullptr; + const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item = + dex_file.GetAnnotationsDirectory(disk_class_def); + if (disk_annotations_directory_item != nullptr) { + annotations = CreateAnnotationsDirectoryItem( + dex_file, disk_annotations_directory_item, disk_class_def.annotations_off_); + } + // Static field initializers. 
+ const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def); + EncodedArrayItem* static_values = + CreateEncodedArrayItem(static_data, disk_class_def.static_values_off_); + ClassData* class_data = CreateClassData( + dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_); + ClassDef* class_def = new ClassDef(class_type, access_flags, superclass, interfaces_type_list, + source_file, annotations, static_values, class_data); + class_defs_.AddIndexedItem(class_def, ClassDefsOffset() + i * ClassDef::ItemSize(), i); +} + +TypeList* Collections::CreateTypeList( + const DexFile::TypeList* dex_type_list, uint32_t offset, bool allow_empty) { + if (dex_type_list == nullptr && !allow_empty) { + return nullptr; + } + // TODO: Create more efficient lookup for existing type lists. + for (std::unique_ptr<TypeList>& type_list : TypeLists()) { + if (type_list->GetOffset() == offset) { + return type_list.get(); + } + } + TypeIdVector* type_vector = new TypeIdVector(); + uint32_t size = dex_type_list == nullptr ? 0 : dex_type_list->Size(); + for (uint32_t index = 0; index < size; ++index) { + type_vector->push_back(GetTypeId(dex_type_list->GetTypeItem(index).type_idx_)); + } + TypeList* new_type_list = new TypeList(type_vector); + type_lists_.AddItem(new_type_list, offset); + return new_type_list; +} + +EncodedArrayItem* Collections::CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset) { + if (static_data == nullptr) { + return nullptr; + } + uint32_t size = DecodeUnsignedLeb128(&static_data); + EncodedValueVector* values = new EncodedValueVector(); + for (uint32_t i = 0; i < size; ++i) { + values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(&static_data))); + } + // TODO: Calculate the size of the encoded array. 
+ EncodedArrayItem* encoded_array_item = new EncodedArrayItem(values); + encoded_array_items_.AddItem(encoded_array_item, offset); + return encoded_array_item; +} + +AnnotationItem* Collections::CreateAnnotationItem(const DexFile::AnnotationItem* annotation, + uint32_t offset) { + uint8_t visibility = annotation->visibility_; + const uint8_t* annotation_data = annotation->annotation_; + EncodedValue* encoded_value = + ReadEncodedValue(&annotation_data, DexFile::kDexAnnotationAnnotation, 0); + // TODO: Calculate the size of the annotation. + AnnotationItem* annotation_item = + new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation()); + annotation_items_.AddItem(annotation_item, offset); + return annotation_item; +} + + +AnnotationSetItem* Collections::CreateAnnotationSetItem(const DexFile& dex_file, + const DexFile::AnnotationSetItem& disk_annotations_item, uint32_t offset) { + if (disk_annotations_item.size_ == 0) { + return nullptr; + } + std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>(); + for (uint32_t i = 0; i < disk_annotations_item.size_; ++i) { + const DexFile::AnnotationItem* annotation = + dex_file.GetAnnotationItem(&disk_annotations_item, i); + if (annotation == nullptr) { + continue; + } + AnnotationItem* annotation_item = + CreateAnnotationItem(annotation, disk_annotations_item.entries_[i]); + items->push_back(annotation_item); + } + AnnotationSetItem* annotation_set_item = new AnnotationSetItem(items); + annotation_set_items_.AddItem(annotation_set_item, offset); + return annotation_set_item; +} + +AnnotationsDirectoryItem* Collections::CreateAnnotationsDirectoryItem(const DexFile& dex_file, + const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) { + const DexFile::AnnotationSetItem* class_set_item = + dex_file.GetClassAnnotationSet(disk_annotations_item); + AnnotationSetItem* class_annotation = nullptr; + if (class_set_item != nullptr) { + uint32_t offset = 
disk_annotations_item->class_annotations_off_; + class_annotation = CreateAnnotationSetItem(dex_file, *class_set_item, offset); + } + const DexFile::FieldAnnotationsItem* fields = + dex_file.GetFieldAnnotations(disk_annotations_item); + FieldAnnotationVector* field_annotations = nullptr; + if (fields != nullptr) { + field_annotations = new FieldAnnotationVector(); + for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) { + FieldId* field_id = GetFieldId(fields[i].field_idx_); + const DexFile::AnnotationSetItem* field_set_item = + dex_file.GetFieldAnnotationSetItem(fields[i]); + uint32_t annotation_set_offset = fields[i].annotations_off_; + AnnotationSetItem* annotation_set_item = + CreateAnnotationSetItem(dex_file, *field_set_item, annotation_set_offset); + field_annotations->push_back(std::unique_ptr<FieldAnnotation>( + new FieldAnnotation(field_id, annotation_set_item))); + } + } + const DexFile::MethodAnnotationsItem* methods = + dex_file.GetMethodAnnotations(disk_annotations_item); + MethodAnnotationVector* method_annotations = nullptr; + if (methods != nullptr) { + method_annotations = new MethodAnnotationVector(); + for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) { + MethodId* method_id = GetMethodId(methods[i].method_idx_); + const DexFile::AnnotationSetItem* method_set_item = + dex_file.GetMethodAnnotationSetItem(methods[i]); + uint32_t annotation_set_offset = methods[i].annotations_off_; + AnnotationSetItem* annotation_set_item = + CreateAnnotationSetItem(dex_file, *method_set_item, annotation_set_offset); + method_annotations->push_back(std::unique_ptr<MethodAnnotation>( + new MethodAnnotation(method_id, annotation_set_item))); + } + } + const DexFile::ParameterAnnotationsItem* parameters = + dex_file.GetParameterAnnotations(disk_annotations_item); + ParameterAnnotationVector* parameter_annotations = nullptr; + if (parameters != nullptr) { + parameter_annotations = new ParameterAnnotationVector(); + for (uint32_t i = 0; i 
< disk_annotations_item->parameters_size_; ++i) { + MethodId* method_id = GetMethodId(parameters[i].method_idx_); + const DexFile::AnnotationSetRefList* list = + dex_file.GetParameterAnnotationSetRefList(¶meters[i]); + parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>( + GenerateParameterAnnotation(dex_file, method_id, list, parameters[i].annotations_off_))); + } + } + // TODO: Calculate the size of the annotations directory. + AnnotationsDirectoryItem* annotations_directory_item = new AnnotationsDirectoryItem( + class_annotation, field_annotations, method_annotations, parameter_annotations); + annotations_directory_items_.AddItem(annotations_directory_item, offset); + return annotations_directory_item; +} + +ParameterAnnotation* Collections::GenerateParameterAnnotation( + const DexFile& dex_file, MethodId* method_id, + const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset) { + std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>(); + for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) { + const DexFile::AnnotationSetItem* annotation_set_item = + dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]); + uint32_t set_offset = annotation_set_ref_list->list_[i].annotations_off_; + annotations->push_back(CreateAnnotationSetItem(dex_file, *annotation_set_item, set_offset)); + } + AnnotationSetRefList* new_ref_list = new AnnotationSetRefList(annotations); + annotation_set_ref_lists_.AddItem(new_ref_list, offset); + return new ParameterAnnotation(method_id, new_ref_list); +} + +CodeItem* Collections::CreateCodeItem(const DexFile& dex_file, + const DexFile::CodeItem& disk_code_item, uint32_t offset) { + uint16_t registers_size = disk_code_item.registers_size_; + uint16_t ins_size = disk_code_item.ins_size_; + uint16_t outs_size = disk_code_item.outs_size_; + uint32_t tries_size = disk_code_item.tries_size_; + + // TODO: Calculate the size of the debug info. 
+ const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(&disk_code_item); + DebugInfoItem* debug_info = nullptr; + if (debug_info_stream != nullptr) { + debug_info = new DebugInfoItem(); + debug_info_items_.AddItem(debug_info, disk_code_item.debug_info_off_); + } + + uint32_t insns_size = disk_code_item.insns_size_in_code_units_; + uint16_t* insns = new uint16_t[insns_size]; + memcpy(insns, disk_code_item.insns_, insns_size * sizeof(uint16_t)); + + TryItemVector* tries = nullptr; + if (tries_size > 0) { + tries = new TryItemVector(); + for (uint32_t i = 0; i < tries_size; ++i) { + const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i); + uint32_t start_addr = disk_try_item->start_addr_; + uint16_t insn_count = disk_try_item->insn_count_; + CatchHandlerVector* handlers = new CatchHandlerVector(); + for (CatchHandlerIterator it(disk_code_item, *disk_try_item); it.HasNext(); it.Next()) { + const uint16_t type_index = it.GetHandlerTypeIndex(); + const TypeId* type_id = GetTypeIdOrNullPtr(type_index); + handlers->push_back(std::unique_ptr<const CatchHandler>( + new CatchHandler(type_id, it.GetHandlerAddress()))); + } + TryItem* try_item = new TryItem(start_addr, insn_count, handlers); + tries->push_back(std::unique_ptr<const TryItem>(try_item)); + } + } + // TODO: Calculate the size of the code item. 
+ CodeItem* code_item = + new CodeItem(registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries); + code_items_.AddItem(code_item, offset); + return code_item; +} + +MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) { + MethodId* method_item = GetMethodId(cdii.GetMemberIndex()); + uint32_t access_flags = cdii.GetRawMemberAccessFlags(); + const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem(); + CodeItem* code_item = nullptr; + DebugInfoItem* debug_info = nullptr; + if (disk_code_item != nullptr) { + code_item = CreateCodeItem(dex_file, *disk_code_item, cdii.GetMethodCodeItemOffset()); + debug_info = code_item->DebugInfo(); + } + if (debug_info != nullptr) { + bool is_static = (access_flags & kAccStatic) != 0; + dex_file.DecodeDebugLocalInfo( + disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info); + dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info); + } + return new MethodItem(access_flags, method_item, code_item); +} + +ClassData* Collections::CreateClassData( + const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) { + // Read the fields and methods defined by the class, resolving the circular reference from those + // to classes by setting class at the same time. + ClassData* class_data = nullptr; + if (encoded_data != nullptr) { + ClassDataItemIterator cdii(dex_file, encoded_data); + // Static fields. + FieldItemVector* static_fields = new FieldItemVector(); + for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) { + FieldId* field_item = GetFieldId(cdii.GetMemberIndex()); + uint32_t access_flags = cdii.GetRawMemberAccessFlags(); + static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item))); + } + // Instance fields. 
+ FieldItemVector* instance_fields = new FieldItemVector(); + for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) { + FieldId* field_item = GetFieldId(cdii.GetMemberIndex()); + uint32_t access_flags = cdii.GetRawMemberAccessFlags(); + instance_fields->push_back( + std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item))); + } + // Direct methods. + MethodItemVector* direct_methods = new MethodItemVector(); + for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) { + direct_methods->push_back( + std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii))); + } + // Virtual methods. + MethodItemVector* virtual_methods = new MethodItemVector(); + for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) { + virtual_methods->push_back( + std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii))); + } + // TODO: Calculate the size of the class data. + class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods); + class_datas_.AddItem(class_data, offset); + } + return class_data; +} + +} // namespace dex_ir +} // namespace art diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h index cbb4404234..6ae9f1c938 100644 --- a/dexlayout/dex_ir.h +++ b/dexlayout/dex_ir.h @@ -23,18 +23,23 @@ #include <stdint.h> #include "dex_file-inl.h" +#include "leb128.h" namespace art { namespace dex_ir { // Forward declarations for classes used in containers or pointed to. 
+class AnnotationItem; class AnnotationsDirectoryItem; class AnnotationSetItem; -class ArrayItem; +class AnnotationSetRefList; class ClassData; class ClassDef; class CodeItem; class DebugInfoItem; +class EncodedAnnotation; +class EncodedArrayItem; +class EncodedValue; class FieldId; class FieldItem; class Header; @@ -42,10 +47,22 @@ class MapList; class MapItem; class MethodId; class MethodItem; +class ParameterAnnotation; class ProtoId; +class StringData; class StringId; class TryItem; class TypeId; +class TypeList; + +// Item size constants. +static constexpr size_t kHeaderItemSize = 112; +static constexpr size_t kStringIdItemSize = 4; +static constexpr size_t kTypeIdItemSize = 4; +static constexpr size_t kProtoIdItemSize = 12; +static constexpr size_t kFieldIdItemSize = 8; +static constexpr size_t kMethodIdItemSize = 8; +static constexpr size_t kClassDefItemSize = 32; // Visitor support class AbstractDispatcher { @@ -54,6 +71,7 @@ class AbstractDispatcher { virtual ~AbstractDispatcher() { } virtual void Dispatch(Header* header) = 0; + virtual void Dispatch(const StringData* string_data) = 0; virtual void Dispatch(const StringId* string_id) = 0; virtual void Dispatch(const TypeId* type_id) = 0; virtual void Dispatch(const ProtoId* proto_id) = 0; @@ -63,11 +81,13 @@ class AbstractDispatcher { virtual void Dispatch(ClassDef* class_def) = 0; virtual void Dispatch(FieldItem* field_item) = 0; virtual void Dispatch(MethodItem* method_item) = 0; - virtual void Dispatch(ArrayItem* array_item) = 0; + virtual void Dispatch(EncodedArrayItem* array_item) = 0; virtual void Dispatch(CodeItem* code_item) = 0; virtual void Dispatch(TryItem* try_item) = 0; virtual void Dispatch(DebugInfoItem* debug_info_item) = 0; + virtual void Dispatch(AnnotationItem* annotation_item) = 0; virtual void Dispatch(AnnotationSetItem* annotation_set_item) = 0; + virtual void Dispatch(AnnotationSetRefList* annotation_set_ref_list) = 0; virtual void Dispatch(AnnotationsDirectoryItem* 
annotations_directory_item) = 0; virtual void Dispatch(MapList* map_list) = 0; virtual void Dispatch(MapItem* map_item) = 0; @@ -82,9 +102,14 @@ template<class T> class CollectionWithOffset { CollectionWithOffset() = default; std::vector<std::unique_ptr<T>>& Collection() { return collection_; } // Read-time support methods - void AddWithPosition(uint32_t position, T* object) { + void AddItem(T* object, uint32_t offset) { + object->SetOffset(offset); + collection_.push_back(std::unique_ptr<T>(object)); + } + void AddIndexedItem(T* object, uint32_t offset, uint32_t index) { + object->SetOffset(offset); + object->SetIndex(index); collection_.push_back(std::unique_ptr<T>(object)); - collection_.back()->SetOffset(position); } // Ordinary object insertion into collection. void Insert(T object ATTRIBUTE_UNUSED) { @@ -98,18 +123,160 @@ template<class T> class CollectionWithOffset { private: std::vector<std::unique_ptr<T>> collection_; uint32_t offset_ = 0; + DISALLOW_COPY_AND_ASSIGN(CollectionWithOffset); }; +class Collections { + public: + Collections() = default; + + std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); } + std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); } + std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); } + std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); } + std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); } + std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); } + + std::vector<std::unique_ptr<TypeList>>& TypeLists() { return type_lists_.Collection(); } + std::vector<std::unique_ptr<EncodedArrayItem>>& EncodedArrayItems() + { return encoded_array_items_.Collection(); } + + void CreateStringId(const DexFile& dex_file, uint32_t i); + void CreateTypeId(const DexFile& dex_file, uint32_t i); + void CreateProtoId(const DexFile& dex_file, uint32_t i); 
+ void CreateFieldId(const DexFile& dex_file, uint32_t i); + void CreateMethodId(const DexFile& dex_file, uint32_t i); + void CreateClassDef(const DexFile& dex_file, uint32_t i); + + TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset, bool allow_empty); + EncodedArrayItem* CreateEncodedArrayItem(const uint8_t* static_data, uint32_t offset); + AnnotationItem* CreateAnnotationItem(const DexFile::AnnotationItem* annotation, uint32_t offset); + AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file, + const DexFile::AnnotationSetItem& disk_annotations_item, uint32_t offset); + AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file, + const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset); + CodeItem* CreateCodeItem( + const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset); + ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset); + + StringId* GetStringId(uint32_t index) { return StringIds()[index].get(); } + TypeId* GetTypeId(uint32_t index) { return TypeIds()[index].get(); } + ProtoId* GetProtoId(uint32_t index) { return ProtoIds()[index].get(); } + FieldId* GetFieldId(uint32_t index) { return FieldIds()[index].get(); } + MethodId* GetMethodId(uint32_t index) { return MethodIds()[index].get(); } + ClassDef* GetClassDef(uint32_t index) { return ClassDefs()[index].get(); } + + StringId* GetStringIdOrNullPtr(uint32_t index) { + return index == DexFile::kDexNoIndex ? nullptr : GetStringId(index); + } + TypeId* GetTypeIdOrNullPtr(uint16_t index) { + return index == DexFile::kDexNoIndex16 ? 
nullptr : GetTypeId(index); + } + + uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); } + uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); } + uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); } + uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); } + uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); } + uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); } + uint32_t StringDatasOffset() const { return string_datas_.GetOffset(); } + uint32_t TypeListsOffset() const { return type_lists_.GetOffset(); } + uint32_t EncodedArrayOffset() const { return encoded_array_items_.GetOffset(); } + uint32_t AnnotationOffset() const { return annotation_items_.GetOffset(); } + uint32_t AnnotationSetOffset() const { return annotation_set_items_.GetOffset(); } + uint32_t AnnotationSetRefListsOffset() const { return annotation_set_ref_lists_.GetOffset(); } + uint32_t AnnotationsDirectoryOffset() const { return annotations_directory_items_.GetOffset(); } + uint32_t DebugInfoOffset() const { return debug_info_items_.GetOffset(); } + uint32_t CodeItemsOffset() const { return code_items_.GetOffset(); } + uint32_t ClassDatasOffset() const { return class_datas_.GetOffset(); } + + void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); } + void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); } + void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); } + void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); } + void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); } + void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); } + void SetStringDatasOffset(uint32_t new_offset) { string_datas_.SetOffset(new_offset); } + void SetTypeListsOffset(uint32_t new_offset) { type_lists_.SetOffset(new_offset); } + void SetEncodedArrayOffset(uint32_t new_offset) { 
encoded_array_items_.SetOffset(new_offset); } + void SetAnnotationOffset(uint32_t new_offset) { annotation_items_.SetOffset(new_offset); } + void SetAnnotationSetOffset(uint32_t new_offset) { annotation_set_items_.SetOffset(new_offset); } + void SetAnnotationSetRefListsOffset(uint32_t new_offset) + { annotation_set_ref_lists_.SetOffset(new_offset); } + void SetAnnotationsDirectoryOffset(uint32_t new_offset) + { annotations_directory_items_.SetOffset(new_offset); } + void SetDebugInfoOffset(uint32_t new_offset) { debug_info_items_.SetOffset(new_offset); } + void SetCodeItemsOffset(uint32_t new_offset) { code_items_.SetOffset(new_offset); } + void SetClassDatasOffset(uint32_t new_offset) { class_datas_.SetOffset(new_offset); } + + uint32_t StringIdsSize() const { return string_ids_.Size(); } + uint32_t TypeIdsSize() const { return type_ids_.Size(); } + uint32_t ProtoIdsSize() const { return proto_ids_.Size(); } + uint32_t FieldIdsSize() const { return field_ids_.Size(); } + uint32_t MethodIdsSize() const { return method_ids_.Size(); } + uint32_t ClassDefsSize() const { return class_defs_.Size(); } + + private: + EncodedValue* ReadEncodedValue(const uint8_t** data); + EncodedValue* ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length); + void ReadEncodedValue(const uint8_t** data, uint8_t type, uint8_t length, EncodedValue* item); + + ParameterAnnotation* GenerateParameterAnnotation(const DexFile& dex_file, MethodId* method_id, + const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset); + MethodItem* GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii); + + CollectionWithOffset<StringId> string_ids_; + CollectionWithOffset<TypeId> type_ids_; + CollectionWithOffset<ProtoId> proto_ids_; + CollectionWithOffset<FieldId> field_ids_; + CollectionWithOffset<MethodId> method_ids_; + CollectionWithOffset<ClassDef> class_defs_; + + CollectionWithOffset<StringData> string_datas_; + CollectionWithOffset<TypeList> 
type_lists_; + CollectionWithOffset<EncodedArrayItem> encoded_array_items_; + CollectionWithOffset<AnnotationItem> annotation_items_; + CollectionWithOffset<AnnotationSetItem> annotation_set_items_; + CollectionWithOffset<AnnotationSetRefList> annotation_set_ref_lists_; + CollectionWithOffset<AnnotationsDirectoryItem> annotations_directory_items_; + CollectionWithOffset<DebugInfoItem> debug_info_items_; + CollectionWithOffset<CodeItem> code_items_; + CollectionWithOffset<ClassData> class_datas_; + + DISALLOW_COPY_AND_ASSIGN(Collections); +}; + class Item { public: + Item() { } virtual ~Item() { } uint32_t GetOffset() const { return offset_; } + uint32_t GetSize() const { return size_; } void SetOffset(uint32_t offset) { offset_ = offset; } + void SetSize(uint32_t size) { size_ = size; } protected: + Item(uint32_t offset, uint32_t size) : offset_(offset), size_(size) { } + uint32_t offset_ = 0; + uint32_t size_ = 0; +}; + +class IndexedItem : public Item { + public: + IndexedItem() { } + virtual ~IndexedItem() { } + + uint32_t GetIndex() const { return index_; } + void SetIndex(uint32_t index) { index_ = index; } + + protected: + IndexedItem(uint32_t offset, uint32_t size, uint32_t index) + : Item(offset, size), index_(index) { } + + uint32_t index_ = 0; }; class Header : public Item { @@ -124,7 +291,8 @@ class Header : public Item { uint32_t link_offset, uint32_t data_size, uint32_t data_offset) - : checksum_(checksum), + : Item(0, kHeaderItemSize), + checksum_(checksum), endian_tag_(endian_tag), file_size_(file_size), header_size_(header_size), @@ -137,6 +305,8 @@ class Header : public Item { } ~Header() OVERRIDE { } + static size_t ItemSize() { return kHeaderItemSize; } + const uint8_t* Magic() const { return magic_; } uint32_t Checksum() const { return checksum_; } const uint8_t* Signature() const { return signature_; } @@ -159,39 +329,7 @@ class Header : public Item { void SetDataSize(uint32_t new_data_size) { data_size_ = new_data_size; } void 
SetDataOffset(uint32_t new_data_offset) { data_offset_ = new_data_offset; } - // Collections. - std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); } - std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); } - std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); } - std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); } - std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); } - std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); } - uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); } - uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); } - uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); } - uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); } - uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); } - uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); } - void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); } - void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); } - void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); } - void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); } - void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); } - void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); } - uint32_t StringIdsSize() const { return string_ids_.Size(); } - uint32_t TypeIdsSize() const { return type_ids_.Size(); } - uint32_t ProtoIdsSize() const { return proto_ids_.Size(); } - uint32_t FieldIdsSize() const { return field_ids_.Size(); } - uint32_t MethodIdsSize() const { return method_ids_.Size(); } - uint32_t ClassDefsSize() const { return class_defs_.Size(); } - - TypeId* GetTypeIdOrNullPtr(uint16_t index) { - return index == 
DexFile::kDexNoIndex16 ? nullptr : TypeIds()[index].get(); - } - - StringId* GetStringIdOrNullPtr(uint32_t index) { - return index == DexFile::kDexNoIndex ? nullptr : StringIds()[index].get(); - } + Collections& GetCollections() { return collections_; } void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } @@ -207,19 +345,16 @@ class Header : public Item { uint32_t data_size_; uint32_t data_offset_; - CollectionWithOffset<StringId> string_ids_; - CollectionWithOffset<TypeId> type_ids_; - CollectionWithOffset<ProtoId> proto_ids_; - CollectionWithOffset<FieldId> field_ids_; - CollectionWithOffset<MethodId> method_ids_; - CollectionWithOffset<ClassDef> class_defs_; + Collections collections_; + DISALLOW_COPY_AND_ASSIGN(Header); }; -class StringId : public Item { +class StringData : public Item { public: - explicit StringId(const char* data) : data_(strdup(data)) { } - ~StringId() OVERRIDE { } + explicit StringData(const char* data) : data_(strdup(data)) { + size_ = UnsignedLeb128Size(strlen(data)) + strlen(data); + } const char* Data() const { return data_.get(); } @@ -227,50 +362,95 @@ class StringId : public Item { private: std::unique_ptr<const char> data_; + + DISALLOW_COPY_AND_ASSIGN(StringData); +}; + +class StringId : public IndexedItem { + public: + explicit StringId(StringData* string_data) : string_data_(string_data) { + size_ = kStringIdItemSize; + } + ~StringId() OVERRIDE { } + + static size_t ItemSize() { return kStringIdItemSize; } + + const char* Data() const { return string_data_->Data(); } + StringData* DataItem() const { return string_data_; } + + void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); } + + private: + StringData* string_data_; + DISALLOW_COPY_AND_ASSIGN(StringId); }; -class TypeId : public Item { +class TypeId : public IndexedItem { public: - explicit TypeId(StringId* string_id) : string_id_(string_id) { } + explicit TypeId(StringId* string_id) : string_id_(string_id) { size_ = kTypeIdItemSize; 
} ~TypeId() OVERRIDE { } + static size_t ItemSize() { return kTypeIdItemSize; } + StringId* GetStringId() const { return string_id_; } void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); } private: StringId* string_id_; + DISALLOW_COPY_AND_ASSIGN(TypeId); }; using TypeIdVector = std::vector<const TypeId*>; -class ProtoId : public Item { +class TypeList : public Item { + public: + explicit TypeList(TypeIdVector* type_list) : type_list_(type_list) { + size_ = sizeof(uint32_t) + (type_list->size() * sizeof(uint16_t)); + } + ~TypeList() OVERRIDE { } + + const TypeIdVector* GetTypeList() const { return type_list_.get(); } + + private: + std::unique_ptr<TypeIdVector> type_list_; + + DISALLOW_COPY_AND_ASSIGN(TypeList); +}; + +class ProtoId : public IndexedItem { public: - ProtoId(const StringId* shorty, const TypeId* return_type, TypeIdVector* parameters) - : shorty_(shorty), return_type_(return_type), parameters_(parameters) { } + ProtoId(const StringId* shorty, const TypeId* return_type, TypeList* parameters) + : shorty_(shorty), return_type_(return_type), parameters_(parameters) + { size_ = kProtoIdItemSize; } ~ProtoId() OVERRIDE { } + static size_t ItemSize() { return kProtoIdItemSize; } + const StringId* Shorty() const { return shorty_; } const TypeId* ReturnType() const { return return_type_; } - const std::vector<const TypeId*>& Parameters() const { return *parameters_; } + const TypeIdVector& Parameters() const { return *parameters_->GetTypeList(); } void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); } private: const StringId* shorty_; const TypeId* return_type_; - std::unique_ptr<TypeIdVector> parameters_; + TypeList* parameters_; + DISALLOW_COPY_AND_ASSIGN(ProtoId); }; -class FieldId : public Item { +class FieldId : public IndexedItem { public: FieldId(const TypeId* klass, const TypeId* type, const StringId* name) - : class_(klass), type_(type), name_(name) { } + : class_(klass), type_(type), name_(name) { size_ = 
kFieldIdItemSize; } ~FieldId() OVERRIDE { } + static size_t ItemSize() { return kFieldIdItemSize; } + const TypeId* Class() const { return class_; } const TypeId* Type() const { return type_; } const StringId* Name() const { return name_; } @@ -281,15 +461,18 @@ class FieldId : public Item { const TypeId* class_; const TypeId* type_; const StringId* name_; + DISALLOW_COPY_AND_ASSIGN(FieldId); }; -class MethodId : public Item { +class MethodId : public IndexedItem { public: MethodId(const TypeId* klass, const ProtoId* proto, const StringId* name) - : class_(klass), proto_(proto), name_(name) { } + : class_(klass), proto_(proto), name_(name) { size_ = kMethodIdItemSize; } ~MethodId() OVERRIDE { } + static size_t ItemSize() { return kMethodIdItemSize; } + const TypeId* Class() const { return class_; } const ProtoId* Proto() const { return proto_; } const StringId* Name() const { return name_; } @@ -300,6 +483,7 @@ class MethodId : public Item { const TypeId* class_; const ProtoId* proto_; const StringId* name_; + DISALLOW_COPY_AND_ASSIGN(MethodId); }; @@ -317,6 +501,7 @@ class FieldItem : public Item { private: uint32_t access_flags_; const FieldId* field_id_; + DISALLOW_COPY_AND_ASSIGN(FieldItem); }; @@ -330,93 +515,126 @@ class MethodItem : public Item { uint32_t GetAccessFlags() const { return access_flags_; } const MethodId* GetMethodId() const { return method_id_; } - const CodeItem* GetCodeItem() const { return code_.get(); } + const CodeItem* GetCodeItem() const { return code_; } void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } private: uint32_t access_flags_; const MethodId* method_id_; - std::unique_ptr<const CodeItem> code_; + const CodeItem* code_; + DISALLOW_COPY_AND_ASSIGN(MethodItem); }; using MethodItemVector = std::vector<std::unique_ptr<MethodItem>>; -class ArrayItem : public Item { - public: - class NameValuePair { - public: - NameValuePair(StringId* name, ArrayItem* value) - : name_(name), value_(value) { } - - StringId* 
Name() const { return name_; } - ArrayItem* Value() const { return value_.get(); } - - private: - StringId* name_; - std::unique_ptr<ArrayItem> value_; - DISALLOW_COPY_AND_ASSIGN(NameValuePair); - }; - - struct ArrayItemVariant { - public: - union { - bool bool_val_; - int8_t byte_val_; - int16_t short_val_; - uint16_t char_val_; - int32_t int_val_; - int64_t long_val_; - float float_val_; - double double_val_; - StringId* string_val_; - FieldId* field_val_; - MethodId* method_val_; - } u_; - std::unique_ptr<std::vector<std::unique_ptr<ArrayItem>>> annotation_array_val_; - struct { - StringId* string_; - std::unique_ptr<std::vector<std::unique_ptr<NameValuePair>>> array_; - } annotation_annotation_val_; - }; - - explicit ArrayItem(uint8_t type) : type_(type) { } - ~ArrayItem() OVERRIDE { } +class EncodedValue { + public: + explicit EncodedValue(uint8_t type) : type_(type) { } int8_t Type() const { return type_; } - bool GetBoolean() const { return item_.u_.bool_val_; } - int8_t GetByte() const { return item_.u_.byte_val_; } - int16_t GetShort() const { return item_.u_.short_val_; } - uint16_t GetChar() const { return item_.u_.char_val_; } - int32_t GetInt() const { return item_.u_.int_val_; } - int64_t GetLong() const { return item_.u_.long_val_; } - float GetFloat() const { return item_.u_.float_val_; } - double GetDouble() const { return item_.u_.double_val_; } - StringId* GetStringId() const { return item_.u_.string_val_; } - FieldId* GetFieldId() const { return item_.u_.field_val_; } - MethodId* GetMethodId() const { return item_.u_.method_val_; } - std::vector<std::unique_ptr<ArrayItem>>* GetAnnotationArray() const { - return item_.annotation_array_val_.get(); - } - StringId* GetAnnotationAnnotationString() const { - return item_.annotation_annotation_val_.string_; - } - std::vector<std::unique_ptr<NameValuePair>>* GetAnnotationAnnotationNameValuePairArray() const { - return item_.annotation_annotation_val_.array_.get(); - } - // Used to construct the item 
union. Ugly, but necessary. - ArrayItemVariant* GetArrayItemVariant() { return &item_; } - void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } + void SetBoolean(bool z) { u_.bool_val_ = z; } + void SetByte(int8_t b) { u_.byte_val_ = b; } + void SetShort(int16_t s) { u_.short_val_ = s; } + void SetChar(uint16_t c) { u_.char_val_ = c; } + void SetInt(int32_t i) { u_.int_val_ = i; } + void SetLong(int64_t l) { u_.long_val_ = l; } + void SetFloat(float f) { u_.float_val_ = f; } + void SetDouble(double d) { u_.double_val_ = d; } + void SetStringId(StringId* string_id) { u_.string_val_ = string_id; } + void SetTypeId(TypeId* type_id) { u_.type_val_ = type_id; } + void SetFieldId(FieldId* field_id) { u_.field_val_ = field_id; } + void SetMethodId(MethodId* method_id) { u_.method_val_ = method_id; } + void SetEncodedArray(EncodedArrayItem* encoded_array) { encoded_array_.reset(encoded_array); } + void SetEncodedAnnotation(EncodedAnnotation* encoded_annotation) + { encoded_annotation_.reset(encoded_annotation); } + + bool GetBoolean() const { return u_.bool_val_; } + int8_t GetByte() const { return u_.byte_val_; } + int16_t GetShort() const { return u_.short_val_; } + uint16_t GetChar() const { return u_.char_val_; } + int32_t GetInt() const { return u_.int_val_; } + int64_t GetLong() const { return u_.long_val_; } + float GetFloat() const { return u_.float_val_; } + double GetDouble() const { return u_.double_val_; } + StringId* GetStringId() const { return u_.string_val_; } + TypeId* GetTypeId() const { return u_.type_val_; } + FieldId* GetFieldId() const { return u_.field_val_; } + MethodId* GetMethodId() const { return u_.method_val_; } + EncodedArrayItem* GetEncodedArray() const { return encoded_array_.get(); } + EncodedAnnotation* GetEncodedAnnotation() const { return encoded_annotation_.get(); } + + EncodedAnnotation* ReleaseEncodedAnnotation() { return encoded_annotation_.release(); } private: uint8_t type_; - ArrayItemVariant item_; - 
DISALLOW_COPY_AND_ASSIGN(ArrayItem); + union { + bool bool_val_; + int8_t byte_val_; + int16_t short_val_; + uint16_t char_val_; + int32_t int_val_; + int64_t long_val_; + float float_val_; + double double_val_; + StringId* string_val_; + TypeId* type_val_; + FieldId* field_val_; + MethodId* method_val_; + } u_; + std::unique_ptr<EncodedArrayItem> encoded_array_; + std::unique_ptr<EncodedAnnotation> encoded_annotation_; + + DISALLOW_COPY_AND_ASSIGN(EncodedValue); +}; + +using EncodedValueVector = std::vector<std::unique_ptr<EncodedValue>>; + +class AnnotationElement { + public: + AnnotationElement(StringId* name, EncodedValue* value) : name_(name), value_(value) { } + + StringId* GetName() const { return name_; } + EncodedValue* GetValue() const { return value_.get(); } + + private: + StringId* name_; + std::unique_ptr<EncodedValue> value_; + + DISALLOW_COPY_AND_ASSIGN(AnnotationElement); }; -using ArrayItemVector = std::vector<std::unique_ptr<ArrayItem>>; +using AnnotationElementVector = std::vector<std::unique_ptr<AnnotationElement>>; + +class EncodedAnnotation { + public: + EncodedAnnotation(TypeId* type, AnnotationElementVector* elements) + : type_(type), elements_(elements) { } + + TypeId* GetType() const { return type_; } + AnnotationElementVector* GetAnnotationElements() const { return elements_.get(); } + + private: + TypeId* type_; + std::unique_ptr<AnnotationElementVector> elements_; + + DISALLOW_COPY_AND_ASSIGN(EncodedAnnotation); +}; + +class EncodedArrayItem : public Item { + public: + explicit EncodedArrayItem(EncodedValueVector* encoded_values) + : encoded_values_(encoded_values) { } + + EncodedValueVector* GetEncodedValues() const { return encoded_values_.get(); } + + private: + std::unique_ptr<EncodedValueVector> encoded_values_; + + DISALLOW_COPY_AND_ASSIGN(EncodedArrayItem); +}; class ClassData : public Item { public: @@ -442,42 +660,43 @@ class ClassData : public Item { std::unique_ptr<FieldItemVector> instance_fields_; 
std::unique_ptr<MethodItemVector> direct_methods_; std::unique_ptr<MethodItemVector> virtual_methods_; + DISALLOW_COPY_AND_ASSIGN(ClassData); }; -class ClassDef : public Item { +class ClassDef : public IndexedItem { public: ClassDef(const TypeId* class_type, uint32_t access_flags, const TypeId* superclass, - TypeIdVector* interfaces, - uint32_t interfaces_offset, + TypeList* interfaces, const StringId* source_file, AnnotationsDirectoryItem* annotations, - ArrayItemVector* static_values, + EncodedArrayItem* static_values, ClassData* class_data) : class_type_(class_type), access_flags_(access_flags), superclass_(superclass), interfaces_(interfaces), - interfaces_offset_(interfaces_offset), source_file_(source_file), annotations_(annotations), static_values_(static_values), - class_data_(class_data) { } + class_data_(class_data) { size_ = kClassDefItemSize; } ~ClassDef() OVERRIDE { } + static size_t ItemSize() { return kClassDefItemSize; } + const TypeId* ClassType() const { return class_type_; } uint32_t GetAccessFlags() const { return access_flags_; } const TypeId* Superclass() const { return superclass_; } - TypeIdVector* Interfaces() { return interfaces_.get(); } - uint32_t InterfacesOffset() const { return interfaces_offset_; } - void SetInterfacesOffset(uint32_t new_offset) { interfaces_offset_ = new_offset; } + const TypeIdVector* Interfaces() + { return interfaces_ == nullptr ? nullptr: interfaces_->GetTypeList(); } + uint32_t InterfacesOffset() { return interfaces_ == nullptr ? 
0 : interfaces_->GetOffset(); } const StringId* SourceFile() const { return source_file_; } - AnnotationsDirectoryItem* Annotations() const { return annotations_.get(); } - ArrayItemVector* StaticValues() { return static_values_.get(); } - ClassData* GetClassData() { return class_data_.get(); } + AnnotationsDirectoryItem* Annotations() const { return annotations_; } + EncodedArrayItem* StaticValues() { return static_values_; } + ClassData* GetClassData() { return class_data_; } MethodItem* GenerateMethodItem(Header& header, ClassDataItemIterator& cdii); @@ -487,12 +706,12 @@ class ClassDef : public Item { const TypeId* class_type_; uint32_t access_flags_; const TypeId* superclass_; - std::unique_ptr<TypeIdVector> interfaces_; - uint32_t interfaces_offset_; + TypeList* interfaces_; const StringId* source_file_; - std::unique_ptr<AnnotationsDirectoryItem> annotations_; - std::unique_ptr<ArrayItemVector> static_values_; - std::unique_ptr<ClassData> class_data_; + AnnotationsDirectoryItem* annotations_; + EncodedArrayItem* static_values_; + ClassData* class_data_; + DISALLOW_COPY_AND_ASSIGN(ClassDef); }; @@ -506,6 +725,7 @@ class CatchHandler { private: const TypeId* type_id_; uint32_t address_; + DISALLOW_COPY_AND_ASSIGN(CatchHandler); }; @@ -527,6 +747,7 @@ class TryItem : public Item { uint32_t start_addr_; uint16_t insn_count_; std::unique_ptr<CatchHandlerVector> handlers_; + DISALLOW_COPY_AND_ASSIGN(TryItem); }; @@ -555,7 +776,7 @@ class CodeItem : public Item { uint16_t InsSize() const { return ins_size_; } uint16_t OutsSize() const { return outs_size_; } uint16_t TriesSize() const { return tries_ == nullptr ? 
0 : tries_->size(); } - DebugInfoItem* DebugInfo() const { return debug_info_.get(); } + DebugInfoItem* DebugInfo() const { return debug_info_; } uint32_t InsnsSize() const { return insns_size_; } uint16_t* Insns() const { return insns_.get(); } TryItemVector* Tries() const { return tries_.get(); } @@ -566,14 +787,14 @@ class CodeItem : public Item { uint16_t registers_size_; uint16_t ins_size_; uint16_t outs_size_; - std::unique_ptr<DebugInfoItem> debug_info_; + DebugInfoItem* debug_info_; uint32_t insns_size_; std::unique_ptr<uint16_t[]> insns_; std::unique_ptr<TryItemVector> tries_; + DISALLOW_COPY_AND_ASSIGN(CodeItem); }; - struct PositionInfo { PositionInfo(uint32_t address, uint32_t line) : address_(address), line_(line) { } @@ -617,39 +838,60 @@ class DebugInfoItem : public Item { private: PositionInfoVector positions_; LocalInfoVector locals_; + DISALLOW_COPY_AND_ASSIGN(DebugInfoItem); }; -class AnnotationItem { +class AnnotationItem : public Item { public: - AnnotationItem(uint8_t visibility, ArrayItem* item) : visibility_(visibility), item_(item) { } + AnnotationItem(uint8_t visibility, EncodedAnnotation* annotation) + : visibility_(visibility), annotation_(annotation) { } uint8_t GetVisibility() const { return visibility_; } - ArrayItem* GetItem() const { return item_.get(); } + EncodedAnnotation* GetAnnotation() const { return annotation_.get(); } + + void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } private: uint8_t visibility_; - std::unique_ptr<ArrayItem> item_; + std::unique_ptr<EncodedAnnotation> annotation_; + DISALLOW_COPY_AND_ASSIGN(AnnotationItem); }; -using AnnotationItemVector = std::vector<std::unique_ptr<AnnotationItem>>; - class AnnotationSetItem : public Item { public: - explicit AnnotationSetItem(AnnotationItemVector* items) : items_(items) { } + explicit AnnotationSetItem(std::vector<AnnotationItem*>* items) : items_(items) { + size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t); + } ~AnnotationSetItem() 
OVERRIDE { } - AnnotationItemVector* GetItems() { return items_.get(); } + std::vector<AnnotationItem*>* GetItems() { return items_.get(); } void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } private: - std::unique_ptr<AnnotationItemVector> items_; + std::unique_ptr<std::vector<AnnotationItem*>> items_; + DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem); }; -using AnnotationSetItemVector = std::vector<std::unique_ptr<AnnotationSetItem>>; +class AnnotationSetRefList : public Item { + public: + explicit AnnotationSetRefList(std::vector<AnnotationSetItem*>* items) : items_(items) { + size_ = sizeof(uint32_t) + items->size() * sizeof(uint32_t); + } + ~AnnotationSetRefList() OVERRIDE { } + + std::vector<AnnotationSetItem*>* GetItems() { return items_.get(); } + + void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } + + private: + std::unique_ptr<std::vector<AnnotationSetItem*>> items_; + + DISALLOW_COPY_AND_ASSIGN(AnnotationSetRefList); +}; class FieldAnnotation { public: @@ -657,11 +899,12 @@ class FieldAnnotation { : field_id_(field_id), annotation_set_item_(annotation_set_item) { } FieldId* GetFieldId() const { return field_id_; } - AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); } + AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_; } private: FieldId* field_id_; - std::unique_ptr<AnnotationSetItem> annotation_set_item_; + AnnotationSetItem* annotation_set_item_; + DISALLOW_COPY_AND_ASSIGN(FieldAnnotation); }; @@ -673,11 +916,12 @@ class MethodAnnotation { : method_id_(method_id), annotation_set_item_(annotation_set_item) { } MethodId* GetMethodId() const { return method_id_; } - AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); } + AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_; } private: MethodId* method_id_; - std::unique_ptr<AnnotationSetItem> annotation_set_item_; + AnnotationSetItem* 
annotation_set_item_; + DISALLOW_COPY_AND_ASSIGN(MethodAnnotation); }; @@ -685,15 +929,16 @@ using MethodAnnotationVector = std::vector<std::unique_ptr<MethodAnnotation>>; class ParameterAnnotation { public: - ParameterAnnotation(MethodId* method_id, AnnotationSetItemVector* annotations) + ParameterAnnotation(MethodId* method_id, AnnotationSetRefList* annotations) : method_id_(method_id), annotations_(annotations) { } MethodId* GetMethodId() const { return method_id_; } - AnnotationSetItemVector* GetAnnotations() { return annotations_.get(); } + AnnotationSetRefList* GetAnnotations() { return annotations_; } private: MethodId* method_id_; - std::unique_ptr<AnnotationSetItemVector> annotations_; + AnnotationSetRefList* annotations_; + DISALLOW_COPY_AND_ASSIGN(ParameterAnnotation); }; @@ -710,7 +955,7 @@ class AnnotationsDirectoryItem : public Item { method_annotations_(method_annotations), parameter_annotations_(parameter_annotations) { } - AnnotationSetItem* GetClassAnnotation() const { return class_annotation_.get(); } + AnnotationSetItem* GetClassAnnotation() const { return class_annotation_; } FieldAnnotationVector* GetFieldAnnotations() { return field_annotations_.get(); } MethodAnnotationVector* GetMethodAnnotations() { return method_annotations_.get(); } ParameterAnnotationVector* GetParameterAnnotations() { return parameter_annotations_.get(); } @@ -718,10 +963,11 @@ class AnnotationsDirectoryItem : public Item { void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); } private: - std::unique_ptr<AnnotationSetItem> class_annotation_; + AnnotationSetItem* class_annotation_; std::unique_ptr<FieldAnnotationVector> field_annotations_; std::unique_ptr<MethodAnnotationVector> method_annotations_; std::unique_ptr<ParameterAnnotationVector> parameter_annotations_; + DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem); }; diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc index 30f57d95a4..e6868d74bc 100644 --- 
a/dexlayout/dex_ir_builder.cc +++ b/dexlayout/dex_ir_builder.cc @@ -24,401 +24,6 @@ namespace art { namespace dex_ir { -namespace { - -static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) { - uint64_t value = 0; - for (uint32_t i = 0; i <= length; i++) { - value |= static_cast<uint64_t>(*(*data)++) << (i * 8); - } - if (sign_extend) { - int shift = (7 - length) * 8; - return (static_cast<int64_t>(value) << shift) >> shift; - } - return value; -} - -// Prototype to break cyclic dependency. -void ReadArrayItemVariant(Header& header, - const uint8_t** data, - uint8_t type, - uint8_t length, - ArrayItem::ArrayItemVariant* item); - -ArrayItem* ReadArrayItem(Header& header, const uint8_t** data, uint8_t type, uint8_t length) { - ArrayItem* item = new ArrayItem(type); - ReadArrayItemVariant(header, data, type, length, item->GetArrayItemVariant()); - return item; -} - -ArrayItem* ReadArrayItem(Header& header, const uint8_t** data) { - const uint8_t encoded_value = *(*data)++; - const uint8_t type = encoded_value & 0x1f; - ArrayItem* item = new ArrayItem(type); - ReadArrayItemVariant(header, data, type, encoded_value >> 5, item->GetArrayItemVariant()); - return item; -} - -void ReadArrayItemVariant(Header& header, - const uint8_t** data, - uint8_t type, - uint8_t length, - ArrayItem::ArrayItemVariant* item) { - switch (type) { - case DexFile::kDexAnnotationByte: - item->u_.byte_val_ = static_cast<int8_t>(ReadVarWidth(data, length, false)); - break; - case DexFile::kDexAnnotationShort: - item->u_.short_val_ = static_cast<int16_t>(ReadVarWidth(data, length, true)); - break; - case DexFile::kDexAnnotationChar: - item->u_.char_val_ = static_cast<uint16_t>(ReadVarWidth(data, length, false)); - break; - case DexFile::kDexAnnotationInt: - item->u_.int_val_ = static_cast<int32_t>(ReadVarWidth(data, length, true)); - break; - case DexFile::kDexAnnotationLong: - item->u_.long_val_ = static_cast<int64_t>(ReadVarWidth(data, length, true)); - break; - 
case DexFile::kDexAnnotationFloat: { - // Fill on right. - union { - float f; - uint32_t data; - } conv; - conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8; - item->u_.float_val_ = conv.f; - break; - } - case DexFile::kDexAnnotationDouble: { - // Fill on right. - union { - double d; - uint64_t data; - } conv; - conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8; - item->u_.double_val_ = conv.d; - break; - } - case DexFile::kDexAnnotationString: { - const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); - item->u_.string_val_ = header.StringIds()[string_index].get(); - break; - } - case DexFile::kDexAnnotationType: { - const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); - item->u_.string_val_ = header.TypeIds()[string_index]->GetStringId(); - break; - } - case DexFile::kDexAnnotationField: - case DexFile::kDexAnnotationEnum: { - const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); - item->u_.field_val_ = header.FieldIds()[field_index].get(); - break; - } - case DexFile::kDexAnnotationMethod: { - const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false)); - item->u_.method_val_ = header.MethodIds()[method_index].get(); - break; - } - case DexFile::kDexAnnotationArray: { - item->annotation_array_val_.reset(new ArrayItemVector()); - // Decode all elements. 
- const uint32_t size = DecodeUnsignedLeb128(data); - for (uint32_t i = 0; i < size; i++) { - item->annotation_array_val_->push_back( - std::unique_ptr<ArrayItem>(ReadArrayItem(header, data))); - } - break; - } - case DexFile::kDexAnnotationAnnotation: { - const uint32_t type_idx = DecodeUnsignedLeb128(data); - item->annotation_annotation_val_.string_ = header.TypeIds()[type_idx]->GetStringId(); - item->annotation_annotation_val_.array_.reset( - new std::vector<std::unique_ptr<ArrayItem::NameValuePair>>()); - // Decode all name=value pairs. - const uint32_t size = DecodeUnsignedLeb128(data); - for (uint32_t i = 0; i < size; i++) { - const uint32_t name_index = DecodeUnsignedLeb128(data); - item->annotation_annotation_val_.array_->push_back( - std::unique_ptr<ArrayItem::NameValuePair>( - new ArrayItem::NameValuePair(header.StringIds()[name_index].get(), - ReadArrayItem(header, data)))); - } - break; - } - case DexFile::kDexAnnotationNull: - break; - case DexFile::kDexAnnotationBoolean: - item->u_.bool_val_ = (length != 0); - break; - default: - break; - } -} - -static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) { - DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context); - PositionInfoVector& positions = debug_info->GetPositionInfo(); - positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_))); - return false; -} - -static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) { - DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context); - LocalInfoVector& locals = debug_info->GetLocalInfo(); - const char* name = entry.name_ != nullptr ? entry.name_ : "(null)"; - const char* signature = entry.signature_ != nullptr ? 
entry.signature_ : ""; - locals.push_back(std::unique_ptr<LocalInfo>( - new LocalInfo(name, entry.descriptor_, signature, entry.start_address_, - entry.end_address_, entry.reg_))); -} - -CodeItem* ReadCodeItem(const DexFile& dex_file, - const DexFile::CodeItem& disk_code_item, - Header& header) { - uint16_t registers_size = disk_code_item.registers_size_; - uint16_t ins_size = disk_code_item.ins_size_; - uint16_t outs_size = disk_code_item.outs_size_; - uint32_t tries_size = disk_code_item.tries_size_; - - const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(&disk_code_item); - DebugInfoItem* debug_info = nullptr; - if (debug_info_stream != nullptr) { - debug_info = new DebugInfoItem(); - } - - uint32_t insns_size = disk_code_item.insns_size_in_code_units_; - uint16_t* insns = new uint16_t[insns_size]; - memcpy(insns, disk_code_item.insns_, insns_size * sizeof(uint16_t)); - - TryItemVector* tries = nullptr; - if (tries_size > 0) { - tries = new TryItemVector(); - for (uint32_t i = 0; i < tries_size; ++i) { - const DexFile::TryItem* disk_try_item = dex_file.GetTryItems(disk_code_item, i); - uint32_t start_addr = disk_try_item->start_addr_; - uint16_t insn_count = disk_try_item->insn_count_; - CatchHandlerVector* handlers = new CatchHandlerVector(); - for (CatchHandlerIterator it(disk_code_item, *disk_try_item); it.HasNext(); it.Next()) { - const uint16_t type_index = it.GetHandlerTypeIndex(); - const TypeId* type_id = header.GetTypeIdOrNullPtr(type_index); - handlers->push_back(std::unique_ptr<const CatchHandler>( - new CatchHandler(type_id, it.GetHandlerAddress()))); - } - TryItem* try_item = new TryItem(start_addr, insn_count, handlers); - tries->push_back(std::unique_ptr<const TryItem>(try_item)); - } - } - return new CodeItem(registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries); -} - -MethodItem* GenerateMethodItem(const DexFile& dex_file, - dex_ir::Header& header, - ClassDataItemIterator& cdii) { - MethodId* method_item = 
header.MethodIds()[cdii.GetMemberIndex()].get(); - uint32_t access_flags = cdii.GetRawMemberAccessFlags(); - const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem(); - CodeItem* code_item = nullptr; - DebugInfoItem* debug_info = nullptr; - if (disk_code_item != nullptr) { - code_item = ReadCodeItem(dex_file, *disk_code_item, header); - code_item->SetOffset(cdii.GetMethodCodeItemOffset()); - debug_info = code_item->DebugInfo(); - } - if (debug_info != nullptr) { - bool is_static = (access_flags & kAccStatic) != 0; - dex_file.DecodeDebugLocalInfo( - disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info); - dex_file.DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info); - } - return new MethodItem(access_flags, method_item, code_item); -} - -AnnotationSetItem* ReadAnnotationSetItem(const DexFile& dex_file, - const DexFile::AnnotationSetItem& disk_annotations_item, - Header& header) { - if (disk_annotations_item.size_ == 0) { - return nullptr; - } - AnnotationItemVector* items = new AnnotationItemVector(); - for (uint32_t i = 0; i < disk_annotations_item.size_; ++i) { - const DexFile::AnnotationItem* annotation = - dex_file.GetAnnotationItem(&disk_annotations_item, i); - if (annotation == nullptr) { - continue; - } - uint8_t visibility = annotation->visibility_; - const uint8_t* annotation_data = annotation->annotation_; - ArrayItem* array_item = - ReadArrayItem(header, &annotation_data, DexFile::kDexAnnotationAnnotation, 0); - items->push_back(std::unique_ptr<AnnotationItem>(new AnnotationItem(visibility, array_item))); - } - return new AnnotationSetItem(items); -} - -ParameterAnnotation* ReadParameterAnnotation( - const DexFile& dex_file, - MethodId* method_id, - const DexFile::AnnotationSetRefList* annotation_set_ref_list, - Header& header) { - AnnotationSetItemVector* annotations = new AnnotationSetItemVector(); - for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) { - const DexFile::AnnotationSetItem* 
annotation_set_item = - dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]); - annotations->push_back(std::unique_ptr<AnnotationSetItem>( - ReadAnnotationSetItem(dex_file, *annotation_set_item, header))); - } - return new ParameterAnnotation(method_id, annotations); -} - -AnnotationsDirectoryItem* ReadAnnotationsDirectoryItem( - const DexFile& dex_file, - const DexFile::AnnotationsDirectoryItem* disk_annotations_item, - Header& header) { - const DexFile::AnnotationSetItem* class_set_item = - dex_file.GetClassAnnotationSet(disk_annotations_item); - AnnotationSetItem* class_annotation = nullptr; - if (class_set_item != nullptr) { - class_annotation = ReadAnnotationSetItem(dex_file, *class_set_item, header); - } - const DexFile::FieldAnnotationsItem* fields = - dex_file.GetFieldAnnotations(disk_annotations_item); - FieldAnnotationVector* field_annotations = nullptr; - if (fields != nullptr) { - field_annotations = new FieldAnnotationVector(); - for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) { - FieldId* field_id = header.FieldIds()[fields[i].field_idx_].get(); - const DexFile::AnnotationSetItem* field_set_item = - dex_file.GetFieldAnnotationSetItem(fields[i]); - AnnotationSetItem* annotation_set_item = - ReadAnnotationSetItem(dex_file, *field_set_item, header); - field_annotations->push_back(std::unique_ptr<FieldAnnotation>( - new FieldAnnotation(field_id, annotation_set_item))); - } - } - const DexFile::MethodAnnotationsItem* methods = - dex_file.GetMethodAnnotations(disk_annotations_item); - MethodAnnotationVector* method_annotations = nullptr; - if (methods != nullptr) { - method_annotations = new MethodAnnotationVector(); - for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) { - MethodId* method_id = header.MethodIds()[methods[i].method_idx_].get(); - const DexFile::AnnotationSetItem* method_set_item = - dex_file.GetMethodAnnotationSetItem(methods[i]); - AnnotationSetItem* annotation_set_item = - 
ReadAnnotationSetItem(dex_file, *method_set_item, header); - method_annotations->push_back(std::unique_ptr<MethodAnnotation>( - new MethodAnnotation(method_id, annotation_set_item))); - } - } - const DexFile::ParameterAnnotationsItem* parameters = - dex_file.GetParameterAnnotations(disk_annotations_item); - ParameterAnnotationVector* parameter_annotations = nullptr; - if (parameters != nullptr) { - parameter_annotations = new ParameterAnnotationVector(); - for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) { - MethodId* method_id = header.MethodIds()[parameters[i].method_idx_].get(); - const DexFile::AnnotationSetRefList* list = - dex_file.GetParameterAnnotationSetRefList(¶meters[i]); - parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>( - ReadParameterAnnotation(dex_file, method_id, list, header))); - } - } - - return new AnnotationsDirectoryItem(class_annotation, - field_annotations, - method_annotations, - parameter_annotations); -} - -ClassDef* ReadClassDef(const DexFile& dex_file, - const DexFile::ClassDef& disk_class_def, - Header& header) { - const TypeId* class_type = header.TypeIds()[disk_class_def.class_idx_].get(); - uint32_t access_flags = disk_class_def.access_flags_; - const TypeId* superclass = header.GetTypeIdOrNullPtr(disk_class_def.superclass_idx_); - - TypeIdVector* interfaces = nullptr; - const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def); - uint32_t interfaces_offset = disk_class_def.interfaces_off_; - if (type_list != nullptr) { - interfaces = new TypeIdVector(); - for (uint32_t index = 0; index < type_list->Size(); ++index) { - interfaces->push_back(header.TypeIds()[type_list->GetTypeItem(index).type_idx_].get()); - } - } - const StringId* source_file = header.GetStringIdOrNullPtr(disk_class_def.source_file_idx_); - // Annotations. 
- AnnotationsDirectoryItem* annotations = nullptr; - const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item = - dex_file.GetAnnotationsDirectory(disk_class_def); - if (disk_annotations_directory_item != nullptr) { - annotations = ReadAnnotationsDirectoryItem(dex_file, disk_annotations_directory_item, header); - annotations->SetOffset(disk_class_def.annotations_off_); - } - // Static field initializers. - ArrayItemVector* static_values = nullptr; - const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def); - if (static_data != nullptr) { - uint32_t static_value_count = static_data == nullptr ? 0 : DecodeUnsignedLeb128(&static_data); - if (static_value_count > 0) { - static_values = new ArrayItemVector(); - for (uint32_t i = 0; i < static_value_count; ++i) { - static_values->push_back(std::unique_ptr<ArrayItem>(ReadArrayItem(header, &static_data))); - } - } - } - // Read the fields and methods defined by the class, resolving the circular reference from those - // to classes by setting class at the same time. - const uint8_t* encoded_data = dex_file.GetClassData(disk_class_def); - ClassData* class_data = nullptr; - if (encoded_data != nullptr) { - uint32_t offset = disk_class_def.class_data_off_; - ClassDataItemIterator cdii(dex_file, encoded_data); - // Static fields. - FieldItemVector* static_fields = new FieldItemVector(); - for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) { - FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get(); - uint32_t access_flags = cdii.GetRawMemberAccessFlags(); - static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item))); - } - // Instance fields. 
- FieldItemVector* instance_fields = new FieldItemVector(); - for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) { - FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get(); - uint32_t access_flags = cdii.GetRawMemberAccessFlags(); - instance_fields->push_back( - std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item))); - } - // Direct methods. - MethodItemVector* direct_methods = new MethodItemVector(); - for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) { - direct_methods->push_back( - std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, header, cdii))); - } - // Virtual methods. - MethodItemVector* virtual_methods = new MethodItemVector(); - for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) { - virtual_methods->push_back( - std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, header, cdii))); - } - class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods); - class_data->SetOffset(offset); - } - return new ClassDef(class_type, - access_flags, - superclass, - interfaces, - interfaces_offset, - source_file, - annotations, - static_values, - class_data); -} - -} // namespace - Header* DexIrBuilder(const DexFile& dex_file) { const DexFile::Header& disk_header = dex_file.GetHeader(); Header* header = new Header(disk_header.magic_, @@ -431,73 +36,37 @@ Header* DexIrBuilder(const DexFile& dex_file) { disk_header.link_off_, disk_header.data_size_, disk_header.data_off_); + Collections& collections = header->GetCollections(); // Walk the rest of the header fields. // StringId table. 
- std::vector<std::unique_ptr<StringId>>& string_ids = header->StringIds(); - header->SetStringIdsOffset(disk_header.string_ids_off_); + collections.SetStringIdsOffset(disk_header.string_ids_off_); for (uint32_t i = 0; i < dex_file.NumStringIds(); ++i) { - const DexFile::StringId& disk_string_id = dex_file.GetStringId(i); - StringId* string_id = new StringId(dex_file.GetStringData(disk_string_id)); - string_id->SetOffset(i); - string_ids.push_back(std::unique_ptr<StringId>(string_id)); + collections.CreateStringId(dex_file, i); } // TypeId table. - std::vector<std::unique_ptr<TypeId>>& type_ids = header->TypeIds(); - header->SetTypeIdsOffset(disk_header.type_ids_off_); + collections.SetTypeIdsOffset(disk_header.type_ids_off_); for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) { - const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(i); - TypeId* type_id = new TypeId(header->StringIds()[disk_type_id.descriptor_idx_].get()); - type_id->SetOffset(i); - type_ids.push_back(std::unique_ptr<TypeId>(type_id)); + collections.CreateTypeId(dex_file, i); } // ProtoId table. - std::vector<std::unique_ptr<ProtoId>>& proto_ids = header->ProtoIds(); - header->SetProtoIdsOffset(disk_header.proto_ids_off_); + collections.SetProtoIdsOffset(disk_header.proto_ids_off_); for (uint32_t i = 0; i < dex_file.NumProtoIds(); ++i) { - const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(i); - // Build the parameter type vector. - TypeIdVector* parameters = new TypeIdVector(); - DexFileParameterIterator dfpi(dex_file, disk_proto_id); - while (dfpi.HasNext()) { - parameters->push_back(header->TypeIds()[dfpi.GetTypeIdx()].get()); - dfpi.Next(); - } - ProtoId* proto_id = new ProtoId(header->StringIds()[disk_proto_id.shorty_idx_].get(), - header->TypeIds()[disk_proto_id.return_type_idx_].get(), - parameters); - proto_id->SetOffset(i); - proto_ids.push_back(std::unique_ptr<ProtoId>(proto_id)); + collections.CreateProtoId(dex_file, i); } // FieldId table. 
- std::vector<std::unique_ptr<FieldId>>& field_ids = header->FieldIds(); - header->SetFieldIdsOffset(disk_header.field_ids_off_); + collections.SetFieldIdsOffset(disk_header.field_ids_off_); for (uint32_t i = 0; i < dex_file.NumFieldIds(); ++i) { - const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i); - FieldId* field_id = new FieldId(header->TypeIds()[disk_field_id.class_idx_].get(), - header->TypeIds()[disk_field_id.type_idx_].get(), - header->StringIds()[disk_field_id.name_idx_].get()); - field_id->SetOffset(i); - field_ids.push_back(std::unique_ptr<FieldId>(field_id)); + collections.CreateFieldId(dex_file, i); } // MethodId table. - std::vector<std::unique_ptr<MethodId>>& method_ids = header->MethodIds(); - header->SetMethodIdsOffset(disk_header.method_ids_off_); + collections.SetMethodIdsOffset(disk_header.method_ids_off_); for (uint32_t i = 0; i < dex_file.NumMethodIds(); ++i) { - const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i); - MethodId* method_id = new MethodId(header->TypeIds()[disk_method_id.class_idx_].get(), - header->ProtoIds()[disk_method_id.proto_idx_].get(), - header->StringIds()[disk_method_id.name_idx_].get()); - method_id->SetOffset(i); - method_ids.push_back(std::unique_ptr<MethodId>(method_id)); + collections.CreateMethodId(dex_file, i); } // ClassDef table. 
- std::vector<std::unique_ptr<ClassDef>>& class_defs = header->ClassDefs(); - header->SetClassDefsOffset(disk_header.class_defs_off_); + collections.SetClassDefsOffset(disk_header.class_defs_off_); for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) { - const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i); - ClassDef* class_def = ReadClassDef(dex_file, disk_class_def, *header); - class_def->SetOffset(i); - class_defs.push_back(std::unique_ptr<ClassDef>(class_def)); + collections.CreateClassDef(dex_file, i); } return header; diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc index 3a3f417825..6f34a33ed7 100644 --- a/dexlayout/dexlayout.cc +++ b/dexlayout/dexlayout.cc @@ -30,9 +30,11 @@ #include <sstream> #include <vector> +#include "base/unix_file/fd_file.h" #include "dex_ir_builder.h" #include "dex_file-inl.h" #include "dex_instruction-inl.h" +#include "os.h" #include "utils.h" namespace art { @@ -348,10 +350,26 @@ static void DumpXmlAttribute(const char* p) { } // for } +// Forward declare to resolve circular dependence. +static void DumpEncodedValue(const dex_ir::EncodedValue* data); + +/* + * Dumps encoded annotation. + */ +static void DumpEncodedAnnotation(dex_ir::EncodedAnnotation* annotation) { + fputs(annotation->GetType()->GetStringId()->Data(), out_file_); + // Display all name=value pairs. + for (auto& subannotation : *annotation->GetAnnotationElements()) { + fputc(' ', out_file_); + fputs(subannotation->GetName()->Data(), out_file_); + fputc('=', out_file_); + DumpEncodedValue(subannotation->GetValue()); + } +} /* * Dumps encoded value. 
*/ -static void DumpEncodedValue(const dex_ir::ArrayItem* data) { +static void DumpEncodedValue(const dex_ir::EncodedValue* data) { switch (data->Type()) { case DexFile::kDexAnnotationByte: fprintf(out_file_, "%" PRId8, data->GetByte()); @@ -386,8 +404,8 @@ static void DumpEncodedValue(const dex_ir::ArrayItem* data) { break; } case DexFile::kDexAnnotationType: { - dex_ir::StringId* string_id = data->GetStringId(); - fputs(string_id->Data(), out_file_); + dex_ir::TypeId* type_id = data->GetTypeId(); + fputs(type_id->GetStringId()->Data(), out_file_); break; } case DexFile::kDexAnnotationField: @@ -404,22 +422,15 @@ static void DumpEncodedValue(const dex_ir::ArrayItem* data) { case DexFile::kDexAnnotationArray: { fputc('{', out_file_); // Display all elements. - for (auto& array : *data->GetAnnotationArray()) { + for (auto& value : *data->GetEncodedArray()->GetEncodedValues()) { fputc(' ', out_file_); - DumpEncodedValue(array.get()); + DumpEncodedValue(value.get()); } fputs(" }", out_file_); break; } case DexFile::kDexAnnotationAnnotation: { - fputs(data->GetAnnotationAnnotationString()->Data(), out_file_); - // Display all name=value pairs. - for (auto& subannotation : *data->GetAnnotationAnnotationNameValuePairArray()) { - fputc(' ', out_file_); - fputs(subannotation->Name()->Data(), out_file_); - fputc('=', out_file_); - DumpEncodedValue(subannotation->Value()); - } + DumpEncodedAnnotation(data->GetEncodedAnnotation()); break; } case DexFile::kDexAnnotationNull: @@ -437,8 +448,9 @@ static void DumpEncodedValue(const dex_ir::ArrayItem* data) { /* * Dumps the file header. 
*/ -static void DumpFileHeader(const dex_ir::Header* header) { +static void DumpFileHeader(dex_ir::Header* header) { char sanitized[8 * 2 + 1]; + dex_ir::Collections& collections = header->GetCollections(); fprintf(out_file_, "DEX file header:\n"); Asciify(sanitized, header->Magic(), 8); fprintf(out_file_, "magic : '%s'\n", sanitized); @@ -452,24 +464,24 @@ static void DumpFileHeader(const dex_ir::Header* header) { fprintf(out_file_, "link_size : %d\n", header->LinkSize()); fprintf(out_file_, "link_off : %d (0x%06x)\n", header->LinkOffset(), header->LinkOffset()); - fprintf(out_file_, "string_ids_size : %d\n", header->StringIdsSize()); + fprintf(out_file_, "string_ids_size : %d\n", collections.StringIdsSize()); fprintf(out_file_, "string_ids_off : %d (0x%06x)\n", - header->StringIdsOffset(), header->StringIdsOffset()); - fprintf(out_file_, "type_ids_size : %d\n", header->TypeIdsSize()); + collections.StringIdsOffset(), collections.StringIdsOffset()); + fprintf(out_file_, "type_ids_size : %d\n", collections.TypeIdsSize()); fprintf(out_file_, "type_ids_off : %d (0x%06x)\n", - header->TypeIdsOffset(), header->TypeIdsOffset()); - fprintf(out_file_, "proto_ids_size : %d\n", header->ProtoIdsSize()); + collections.TypeIdsOffset(), collections.TypeIdsOffset()); + fprintf(out_file_, "proto_ids_size : %d\n", collections.ProtoIdsSize()); fprintf(out_file_, "proto_ids_off : %d (0x%06x)\n", - header->ProtoIdsOffset(), header->ProtoIdsOffset()); - fprintf(out_file_, "field_ids_size : %d\n", header->FieldIdsSize()); + collections.ProtoIdsOffset(), collections.ProtoIdsOffset()); + fprintf(out_file_, "field_ids_size : %d\n", collections.FieldIdsSize()); fprintf(out_file_, "field_ids_off : %d (0x%06x)\n", - header->FieldIdsOffset(), header->FieldIdsOffset()); - fprintf(out_file_, "method_ids_size : %d\n", header->MethodIdsSize()); + collections.FieldIdsOffset(), collections.FieldIdsOffset()); + fprintf(out_file_, "method_ids_size : %d\n", collections.MethodIdsSize()); 
fprintf(out_file_, "method_ids_off : %d (0x%06x)\n", - header->MethodIdsOffset(), header->MethodIdsOffset()); - fprintf(out_file_, "class_defs_size : %d\n", header->ClassDefsSize()); + collections.MethodIdsOffset(), collections.MethodIdsOffset()); + fprintf(out_file_, "class_defs_size : %d\n", collections.ClassDefsSize()); fprintf(out_file_, "class_defs_off : %d (0x%06x)\n", - header->ClassDefsOffset(), header->ClassDefsOffset()); + collections.ClassDefsOffset(), collections.ClassDefsOffset()); fprintf(out_file_, "data_size : %d\n", header->DataSize()); fprintf(out_file_, "data_off : %d (0x%06x)\n\n", header->DataOffset(), header->DataOffset()); @@ -480,19 +492,19 @@ static void DumpFileHeader(const dex_ir::Header* header) { */ static void DumpClassDef(dex_ir::Header* header, int idx) { // General class information. - dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get(); + dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx); fprintf(out_file_, "Class #%d header:\n", idx); - fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetOffset()); + fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetIndex()); fprintf(out_file_, "access_flags : %d (0x%04x)\n", class_def->GetAccessFlags(), class_def->GetAccessFlags()); uint32_t superclass_idx = class_def->Superclass() == nullptr ? 
- DexFile::kDexNoIndex16 : class_def->Superclass()->GetOffset(); + DexFile::kDexNoIndex16 : class_def->Superclass()->GetIndex(); fprintf(out_file_, "superclass_idx : %d\n", superclass_idx); fprintf(out_file_, "interfaces_off : %d (0x%06x)\n", class_def->InterfacesOffset(), class_def->InterfacesOffset()); uint32_t source_file_offset = 0xffffffffU; if (class_def->SourceFile() != nullptr) { - source_file_offset = class_def->SourceFile()->GetOffset(); + source_file_offset = class_def->SourceFile()->GetIndex(); } fprintf(out_file_, "source_file_idx : %d\n", source_file_offset); uint32_t annotations_offset = 0; @@ -541,7 +553,7 @@ static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) { fputs(" empty-annotation-set\n", out_file_); return; } - for (std::unique_ptr<dex_ir::AnnotationItem>& annotation : *set_item->GetItems()) { + for (dex_ir::AnnotationItem* annotation : *set_item->GetItems()) { if (annotation == nullptr) { continue; } @@ -552,10 +564,7 @@ static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) { case DexFile::kDexVisibilitySystem: fputs("VISIBILITY_SYSTEM ", out_file_); break; default: fputs("VISIBILITY_UNKNOWN ", out_file_); break; } // switch - // Decode raw bytes in annotation. - // const uint8_t* rData = annotation->annotation_; - dex_ir::ArrayItem* data = annotation->GetItem(); - DumpEncodedValue(data); + DumpEncodedAnnotation(annotation->GetAnnotation()); fputc('\n', out_file_); } } @@ -564,7 +573,7 @@ static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) { * Dumps class annotations. 
*/ static void DumpClassAnnotations(dex_ir::Header* header, int idx) { - dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get(); + dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx); dex_ir::AnnotationsDirectoryItem* annotations_directory = class_def->Annotations(); if (annotations_directory == nullptr) { return; // none @@ -587,7 +596,7 @@ static void DumpClassAnnotations(dex_ir::Header* header, int idx) { if (fields != nullptr) { for (auto& field : *fields) { const dex_ir::FieldId* field_id = field->GetFieldId(); - const uint32_t field_idx = field_id->GetOffset(); + const uint32_t field_idx = field_id->GetIndex(); const char* field_name = field_id->Name()->Data(); fprintf(out_file_, "Annotations on field #%u '%s'\n", field_idx, field_name); DumpAnnotationSetItem(field->GetAnnotationSetItem()); @@ -598,7 +607,7 @@ static void DumpClassAnnotations(dex_ir::Header* header, int idx) { if (methods != nullptr) { for (auto& method : *methods) { const dex_ir::MethodId* method_id = method->GetMethodId(); - const uint32_t method_idx = method_id->GetOffset(); + const uint32_t method_idx = method_id->GetIndex(); const char* method_name = method_id->Name()->Data(); fprintf(out_file_, "Annotations on method #%u '%s'\n", method_idx, method_name); DumpAnnotationSetItem(method->GetAnnotationSetItem()); @@ -609,13 +618,13 @@ static void DumpClassAnnotations(dex_ir::Header* header, int idx) { if (parameters != nullptr) { for (auto& parameter : *parameters) { const dex_ir::MethodId* method_id = parameter->GetMethodId(); - const uint32_t method_idx = method_id->GetOffset(); + const uint32_t method_idx = method_id->GetIndex(); const char* method_name = method_id->Name()->Data(); fprintf(out_file_, "Annotations on method #%u '%s' parameters\n", method_idx, method_name); uint32_t j = 0; - for (auto& annotation : *parameter->GetAnnotations()) { + for (dex_ir::AnnotationSetItem* annotation : *parameter->GetAnnotations()->GetItems()) { fprintf(out_file_, "#%u\n", 
j); - DumpAnnotationSetItem(annotation.get()); + DumpAnnotationSetItem(annotation); ++j; } } @@ -748,24 +757,24 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header, outSize = snprintf(buf.get(), buf_size, "<no-index>"); break; case Instruction::kIndexTypeRef: - if (index < header->TypeIdsSize()) { - const char* tp = header->TypeIds()[index]->GetStringId()->Data(); + if (index < header->GetCollections().TypeIdsSize()) { + const char* tp = header->GetCollections().GetTypeId(index)->GetStringId()->Data(); outSize = snprintf(buf.get(), buf_size, "%s // type@%0*x", tp, width, index); } else { outSize = snprintf(buf.get(), buf_size, "<type?> // type@%0*x", width, index); } break; case Instruction::kIndexStringRef: - if (index < header->StringIdsSize()) { - const char* st = header->StringIds()[index]->Data(); + if (index < header->GetCollections().StringIdsSize()) { + const char* st = header->GetCollections().GetStringId(index)->Data(); outSize = snprintf(buf.get(), buf_size, "\"%s\" // string@%0*x", st, width, index); } else { outSize = snprintf(buf.get(), buf_size, "<string?> // string@%0*x", width, index); } break; case Instruction::kIndexMethodRef: - if (index < header->MethodIdsSize()) { - dex_ir::MethodId* method_id = header->MethodIds()[index].get(); + if (index < header->GetCollections().MethodIdsSize()) { + dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(index); const char* name = method_id->Name()->Data(); std::string type_descriptor = GetSignatureForProtoId(method_id->Proto()); const char* back_descriptor = method_id->Class()->GetStringId()->Data(); @@ -776,8 +785,8 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header, } break; case Instruction::kIndexFieldRef: - if (index < header->FieldIdsSize()) { - dex_ir::FieldId* field_id = header->FieldIds()[index].get(); + if (index < header->GetCollections().FieldIdsSize()) { + dex_ir::FieldId* field_id = header->GetCollections().GetFieldId(index); const char* name = 
field_id->Name()->Data(); const char* type_descriptor = field_id->Type()->GetStringId()->Data(); const char* back_descriptor = field_id->Class()->GetStringId()->Data(); @@ -1028,7 +1037,7 @@ static void DumpInstruction(dex_ir::Header* header, const dex_ir::CodeItem* code */ static void DumpBytecodes(dex_ir::Header* header, uint32_t idx, const dex_ir::CodeItem* code, uint32_t code_offset) { - dex_ir::MethodId* method_id = header->MethodIds()[idx].get(); + dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(idx); const char* name = method_id->Name()->Data(); std::string type_descriptor = GetSignatureForProtoId(method_id->Proto()); const char* back_descriptor = method_id->Class()->GetStringId()->Data(); @@ -1088,7 +1097,7 @@ static void DumpMethod(dex_ir::Header* header, uint32_t idx, uint32_t flags, return; } - dex_ir::MethodId* method_id = header->MethodIds()[idx].get(); + dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(idx); const char* name = method_id->Name()->Data(); char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str()); const char* back_descriptor = method_id->Class()->GetStringId()->Data(); @@ -1187,13 +1196,13 @@ static void DumpMethod(dex_ir::Header* header, uint32_t idx, uint32_t flags, * Dumps a static (class) field. */ static void DumpSField(dex_ir::Header* header, uint32_t idx, uint32_t flags, - int i, dex_ir::ArrayItem* init) { + int i, dex_ir::EncodedValue* init) { // Bail for anything private if export only requested. 
if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) { return; } - dex_ir::FieldId* field_id = header->FieldIds()[idx].get(); + dex_ir::FieldId* field_id = header->GetCollections().GetFieldId(idx); const char* name = field_id->Name()->Data(); const char* type_descriptor = field_id->Type()->GetStringId()->Data(); const char* back_descriptor = field_id->Class()->GetStringId()->Data(); @@ -1293,7 +1302,7 @@ static void DumpClass(const DexFile* dex_file, dex_ir::Header* header, int idx, char** last_package) { - dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get(); + dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(idx); // Omitting non-public class. if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) { return; @@ -1316,7 +1325,8 @@ static void DumpClass(const DexFile* dex_file, // up the classes, sort them, and dump them alphabetically so the // package name wouldn't jump around, but that's not a great plan // for something that needs to run on the device. - const char* class_descriptor = header->ClassDefs()[idx]->ClassType()->GetStringId()->Data(); + const char* class_descriptor = + header->GetCollections().GetClassDef(idx)->ClassType()->GetStringId()->Data(); if (!(class_descriptor[0] == 'L' && class_descriptor[strlen(class_descriptor)-1] == ';')) { // Arrays and primitives should not be defined explicitly. Keep going? @@ -1386,7 +1396,7 @@ static void DumpClass(const DexFile* dex_file, } // Interfaces. - dex_ir::TypeIdVector* interfaces = class_def->Interfaces(); + const dex_ir::TypeIdVector* interfaces = class_def->Interfaces(); if (interfaces != nullptr) { for (uint32_t i = 0; i < interfaces->size(); i++) { DumpInterface((*interfaces)[i], i); @@ -1396,8 +1406,10 @@ static void DumpClass(const DexFile* dex_file, // Fields and methods. dex_ir::ClassData* class_data = class_def->GetClassData(); // Prepare data for static fields. 
- std::vector<std::unique_ptr<dex_ir::ArrayItem>>* static_values = class_def->StaticValues(); - const uint32_t static_values_size = (static_values == nullptr) ? 0 : static_values->size(); + dex_ir::EncodedArrayItem* static_values = class_def->StaticValues(); + dex_ir::EncodedValueVector* encoded_values = + static_values == nullptr ? nullptr : static_values->GetEncodedValues(); + const uint32_t encoded_values_size = (encoded_values == nullptr) ? 0 : encoded_values->size(); // Static fields. if (options_.output_format_ == kOutputPlain) { @@ -1408,10 +1420,10 @@ static void DumpClass(const DexFile* dex_file, if (static_fields != nullptr) { for (uint32_t i = 0; i < static_fields->size(); i++) { DumpSField(header, - (*static_fields)[i]->GetFieldId()->GetOffset(), + (*static_fields)[i]->GetFieldId()->GetIndex(), (*static_fields)[i]->GetAccessFlags(), i, - i < static_values_size ? (*static_values)[i].get() : nullptr); + i < encoded_values_size ? (*encoded_values)[i].get() : nullptr); } // for } } @@ -1425,7 +1437,7 @@ static void DumpClass(const DexFile* dex_file, if (instance_fields != nullptr) { for (uint32_t i = 0; i < instance_fields->size(); i++) { DumpIField(header, - (*instance_fields)[i]->GetFieldId()->GetOffset(), + (*instance_fields)[i]->GetFieldId()->GetIndex(), (*instance_fields)[i]->GetAccessFlags(), i); } // for @@ -1441,7 +1453,7 @@ static void DumpClass(const DexFile* dex_file, if (direct_methods != nullptr) { for (uint32_t i = 0; i < direct_methods->size(); i++) { DumpMethod(header, - (*direct_methods)[i]->GetMethodId()->GetOffset(), + (*direct_methods)[i]->GetMethodId()->GetIndex(), (*direct_methods)[i]->GetAccessFlags(), (*direct_methods)[i]->GetCodeItem(), i); @@ -1458,7 +1470,7 @@ static void DumpClass(const DexFile* dex_file, if (virtual_methods != nullptr) { for (uint32_t i = 0; i < virtual_methods->size(); i++) { DumpMethod(header, - (*virtual_methods)[i]->GetMethodId()->GetOffset(), + (*virtual_methods)[i]->GetMethodId()->GetIndex(), 
(*virtual_methods)[i]->GetAccessFlags(), (*virtual_methods)[i]->GetCodeItem(), i); @@ -1474,7 +1486,7 @@ static void DumpClass(const DexFile* dex_file, } const dex_ir::StringId* source_file = class_def->SourceFile(); fprintf(out_file_, " source_file_idx : %d (%s)\n\n", - source_file == nullptr ? 0xffffffffU : source_file->GetOffset(), file_name); + source_file == nullptr ? 0xffffffffU : source_file->GetIndex(), file_name); } else if (options_.output_format_ == kOutputXml) { fprintf(out_file_, "</class>\n"); } @@ -1483,6 +1495,96 @@ static void DumpClass(const DexFile* dex_file, } /* +static uint32_t GetDataSectionOffset(dex_ir::Header& header) { + return dex_ir::Header::ItemSize() + + header.GetCollections().StringIdsSize() * dex_ir::StringId::ItemSize() + + header.GetCollections().TypeIdsSize() * dex_ir::TypeId::ItemSize() + + header.GetCollections().ProtoIdsSize() * dex_ir::ProtoId::ItemSize() + + header.GetCollections().FieldIdsSize() * dex_ir::FieldId::ItemSize() + + header.GetCollections().MethodIdsSize() * dex_ir::MethodId::ItemSize() + + header.GetCollections().ClassDefsSize() * dex_ir::ClassDef::ItemSize(); +} + +static bool Align(File* file, uint32_t& offset) { + uint8_t zero_buffer[] = { 0, 0, 0 }; + uint32_t zeroes = (-offset) & 3; + if (zeroes > 0) { + if (!file->PwriteFully(zero_buffer, zeroes, offset)) { + return false; + } + offset += zeroes; + } + return true; +} + +static bool WriteStrings(File* dex_file, dex_ir::Header& header, + uint32_t& index_offset, uint32_t& data_offset) { + uint32_t index = 0; + uint32_t index_buffer[1]; + uint32_t string_length; + uint32_t length_length; + uint8_t length_buffer[8]; + for (std::unique_ptr<dex_ir::StringId>& string_id : header.GetCollections().StringIds()) { + string_id->SetOffset(index); + index_buffer[0] = data_offset; + string_length = strlen(string_id->Data()); + length_length = UnsignedLeb128Size(string_length); + EncodeUnsignedLeb128(length_buffer, string_length); + + if 
(!dex_file->PwriteFully(index_buffer, 4, index_offset) || + !dex_file->PwriteFully(length_buffer, length_length, data_offset) || + !dex_file->PwriteFully(string_id->Data(), string_length, data_offset + length_length)) { + return false; + } + + index++; + index_offset += 4; + data_offset += string_length + length_length; + } + return true; +} + +static bool WriteTypes(File* dex_file, dex_ir::Header& header, uint32_t& index_offset) { + uint32_t index = 0; + uint32_t index_buffer[1]; + for (std::unique_ptr<dex_ir::TypeId>& type_id : header.GetCollections().TypeIds()) { + type_id->SetIndex(index); + index_buffer[0] = type_id->GetStringId()->GetOffset(); + + if (!dex_file->PwriteFully(index_buffer, 4, index_offset)) { + return false; + } + + index++; + index_offset += 4; + } + return true; +} + +static bool WriteTypeLists(File* dex_file, dex_ir::Header& header, uint32_t& data_offset) { + if (!Align(dex_file, data_offset)) { + return false; + } + + return true; +} + +static void OutputDexFile(dex_ir::Header& header, const char* file_name) { + LOG(INFO) << "FILE NAME: " << file_name; + std::unique_ptr<File> dex_file(OS::CreateEmptyFileWriteOnly(file_name)); + if (dex_file == nullptr) { + fprintf(stderr, "Can't open %s\n", file_name); + return; + } + + uint32_t index_offset = dex_ir::Header::ItemSize(); + uint32_t data_offset = GetDataSectionOffset(header); + WriteStrings(dex_file.get(), header, index_offset, data_offset); + WriteTypes(dex_file.get(), header, index_offset); +} +*/ + +/* * Dumps the requested sections of the file. */ static void ProcessDexFile(const char* file_name, const DexFile* dex_file) { @@ -1504,7 +1606,7 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file) { // Iterate over all classes. 
char* package = nullptr; - const uint32_t class_defs_size = header->ClassDefsSize(); + const uint32_t class_defs_size = header->GetCollections().ClassDefsSize(); for (uint32_t i = 0; i < class_defs_size; i++) { DumpClass(dex_file, header.get(), i, &package); } // for @@ -1519,6 +1621,14 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file) { if (options_.output_format_ == kOutputXml) { fprintf(out_file_, "</api>\n"); } + + /* + // Output dex file. + if (options_.output_dex_files_) { + std::string output_dex_filename = dex_file->GetLocation() + ".out"; + OutputDexFile(*header, output_dex_filename.c_str()); + } + */ } /* diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h index bae587dfb2..736d230a99 100644 --- a/dexlayout/dexlayout.h +++ b/dexlayout/dexlayout.h @@ -41,6 +41,7 @@ struct Options { bool disassemble_; bool exports_only_; bool ignore_bad_checksum_; + bool output_dex_files_; bool show_annotations_; bool show_cfg_; bool show_file_headers_; diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc index 286a0c6058..ec5edf4065 100644 --- a/dexlayout/dexlayout_main.cc +++ b/dexlayout/dexlayout_main.cc @@ -26,8 +26,8 @@ #include <string.h> #include <unistd.h> +#include "base/logging.h" #include "mem_map.h" -#include "runtime.h" namespace art { @@ -38,7 +38,7 @@ static const char* kProgramName = "dexlayout"; */ static void Usage(void) { fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n"); - fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]" + fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-w]" " dexfile...\n\n", kProgramName); fprintf(stderr, " -a : display annotations\n"); fprintf(stderr, " -b : build dex_ir\n"); @@ -51,6 +51,7 @@ static void Usage(void) { fprintf(stderr, " -i : ignore checksum failures\n"); fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n"); fprintf(stderr, " -o : output file name (defaults to 
stdout)\n"); + fprintf(stderr, " -w : output dex files\n"); } /* @@ -68,7 +69,7 @@ int DexlayoutDriver(int argc, char** argv) { // Parse all arguments. while (1) { - const int ic = getopt(argc, argv, "abcdefghil:o:"); + const int ic = getopt(argc, argv, "abcdefghil:o:w"); if (ic < 0) { break; // done } @@ -113,6 +114,9 @@ int DexlayoutDriver(int argc, char** argv) { case 'o': // output file options_.output_file_name_ = optarg; break; + case 'w': // output dex files + options_.output_dex_files_ = true; + break; default: want_usage = true; break; diff --git a/dexlist/Android.bp b/dexlist/Android.bp index 8e3c91d378..52b1ee97d2 100644 --- a/dexlist/Android.bp +++ b/dexlist/Android.bp @@ -19,3 +19,11 @@ art_cc_binary { cflags: ["-Wall"], shared_libs: ["libart"], } + +art_cc_test { + name: "art_dexlist_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["dexlist_test.cc"], +} diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc index 9a65ba647c..da1dd7fd89 100644 --- a/dexlist/dexlist_test.cc +++ b/dexlist/dexlist_test.cc @@ -43,11 +43,7 @@ class DexListTest : public CommonRuntimeTest { // Runs test with given arguments. bool Exec(const std::vector<std::string>& args, std::string* error_msg) { std::string file_path = GetTestAndroidRoot(); - if (IsHost()) { - file_path += "/bin/dexlist"; - } else { - file_path += "/xbin/dexlist"; - } + file_path += "/bin/dexlist"; EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path"; std::vector<std::string> exec_argv = { file_path }; exec_argv.insert(exec_argv.end(), args.begin(), args.end()); diff --git a/disassembler/Android.bp b/disassembler/Android.bp index b074d9f9bf..8dfada223b 100644 --- a/disassembler/Android.bp +++ b/disassembler/Android.bp @@ -38,7 +38,8 @@ art_cc_library { name: "libart-disassembler", defaults: ["libart-disassembler-defaults"], shared_libs: [ - // For disassembler_arm64. + // For disassembler_arm*. 
+ "libvixl-arm", "libvixl-arm64", ], } @@ -50,7 +51,8 @@ art_cc_library { "art_debug_defaults", ], shared_libs: [ - // For disassembler_arm64. + // For disassembler_arm*. + "libvixld-arm", "libvixld-arm64", ], } diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index c3e288deae..925047f8b0 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -16,1938 +16,232 @@ #include "disassembler_arm.h" -#include <inttypes.h> - -#include <ostream> -#include <sstream> +#include <memory> +#include <string> #include "android-base/logging.h" -#include "android-base/stringprintf.h" #include "arch/arm/registers_arm.h" #include "base/bit_utils.h" -using android::base::StringPrintf; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#include "aarch32/instructions-aarch32.h" +#include "aarch32/disasm-aarch32.h" +#pragma GCC diagnostic pop namespace art { namespace arm { -size_t DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin) { - if ((reinterpret_cast<intptr_t>(begin) & 1) == 0) { - DumpArm(os, begin); - return 4; - } else { - // remove thumb specifier bits - begin = reinterpret_cast<const uint8_t*>(reinterpret_cast<uintptr_t>(begin) & ~1); - return DumpThumb16(os, begin); - } -} - -void DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) { - if ((reinterpret_cast<intptr_t>(begin) & 1) == 0) { - for (const uint8_t* cur = begin; cur < end; cur += 4) { - DumpArm(os, cur); - } - } else { - // remove thumb specifier bits - begin = reinterpret_cast<const uint8_t*>(reinterpret_cast<uintptr_t>(begin) & ~1); - end = reinterpret_cast<const uint8_t*>(reinterpret_cast<uintptr_t>(end) & ~1); - for (const uint8_t* cur = begin; cur < end;) { - cur += DumpThumb16(os, cur); +using vixl::aarch32::MemOperand; +using vixl::aarch32::PrintDisassembler; +using vixl::aarch32::pc; + +static const vixl::aarch32::Register tr(TR); + +class DisassemblerArm::CustomDisassembler FINAL : 
public PrintDisassembler { + class CustomDisassemblerStream FINAL : public DisassemblerStream { + public: + CustomDisassemblerStream(std::ostream& os, + const CustomDisassembler* disasm, + const DisassemblerOptions* options) + : DisassemblerStream(os), disasm_(disasm), options_(options) {} + + DisassemblerStream& operator<<(const PrintLabel& label) OVERRIDE { + const LocationType type = label.GetLocationType(); + + switch (type) { + case kLoadByteLocation: + case kLoadHalfWordLocation: + case kLoadWordLocation: + case kLoadDoubleWordLocation: + case kLoadSignedByteLocation: + case kLoadSignedHalfWordLocation: + case kLoadSinglePrecisionLocation: + case kLoadDoublePrecisionLocation: + case kVld1Location: + case kVld2Location: + case kVld3Location: + case kVld4Location: { + const uintptr_t pc_delta = disasm_->IsT32() + ? vixl::aarch32::kT32PcDelta + : vixl::aarch32::kA32PcDelta; + const int32_t offset = label.GetLabel()->GetLocation(); + + os() << "[pc, #" << offset - pc_delta << "]"; + PrintLiteral(type, offset); + return *this; + } + default: + return DisassemblerStream::operator<<(label); + } } - } -} - -static const char* kConditionCodeNames[] = { - "eq", // 0000 - equal - "ne", // 0001 - not-equal - "cs", // 0010 - carry-set, greater than, equal or unordered - "cc", // 0011 - carry-clear, less than - "mi", // 0100 - minus, negative - "pl", // 0101 - plus, positive or zero - "vs", // 0110 - overflow - "vc", // 0111 - no overflow - "hi", // 1000 - unsigned higher - "ls", // 1001 - unsigned lower or same - "ge", // 1010 - signed greater than or equal - "lt", // 1011 - signed less than - "gt", // 1100 - signed greater than - "le", // 1101 - signed less than or equal - "", // 1110 - always - "nv", // 1111 - never (mostly obsolete, but might be a clue that we're mistranslating) -}; -void DisassemblerArm::DumpCond(std::ostream& os, uint32_t cond) { - if (cond < 15) { - os << kConditionCodeNames[cond]; - } else { - os << "Unexpected condition: " << cond; - } -} - -void 
DisassemblerArm::DumpMemoryDomain(std::ostream& os, uint32_t domain) { - switch (domain) { - case 15U /* 0b1111 */: os << "sy"; break; - case 14U /* 0b1110 */: os << "st"; break; - case 11U /* 0b1011 */: os << "ish"; break; - case 10U /* 0b1010 */: os << "ishst"; break; - case 7U /* 0b0111 */: os << "nsh"; break; - case 6U /* 0b0110 */: os << "nshst"; break; - case 3U /* 0b0011 */: os << "osh"; break; - case 2U /* 0b0010 */: os << "oshst"; break; - } -} + DisassemblerStream& operator<<(const vixl::aarch32::Register reg) OVERRIDE { + if (reg.Is(tr)) { + os() << "tr"; + return *this; + } else { + return DisassemblerStream::operator<<(reg); + } + } -void DisassemblerArm::DumpBranchTarget(std::ostream& os, const uint8_t* instr_ptr, int32_t imm32) { - os << StringPrintf("%+d (", imm32) << FormatInstructionPointer(instr_ptr + imm32) << ")"; -} + DisassemblerStream& operator<<(const MemOperand& operand) OVERRIDE { + // VIXL must use a PrintLabel object whenever the base register is PC; + // the following check verifies this invariant, and guards against bugs. + DCHECK(!operand.GetBaseRegister().Is(pc)); + DisassemblerStream::operator<<(operand); -static uint32_t ReadU16(const uint8_t* ptr) { - return ptr[0] | (ptr[1] << 8); -} + if (operand.GetBaseRegister().Is(tr) && operand.IsImmediate()) { + os() << " ; "; + options_->thread_offset_name_function_(os(), operand.GetOffsetImmediate()); + } -static uint32_t ReadU32(const uint8_t* ptr) { - return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24); -} + return *this; + } -static const char* kDataProcessingOperations[] = { - "and", "eor", "sub", "rsb", "add", "adc", "sbc", "rsc", - "tst", "teq", "cmp", "cmn", "orr", "mov", "bic", "mvn", -}; + DisassemblerStream& operator<<(const vixl::aarch32::AlignedMemOperand& operand) OVERRIDE { + // VIXL must use a PrintLabel object whenever the base register is PC; + // the following check verifies this invariant, and guards against bugs. 
+ DCHECK(!operand.GetBaseRegister().Is(pc)); + return DisassemblerStream::operator<<(operand); + } -static const char* kThumbDataProcessingOperations[] = { - "and", "eor", "lsl", "lsr", "asr", "adc", "sbc", "ror", - "tst", "rsb", "cmp", "cmn", "orr", "mul", "bic", "mvn", -}; + private: + void PrintLiteral(LocationType type, int32_t offset); -static const char* const kThumb2ShiftOperations[] = { - "lsl", "lsr", "asr", "ror" -}; + const CustomDisassembler* disasm_; + const DisassemblerOptions* options_; + }; -static const char* kThumbReverseOperations[] = { - "rev", "rev16", "rbit", "revsh" -}; + public: + CustomDisassembler(std::ostream& os, const DisassemblerOptions* options) + // vixl::aarch32::Disassembler::~Disassembler() will delete the stream. + : PrintDisassembler(new CustomDisassemblerStream(os, this, options)) {} -struct ArmRegister { - explicit ArmRegister(uint32_t r_in) : r(r_in) { CHECK_LE(r_in, 15U); } - ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { - CHECK_LE(r, 15U); - } - uint32_t r; -}; -std::ostream& operator<<(std::ostream& os, const ArmRegister& r) { - if (r.r == 13) { - os << "sp"; - } else if (r.r == 14) { - os << "lr"; - } else if (r.r == 15) { - os << "pc"; - } else { - os << "r" << r.r; + void PrintPc(uint32_t prog_ctr) OVERRIDE { + os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << prog_ctr << ": "; } - return os; -} - -struct ThumbRegister : ArmRegister { - ThumbRegister(uint16_t instruction, uint16_t at_bit) : ArmRegister((instruction >> at_bit) & 0x7) {} -}; -struct RmLslImm2 { - explicit RmLslImm2(uint32_t instr) : imm2((instr >> 4) & 0x3), rm(instr & 0xf) {} - uint32_t imm2; - ArmRegister rm; -}; -std::ostream& operator<<(std::ostream& os, const RmLslImm2& r) { - os << r.rm; - if (r.imm2 != 0) { - os << ", lsl #" << r.imm2; + bool IsT32() const { + return is_t32_; } - return os; -} -struct ShiftedImmediate { - explicit ShiftedImmediate(uint32_t instruction) { - uint32_t rotate = 
((instruction >> 8) & 0xf); - uint32_t imm = (instruction & 0xff); - value = (imm >> (2 * rotate)) | (imm << (32 - (2 * rotate))); + void SetT32(bool is_t32) { + is_t32_ = is_t32; } - uint32_t value; -}; -std::ostream& operator<<(std::ostream& os, const ShiftedImmediate& rhs) { - os << "#" << rhs.value; - return os; -} -struct RegisterList { - explicit RegisterList(uint32_t instruction) : register_list(instruction & 0xffff) {} - uint32_t register_list; + private: + bool is_t32_; }; -std::ostream& operator<<(std::ostream& os, const RegisterList& rhs) { - if (rhs.register_list == 0) { - os << "<no register list?>"; - return os; - } - os << "{"; - bool first = true; - for (size_t i = 0; i < 16; i++) { - if ((rhs.register_list & (1 << i)) != 0) { - if (first) { - first = false; - } else { - os << ", "; - } - os << ArmRegister(i); - } - } - os << "}"; - return os; -} -struct FpRegister { - FpRegister(uint32_t instr, uint16_t at_bit, uint16_t extra_at_bit) { - size = (instr >> 8) & 1; - uint32_t Vn = (instr >> at_bit) & 0xF; - uint32_t N = (instr >> extra_at_bit) & 1; - r = (size != 0 ? ((N << 4) | Vn) : ((Vn << 1) | N)); - } - FpRegister(uint32_t instr, uint16_t at_bit, uint16_t extra_at_bit, uint32_t forced_size) { - size = forced_size; - uint32_t Vn = (instr >> at_bit) & 0xF; - uint32_t N = (instr >> extra_at_bit) & 1; - r = (size != 0 ? ((N << 4) | Vn) : ((Vn << 1) | N)); - } - FpRegister(const FpRegister& other, uint32_t offset) - : size(other.size), r(other.r + offset) {} +void DisassemblerArm::CustomDisassembler::CustomDisassemblerStream::PrintLiteral(LocationType type, + int32_t offset) { + // Literal offsets are not required to be aligned, so we may need unaligned access. 
+ typedef const int16_t unaligned_int16_t __attribute__ ((aligned (1))); + typedef const uint16_t unaligned_uint16_t __attribute__ ((aligned (1))); + typedef const int32_t unaligned_int32_t __attribute__ ((aligned (1))); + typedef const int64_t unaligned_int64_t __attribute__ ((aligned (1))); + typedef const float unaligned_float __attribute__ ((aligned (1))); + typedef const double unaligned_double __attribute__ ((aligned (1))); - uint32_t size; // 0 = f32, 1 = f64 - uint32_t r; -}; -std::ostream& operator<<(std::ostream& os, const FpRegister& rhs) { - return os << ((rhs.size != 0) ? "d" : "s") << rhs.r; -} + // Zeros are used for the LocationType values this function does not care about. + const size_t literal_size[kVst4Location + 1] = { + 0, 0, 0, 0, sizeof(uint8_t), sizeof(unaligned_uint16_t), sizeof(unaligned_int32_t), + sizeof(unaligned_int64_t), sizeof(int8_t), sizeof(unaligned_int16_t), + sizeof(unaligned_float), sizeof(unaligned_double)}; + const uintptr_t begin = reinterpret_cast<uintptr_t>(options_->base_address_); + const uintptr_t end = reinterpret_cast<uintptr_t>(options_->end_address_); + uintptr_t literal_addr = RoundDown(disasm_->GetPc(), vixl::aarch32::kRegSizeInBytes) + offset; -struct FpRegisterRange { - explicit FpRegisterRange(uint32_t instr) - : first(instr, 12, 22), imm8(instr & 0xFF) {} - FpRegister first; - uint32_t imm8; -}; -std::ostream& operator<<(std::ostream& os, const FpRegisterRange& rhs) { - os << "{" << rhs.first; - int count = (rhs.first.size != 0 ? 
((rhs.imm8 + 1u) >> 1) : rhs.imm8); - if (count > 1) { - os << "-" << FpRegister(rhs.first, count - 1); - } - if (rhs.imm8 == 0) { - os << " (EMPTY)"; - } else if (rhs.first.size != 0 && (rhs.imm8 & 1) != 0) { - os << rhs.first << " (HALF)"; + if (!options_->absolute_addresses_) { + literal_addr += begin; } - os << "}"; - return os; -} -void DisassemblerArm::DumpArm(std::ostream& os, const uint8_t* instr_ptr) { - uint32_t instruction = ReadU32(instr_ptr); - uint32_t cond = (instruction >> 28) & 0xf; - uint32_t op1 = (instruction >> 25) & 0x7; - std::string opcode; - std::string suffixes; - std::ostringstream args; - switch (op1) { - case 0: - case 1: // Data processing instructions. - { - if ((instruction & 0x0ff000f0) == 0x01200070) { // BKPT - opcode = "bkpt"; - uint32_t imm12 = (instruction >> 8) & 0xfff; - uint32_t imm4 = (instruction & 0xf); - args << '#' << ((imm12 << 4) | imm4); - break; - } - if ((instruction & 0x0fffffd0) == 0x012fff10) { // BX and BLX (register) - opcode = (((instruction >> 5) & 1) ? "blx" : "bx"); - args << ArmRegister(instruction & 0xf); - break; - } - bool i = (instruction & (1 << 25)) != 0; - bool s = (instruction & (1 << 20)) != 0; - uint32_t op = (instruction >> 21) & 0xf; - opcode = kDataProcessingOperations[op]; - bool implicit_s = ((op & ~3) == 8); // TST, TEQ, CMP, and CMN. - bool is_mov = op == 13U /* 0b1101 */ || op == 15U /* 0b1111 */; - if (is_mov) { - // Show only Rd and Rm. - if (s) { - suffixes += 's'; - } - args << ArmRegister(instruction, 12) << ", "; - if (i) { - args << ShiftedImmediate(instruction); - } else { - // TODO: Shifted register. - args << ArmRegister(instruction, 16) << ", " << ArmRegister(instruction, 0); - } - } else { - if (implicit_s) { - // Rd is unused (and not shown), and we don't show the 's' suffix either. 
- } else { - if (s) { - suffixes += 's'; - } - args << ArmRegister(instruction, 12) << ", "; - } - if (i) { - args << ArmRegister(instruction, 16) << ", " << ShiftedImmediate(instruction); - } else { - // TODO: Shifted register. - args << ArmRegister(instruction, 16) << ", " << ArmRegister(instruction, 0); - } - } - } - break; - case 2: // Load/store word and unsigned byte. - { - bool p = (instruction & (1 << 24)) != 0; - bool b = (instruction & (1 << 22)) != 0; - bool w = (instruction & (1 << 21)) != 0; - bool l = (instruction & (1 << 20)) != 0; - opcode = StringPrintf("%s%s", (l ? "ldr" : "str"), (b ? "b" : "")); - args << ArmRegister(instruction, 12) << ", "; - ArmRegister rn(instruction, 16); - if (rn.r == 0xf) { - UNIMPLEMENTED(FATAL) << "literals"; - } else { - bool wback = !p || w; - uint32_t offset = (instruction & 0xfff); - if (p && !wback) { - args << "[" << rn << ", #" << offset << "]"; - } else if (p && wback) { - args << "[" << rn << ", #" << offset << "]!"; - } else if (!p && wback) { - args << "[" << rn << "], #" << offset; - } else { - LOG(FATAL) << p << " " << w; - } - if (rn.r == 9) { - args << " ; "; - GetDisassemblerOptions()->thread_offset_name_function_(args, offset); - } - } - } - break; - case 4: // Load/store multiple. - { - bool p = (instruction & (1 << 24)) != 0; - bool u = (instruction & (1 << 23)) != 0; - bool w = (instruction & (1 << 21)) != 0; - bool l = (instruction & (1 << 20)) != 0; - opcode = StringPrintf("%s%c%c", (l ? "ldm" : "stm"), (u ? 'i' : 'd'), (p ? 'b' : 'a')); - args << ArmRegister(instruction, 16) << (w ? "!" : "") << ", " << RegisterList(instruction); + os() << " ; "; + + // Bail out if not within expected buffer range to avoid trying to fetch invalid literals + // (we can encounter them when interpreting raw data as instructions). 
+ if (literal_addr < begin || literal_addr > end - literal_size[type]) { + os() << "(?)"; + } else { + switch (type) { + case kLoadByteLocation: + os() << *reinterpret_cast<const uint8_t*>(literal_addr); + break; + case kLoadHalfWordLocation: + os() << *reinterpret_cast<unaligned_uint16_t*>(literal_addr); + break; + case kLoadWordLocation: { + const int32_t value = *reinterpret_cast<unaligned_int32_t*>(literal_addr); + os() << "0x" << std::hex << std::setw(8) << std::setfill('0') << value; + break; } - break; - case 5: // Branch/branch with link. - { - bool bl = (instruction & (1 << 24)) != 0; - opcode = (bl ? "bl" : "b"); - int32_t imm26 = (instruction & 0xffffff) << 2; - int32_t imm32 = (imm26 << 6) >> 6; // Sign extend. - DumpBranchTarget(args, instr_ptr + 8, imm32); + case kLoadDoubleWordLocation: { + const int64_t value = *reinterpret_cast<unaligned_int64_t*>(literal_addr); + os() << "0x" << std::hex << std::setw(16) << std::setfill('0') << value; + break; } - break; - default: - opcode = "???"; - break; - } - opcode += kConditionCodeNames[cond]; - opcode += suffixes; - // TODO: a more complete ARM disassembler could generate wider opcodes. 
- os << FormatInstructionPointer(instr_ptr) - << StringPrintf(": %08x\t%-7s ", instruction, opcode.c_str()) - << args.str() << '\n'; -} - -int32_t ThumbExpand(int32_t imm12) { - if ((imm12 & 0xC00) == 0) { - switch ((imm12 >> 8) & 3) { - case 0: - return imm12 & 0xFF; - case 1: - return ((imm12 & 0xFF) << 16) | (imm12 & 0xFF); - case 2: - return ((imm12 & 0xFF) << 24) | ((imm12 & 0xFF) << 8); - default: // 3 - return ((imm12 & 0xFF) << 24) | ((imm12 & 0xFF) << 16) | ((imm12 & 0xFF) << 8) | - (imm12 & 0xFF); + case kLoadSignedByteLocation: + os() << *reinterpret_cast<const int8_t*>(literal_addr); + break; + case kLoadSignedHalfWordLocation: + os() << *reinterpret_cast<unaligned_int16_t*>(literal_addr); + break; + case kLoadSinglePrecisionLocation: + os() << *reinterpret_cast<unaligned_float*>(literal_addr); + break; + case kLoadDoublePrecisionLocation: + os() << *reinterpret_cast<unaligned_double*>(literal_addr); + break; + default: + UNIMPLEMENTED(FATAL) << "Unexpected literal type: " << type; } - } else { - uint32_t val = 0x80 | (imm12 & 0x7F); - int32_t rotate = (imm12 >> 7) & 0x1F; - return (val >> rotate) | (val << (32 - rotate)); } } -uint32_t VFPExpand32(uint32_t imm8) { - CHECK_EQ(imm8 & 0xffu, imm8); - uint32_t bit_a = (imm8 >> 7) & 1; - uint32_t bit_b = (imm8 >> 6) & 1; - uint32_t slice = imm8 & 0x3f; - return (bit_a << 31) | ((1 << 30) - (bit_b << 25)) | (slice << 19); -} +DisassemblerArm::DisassemblerArm(DisassemblerOptions* options) + : Disassembler(options), disasm_(std::make_unique<CustomDisassembler>(output_, options)) {} -static uint64_t VFPExpand64(uint32_t imm8) { - CHECK_EQ(imm8 & 0xffu, imm8); - uint64_t bit_a = (imm8 >> 7) & 1; - uint64_t bit_b = (imm8 >> 6) & 1; - uint64_t slice = imm8 & 0x3f; - return (bit_a << 63) | ((UINT64_C(1) << 62) - (bit_b << 54)) | (slice << 48); -} +size_t DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin) { + uintptr_t next; + // Remove the Thumb specifier bit; no effect if begin does not point to T32 
code. + const uintptr_t instr_ptr = reinterpret_cast<uintptr_t>(begin) & ~1; -enum T2LitType { - kT2LitInvalid, - kT2LitUByte, - kT2LitSByte, - kT2LitUHalf, - kT2LitSHalf, - kT2LitUWord, - kT2LitSWord, - kT2LitHexWord, - kT2LitULong, - kT2LitSLong, - kT2LitHexLong, -}; -std::ostream& operator<<(std::ostream& os, T2LitType type) { - return os << static_cast<int>(type); -} + disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0); + disasm_->JumpToPc(GetPc(instr_ptr)); -void DumpThumb2Literal(std::ostream& args, - const uint8_t* instr_ptr, - const uintptr_t lo_adr, - const uintptr_t hi_adr, - uint32_t U, - uint32_t imm32, - T2LitType type) { - // Literal offsets (imm32) are not required to be aligned so we may need unaligned access. - typedef const int16_t unaligned_int16_t __attribute__ ((aligned (1))); - typedef const uint16_t unaligned_uint16_t __attribute__ ((aligned (1))); - typedef const int32_t unaligned_int32_t __attribute__ ((aligned (1))); - typedef const uint32_t unaligned_uint32_t __attribute__ ((aligned (1))); - typedef const int64_t unaligned_int64_t __attribute__ ((aligned (1))); - typedef const uint64_t unaligned_uint64_t __attribute__ ((aligned (1))); - - // Get address of literal. Bail if not within expected buffer range to - // avoid trying to fetch invalid literals (we can encounter this when - // interpreting raw data as instructions). - uintptr_t pc = RoundDown(reinterpret_cast<intptr_t>(instr_ptr) + 4, 4); - uintptr_t lit_adr = U ? 
pc + imm32 : pc - imm32; - if (lit_adr < lo_adr || lit_adr >= hi_adr) { - args << " ; (?)"; - return; + if (disasm_->IsT32()) { + const uint16_t* const ip = reinterpret_cast<const uint16_t*>(instr_ptr); + next = reinterpret_cast<uintptr_t>(disasm_->DecodeT32At(ip)); + } else { + const uint32_t* const ip = reinterpret_cast<const uint32_t*>(instr_ptr); + next = reinterpret_cast<uintptr_t>(disasm_->DecodeA32At(ip)); } - args << " ; "; - switch (type) { - case kT2LitUByte: - args << *reinterpret_cast<const uint8_t*>(lit_adr); - break; - case kT2LitSByte: - args << *reinterpret_cast<const int8_t*>(lit_adr); - break; - case kT2LitUHalf: - args << *reinterpret_cast<const unaligned_uint16_t*>(lit_adr); - break; - case kT2LitSHalf: - args << *reinterpret_cast<const unaligned_int16_t*>(lit_adr); - break; - case kT2LitUWord: - args << *reinterpret_cast<const unaligned_uint32_t*>(lit_adr); - break; - case kT2LitSWord: - args << *reinterpret_cast<const unaligned_int32_t*>(lit_adr); - break; - case kT2LitHexWord: - args << StringPrintf("0x%08x", *reinterpret_cast<const unaligned_uint32_t*>(lit_adr)); - break; - case kT2LitULong: - args << *reinterpret_cast<const unaligned_uint64_t*>(lit_adr); - break; - case kT2LitSLong: - args << *reinterpret_cast<const unaligned_int64_t*>(lit_adr); - break; - case kT2LitHexLong: - args << StringPrintf("0x%" PRIx64, *reinterpret_cast<unaligned_int64_t*>(lit_adr)); - break; - default: - LOG(FATAL) << "Invalid type: " << type; - break; - } + os << output_.str(); + output_.str(std::string()); + return next - instr_ptr; } -size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) { - uint32_t instr = (ReadU16(instr_ptr) << 16) | ReadU16(instr_ptr + 2); - // |111|1 1|1000000|0000|1111110000000000| - // |5 3|2 1|0987654|3 0|5 0 5 0| - // |---|---|-------|----|----------------| - // |332|2 2|2222222|1111|1111110000000000| - // |1 9|8 7|6543210|9 6|5 0 5 0| - // |---|---|-------|----|----------------| - // |111|op1| op2 | | | - 
uint32_t op1 = (instr >> 27) & 3; - if (op1 == 0) { - return DumpThumb16(os, instr_ptr); - } - - // Set valid address range of backing buffer. - const uintptr_t lo_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->base_address_); - const uintptr_t hi_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->end_address_); - - uint32_t op2 = (instr >> 20) & 0x7F; - std::ostringstream opcode; - std::ostringstream args; - switch (op1) { - case 0: - break; - case 1: - if ((op2 & 0x64) == 0) { // 00x x0xx - // |111|11|10|00|0|00|0000|1111110000000000| - // |5 3|21|09|87|6|54|3 0|5 0 5 0| - // |---|--|--|--|-|--|----|----------------| - // |332|22|22|22|2|22|1111|1111110000000000| - // |1 9|87|65|43|2|10|9 6|5 0 5 0| - // |---|--|--|--|-|--|----|----------------| - // |111|01|00|op|0|WL| Rn | | - // |111|01| op2 | | | - // STM - 111 01 00-01-0-W0 nnnn rrrrrrrrrrrrrrrr - // LDM - 111 01 00-01-0-W1 nnnn rrrrrrrrrrrrrrrr - // PUSH- 111 01 00-01-0-10 1101 0M0rrrrrrrrrrrrr - // POP - 111 01 00-01-0-11 1101 PM0rrrrrrrrrrrrr - uint32_t op = (instr >> 23) & 3; - uint32_t W = (instr >> 21) & 1; - uint32_t L = (instr >> 20) & 1; - ArmRegister Rn(instr, 16); - if (op == 1 || op == 2) { - if (op == 1) { - if (L == 0) { - opcode << "stm"; - args << Rn << (W == 0 ? "" : "!") << ", "; - } else { - if (Rn.r != 13) { - opcode << "ldm"; - args << Rn << (W == 0 ? "" : "!") << ", "; - } else { - opcode << "pop"; - } - } - } else { - if (L == 0) { - if (Rn.r != 13) { - opcode << "stmdb"; - args << Rn << (W == 0 ? "" : "!") << ", "; - } else { - opcode << "push"; - } - } else { - opcode << "ldmdb"; - args << Rn << (W == 0 ? 
"" : "!") << ", "; - } - } - args << RegisterList(instr); - } - } else if ((op2 & 0x64) == 4) { // 00x x1xx - uint32_t op3 = (instr >> 23) & 3; - uint32_t op4 = (instr >> 20) & 3; - // uint32_t op5 = (instr >> 4) & 0xF; - ArmRegister Rn(instr, 16); - ArmRegister Rt(instr, 12); - ArmRegister Rd(instr, 8); - uint32_t imm8 = instr & 0xFF; - if ((op3 & 2) == 2) { // 1x - int W = (instr >> 21) & 1; - int U = (instr >> 23) & 1; - int P = (instr >> 24) & 1; - - if ((op4 & 1) == 1) { - opcode << "ldrd"; - } else { - opcode << "strd"; - } - args << Rt << "," << Rd << ", [" << Rn; - const char *sign = U ? "+" : "-"; - if (P == 0 && W == 1) { - args << "], #" << sign << (imm8 << 2); - } else { - args << ", #" << sign << (imm8 << 2) << "]"; - if (W == 1) { - args << "!"; - } - } - } else { // 0x - switch (op4) { - case 0: - if (op3 == 0) { // op3 is 00, op4 is 00 - opcode << "strex"; - args << Rd << ", " << Rt << ", [" << Rn << ", #" << (imm8 << 2) << "]"; - if (Rd.r == 13 || Rd.r == 15 || Rt.r == 13 || Rt.r == 15 || Rn.r == 15 || - Rd.r == Rn.r || Rd.r == Rt.r) { - args << " (UNPREDICTABLE)"; - } - } else { // op3 is 01, op4 is 00 - // this is one of strexb, strexh or strexd - int op5 = (instr >> 4) & 0xf; - switch (op5) { - case 4: - case 5: - opcode << ((op5 == 4) ? 
"strexb" : "strexh"); - Rd = ArmRegister(instr, 0); - args << Rd << ", " << Rt << ", [" << Rn << "]"; - if (Rd.r == 13 || Rd.r == 15 || Rt.r == 13 || Rt.r == 15 || Rn.r == 15 || - Rd.r == Rn.r || Rd.r == Rt.r || (instr & 0xf00) != 0xf00) { - args << " (UNPREDICTABLE)"; - } - break; - case 7: - opcode << "strexd"; - ArmRegister Rt2 = Rd; - Rd = ArmRegister(instr, 0); - args << Rd << ", " << Rt << ", " << Rt2 << ", [" << Rn << "]"; - if (Rd.r == 13 || Rd.r == 15 || Rt.r == 13 || Rt.r == 15 || - Rt2.r == 13 || Rt2.r == 15 || Rn.r == 15 || - Rd.r == Rn.r || Rd.r == Rt.r || Rd.r == Rt2.r) { - args << " (UNPREDICTABLE)"; - } - break; - } - } - break; - case 1: - if (op3 == 0) { // op3 is 00, op4 is 01 - opcode << "ldrex"; - args << Rt << ", [" << Rn << ", #" << (imm8 << 2) << "]"; - if (Rt.r == 13 || Rt.r == 15 || Rn.r == 15 || (instr & 0xf00) != 0xf00) { - args << " (UNPREDICTABLE)"; - } - } else { // op3 is 01, op4 is 01 - // this is one of strexb, strexh or strexd - int op5 = (instr >> 4) & 0xf; - switch (op5) { - case 0: - opcode << "tbb"; - break; - case 1: - opcode << "tbh"; - break; - case 4: - case 5: - opcode << ((op5 == 4) ? "ldrexb" : "ldrexh"); - args << Rt << ", [" << Rn << "]"; - if (Rt.r == 13 || Rt.r == 15 || Rn.r == 15 || (instr & 0xf0f) != 0xf0f) { - args << " (UNPREDICTABLE)"; - } - break; - case 7: - opcode << "ldrexd"; - args << Rt << ", " << Rd /* Rt2 */ << ", [" << Rn << "]"; - if (Rt.r == 13 || Rt.r == 15 || Rd.r == 13 /* Rt2 */ || Rd.r == 15 /* Rt2 */ || - Rn.r == 15 || (instr & 0x00f) != 0x00f) { - args << " (UNPREDICTABLE)"; - } - break; - } - } - break; - case 2: // op3 is 0x, op4 is 10 - case 3: // op3 is 0x, op4 is 11 - if (op4 == 2) { - opcode << "strd"; - } else { - opcode << "ldrd"; - } - int W = (instr >> 21) & 1; - int U = (instr >> 23) & 1; - int P = (instr >> 24) & 1; - - args << Rt << "," << Rd << ", [" << Rn; - const char *sign = U ? 
"+" : "-"; - if (P == 0 && W == 1) { - args << "], #" << sign << imm8; - } else { - args << ", #" << sign << imm8 << "]"; - if (W == 1) { - args << "!"; - } - } - break; - } - } - - } else if ((op2 & 0x60) == 0x20) { // 01x xxxx - // Data-processing (shifted register) - // |111|1110|0000|0|0000|1111|1100|00|00|0000| - // |5 3|2109|8765|4|3 0|5 |10 8|7 |5 |3 0| - // |---|----|----|-|----|----|----|--|--|----| - // |332|2222|2222|2|1111|1111|1100|00|00|0000| - // |1 9|8765|4321|0|9 6|5 |10 8|7 |5 |3 0| - // |---|----|----|-|----|----|----|--|--|----| - // |111|0101| op3|S| Rn |imm3| Rd |i2|ty| Rm | - uint32_t op3 = (instr >> 21) & 0xF; - uint32_t S = (instr >> 20) & 1; - uint32_t imm3 = ((instr >> 12) & 0x7); - uint32_t imm2 = ((instr >> 6) & 0x3); - uint32_t imm5 = ((imm3 << 2) | imm2); - uint32_t shift_type = ((instr >> 4) & 0x3); - ArmRegister Rd(instr, 8); - ArmRegister Rn(instr, 16); - ArmRegister Rm(instr, 0); - switch (op3) { - case 0x0: - if (Rd.r != 0xF) { - opcode << "and"; - } else { - if (S != 1U) { - opcode << "UNKNOWN TST-" << S; - break; - } - opcode << "tst"; - S = 0; // don't print 's' - } - break; - case 0x1: opcode << "bic"; break; - case 0x2: - if (Rn.r != 0xF) { - opcode << "orr"; - } else { - // TODO: use canonical form if there is a shift (lsl, ...). 
- opcode << "mov"; - } - break; - case 0x3: - if (Rn.r != 0xF) { - opcode << "orn"; - } else { - opcode << "mvn"; - } - break; - case 0x4: - if (Rd.r != 0xF) { - opcode << "eor"; - } else { - if (S != 1U) { - opcode << "UNKNOWN TEQ-" << S; - break; - } - opcode << "teq"; - S = 0; // don't print 's' - } - break; - case 0x6: opcode << "pkh"; break; - case 0x8: - if (Rd.r != 0xF) { - opcode << "add"; - } else { - if (S != 1U) { - opcode << "UNKNOWN CMN-" << S; - break; - } - opcode << "cmn"; - S = 0; // don't print 's' - } - break; - case 0xA: opcode << "adc"; break; - case 0xB: opcode << "sbc"; break; - case 0xD: - if (Rd.r != 0xF) { - opcode << "sub"; - } else { - if (S != 1U) { - opcode << "UNKNOWN CMP-" << S; - break; - } - opcode << "cmp"; - S = 0; // don't print 's' - } - break; - case 0xE: opcode << "rsb"; break; - default: opcode << "UNKNOWN DPSR-" << op3; break; - } - - if (S == 1) { - opcode << "s"; - } - opcode << ".w"; - - if (Rd.r != 0xF) { - args << Rd << ", "; - } - if (Rn.r != 0xF) { - args << Rn << ", "; - } - args << Rm; - - // Shift operand. - bool noShift = (imm5 == 0 && shift_type == 0x0); - if (!noShift) { - args << ", "; - if (shift_type == 0x3u && imm5 == 0u) { - args << "rrx"; - } else { - args << kThumb2ShiftOperations[shift_type] << " #" << ((0 != imm5) ? 
imm5 : 32); - } - } - - } else if ((op2 & 0x40) == 0x40) { // 1xx xxxx - // Co-processor instructions - // |111|1|11|000000|0000|1111|1100|000|0 |0000| - // |5 3|2|10|987654|3 0|54 2|10 8|7 5|4 | 0| - // |---|-|--|------|----|----|----|---|---|----| - // |332|2|22|222222|1111|1111|1100|000|0 |0000| - // |1 9|8|76|543210|9 6|54 2|10 8|7 5|4 | 0| - // |---|-|--|------|----|----|----|---|---|----| - // |111| |11| op3 | Rn | |copr| |op4| | - uint32_t op3 = (instr >> 20) & 0x3F; - uint32_t coproc = (instr >> 8) & 0xF; - uint32_t op4 = (instr >> 4) & 0x1; - - if (coproc == 0xA || coproc == 0xB) { // 101x - if (op3 < 0x20 && (op3 & ~5) != 0) { // 0xxxxx and not 000x0x - // Extension register load/store instructions - // |1111|110|00000|0000|1111|110|0|00000000| - // |5 2|1 9|87654|3 0|5 2|1 9|8|7 0| - // |----|---|-----|----|----|---|-|--------| - // |3322|222|22222|1111|1111|110|0|00000000| - // |1 8|7 5|4 0|9 6|5 2|1 9|8|7 0| - // |----|---|-----|----|----|---|-|--------| - // |1110|110|PUDWL| Rn | Vd |101|S| imm8 | - uint32_t P = (instr >> 24) & 1; - uint32_t U = (instr >> 23) & 1; - uint32_t W = (instr >> 21) & 1; - if (P == U && W == 1) { - opcode << "UNDEFINED"; - } else { - uint32_t L = (instr >> 20) & 1; - uint32_t S = (instr >> 8) & 1; - ArmRegister Rn(instr, 16); - if (P == 1 && W == 0) { // VLDR - FpRegister d(instr, 12, 22); - uint32_t imm8 = instr & 0xFF; - opcode << (L == 1 ? "vldr" : "vstr"); - args << d << ", [" << Rn << ", #" << ((U == 1) ? "" : "-") - << (imm8 << 2) << "]"; - if (Rn.r == 15 && U == 1) { - DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, U, imm8 << 2, kT2LitHexLong); - } - } else if (Rn.r == 13 && W == 1 && U == L) { // VPUSH/VPOP - opcode << (L == 1 ? "vpop" : "vpush"); - args << FpRegisterRange(instr); - } else { // VLDM - opcode << (L == 1 ? "vldm" : "vstm"); - args << Rn << ((W == 1) ? "!" : "") << ", " - << FpRegisterRange(instr); - } - opcode << (S == 1 ? 
".f64" : ".f32"); - } - } else if ((op3 >> 1) == 2) { // 00010x - if ((instr & 0xD0) == 0x10) { - // 64bit transfers between ARM core and extension registers. - uint32_t L = (instr >> 20) & 1; - uint32_t S = (instr >> 8) & 1; - ArmRegister Rt2(instr, 16); - ArmRegister Rt(instr, 12); - FpRegister m(instr, 0, 5); - opcode << "vmov" << (S ? ".f64" : ".f32"); - if (L == 1) { - args << Rt << ", " << Rt2 << ", "; - } - if (S) { - args << m; - } else { - args << m << ", " << FpRegister(m, 1); - } - if (L == 0) { - args << ", " << Rt << ", " << Rt2; - } - if (Rt.r == 15 || Rt.r == 13 || Rt2.r == 15 || Rt2.r == 13 || - (S == 0 && m.r == 31) || (L == 1 && Rt.r == Rt2.r)) { - args << " (UNPREDICTABLE)"; - } - } - } else if ((op3 >> 4) == 2 && op4 == 0) { // 10xxxx, op = 0 - // fp data processing - // VMLA, VMLS, VMUL, VNMUL, VADD, VSUB, VDIV, VMOV, ... - // |1111|1100|0|0|00|0000|1111|110|0|0|0|0|0|0000| - // |5 2|1 8|7|6|54|3 0|5 2|1 9|8|7|6|5|4|3 0| - // |----|----|-|-|--|----|----|---|-|-|-|-|-|----| - // |3322|2222|2|2|22|1111|1111|110|0|0|0|0|0|0000| - // |1 8|7 4|3|2|10|9 6|5 2|1 9|8|7|6|5|4|3 0| - // |----|----|-|-|--|----|----|---|-|-|-|-|-|----| - // |1110|1110| op3 | Vn | Vd |101|S|N|Q|M|0| Vm | - // |1110|1110|0|D|00| Vn | Vd |101|S|N|0|M|0| Vm | VMLA - // |1110|1110|0|D|00| Vn | Vd |101|S|N|1|M|0| Vm | VMLS - // |1110|1110|0|D|10| Vn | Vd |101|S|N|0|M|0| Vm | VMUL - // |1110|1110|0|D|10| Vn | Vd |101|S|N|1|M|0| Vm | VNMUL - // |1110|1110|0|D|11| Vn | Vd |101|S|N|0|M|0| Vm | VADD - // |1110|1110|0|D|11| Vn | Vd |101|S|N|1|M|0| Vm | VSUB - // |1110|1110|1|D|00| Vn | Vd |101|S|N|0|M|0| Vm | VDIV - // |1110|1110|1|D|11| iH | Vd |101|S|0|0|0|0| iL | VMOV (imm) - // |1110|1110|1|D|11|op5 | Vd |101|S|.|1|M|0| Vm | ... (see below) - uint32_t S = (instr >> 8) & 1; - uint32_t Q = (instr >> 6) & 1; - FpRegister d(instr, 12, 22); - FpRegister n(instr, 16, 7); - FpRegister m(instr, 0, 5); - if ((op3 & 0xB) == 0) { // 100x00 - opcode << (Q == 0 ? "vmla" : "vmls") << (S != 0 ? 
".f64" : ".f32"); - args << d << ", " << n << ", " << m; - } else if ((op3 & 0xB) == 0x2) { // 100x10 - opcode << (Q == 0 ? "vmul" : "vnmul") << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << n << ", " << m; - } else if ((op3 & 0xB) == 0x3) { // 100x11 - opcode << (Q == 0 ? "vadd" : "vsub") << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << n << ", " << m; - } else if ((op3 & 0xB) == 0x8 && Q == 0) { // 101x00, Q == 0 - opcode << "vdiv" << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << n << ", " << m; - } else if ((op3 & 0xB) == 0xB && Q == 0) { // 101x11, Q == 0 - uint32_t imm8 = ((instr & 0xf0000u) >> 12) | (instr & 0xfu); - opcode << "vmov" << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << (S != 0 ? StringPrintf("0x%016" PRIx64, VFPExpand64(imm8)) - : StringPrintf("0x%08x", VFPExpand32(imm8))); - if ((instr & 0xa0) != 0) { - args << " (UNPREDICTABLE)"; - } - } else if ((op3 & 0xB) == 0xB && Q == 1) { // 101x11, Q == 1 - // VNEG, VSQRT, VCMP, VCMPE, VCVT (floating-point conversion) - // |1111|1100|0|0|00|0000|1111|110|0|0 |0|0|0|0000| - // |5 2|1 8|7|6|54|3 0|5 2|1 9|8|7 |6|5|4|3 0| - // |----|----|-|-|--|----|----|---|-|- |-|-|-|----| - // |3322|2222|2|2|22|1111|1111|110|0|0 |0|0|0|0000| - // |1 8|7 4|3|2|10|9 6|5 2|1 9|8|7 |6|5|4|3 0| - // |----|----|-|-|--|----|----|---|-|- |-|-|-|----| - // |1110|1110|1|D|11|0000| Vd |101|S|0 |1|M|0| Vm | VMOV (reg) - // |1110|1110|1|D|11|0000| Vd |101|S|1 |1|M|0| Vm | VABS - // |1110|1110|1|D|11|0001| Vd |101|S|0 |1|M|0| Vm | VNEG - // |1110|1110|1|D|11|0001| Vd |101|S|1 |1|M|0| Vm | VSQRT - // |1110|1110|1|D|11|0100| Vd |101|S|op|1|M|0| Vm | VCMP - // |1110|1110|1|D|11|0101| Vd |101|S|op|1|0|0|0000| VCMPE - // |1110|1110|1|D|11|op5 | Vd |101|S|op|1|M|0| Vm | VCVT - uint32_t op5 = (instr >> 16) & 0xF; - uint32_t op = (instr >> 7) & 1; - // Register types in VCVT instructions rely on the combination of op5 and S. 
- FpRegister Dd(instr, 12, 22, 1); - FpRegister Sd(instr, 12, 22, 0); - FpRegister Dm(instr, 0, 5, 1); - FpRegister Sm(instr, 0, 5, 0); - if (op5 == 0) { - opcode << (op == 0 ? "vmov" : "vabs") << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << m; - } else if (op5 == 1) { - opcode << (op != 0 ? "vsqrt" : "vneg") << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << m; - } else if (op5 == 4) { - opcode << "vcmp" << ((op != 0) ? "e" : "") << (S != 0 ? ".f64" : ".f32"); - args << d << ", " << m; - } else if (op5 == 5) { - opcode << "vcmp" << ((op != 0) ? "e" : "") << (S != 0 ? ".f64" : ".f32"); - args << d << ", #0.0"; - if ((instr & 0x2f) != 0) { - args << " (UNPREDICTABLE)"; - } - } else if (op5 == 0xD) { - if (S == 1) { - // vcvt{r}.s32.f64 - opcode << "vcvt" << (op == 0 ? "r" : "") << ".s32.f64"; - args << Sd << ", " << Dm; - } else { - // vcvt{r}.s32.f32 - opcode << "vcvt" << (op == 0 ? "r" : "") << ".s32.f32"; - args << Sd << ", " << Sm; - } - } else if (op5 == 0xC) { - if (S == 1) { - // vcvt{r}.u32.f64 - opcode << "vcvt" << (op == 0 ? "r" : "") << ".u32.f64"; - args << Sd << ", " << Dm; - } else { - // vcvt{r}.u32.f32 - opcode << "vcvt" << (op == 0 ? "r" : "") << ".u32.f32"; - args << Sd << ", " << Sm; - } - } else if (op5 == 0x8) { - if (S == 1) { - // vcvt.f64.<Tm> - opcode << "vcvt.f64." << (op == 0 ? "u" : "s") << "32"; - args << Dd << ", " << Sm; - } else { - // vcvt.f32.<Tm> - opcode << "vcvt.f32." << (op == 0 ? 
"u" : "s") << "32"; - args << Sd << ", " << Sm; - } - } else if (op5 == 0x7) { - if (op == 1) { - if (S == 1) { - // vcvt.f64.f32 - opcode << "vcvt.f64.f32"; - args << Dd << ", " << Sm; - } else { - // vcvt.f32.f64 - opcode << "vcvt.f32.f64"; - args << Sd << ", " << Dm; - } - } - } else if ((op5 & 0xa) == 0xa) { - opcode << "vcvt"; - args << "[undecoded: floating <-> fixed]"; - } - } - } else if ((op3 >> 4) == 2 && op4 == 1) { // 10xxxx, op = 1 - if (coproc == 10 && (op3 & 0xE) == 0) { - // VMOV (between ARM core register and single-precision register) - // |1111|1100|000|0 |0000|1111|1100|0|00|0|0000| - // |5 |1 8|7 5|4 |3 0|5 2|1 8|7|65|4|3 0| - // |----|----|---|- |----|----|----|-|--|-|----| - // |3322|2222|222|2 |1111|1111|1100|0|00|0|0000| - // |1 8|7 4|3 1|0 |9 6|5 2|1 8|7|65|4|3 0| - // |----|----|---|- |----|----|----|-|--|-|----| - // |1110|1110|000|op| Vn | Rt |1010|N|00|1|0000| - uint32_t op = op3 & 1; - ArmRegister Rt(instr, 12); - FpRegister n(instr, 16, 7); - opcode << "vmov.f32"; - if (op) { - args << Rt << ", " << n; - } else { - args << n << ", " << Rt; - } - if (Rt.r == 13 || Rt.r == 15 || (instr & 0x6F) != 0) { - args << " (UNPREDICTABLE)"; - } - } else if (coproc == 10 && op3 == 0x2F) { - // VMRS - // |1111|11000000|0000|1111|1100|000|0|0000| - // |5 |1 4|3 0|5 2|1 8|7 5|4|3 0| - // |----|--------|----|----|----|---|-|----| - // |3322|22222222|1111|1111|1100|000|0|0000| - // |1 8|7 0|9 6|5 2|1 8|7 5|4|3 0| - // |----|--------|----|----|----|---|-|----| - // |1110|11101111|reg | Rt |1010|000|1|0000| - last 7 0s are (0) - uint32_t spec_reg = (instr >> 16) & 0xF; - ArmRegister Rt(instr, 12); - opcode << "vmrs"; - if (spec_reg == 1) { - if (Rt.r == 15) { - args << "APSR_nzcv, FPSCR"; - } else if (Rt.r == 13) { - args << Rt << ", FPSCR (UNPREDICTABLE)"; - } else { - args << Rt << ", FPSCR"; - } - } else { - args << "(PRIVILEGED)"; - } - } else if (coproc == 11 && (op3 & 0x9) != 8) { - // VMOV (ARM core register to scalar or vice versa; 8/16/32-bit) 
- } - } - } - } - break; - case 2: - if ((instr & 0x8000) == 0 && (op2 & 0x20) == 0) { - // Data-processing (modified immediate) - // |111|11|10|0000|0|0000|1|111|1100|00000000| - // |5 3|21|09|8765|4|3 0|5|4 2|10 8|7 5 0| - // |---|--|--|----|-|----|-|---|----|--------| - // |332|22|22|2222|2|1111|1|111|1100|00000000| - // |1 9|87|65|4321|0|9 6|5|4 2|10 8|7 5 0| - // |---|--|--|----|-|----|-|---|----|--------| - // |111|10|i0| op3|S| Rn |0|iii| Rd |iiiiiiii| - // 111 10 x0 xxxx x xxxx opxxx xxxx xxxxxxxx - uint32_t i = (instr >> 26) & 1; - uint32_t op3 = (instr >> 21) & 0xF; - uint32_t S = (instr >> 20) & 1; - ArmRegister Rn(instr, 16); - uint32_t imm3 = (instr >> 12) & 7; - ArmRegister Rd(instr, 8); - uint32_t imm8 = instr & 0xFF; - int32_t imm32 = (i << 11) | (imm3 << 8) | imm8; - if (Rn.r == 0xF && (op3 == 0x2 || op3 == 0x3)) { - if (op3 == 0x2) { - opcode << "mov"; - if (S == 1) { - opcode << "s"; - } - opcode << ".w"; - } else { - opcode << "mvn"; - if (S == 1) { - opcode << "s"; - } - } - args << Rd << ", #" << ThumbExpand(imm32); - } else if (Rd.r == 0xF && S == 1 && - (op3 == 0x0 || op3 == 0x4 || op3 == 0x8 || op3 == 0xD)) { - if (op3 == 0x0) { - opcode << "tst"; - } else if (op3 == 0x4) { - opcode << "teq"; - } else if (op3 == 0x8) { - opcode << "cmn.w"; - } else { - opcode << "cmp.w"; - } - args << Rn << ", #" << ThumbExpand(imm32); - } else { - switch (op3) { - case 0x0: opcode << "and"; break; - case 0x1: opcode << "bic"; break; - case 0x2: opcode << "orr"; break; - case 0x3: opcode << "orn"; break; - case 0x4: opcode << "eor"; break; - case 0x8: opcode << "add"; break; - case 0xA: opcode << "adc"; break; - case 0xB: opcode << "sbc"; break; - case 0xD: opcode << "sub"; break; - case 0xE: opcode << "rsb"; break; - default: opcode << "UNKNOWN DPMI-" << op3; break; - } - if (S == 1) { - opcode << "s"; - } - args << Rd << ", " << Rn << ", #" << ThumbExpand(imm32); - } - } else if ((instr & 0x8000) == 0 && (op2 & 0x20) != 0) { - // Data-processing (plain 
binary immediate) - // |111|11|10|00000|0000|1|111110000000000| - // |5 3|21|09|87654|3 0|5|4 0 5 0| - // |---|--|--|-----|----|-|---------------| - // |332|22|22|22222|1111|1|111110000000000| - // |1 9|87|65|43210|9 6|5|4 0 5 0| - // |---|--|--|-----|----|-|---------------| - // |111|10|x1| op3 | Rn |0|xxxxxxxxxxxxxxx| - uint32_t op3 = (instr >> 20) & 0x1F; - switch (op3) { - case 0x00: case 0x0A: { - // ADD/SUB.W Rd, Rn #imm12 - 111 10 i1 0101 0 nnnn 0 iii dddd iiiiiiii - ArmRegister Rd(instr, 8); - ArmRegister Rn(instr, 16); - uint32_t i = (instr >> 26) & 1; - uint32_t imm3 = (instr >> 12) & 0x7; - uint32_t imm8 = instr & 0xFF; - uint32_t imm12 = (i << 11) | (imm3 << 8) | imm8; - if (Rn.r != 0xF) { - opcode << (op3 == 0 ? "addw" : "subw"); - args << Rd << ", " << Rn << ", #" << imm12; - } else { - opcode << "adr"; - args << Rd << ", "; - DumpBranchTarget(args, instr_ptr + 4, (op3 == 0) ? imm12 : -imm12); - } - break; - } - case 0x04: case 0x0C: { - // MOVW/T Rd, #imm16 - 111 10 i0 0010 0 iiii 0 iii dddd iiiiiiii - ArmRegister Rd(instr, 8); - uint32_t i = (instr >> 26) & 1; - uint32_t imm3 = (instr >> 12) & 0x7; - uint32_t imm8 = instr & 0xFF; - uint32_t Rn = (instr >> 16) & 0xF; - uint32_t imm16 = (Rn << 12) | (i << 11) | (imm3 << 8) | imm8; - opcode << (op3 == 0x04 ? 
"movw" : "movt"); - args << Rd << ", #" << imm16; - break; - } - case 0x16: case 0x14: case 0x1C: { - // BFI Rd, Rn, #lsb, #width - 111 10 0 11 011 0 nnnn 0 iii dddd ii 0 iiiii - // SBFX Rd, Rn, #lsb, #width - 111 10 0 11 010 0 nnnn 0 iii dddd ii 0 iiiii - // UBFX Rd, Rn, #lsb, #width - 111 10 0 11 110 0 nnnn 0 iii dddd ii 0 iiiii - ArmRegister Rd(instr, 8); - ArmRegister Rn(instr, 16); - uint32_t msb = instr & 0x1F; - uint32_t imm2 = (instr >> 6) & 0x3; - uint32_t imm3 = (instr >> 12) & 0x7; - uint32_t lsb = (imm3 << 2) | imm2; - uint32_t width = msb - lsb + 1; - if (op3 == 0x16) { - if (Rn.r != 0xF) { - opcode << "bfi"; - args << Rd << ", " << Rn << ", #" << lsb << ", #" << width; - } else { - opcode << "bfc"; - args << Rd << ", #" << lsb << ", #" << width; - } - } else { - opcode << ((op3 & 0x8) != 0u ? "ubfx" : "sbfx"); - args << Rd << ", " << Rn << ", #" << lsb << ", #" << width; - if (Rd.r == 13 || Rd.r == 15 || Rn.r == 13 || Rn.r == 15 || - (instr & 0x04000020) != 0u) { - args << " (UNPREDICTABLE)"; - } - } - break; - } - default: - break; - } - } else { - // Branches and miscellaneous control - // |111|11|1000000|0000|1|111|1100|00000000| - // |5 3|21|0987654|3 0|5|4 2|10 8|7 5 0| - // |---|--|-------|----|-|---|----|--------| - // |332|22|2222222|1111|1|111|1100|00000000| - // |1 9|87|6543210|9 6|5|4 2|10 8|7 5 0| - // |---|--|-------|----|-|---|----|--------| - // |111|10| op2 | |1|op3|op4 | | - - uint32_t op3 = (instr >> 12) & 7; - // uint32_t op4 = (instr >> 8) & 0xF; - switch (op3) { - case 0: - if ((op2 & 0x38) != 0x38) { - // Conditional branch - // |111|11|1|0000|000000|1|1|1 |1|1 |10000000000| - // |5 3|21|0|9876|543 0|5|4|3 |2|1 |0 5 0| - // |---|--|-|----|------|-|-|--|-|--|-----------| - // |332|22|2|2222|221111|1|1|1 |1|1 |10000000000| - // |1 9|87|6|5432|109 6|5|4|3 |2|1 |0 5 0| - // |---|--|-|----|------|-|-|--|-|--|-----------| - // |111|10|S|cond| imm6 |1|0|J1|0|J2| imm11 | - uint32_t S = (instr >> 26) & 1; - uint32_t J2 = (instr >> 11) & 
1; - uint32_t J1 = (instr >> 13) & 1; - uint32_t imm6 = (instr >> 16) & 0x3F; - uint32_t imm11 = instr & 0x7FF; - uint32_t cond = (instr >> 22) & 0xF; - int32_t imm32 = (S << 20) | (J2 << 19) | (J1 << 18) | (imm6 << 12) | (imm11 << 1); - imm32 = (imm32 << 11) >> 11; // sign extend 21bit immediate - opcode << "b"; - DumpCond(opcode, cond); - opcode << ".w"; - DumpBranchTarget(args, instr_ptr + 4, imm32); - } else if (op2 == 0x3B) { - // Miscellaneous control instructions - uint32_t op5 = (instr >> 4) & 0xF; - switch (op5) { - case 4: opcode << "dsb"; DumpMemoryDomain(args, instr & 0xF); break; - case 5: opcode << "dmb"; DumpMemoryDomain(args, instr & 0xF); break; - case 6: opcode << "isb"; DumpMemoryDomain(args, instr & 0xF); break; - } - } - break; - case 2: - if ((op2 & 0x38) == 0x38) { - if (op2 == 0x7F) { - opcode << "udf"; - } - break; - } - FALLTHROUGH_INTENDED; // Else deliberate fall-through to B. - case 1: case 3: { - // B - // |111|11|1|0000|000000|11|1 |1|1 |10000000000| - // |5 3|21|0|9876|543 0|54|3 |2|1 |0 5 0| - // |---|--|-|----|------|--|--|-|--|-----------| - // |332|22|2|2222|221111|11|1 |1|1 |10000000000| - // |1 9|87|6|5 2|10 6|54|3 |2|1 |0 5 0| - // |---|--|-|----|------|--|--|-|--|-----------| - // |111|10|S|cond| imm6 |10|J1|0|J2| imm11 | - // |111|10|S| imm10 |10|J1|1|J2| imm11 | - uint32_t S = (instr >> 26) & 1; - uint32_t cond = (instr >> 22) & 0xF; - uint32_t J2 = (instr >> 11) & 1; - uint32_t form = (instr >> 12) & 1; - uint32_t J1 = (instr >> 13) & 1; - uint32_t imm10 = (instr >> 16) & 0x3FF; - uint32_t imm6 = (instr >> 16) & 0x3F; - uint32_t imm11 = instr & 0x7FF; - opcode << "b"; - int32_t imm32; - if (form == 0) { - DumpCond(opcode, cond); - imm32 = (S << 20) | (J2 << 19) | (J1 << 18) | (imm6 << 12) | (imm11 << 1); - imm32 = (imm32 << 11) >> 11; // sign extend 21 bit immediate. 
- } else { - uint32_t I1 = (J1 ^ S) ^ 1; - uint32_t I2 = (J2 ^ S) ^ 1; - imm32 = (S << 24) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1); - imm32 = (imm32 << 7) >> 7; // sign extend 25 bit immediate. - } - opcode << ".w"; - DumpBranchTarget(args, instr_ptr + 4, imm32); - break; - } - case 4: case 6: case 5: case 7: { - // BL, BLX (immediate) - // |111|11|1|0000000000|11|1 |1|1 |10000000000| - // |5 3|21|0|9876543 0|54|3 |2|1 |0 5 0| - // |---|--|-|----------|--|--|-|--|-----------| - // |332|22|2|2222221111|11|1 |1|1 |10000000000| - // |1 9|87|6|5 0 6|54|3 |2|1 |0 5 0| - // |---|--|-|----------|--|--|-|--|-----------| - // |111|10|S| imm10 |11|J1|L|J2| imm11 | - uint32_t S = (instr >> 26) & 1; - uint32_t J2 = (instr >> 11) & 1; - uint32_t L = (instr >> 12) & 1; - uint32_t J1 = (instr >> 13) & 1; - uint32_t imm10 = (instr >> 16) & 0x3FF; - uint32_t imm11 = instr & 0x7FF; - if (L == 0) { - opcode << "bx"; - } else { - opcode << "blx"; - } - uint32_t I1 = ~(J1 ^ S); - uint32_t I2 = ~(J2 ^ S); - int32_t imm32 = (S << 24) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1); - imm32 = (imm32 << 8) >> 8; // sign extend 24 bit immediate. - DumpBranchTarget(args, instr_ptr + 4, imm32); - break; - } - } - } - break; - case 3: - switch (op2) { - case 0x07: case 0x0F: case 0x17: case 0x1F: { // Explicitly UNDEFINED, A6.3. - opcode << "UNDEFINED"; - break; - } - case 0x06: case 0x0E: { // "Store single data item" undefined opcodes, A6.3.10. - opcode << "UNDEFINED [store]"; - break; - } - case 0x15: case 0x1D: { // "Load word" undefined opcodes, A6.3.7. 
- opcode << "UNDEFINED [load]"; - break; - } - case 0x10: case 0x12: case 0x14: case 0x16: case 0x18: case 0x1A: case 0x1C: case 0x1E: { - opcode << "UNKNOWN " << op2 << " [SIMD]"; - break; - } - case 0x01: case 0x00: case 0x09: case 0x08: // {LD,ST}RB{,T} - case 0x03: case 0x02: case 0x0B: case 0x0A: // {LD,ST}RH{,T} - case 0x05: case 0x04: case 0x0D: case 0x0C: // {LD,ST}R{,T} - case 0x11: case 0x19: // LDRSB{,T} (no signed store) - case 0x13: case 0x1B: { // LDRSH{,T} (no signed store) - // Load: - // (Store is the same except that l==0 and always s==0 below.) - // 00s.whl (sign, word, half, load) - // LDR{S}B imm12: 11111|00s1001| Rn | Rt |imm12 (0x09) - // LDR{S}B imm8: 11111|00s0001| Rn | Rt |1PUW|imm8 (0x01) - // LDR{S}BT imm8: 11111|00s0001| Rn | Rt |1110|imm8 (0x01) - // LDR{S}B lit: 11111|00sU001|1111| Rt |imm12 (0x01/0x09) - // LDR{S}B reg: 11111|00s0001| Rn | Rt |000000|imm2| Rm (0x01) - // LDR{S}H imm12: 11111|00s1011| Rn | Rt |imm12 (0x0B) - // LDR{S}H imm8: 11111|00s0011| Rn | Rt |1PUW|imm8 (0x03) - // LDR{S}HT imm8: 11111|00s0011| Rn | Rt |1110|imm8 (0x03) - // LDR{S}H lit: 11111|00sU011|1111| Rt |imm12 (0x03/0x0B) - // LDR{S}H reg: 11111|00s0011| Rn | Rt |000000|imm2| Rm (0x03) - // LDR imm12: 11111|0001101| Rn | Rt |imm12 (0x0D) - // LDR imm8: 11111|0000101| Rn | Rt |1PUW|imm8 (0x05) - // LDRT imm8: 11111|0000101| Rn | Rt |1110|imm8 (0x05) - // LDR lit: 11111|000U101|1111| Rt |imm12 (0x05/0x0D) - // LDR reg: 11111|0000101| Rn | Rt |000000|imm2| Rm (0x05) - // - // If Rt == 15, instead of load we have preload: - // PLD{W} imm12: 11111|00010W1| Rn |1111|imm12 (0x09/0x0B) - // PLD{W} imm8: 11111|00000W1| Rn |1111|1100|imm8 (0x01/0x03); -imm8 - // PLD lit: 11111|000U001|1111|1111|imm12 (0x01/0x09) - // PLD{W} reg: 11111|00000W1| Rn |1111|000000|imm2| Rm (0x01/0x03) - // PLI imm12: 11111|0011001| Rn |1111|imm12 (0x19) - // PLI imm8: 11111|0010001| Rn |1111|1100|imm8 (0x11); -imm8 - // PLI lit: 11111|001U001|1111|1111|imm12 (0x01/0x09) - // PLI reg: 
11111|0010001| Rn |1111|000000|imm2| Rm (0x01/0x03) - - bool is_load = HasBitSet(instr, 20); - bool is_half = HasBitSet(instr, 21); // W for PLD/PLDW. - bool is_word = HasBitSet(instr, 22); - bool is_signed = HasBitSet(instr, 24); - ArmRegister Rn(instr, 16); - ArmRegister Rt(instr, 12); - uint32_t imm12 = instr & 0xFFF; - uint32_t U = (instr >> 23) & 1; // U for imm12 - uint32_t imm8 = instr & 0xFF; - uint32_t op4 = (instr >> 8) & 0xF; // 1PUW for imm8 - if (Rt.r == PC && is_load && !is_word) { - // PLD, PLDW, PLI - const char* pld_pli = (is_signed ? "pli" : "pld"); - const char* w = (is_half ? "w" : ""); - if (is_signed && !is_half) { - opcode << "UNDEFINED [PLI+W]"; - } else if (Rn.r == PC || U != 0u) { - opcode << pld_pli << w; - args << "[" << Rn << ", #" << (U != 0u ? "" : "-") << imm12 << "]"; - if (Rn.r == PC && is_half) { - args << " (UNPREDICTABLE)"; - } - } else if ((instr & 0xFC0) == 0) { - opcode << pld_pli << w; - RmLslImm2 Rm(instr); - args << "[" << Rn << ", " << Rm << "]"; - } else if (op4 == 0xC) { - opcode << pld_pli << w; - args << "[" << Rn << ", #-" << imm8 << "]"; - } else { - opcode << "UNDEFINED [~" << pld_pli << "]"; - } - break; - } - const char* ldr_str = is_load ? "ldr" : "str"; - const char* sign = is_signed ? "s" : ""; - const char* type = is_word ? "" : is_half ? "h" : "b"; - bool unpred = (Rt.r == SP && !is_word) || (Rt.r == PC && !is_load); - if (Rn.r == PC && !is_load) { - opcode << "UNDEFINED [STR-lit]"; - unpred = false; - } else if (Rn.r == PC || U != 0u) { - // Load/store with imm12 (load literal if Rn.r == PC; there's no store literal). - opcode << ldr_str << sign << type << ".w"; - args << Rt << ", [" << Rn << ", #" << (U != 0u ? 
"" : "-") << imm12 << "]"; - if (Rn.r == TR && is_load) { - args << " ; "; - GetDisassemblerOptions()->thread_offset_name_function_(args, imm12); - } else if (Rn.r == PC) { - T2LitType lit_type[] = { - kT2LitUByte, kT2LitUHalf, kT2LitHexWord, kT2LitInvalid, - kT2LitUByte, kT2LitUHalf, kT2LitHexWord, kT2LitInvalid, - kT2LitSByte, kT2LitSHalf, kT2LitInvalid, kT2LitInvalid, - kT2LitSByte, kT2LitSHalf, kT2LitInvalid, kT2LitInvalid, - }; - DCHECK_LT(op2 >> 1, arraysize(lit_type)); - DCHECK_NE(lit_type[op2 >> 1], kT2LitInvalid); - DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, U, imm12, lit_type[op2 >> 1]); - } - } else if ((instr & 0xFC0) == 0) { - opcode << ldr_str << sign << type << ".w"; - RmLslImm2 Rm(instr); - args << Rt << ", [" << Rn << ", " << Rm << "]"; - unpred = unpred || (Rm.rm.r == SP) || (Rm.rm.r == PC); - } else if (is_word && Rn.r == SP && imm8 == 4 && op4 == (is_load ? 0xB : 0xD)) { - opcode << (is_load ? "pop" : "push") << ".w"; - args << Rn; - unpred = unpred || (Rn.r == SP); - } else if ((op4 & 5) == 0) { - opcode << "UNDEFINED [P = W = 0 for " << ldr_str << "]"; - unpred = false; - } else { - uint32_t P = (instr >> 10) & 1; - U = (instr >> 9) & 1; - uint32_t W = (instr >> 8) & 1; - bool pre_index = (P != 0 && W == 1); - bool post_index = (P == 0 && W == 1); - const char* t = (P != 0 && U != 0 && W == 0) ? "t" : ""; // Unprivileged load/store? - opcode << ldr_str << sign << type << t << ".w"; - args << Rt << ", [" << Rn << (post_index ? "]" : "") << ", #" << (U != 0 ? "" : "-") - << imm8 << (post_index ? "" : "]") << (pre_index ? "!" 
: ""); - unpred = (W != 0 && Rn.r == Rt.r); - } - if (unpred) { - args << " (UNPREDICTABLE)"; - } - break; - } - case 0x29: { // 0101001 - // |111|11|1000000|0000|1111|1100|00|0 0|0000| - // |5 3|21|0 4|3 0|5 2|1 8|76|5 4|3 0| - // |---|--|-------|----|----|----|--|---|----| - // |332|22|2222222|1111|1111|1100|00|0 0|0000| - // |1 9|87|6 0|9 6|5 2|1 8|76|5 4|3 0| - // |---|--|-------|----|----|----|--|---|----| - // |111|11|0101001| Rm |1111| Rd |11|op3| Rm | - // REV - 111 11 0101001 mmmm 1111 dddd 1000 mmmm - // REV16 - 111 11 0101001 mmmm 1111 dddd 1001 mmmm - // RBIT - 111 11 0101001 mmmm 1111 dddd 1010 mmmm - // REVSH - 111 11 0101001 mmmm 1111 dddd 1011 mmmm - if ((instr & 0xf0c0) == 0xf080) { - uint32_t op3 = (instr >> 4) & 3; - opcode << kThumbReverseOperations[op3]; - ArmRegister Rm(instr, 0); - ArmRegister Rd(instr, 8); - args << Rd << ", " << Rm; - ArmRegister Rm2(instr, 16); - if (Rm.r != Rm2.r || Rm.r == 13 || Rm.r == 15 || Rd.r == 13 || Rd.r == 15) { - args << " (UNPREDICTABLE)"; - } - } // else unknown instruction - break; - } - case 0x2B: { // 0101011 - // CLZ - 111 11 0101011 mmmm 1111 dddd 1000 mmmm - if ((instr & 0xf0f0) == 0xf080) { - opcode << "clz"; - ArmRegister Rm(instr, 0); - ArmRegister Rd(instr, 8); - args << Rd << ", " << Rm; - ArmRegister Rm2(instr, 16); - if (Rm.r != Rm2.r || Rm.r == 13 || Rm.r == 15 || Rd.r == 13 || Rd.r == 15) { - args << " (UNPREDICTABLE)"; - } - } - break; - } - case 0x7B: case 0x7F: { - FpRegister d(instr, 12, 22); - FpRegister m(instr, 0, 5); - uint32_t sz = (instr >> 18) & 0x3; // Decode size bits. - uint32_t size = (sz == 0) ? 8 : sz << 4; - uint32_t opc2 = (instr >> 7) & 0xF; - uint32_t Q = (instr >> 6) & 1; - if (Q == 0 && opc2 == 0xA && size == 8) { // 1010, VCNT - opcode << "vcnt." << size; - args << d << ", " << m; - } else if (Q == 0 && (opc2 == 0x4 || opc2 == 0x5) && size <= 32) { // 010x, VPADDL - bool op = HasBitSet(instr, 7); - opcode << "vpaddl." << (op ? 
"u" : "s") << size; - args << d << ", " << m; - } else { - opcode << "UNKNOWN " << op2; - } - break; - } - default: // more formats - if ((op2 >> 4) == 2) { // 010xxxx - // data processing (register) - if ((instr & 0x0080f0f0) == 0x0000f000) { - // LSL, LSR, ASR, ROR - uint32_t shift_op = (instr >> 21) & 3; - uint32_t S = (instr >> 20) & 1; - ArmRegister Rd(instr, 8); - ArmRegister Rn(instr, 16); - ArmRegister Rm(instr, 0); - opcode << kThumb2ShiftOperations[shift_op] << (S != 0 ? "s" : ""); - args << Rd << ", " << Rn << ", " << Rm; - } - } else if ((op2 >> 3) == 6) { // 0110xxx - // Multiply, multiply accumulate, and absolute difference - op1 = (instr >> 20) & 0x7; - op2 = (instr >> 4) & 0x1; - ArmRegister Ra(instr, 12); - ArmRegister Rn(instr, 16); - ArmRegister Rm(instr, 0); - ArmRegister Rd(instr, 8); - switch (op1) { - case 0: - if (op2 == 0) { - if (Ra.r == 0xf) { - opcode << "mul"; - args << Rd << ", " << Rn << ", " << Rm; - } else { - opcode << "mla"; - args << Rd << ", " << Rn << ", " << Rm << ", " << Ra; - } - } else { - opcode << "mls"; - args << Rd << ", " << Rn << ", " << Rm << ", " << Ra; - } - break; - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - break; // do these sometime - } - } else if ((op2 >> 3) == 7) { // 0111xxx - // Long multiply, long multiply accumulate, and divide - op1 = (instr >> 20) & 0x7; - op2 = (instr >> 4) & 0xf; - ArmRegister Rn(instr, 16); - ArmRegister Rm(instr, 0); - ArmRegister Rd(instr, 8); - ArmRegister RdHi(instr, 8); - ArmRegister RdLo(instr, 12); - switch (op1) { - case 0: - opcode << "smull"; - args << RdLo << ", " << RdHi << ", " << Rn << ", " << Rm; - break; - case 1: - opcode << "sdiv"; - args << Rd << ", " << Rn << ", " << Rm; - break; - case 2: - opcode << "umull"; - args << RdLo << ", " << RdHi << ", " << Rn << ", " << Rm; - break; - case 3: - opcode << "udiv"; - args << Rd << ", " << Rn << ", " << Rm; - break; - case 4: - case 5: - case 6: - break; // TODO: when we generate these... 
- } - } - } - break; - default: - break; - } +void DisassemblerArm::Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) { + DCHECK_LE(begin, end); - // Apply any IT-block conditions to the opcode if necessary. - if (!it_conditions_.empty()) { - opcode << it_conditions_.back(); - it_conditions_.pop_back(); - } - if (opcode.str().size() == 0) { - opcode << "UNKNOWN " << op2; - } + // Remove the Thumb specifier bit; no effect if begin does not point to T32 code. + const uintptr_t base = reinterpret_cast<uintptr_t>(begin) & ~1; - os << FormatInstructionPointer(instr_ptr) - << StringPrintf(": %08x\t%-7s ", instr, opcode.str().c_str()) - << args.str() << '\n'; - return 4; -} // NOLINT(readability/fn_size) + disasm_->SetT32((reinterpret_cast<uintptr_t>(begin) & 1) != 0); + disasm_->JumpToPc(GetPc(base)); -size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr) { - uint16_t instr = ReadU16(instr_ptr); - bool is_32bit = ((instr & 0xF000) == 0xF000) || ((instr & 0xF800) == 0xE800); - if (is_32bit) { - return DumpThumb32(os, instr_ptr); + if (disasm_->IsT32()) { + // The Thumb specifier bits cancel each other. 
+ disasm_->DisassembleT32Buffer(reinterpret_cast<const uint16_t*>(base), end - begin); } else { - std::ostringstream opcode; - std::ostringstream args; - uint16_t opcode1 = instr >> 10; - if (opcode1 < 0x10) { - // shift (immediate), add, subtract, move, and compare - uint16_t opcode2 = instr >> 9; - switch (opcode2) { - case 0x0: case 0x1: case 0x2: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7: - case 0x8: case 0x9: case 0xA: case 0xB: { - // Logical shift left - 00 000xx iii mmm ddd - // Logical shift right - 00 001xx iii mmm ddd - // Arithmetic shift right - 00 010xx iii mmm ddd - uint16_t imm5 = (instr >> 6) & 0x1F; - ThumbRegister rm(instr, 3); - ThumbRegister Rd(instr, 0); - if (opcode2 <= 3) { - opcode << "lsls"; - } else if (opcode2 <= 7) { - opcode << "lsrs"; - } else { - opcode << "asrs"; - } - args << Rd << ", " << rm << ", #" << imm5; - break; - } - case 0xC: case 0xD: case 0xE: case 0xF: { - // Add register - 00 01100 mmm nnn ddd - // Sub register - 00 01101 mmm nnn ddd - // Add 3-bit immediate - 00 01110 iii nnn ddd - // Sub 3-bit immediate - 00 01111 iii nnn ddd - uint16_t imm3_or_Rm = (instr >> 6) & 7; - ThumbRegister Rn(instr, 3); - ThumbRegister Rd(instr, 0); - if ((opcode2 & 2) != 0 && imm3_or_Rm == 0) { - opcode << "mov"; - } else { - if ((opcode2 & 1) == 0) { - opcode << "adds"; - } else { - opcode << "subs"; - } - } - args << Rd << ", " << Rn; - if ((opcode2 & 2) == 0) { - ArmRegister Rm(imm3_or_Rm); - args << ", " << Rm; - } else if (imm3_or_Rm != 0) { - args << ", #" << imm3_or_Rm; - } - break; - } - case 0x10: case 0x11: case 0x12: case 0x13: - case 0x14: case 0x15: case 0x16: case 0x17: - case 0x18: case 0x19: case 0x1A: case 0x1B: - case 0x1C: case 0x1D: case 0x1E: case 0x1F: { - // MOVS Rd, #imm8 - 00100 ddd iiiiiiii - // CMP Rn, #imm8 - 00101 nnn iiiiiiii - // ADDS Rn, #imm8 - 00110 nnn iiiiiiii - // SUBS Rn, #imm8 - 00111 nnn iiiiiiii - ThumbRegister Rn(instr, 8); - uint16_t imm8 = instr & 0xFF; - switch (opcode2 >> 2) { - case 4: 
opcode << "movs"; break; - case 5: opcode << "cmp"; break; - case 6: opcode << "adds"; break; - case 7: opcode << "subs"; break; - } - args << Rn << ", #" << imm8; - break; - } - default: - break; - } - } else if (opcode1 == 0x10) { - // Data-processing - uint16_t opcode2 = (instr >> 6) & 0xF; - ThumbRegister rm(instr, 3); - ThumbRegister rdn(instr, 0); - opcode << kThumbDataProcessingOperations[opcode2]; - args << rdn << ", " << rm; - } else if (opcode1 == 0x11) { - // Special data instructions and branch and exchange - uint16_t opcode2 = (instr >> 6) & 0x0F; - switch (opcode2) { - case 0x0: case 0x1: case 0x2: case 0x3: { - // Add low registers - 010001 0000 xxxxxx - // Add high registers - 010001 0001/001x xxxxxx - uint16_t DN = (instr >> 7) & 1; - ArmRegister rm(instr, 3); - uint16_t Rdn = instr & 7; - ArmRegister DN_Rdn((DN << 3) | Rdn); - opcode << "add"; - args << DN_Rdn << ", " << rm; - break; - } - case 0x8: case 0x9: case 0xA: case 0xB: { - // Move low registers - 010001 1000 xxxxxx - // Move high registers - 010001 1001/101x xxxxxx - uint16_t DN = (instr >> 7) & 1; - ArmRegister rm(instr, 3); - uint16_t Rdn = instr & 7; - ArmRegister DN_Rdn((DN << 3) | Rdn); - opcode << "mov"; - args << DN_Rdn << ", " << rm; - break; - } - case 0x5: case 0x6: case 0x7: { - // Compare high registers - 010001 0101/011x xxxxxx - uint16_t N = (instr >> 7) & 1; - ArmRegister rm(instr, 3); - uint16_t Rn = instr & 7; - ArmRegister N_Rn((N << 3) | Rn); - opcode << "cmp"; - args << N_Rn << ", " << rm; - break; - } - case 0xC: case 0xD: case 0xE: case 0xF: { - // Branch and exchange - 010001 110x xxxxxx - // Branch with link and exchange - 010001 111x xxxxxx - ArmRegister rm(instr, 3); - opcode << ((opcode2 & 0x2) == 0 ? 
"bx" : "blx"); - args << rm; - break; - } - default: - break; - } - } else if (opcode1 == 0x12 || opcode1 == 0x13) { // 01001x - const uintptr_t lo_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->base_address_); - const uintptr_t hi_adr = reinterpret_cast<intptr_t>(GetDisassemblerOptions()->end_address_); - ThumbRegister Rt(instr, 8); - uint16_t imm8 = instr & 0xFF; - opcode << "ldr"; - args << Rt << ", [pc, #" << (imm8 << 2) << "]"; - DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, /*U*/ 1u, imm8 << 2, kT2LitHexWord); - } else if ((opcode1 >= 0x14 && opcode1 <= 0x17) || // 0101xx - (opcode1 >= 0x18 && opcode1 <= 0x1f) || // 011xxx - (opcode1 >= 0x20 && opcode1 <= 0x27)) { // 100xxx - // Load/store single data item - uint16_t opA = (instr >> 12) & 0xF; - if (opA == 0x5) { - uint16_t opB = (instr >> 9) & 0x7; - ThumbRegister Rm(instr, 6); - ThumbRegister Rn(instr, 3); - ThumbRegister Rt(instr, 0); - switch (opB) { - case 0: opcode << "str"; break; - case 1: opcode << "strh"; break; - case 2: opcode << "strb"; break; - case 3: opcode << "ldrsb"; break; - case 4: opcode << "ldr"; break; - case 5: opcode << "ldrh"; break; - case 6: opcode << "ldrb"; break; - case 7: opcode << "ldrsh"; break; - } - args << Rt << ", [" << Rn << ", " << Rm << "]"; - } else if (opA == 9) { - uint16_t opB = (instr >> 11) & 1; - ThumbRegister Rt(instr, 8); - uint16_t imm8 = instr & 0xFF; - opcode << (opB == 0 ? "str" : "ldr"); - args << Rt << ", [sp, #" << (imm8 << 2) << "]"; - } else { - uint16_t imm5 = (instr >> 6) & 0x1F; - uint16_t opB = (instr >> 11) & 1; - ThumbRegister Rn(instr, 3); - ThumbRegister Rt(instr, 0); - switch (opA) { - case 6: - imm5 <<= 2; - opcode << (opB == 0 ? "str" : "ldr"); - break; - case 7: - imm5 <<= 0; - opcode << (opB == 0 ? "strb" : "ldrb"); - break; - case 8: - imm5 <<= 1; - opcode << (opB == 0 ? 
"strh" : "ldrh"); - break; - } - args << Rt << ", [" << Rn << ", #" << imm5 << "]"; - } - } else if (opcode1 >= 0x34 && opcode1 <= 0x37) { // 1101xx - int8_t imm8 = instr & 0xFF; - uint32_t cond = (instr >> 8) & 0xF; - opcode << "b"; - DumpCond(opcode, cond); - DumpBranchTarget(args, instr_ptr + 4, (imm8 << 1)); - } else if ((instr & 0xF800) == 0xA800) { - // Generate SP-relative address - ThumbRegister rd(instr, 8); - int imm8 = instr & 0xFF; - opcode << "add"; - args << rd << ", sp, #" << (imm8 << 2); - } else if ((instr & 0xF000) == 0xB000) { - // Miscellaneous 16-bit instructions - uint16_t opcode2 = (instr >> 5) & 0x7F; - switch (opcode2) { - case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: { - // Add immediate to SP - 1011 00000 ii iiiii - // Subtract immediate from SP - 1011 00001 ii iiiii - int imm7 = instr & 0x7F; - opcode << ((opcode2 & 4) == 0 ? "add" : "sub"); - args << "sp, sp, #" << (imm7 << 2); - break; - } - case 0x08: case 0x09: case 0x0A: case 0x0B: // 0001xxx - case 0x0C: case 0x0D: case 0x0E: case 0x0F: - case 0x18: case 0x19: case 0x1A: case 0x1B: // 0011xxx - case 0x1C: case 0x1D: case 0x1E: case 0x1F: - case 0x48: case 0x49: case 0x4A: case 0x4B: // 1001xxx - case 0x4C: case 0x4D: case 0x4E: case 0x4F: - case 0x58: case 0x59: case 0x5A: case 0x5B: // 1011xxx - case 0x5C: case 0x5D: case 0x5E: case 0x5F: { - // CBNZ, CBZ - uint16_t op = (instr >> 11) & 1; - uint16_t i = (instr >> 9) & 1; - uint16_t imm5 = (instr >> 3) & 0x1F; - ThumbRegister Rn(instr, 0); - opcode << (op != 0 ? 
"cbnz" : "cbz"); - uint32_t imm32 = (i << 6) | (imm5 << 1); - args << Rn << ", "; - DumpBranchTarget(args, instr_ptr + 4, imm32); - break; - } - case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27: - case 0x28: case 0x29: case 0x2A: case 0x2B: case 0x2C: case 0x2D: case 0x2E: case 0x2F: { - opcode << "push"; - args << RegisterList((instr & 0xFF) | ((instr & 0x100) << 6)); - break; - } - case 0x60: case 0x61: case 0x62: case 0x63: case 0x64: case 0x65: case 0x66: case 0x67: - case 0x68: case 0x69: case 0x6A: case 0x6B: case 0x6C: case 0x6D: case 0x6E: case 0x6F: { - opcode << "pop"; - args << RegisterList((instr & 0xFF) | ((instr & 0x100) << 7)); - break; - } - case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77: { - opcode << "bkpt"; - args << "#" << (instr & 0xFF); - break; - } - case 0x50: case 0x51: // 101000x - case 0x52: case 0x53: // 101001x - case 0x56: case 0x57: { // 101011x - uint16_t op = (instr >> 6) & 3; - opcode << kThumbReverseOperations[op]; - ThumbRegister Rm(instr, 3); - ThumbRegister Rd(instr, 0); - args << Rd << ", " << Rm; - break; - } - case 0x78: case 0x79: case 0x7A: case 0x7B: // 1111xxx - case 0x7C: case 0x7D: case 0x7E: case 0x7F: { - // If-Then, and hints - uint16_t opA = (instr >> 4) & 0xF; - uint16_t opB = instr & 0xF; - if (opB == 0) { - switch (opA) { - case 0: opcode << "nop"; break; - case 1: opcode << "yield"; break; - case 2: opcode << "wfe"; break; - case 3: opcode << "sev"; break; - default: break; - } - } else { - uint32_t first_cond = opA; - uint32_t mask = opB; - opcode << "it"; - - // Flesh out the base "it" opcode with the specific collection of 't's and 'e's, - // and store up the actual condition codes we'll want to add to the next few opcodes. - size_t count = 3 - CTZ(mask); - it_conditions_.resize(count + 2); // Plus the implicit 't', plus the "" for the IT itself. 
- for (size_t i = 0; i < count; ++i) { - bool positive_cond = ((first_cond & 1) != 0); - bool positive_mask = ((mask & (1 << (3 - i))) != 0); - if (positive_mask == positive_cond) { - opcode << 't'; - it_conditions_[i] = kConditionCodeNames[first_cond]; - } else { - opcode << 'e'; - it_conditions_[i] = kConditionCodeNames[first_cond ^ 1]; - } - } - it_conditions_[count] = kConditionCodeNames[first_cond]; // The implicit 't'. - - it_conditions_[count + 1] = ""; // No condition code for the IT itself... - DumpCond(args, first_cond); // ...because it's considered an argument. - } - break; - } - default: - break; - } - } else if (((instr & 0xF000) == 0x5000) || ((instr & 0xE000) == 0x6000) || - ((instr & 0xE000) == 0x8000)) { - // Load/store single data item - uint16_t opA = instr >> 12; - // uint16_t opB = (instr >> 9) & 7; - switch (opA) { - case 0x6: { - // STR Rt, [Rn, #imm] - 01100 iiiii nnn ttt - // LDR Rt, [Rn, #imm] - 01101 iiiii nnn ttt - uint16_t imm5 = (instr >> 6) & 0x1F; - ThumbRegister Rn(instr, 3); - ThumbRegister Rt(instr, 0); - opcode << ((instr & 0x800) == 0 ? "str" : "ldr"); - args << Rt << ", [" << Rn << ", #" << (imm5 << 2) << "]"; - break; - } - case 0x9: { - // STR Rt, [SP, #imm] - 01100 ttt iiiiiiii - // LDR Rt, [SP, #imm] - 01101 ttt iiiiiiii - uint16_t imm8 = instr & 0xFF; - ThumbRegister Rt(instr, 8); - opcode << ((instr & 0x800) == 0 ? "str" : "ldr"); - args << Rt << ", [sp, #" << (imm8 << 2) << "]"; - break; - } - default: - break; - } - } else if (opcode1 == 0x38 || opcode1 == 0x39) { - uint16_t imm11 = instr & 0x7FFF; - int32_t imm32 = imm11 << 1; - imm32 = (imm32 << 20) >> 20; // sign extend 12 bit immediate - opcode << "b"; - DumpBranchTarget(args, instr_ptr + 4, imm32); - } - - // Apply any IT-block conditions to the opcode if necessary. 
- if (!it_conditions_.empty()) { - opcode << it_conditions_.back(); - it_conditions_.pop_back(); - } - - os << FormatInstructionPointer(instr_ptr) - << StringPrintf(": %04x \t%-7s ", instr, opcode.str().c_str()) - << args.str() << '\n'; + disasm_->DisassembleA32Buffer(reinterpret_cast<const uint32_t*>(base), end - begin); } - return 2; + + os << output_.str(); + output_.str(std::string()); } } // namespace arm diff --git a/disassembler/disassembler_arm.h b/disassembler/disassembler_arm.h index f870e8ef86..237b577bc2 100644 --- a/disassembler/disassembler_arm.h +++ b/disassembler/disassembler_arm.h @@ -17,32 +17,33 @@ #ifndef ART_DISASSEMBLER_DISASSEMBLER_ARM_H_ #define ART_DISASSEMBLER_DISASSEMBLER_ARM_H_ -#include <vector> +#include <memory> +#include <sstream> +#include "base/macros.h" #include "disassembler.h" namespace art { namespace arm { class DisassemblerArm FINAL : public Disassembler { + class CustomDisassembler; + public: - explicit DisassemblerArm(DisassemblerOptions* options) : Disassembler(options) {} + explicit DisassemblerArm(DisassemblerOptions* options); size_t Dump(std::ostream& os, const uint8_t* begin) OVERRIDE; void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) OVERRIDE; private: - void DumpArm(std::ostream& os, const uint8_t* instr); - - // Returns the size of the instruction just decoded - size_t DumpThumb16(std::ostream& os, const uint8_t* instr); - size_t DumpThumb32(std::ostream& os, const uint8_t* instr_ptr); - - void DumpBranchTarget(std::ostream& os, const uint8_t* instr_ptr, int32_t imm32); - void DumpCond(std::ostream& os, uint32_t cond); - void DumpMemoryDomain(std::ostream& os, uint32_t domain); - - std::vector<const char*> it_conditions_; + uintptr_t GetPc(uintptr_t instr_ptr) const { + return GetDisassemblerOptions()->absolute_addresses_ + ? 
instr_ptr + : instr_ptr - reinterpret_cast<uintptr_t>(GetDisassemblerOptions()->base_address_); + } + + std::ostringstream output_; + std::unique_ptr<CustomDisassembler> disasm_; DISALLOW_COPY_AND_ASSIGN(DisassemblerArm); }; diff --git a/imgdiag/Android.bp b/imgdiag/Android.bp index 4c0772d9ba..7837d66913 100644 --- a/imgdiag/Android.bp +++ b/imgdiag/Android.bp @@ -69,3 +69,11 @@ art_cc_binary { "libartd-compiler", ], } + +art_cc_test { + name: "art_imgdiag_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["imgdiag_test.cc"], +} diff --git a/oatdump/Android.bp b/oatdump/Android.bp index b01bf51e29..dd6331c24b 100644 --- a/oatdump/Android.bp +++ b/oatdump/Android.bp @@ -53,6 +53,7 @@ art_cc_binary { art_cc_binary { name: "oatdumps", defaults: ["oatdump-defaults"], + device_supported: false, target: { darwin: { enabled: false, @@ -73,6 +74,7 @@ art_cc_binary { "art_debug_defaults", "oatdump-defaults", ], + device_supported: false, target: { darwin: { enabled: false, @@ -87,3 +89,10 @@ art_cc_binary { ] + art_static_dependencies, } +art_cc_test { + name: "art_oatdump_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["oatdump_test.cc"], +} diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index db6a709628..d8ac581a99 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -526,7 +526,7 @@ class OatDumper { } else { const char* descriptor = m->GetDeclaringClassDescriptor(); const DexFile::ClassDef* class_def = - dex_file->FindClassDef(descriptor, ComputeModifiedUtf8Hash(descriptor)); + OatDexFile::FindClassDef(*dex_file, descriptor, ComputeModifiedUtf8Hash(descriptor)); if (class_def != nullptr) { uint16_t class_def_index = dex_file->GetIndexForClassDef(*class_def); const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index); @@ -742,7 +742,7 @@ class OatDumper { if (oat_dex_file.GetLookupTableData() != nullptr) { uint32_t table_offset = dchecked_integral_cast<uint32_t>( oat_dex_file.GetLookupTableData() - oat_file_begin); - 
uint32_t table_size = TypeLookupTable::RawDataLength(*dex_file); + uint32_t table_size = TypeLookupTable::RawDataLength(dex_file->NumClassDefs()); os << StringPrintf("type-table: 0x%08x..0x%08x\n", table_offset, table_offset + table_size - 1); diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 5240011901..1af3660e8f 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -37,8 +37,8 @@ #include "elf_file_impl.h" #include "gc/space/image_space.h" #include "image-inl.h" -#include "mirror/abstract_method.h" #include "mirror/dex_cache.h" +#include "mirror/executable.h" #include "mirror/object-inl.h" #include "mirror/method.h" #include "mirror/reference.h" @@ -770,8 +770,8 @@ void PatchOat::VisitObject(mirror::Object* object) { } else if (object->GetClass() == mirror::Method::StaticClass() || object->GetClass() == mirror::Constructor::StaticClass()) { // Need to go update the ArtMethod. - auto* dest = down_cast<mirror::AbstractMethod*>(copy); - auto* src = down_cast<mirror::AbstractMethod*>(object); + auto* dest = down_cast<mirror::Executable*>(copy); + auto* src = down_cast<mirror::Executable*>(object); dest->SetArtMethod(RelocatedAddressOfPointer(src->GetArtMethod())); } } diff --git a/profman/Android.bp b/profman/Android.bp index f3b4e1436f..322dda2211 100644 --- a/profman/Android.bp +++ b/profman/Android.bp @@ -52,3 +52,11 @@ art_cc_binary { "libartd", ], } + +art_cc_test { + name: "art_profman_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: ["profile_assistant_test.cc"], +} diff --git a/runtime/Android.bp b/runtime/Android.bp index b61976c47b..6234a8459a 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -53,6 +53,7 @@ cc_defaults { "compiler_filter.cc", "debugger.cc", "dex_file.cc", + "dex_file_annotations.cc", "dex_file_verifier.cc", "dex_instruction.cc", "elf_file.cc", @@ -119,10 +120,10 @@ cc_defaults { "linear_alloc.cc", "mem_map.cc", "memory_region.cc", - "mirror/abstract_method.cc", "mirror/array.cc", 
"mirror/class.cc", "mirror/dex_cache.cc", + "mirror/executable.cc", "mirror/field.cc", "mirror/method.cc", "mirror/object.cc", @@ -150,9 +151,9 @@ cc_defaults { "native/java_lang_VMClassLoader.cc", "native/java_lang_ref_FinalizerReference.cc", "native/java_lang_ref_Reference.cc", - "native/java_lang_reflect_AbstractMethod.cc", "native/java_lang_reflect_Array.cc", "native/java_lang_reflect_Constructor.cc", + "native/java_lang_reflect_Executable.cc", "native/java_lang_reflect_Field.cc", "native/java_lang_reflect_Method.cc", "native/java_lang_reflect_Parameter.cc", @@ -197,6 +198,7 @@ cc_defaults { "verifier/reg_type.cc", "verifier/reg_type_cache.cc", "verifier/register_line.cc", + "verifier/verifier_deps.cc", "well_known_classes.cc", "zip_archive.cc", @@ -461,6 +463,127 @@ art_cc_library { ], } +art_cc_library { + name: "libart-runtime-gtest", + defaults: ["libart-gtest-defaults"], + srcs: ["common_runtime_test.cc"], + shared_libs: [ + "libartd", + ], +} + +art_cc_test { + name: "art_runtime_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: [ + "arch/arch_test.cc", + "arch/instruction_set_test.cc", + "arch/instruction_set_features_test.cc", + "arch/memcmp16_test.cc", + "arch/stub_test.cc", + "arch/arm/instruction_set_features_arm_test.cc", + "arch/arm64/instruction_set_features_arm64_test.cc", + "arch/mips/instruction_set_features_mips_test.cc", + "arch/mips64/instruction_set_features_mips64_test.cc", + "arch/x86/instruction_set_features_x86_test.cc", + "arch/x86_64/instruction_set_features_x86_64_test.cc", + "barrier_test.cc", + "base/arena_allocator_test.cc", + "base/bit_field_test.cc", + "base/bit_utils_test.cc", + "base/bit_vector_test.cc", + "base/hash_set_test.cc", + "base/hex_dump_test.cc", + "base/histogram_test.cc", + "base/mutex_test.cc", + "base/scoped_flock_test.cc", + "base/stringprintf_test.cc", + "base/time_utils_test.cc", + "base/timing_logger_test.cc", + "base/transform_array_ref_test.cc", + "base/transform_iterator_test.cc", + 
"base/variant_map_test.cc", + "base/unix_file/fd_file_test.cc", + "class_linker_test.cc", + "compiler_filter_test.cc", + "dex_file_test.cc", + "dex_file_verifier_test.cc", + "dex_instruction_test.cc", + "dex_instruction_visitor_test.cc", + "dex_method_iterator_test.cc", + "entrypoints/math_entrypoints_test.cc", + "entrypoints/quick/quick_trampoline_entrypoints_test.cc", + "entrypoints_order_test.cc", + "gc/accounting/card_table_test.cc", + "gc/accounting/mod_union_table_test.cc", + "gc/accounting/space_bitmap_test.cc", + "gc/collector/immune_spaces_test.cc", + "gc/heap_test.cc", + "gc/reference_queue_test.cc", + "gc/space/dlmalloc_space_static_test.cc", + "gc/space/dlmalloc_space_random_test.cc", + "gc/space/large_object_space_test.cc", + "gc/space/rosalloc_space_static_test.cc", + "gc/space/rosalloc_space_random_test.cc", + "gc/space/space_create_test.cc", + "gc/system_weak_test.cc", + "gc/task_processor_test.cc", + "gtest_test.cc", + "handle_scope_test.cc", + "indenter_test.cc", + "indirect_reference_table_test.cc", + "instrumentation_test.cc", + "intern_table_test.cc", + "interpreter/safe_math_test.cc", + "interpreter/unstarted_runtime_test.cc", + "java_vm_ext_test.cc", + "jit/profile_compilation_info_test.cc", + "leb128_test.cc", + "mem_map_test.cc", + "memory_region_test.cc", + "mirror/dex_cache_test.cc", + "mirror/object_test.cc", + "monitor_pool_test.cc", + "monitor_test.cc", + "oat_file_test.cc", + "oat_file_assistant_test.cc", + "parsed_options_test.cc", + "prebuilt_tools_test.cc", + "reference_table_test.cc", + "thread_pool_test.cc", + "transaction_test.cc", + "type_lookup_table_test.cc", + "utf_test.cc", + "utils_test.cc", + "verifier/method_verifier_test.cc", + "verifier/reg_type_test.cc", + "verifier/verifier_deps_test.cc", + "zip_archive_test.cc", + ], + shared_libs: [ + "libbacktrace", + ], +} + +art_cc_test { + name: "art_runtime_compiler_tests", + defaults: [ + "art_gtest_defaults", + ], + srcs: [ + "jni_internal_test.cc", + "proxy_test.cc", + 
"reflection_test.cc", + ], + shared_libs: [ + "libartd-compiler", + "libvixld-arm", + "libvixld-arm64", + ], +} + subdirs = [ "openjdkjvm", "openjdkjvmti", diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S index 38ca76a6a9..9eca86232d 100644 --- a/runtime/arch/arm/asm_support_arm.S +++ b/runtime/arch/arm/asm_support_arm.S @@ -68,14 +68,14 @@ .set .Lruntime_current3_used, 0 // The RUNTIME_CURRENT macros that are bound to the \name argument of DEF_ENTRY to ensure // that label names are unique. - .macro RUNTIME_CURRENT1 rDest, rTemp - RUNTIME_CURRENT \name, 1, \rDest, \rTemp + .macro RUNTIME_CURRENT1 rDest + RUNTIME_CURRENT \name, 1, \rDest .endm - .macro RUNTIME_CURRENT2 rDest, rTemp - RUNTIME_CURRENT \name, 2, \rDest, \rTemp + .macro RUNTIME_CURRENT2 rDest + RUNTIME_CURRENT \name, 2, \rDest .endm - .macro RUNTIME_CURRENT3 rDest, rTemp - RUNTIME_CURRENT \name, 3, \rDest, \rTemp + .macro RUNTIME_CURRENT3 rDest + RUNTIME_CURRENT \name, 3, \rDest .endm .endm diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc index befdd480ed..daa2dff060 100644 --- a/runtime/arch/arm/fault_handler_arm.cc +++ b/runtime/arch/arm/fault_handler_arm.cc @@ -122,13 +122,16 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); uint8_t* ptr = reinterpret_cast<uint8_t*>(sc->arm_pc); - uint32_t instr_size = GetInstructionSize(ptr); - sc->arm_lr = (sc->arm_pc + instr_size) | 1; // LR needs to point to gc map location + uintptr_t gc_map_location = (sc->arm_pc + instr_size) | 1; + + // Push the gc map location to the stack and pass the fault address in LR. 
+ sc->arm_sp -= sizeof(uintptr_t); + *reinterpret_cast<uintptr_t*>(sc->arm_sp) = gc_map_location; + sc->arm_lr = reinterpret_cast<uintptr_t>(info->si_addr); sc->arm_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal); // Pass the faulting address as the first argument of // art_quick_throw_null_pointer_exception_from_signal. - sc->arm_r0 = reinterpret_cast<uintptr_t>(info->si_addr); VLOG(signals) << "Generating null pointer exception"; return true; } diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index a3f053b79d..5d53062902 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -173,6 +173,29 @@ /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveEverything) + * when core registers are already saved. + */ +.macro SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED rTemp + @ 14 words of callee saves and args already saved. + vpush {d0-d15} @ 32 words, 2 for each of the 16 saved doubles. + .cfi_adjust_cfa_offset 128 + sub sp, #8 @ 2 words of space, alignment padding and Method* + .cfi_adjust_cfa_offset 8 + RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp. + @ Load kSaveEverything Method* into rTemp. + ldr \rTemp, [\rTemp, #RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET] + str \rTemp, [sp, #0] @ Place Method* at bottom of stack. + str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame. + + // Ugly compile-time check, but we only have the preprocessor. +#if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8) +#error "FRAME_SIZE_SAVE_EVERYTHING(ARM) size not as expected." +#endif +.endm + + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveEverything) */ .macro SETUP_SAVE_EVERYTHING_FRAME rTemp push {r0-r12, lr} @ 14 words of callee saves and args. 
@@ -191,20 +214,7 @@ .cfi_rel_offset r11, 44 .cfi_rel_offset ip, 48 .cfi_rel_offset lr, 52 - vpush {d0-d15} @ 32 words, 2 for each of the 16 saved doubles. - .cfi_adjust_cfa_offset 128 - sub sp, #8 @ 2 words of space, alignment padding and Method* - .cfi_adjust_cfa_offset 8 - RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp. - @ Load kSaveEverything Method* into rTemp. - ldr \rTemp, [\rTemp, #RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET] - str \rTemp, [sp, #0] @ Place Method* at bottom of stack. - str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame. - - // Ugly compile-time check, but we only have the preprocessor. -#if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8) -#error "FRAME_SIZE_SAVE_EVERYTHING(ARM) size not as expected." -#endif + SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED \rTemp .endm .macro RESTORE_SAVE_EVERYTHING_FRAME @@ -250,7 +260,7 @@ .fnstart SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save callee saves for throw mov r0, r9 @ pass Thread::Current - b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*) + bl artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*) .endm .macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name @@ -258,7 +268,16 @@ ENTRY \c_name SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save all registers as basis for long jump context mov r0, r9 @ pass Thread::Current - b \cxx_name @ \cxx_name(Thread*) + bl \cxx_name @ \cxx_name(Thread*) +END \c_name +.endm + +.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + SETUP_SAVE_EVERYTHING_FRAME r0 @ save all registers as basis for long jump context + mov r0, r9 @ pass Thread::Current + bl \cxx_name @ \cxx_name(Thread*) END \c_name .endm @@ -267,16 +286,16 @@ END \c_name ENTRY \c_name SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1 @ save all registers as basis for long jump context mov r1, r9 @ pass Thread::Current - b \cxx_name @ \cxx_name(Thread*) + bl \cxx_name @ 
\cxx_name(Thread*) END \c_name .endm -.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name +.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name .extern \cxx_name ENTRY \c_name - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context + SETUP_SAVE_EVERYTHING_FRAME r2 @ save all registers as basis for long jump context mov r2, r9 @ pass Thread::Current - b \cxx_name @ \cxx_name(Thread*) + bl \cxx_name @ \cxx_name(Thread*) END \c_name .endm @@ -351,29 +370,56 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo /* * Called by managed code to create and deliver a NullPointerException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode /* * Call installed by a signal handler to create and deliver a NullPointerException. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal + .extern art_quick_throw_null_pointer_exception_from_signal +ENTRY art_quick_throw_null_pointer_exception_from_signal + // The fault handler pushes the gc map address, i.e. "return address", to stack + // and passes the fault address in LR. So we need to set up the CFI info accordingly. + .cfi_def_cfa_offset __SIZEOF_POINTER__ + .cfi_rel_offset lr, 0 + push {r0-r12} @ 13 words of callee saves and args; LR already saved. 
+ .cfi_adjust_cfa_offset 52 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + .cfi_rel_offset r2, 8 + .cfi_rel_offset r3, 12 + .cfi_rel_offset r4, 16 + .cfi_rel_offset r5, 20 + .cfi_rel_offset r6, 24 + .cfi_rel_offset r7, 28 + .cfi_rel_offset r8, 32 + .cfi_rel_offset r9, 36 + .cfi_rel_offset r10, 40 + .cfi_rel_offset r11, 44 + .cfi_rel_offset ip, 48 + + @ save all registers as basis for long jump context + SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1 + mov r0, lr @ pass the fault address stored in LR by the fault handler. + mov r1, r9 @ pass Thread::Current + bl artThrowNullPointerExceptionFromSignal @ (Thread*) +END art_quick_throw_null_pointer_exception_from_signal /* * Called by managed code to create and deliver an ArithmeticException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds * index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode /* * Called by managed code to create and deliver a StringIndexOutOfBoundsException * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode /* * Called by managed code to create and deliver a StackOverflowError. @@ -381,11 +427,6 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFro NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode /* - * Called by managed code to create and deliver a NoSuchMethodError. 
- */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode - - /* * All generated callsites for interface invokes and invocation slow paths will load arguments * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper. @@ -721,7 +762,7 @@ ENTRY art_quick_check_cast .cfi_restore lr SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context mov r2, r9 @ pass Thread::Current - b artThrowClassCastException @ (Class*, Class*, Thread*) + bl artThrowClassCastException @ (Class*, Class*, Thread*) bkpt END art_quick_check_cast @@ -864,7 +905,7 @@ ENTRY art_quick_aput_obj SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3 mov r1, r2 mov r2, r9 @ pass Thread::Current - b artThrowArrayStoreException @ (Class*, Class*, Thread*) + bl artThrowArrayStoreException @ (Class*, Class*, Thread*) bkpt @ unreached END art_quick_aput_obj diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc index 6724d6d480..c02be87e2d 100644 --- a/runtime/arch/arm64/fault_handler_arm64.cc +++ b/runtime/arch/arm64/fault_handler_arm64.cc @@ -96,12 +96,12 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); - sc->regs[30] = sc->pc + 4; // LR needs to point to gc map location + // Push the gc map location to the stack and pass the fault address in LR. + sc->sp -= sizeof(uintptr_t); + *reinterpret_cast<uintptr_t*>(sc->sp) = sc->pc + 4; + sc->regs[30] = reinterpret_cast<uintptr_t>(info->si_addr); sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal); - // Pass the faulting address as the first argument of - // art_quick_throw_null_pointer_exception_from_signal. 
- sc->regs[0] = reinterpret_cast<uintptr_t>(info->si_addr); VLOG(signals) << "Generating null pointer exception"; return true; } diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 25aa8ceb9d..eee949da8a 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -265,10 +265,10 @@ /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveEverything) + * when the SP has already been decremented by FRAME_SIZE_SAVE_EVERYTHING + * and saving registers x29 and LR is handled elsewhere. */ -.macro SETUP_SAVE_EVERYTHING_FRAME - INCREASE_FRAME 512 - +.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR // Ugly compile-time check, but we only have the preprocessor. #if (FRAME_SIZE_SAVE_EVERYTHING != 512) #error "FRAME_SIZE_SAVE_EVERYTHING(ARM64) size not as expected." @@ -310,7 +310,6 @@ SAVE_TWO_REGS x23, x24, 448 SAVE_TWO_REGS x25, x26, 464 SAVE_TWO_REGS x27, x28, 480 - SAVE_TWO_REGS x29, xLR, 496 // art::Runtime** xIP0 = &art::Runtime::instance_ adrp xIP0, :got:_ZN3art7Runtime9instance_E @@ -328,6 +327,16 @@ str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET] .endm + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveEverything) + */ +.macro SETUP_SAVE_EVERYTHING_FRAME + INCREASE_FRAME 512 + SAVE_TWO_REGS x29, xLR, 496 + SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR +.endm + .macro RESTORE_SAVE_EVERYTHING_FRAME // Restore FP registers. // For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned. @@ -391,7 +400,7 @@ mov x0, xSELF // Point of no return. 
- b artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*) + bl artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*) brk 0 // Unreached .endm @@ -424,7 +433,18 @@ ENTRY \c_name SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context mov x0, xSELF // pass Thread::Current - b \cxx_name // \cxx_name(Thread*) + bl \cxx_name // \cxx_name(Thread*) + brk 0 +END \c_name +.endm + +.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name + .extern \cxx_name +ENTRY \c_name + SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context + mov x0, xSELF // pass Thread::Current + bl \cxx_name // \cxx_name(Thread*) + brk 0 END \c_name .endm @@ -433,17 +453,17 @@ END \c_name ENTRY \c_name SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context. mov x1, xSELF // pass Thread::Current. - b \cxx_name // \cxx_name(arg, Thread*). + bl \cxx_name // \cxx_name(arg, Thread*). brk 0 END \c_name .endm -.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name +.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name .extern \cxx_name ENTRY \c_name - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context + SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context mov x2, xSELF // pass Thread::Current - b \cxx_name // \cxx_name(arg1, arg2, Thread*) + bl \cxx_name // \cxx_name(arg1, arg2, Thread*) brk 0 END \c_name .endm @@ -457,29 +477,43 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCo /* * Called by managed code to create and deliver a NullPointerException. 
*/ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode /* * Call installed by a signal handler to create and deliver a NullPointerException. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal + .extern art_quick_throw_null_pointer_exception_from_signal +ENTRY art_quick_throw_null_pointer_exception_from_signal + // The fault handler pushes the gc map address, i.e. "return address", to stack + // and passes the fault address in LR. So we need to set up the CFI info accordingly. + .cfi_def_cfa_offset __SIZEOF_POINTER__ + .cfi_rel_offset lr, 0 + // Save all registers as basis for long jump context. + INCREASE_FRAME (FRAME_SIZE_SAVE_EVERYTHING - __SIZEOF_POINTER__) + SAVE_REG x29, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__) // LR already saved. + SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR + mov x0, lr // pass the fault address stored in LR by the fault handler. + mov x1, xSELF // pass Thread::Current. + bl artThrowNullPointerExceptionFromSignal // (arg, Thread*). + brk 0 +END art_quick_throw_null_pointer_exception_from_signal /* * Called by managed code to create and deliver an ArithmeticException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds * index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode /* * Called by managed code to create and deliver a StringIndexOutOfBoundsException * as if thrown from a call to String.charAt(). 
Arg1 holds index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode /* * Called by managed code to create and deliver a StackOverflowError. @@ -487,11 +521,6 @@ TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFro NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode - - /* * All generated callsites for interface invokes and invocation slow paths will load arguments * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper. @@ -1283,7 +1312,7 @@ ENTRY art_quick_check_cast SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context mov x2, xSELF // pass Thread::Current - b artThrowClassCastException // (Class*, Class*, Thread*) + bl artThrowClassCastException // (Class*, Class*, Thread*) brk 0 // We should not return here... END art_quick_check_cast @@ -1447,10 +1476,10 @@ ENTRY art_quick_aput_obj RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME - mov x1, x2 // Pass value. - mov x2, xSELF // Pass Thread::Current. - b artThrowArrayStoreException // (Object*, Object*, Thread*). - brk 0 // Unreached. + mov x1, x2 // Pass value. + mov x2, xSELF // Pass Thread::Current. + bl artThrowArrayStoreException // (Object*, Object*, Thread*). + brk 0 // Unreached. END art_quick_aput_obj // Macro to facilitate adding new allocation entrypoints. 
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S index 801f708ad3..948b06ce61 100644 --- a/runtime/arch/mips/asm_support_mips.S +++ b/runtime/arch/mips/asm_support_mips.S @@ -26,8 +26,8 @@ // Register holding Thread::Current(). #define rSELF $s1 - // Declare a function called name, sets up $gp. -.macro ENTRY name + // Declare a function called name, doesn't set up $gp. +.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset .type \name, %function .global \name // Cache alignment for function entry. @@ -35,23 +35,21 @@ \name: .cfi_startproc // Ensure we get a sane starting CFA. - .cfi_def_cfa $sp,0 - // Load $gp. We expect that ".set noreorder" is in effect. - .cpload $t9 - // Declare a local convenience label to be branched to when $gp is already set up. -.L\name\()_gp_set: + .cfi_def_cfa $sp, \cfa_offset .endm // Declare a function called name, doesn't set up $gp. .macro ENTRY_NO_GP name - .type \name, %function - .global \name - // Cache alignment for function entry. - .balign 16 -\name: - .cfi_startproc - // Ensure we get a sane starting CFA. - .cfi_def_cfa $sp,0 + ENTRY_NO_GP_CUSTOM_CFA \name, 0 +.endm + + // Declare a function called name, sets up $gp. +.macro ENTRY name + ENTRY_NO_GP \name + // Load $gp. We expect that ".set noreorder" is in effect. + .cpload $t9 + // Declare a local convenience label to be branched to when $gp is already set up. 
+.L\name\()_gp_set: .endm .macro END name diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index 38aa67c12f..e10d4e6a74 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -264,8 +264,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { static_assert(!IsDirectEntrypoint(kQuickThrowArrayBounds), "Non-direct C stub marked direct."); qpoints->pThrowDivZero = art_quick_throw_div_zero; static_assert(!IsDirectEntrypoint(kQuickThrowDivZero), "Non-direct C stub marked direct."); - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - static_assert(!IsDirectEntrypoint(kQuickThrowNoSuchMethod), "Non-direct C stub marked direct."); qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; static_assert(!IsDirectEntrypoint(kQuickThrowNullPointer), "Non-direct C stub marked direct."); qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc index 7969a8f5ca..1792f31578 100644 --- a/runtime/arch/mips/fault_handler_mips.cc +++ b/runtime/arch/mips/fault_handler_mips.cc @@ -14,7 +14,7 @@ * limitations under the License. */ - +#include "arch/mips/quick_method_frame_info_mips.h" #include "fault_handler.h" #include <sys/ucontext.h> #include "art_method-inl.h" @@ -82,12 +82,15 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + // Decrement $sp by the frame size of the kSaveEverything method and store + // the fault address in the padding right after the ArtMethod*. 
+ sc->sc_regs[mips::SP] -= mips::MipsCalleeSaveFrameSize(Runtime::kSaveEverything); + uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips::SP]) + /* ArtMethod* */ 1; + *padding = reinterpret_cast<uintptr_t>(info->si_addr); + sc->sc_regs[mips::RA] = sc->sc_pc + 4; // RA needs to point to gc map location sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal); - sc->sc_regs[mips::T9] = sc->sc_pc; // make sure T9 points to the function - // Pass the faulting address as the first argument of - // art_quick_throw_null_pointer_exception_from_signal. - sc->sc_regs[mips::A0] = reinterpret_cast<uintptr_t>(info->si_addr); + // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9. VLOG(signals) << "Generating null pointer exception"; return true; } diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 4bd1314d15..c3c188233b 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -279,6 +279,7 @@ /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveEverything). + * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING. * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31; * 28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method* * Clobbers $t0 and $t1. @@ -286,10 +287,7 @@ * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack. * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP. */ -.macro SETUP_SAVE_EVERYTHING_FRAME - addiu $sp, $sp, -256 - .cfi_adjust_cfa_offset 256 - +.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP // Ugly compile-time check, but we only have the preprocessor. #if (FRAME_SIZE_SAVE_EVERYTHING != 256) #error "FRAME_SIZE_SAVE_EVERYTHING(MIPS) size not as expected." 
@@ -388,6 +386,22 @@ .cfi_adjust_cfa_offset ARG_SLOT_SIZE .endm + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveEverything). + * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31; + * 28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method* + * Clobbers $t0 and $t1. + * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots. + * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack. + * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP. + */ +.macro SETUP_SAVE_EVERYTHING_FRAME + addiu $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING) + .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING) + SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP +.endm + .macro RESTORE_SAVE_EVERYTHING_FRAME addiu $sp, $sp, ARG_SLOT_SIZE # remove argument slots on the stack .cfi_adjust_cfa_offset -ARG_SLOT_SIZE @@ -696,8 +710,10 @@ END art_quick_deliver_exception * Called by managed code to create and deliver a NullPointerException */ .extern artThrowNullPointerExceptionFromCode -ENTRY art_quick_throw_null_pointer_exception - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_null_pointer_exception + // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK, + // even after clobbering any registers we don't need to preserve, such as $gp or $t0. + SETUP_SAVE_EVERYTHING_FRAME la $t9, artThrowNullPointerExceptionFromCode jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*) move $a0, rSELF # pass Thread::Current @@ -708,8 +724,10 @@ END art_quick_throw_null_pointer_exception * Call installed by a signal handler to create and deliver a NullPointerException. 
*/ .extern artThrowNullPointerExceptionFromSignal -ENTRY art_quick_throw_null_pointer_exception_from_signal - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING + SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP + # Retrieve the fault address from the padding where the signal handler stores it. + lw $a0, (ARG_SLOT_SIZE + __SIZEOF_POINTER__)($sp) la $t9, artThrowNullPointerExceptionFromSignal jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*) move $a1, rSELF # pass Thread::Current @@ -719,8 +737,8 @@ END art_quick_throw_null_pointer_exception_from_signal * Called by managed code to create and deliver an ArithmeticException */ .extern artThrowDivZeroFromCode -ENTRY art_quick_throw_div_zero - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_div_zero + SETUP_SAVE_EVERYTHING_FRAME la $t9, artThrowDivZeroFromCode jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*) move $a0, rSELF # pass Thread::Current @@ -730,8 +748,10 @@ END art_quick_throw_div_zero * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException */ .extern artThrowArrayBoundsFromCode -ENTRY art_quick_throw_array_bounds - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_array_bounds + // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK, + // even after clobbering any registers we don't need to preserve, such as $gp or $t0. + SETUP_SAVE_EVERYTHING_FRAME la $t9, artThrowArrayBoundsFromCode jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*) move $a2, rSELF # pass Thread::Current @@ -742,8 +762,8 @@ END art_quick_throw_array_bounds * as if thrown from a call to String.charAt(). 
*/ .extern artThrowStringBoundsFromCode -ENTRY art_quick_throw_string_bounds - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_string_bounds + SETUP_SAVE_EVERYTHING_FRAME la $t9, artThrowStringBoundsFromCode jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*) move $a2, rSELF # pass Thread::Current @@ -761,17 +781,6 @@ ENTRY art_quick_throw_stack_overflow END art_quick_throw_stack_overflow /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ - .extern artThrowNoSuchMethodFromCode -ENTRY art_quick_throw_no_such_method - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME - la $t9, artThrowNoSuchMethodFromCode - jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*) - move $a1, rSELF # pass Thread::Current -END art_quick_throw_no_such_method - - /* * All generated callsites for interface invokes and invocation slow paths will load arguments * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper. 
@@ -1118,7 +1127,7 @@ END art_quick_handle_fill_data */ .extern artLockObjectFromCode ENTRY art_quick_lock_object - beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + beqz $a0, art_quick_throw_null_pointer_exception nop SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block la $t9, artLockObjectFromCode @@ -1128,7 +1137,7 @@ ENTRY art_quick_lock_object END art_quick_lock_object ENTRY art_quick_lock_object_no_inline - beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + beqz $a0, art_quick_throw_null_pointer_exception nop SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block la $t9, artLockObjectFromCode @@ -1142,7 +1151,7 @@ END art_quick_lock_object_no_inline */ .extern artUnlockObjectFromCode ENTRY art_quick_unlock_object - beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + beqz $a0, art_quick_throw_null_pointer_exception nop SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC la $t9, artUnlockObjectFromCode @@ -1152,7 +1161,7 @@ ENTRY art_quick_unlock_object END art_quick_unlock_object ENTRY art_quick_unlock_object_no_inline - beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + beqz $a0, art_quick_throw_null_pointer_exception nop SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC la $t9, artUnlockObjectFromCode @@ -1275,7 +1284,7 @@ END art_quick_check_cast ENTRY art_quick_aput_obj_with_null_and_bound_check bnez $a0, .Lart_quick_aput_obj_with_bound_check_gp_set nop - b .Lart_quick_throw_null_pointer_exception_gp_set + b art_quick_throw_null_pointer_exception nop END art_quick_aput_obj_with_null_and_bound_check @@ -1285,7 +1294,7 @@ ENTRY art_quick_aput_obj_with_bound_check bnez $t1, .Lart_quick_aput_obj_gp_set nop move $a0, $a1 - b .Lart_quick_throw_array_bounds_gp_set + b art_quick_throw_array_bounds move $a1, $t0 END art_quick_aput_obj_with_bound_check diff --git a/runtime/arch/mips64/asm_support_mips64.S 
b/runtime/arch/mips64/asm_support_mips64.S index 786e86043e..35f20fbf44 100644 --- a/runtime/arch/mips64/asm_support_mips64.S +++ b/runtime/arch/mips64/asm_support_mips64.S @@ -27,36 +27,34 @@ #define rSELF $s1 - // Declare a function called name, sets up $gp. - // This macro modifies t8. -.macro ENTRY name + // Declare a function called name, doesn't set up $gp. +.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset .type \name, %function .global \name // Cache alignment for function entry. .balign 16 \name: .cfi_startproc + // Ensure we get a sane starting CFA. + .cfi_def_cfa $sp, \cfa_offset +.endm + + // Declare a function called name, doesn't set up $gp. +.macro ENTRY_NO_GP name + ENTRY_NO_GP_CUSTOM_CFA \name, 0 +.endm + + // Declare a function called name, sets up $gp. + // This macro modifies t8. +.macro ENTRY name + ENTRY_NO_GP \name // Set up $gp and store the previous $gp value to $t8. It will be pushed to the // stack after the frame has been constructed. .cpsetup $t9, $t8, \name - // Ensure we get a sane starting CFA. - .cfi_def_cfa $sp,0 // Declare a local convenience label to be branched to when $gp is already set up. .L\name\()_gp_set: .endm - // Declare a function called name, doesn't set up $gp. -.macro ENTRY_NO_GP name - .type \name, %function - .global \name - // Cache alignment for function entry. - .balign 16 -\name: - .cfi_startproc - // Ensure we get a sane starting CFA. - .cfi_def_cfa $sp,0 -.endm - .macro END name .cfi_endproc .size \name, .-\name diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc index 0bbb6e1b03..709cab587c 100644 --- a/runtime/arch/mips64/fault_handler_mips64.cc +++ b/runtime/arch/mips64/fault_handler_mips64.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ - +#include "arch/mips64/quick_method_frame_info_mips64.h" #include "fault_handler.h" #include <sys/ucontext.h> #include "art_method-inl.h" @@ -83,12 +83,15 @@ bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* struct ucontext *uc = reinterpret_cast<struct ucontext*>(context); struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext); + // Decrement $sp by the frame size of the kSaveEverything method and store + // the fault address in the padding right after the ArtMethod*. + sc->sc_regs[mips64::SP] -= mips64::Mips64CalleeSaveFrameSize(Runtime::kSaveEverything); + uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips64::SP]) + /* ArtMethod* */ 1; + *padding = reinterpret_cast<uintptr_t>(info->si_addr); + sc->sc_regs[mips64::RA] = sc->sc_pc + 4; // RA needs to point to gc map location sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal); - sc->sc_regs[mips64::T9] = sc->sc_pc; // make sure T9 points to the function - // Pass the faulting address as the first argument of - // art_quick_throw_null_pointer_exception_from_signal. - sc->sc_regs[mips64::A0] = reinterpret_cast<uintptr_t>(info->si_addr); + // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9. VLOG(signals) << "Generating null pointer exception"; return true; } diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index 26717ad798..8fc7bc31ce 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -316,14 +316,12 @@ /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveEverything). + * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING. 
* callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra + $s8, * $f0-$f31; 28(GPR)+ 32(FPR) + 1x8 bytes padding + method* * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP. */ -.macro SETUP_SAVE_EVERYTHING_FRAME - daddiu $sp, $sp, -496 - .cfi_adjust_cfa_offset 496 - +.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP // Ugly compile-time check, but we only have the preprocessor. #if (FRAME_SIZE_SAVE_EVERYTHING != 496) #error "FRAME_SIZE_SAVE_EVERYTHING(MIPS64) size not as expected." @@ -436,6 +434,19 @@ sd $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF) .endm + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveEverything). + * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra + $s8, + * $f0-$f31; 28(GPR)+ 32(FPR) + 1x8 bytes padding + method* + * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP. + */ +.macro SETUP_SAVE_EVERYTHING_FRAME + daddiu $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING) + .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING) + SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP +.endm + .macro RESTORE_SAVE_EVERYTHING_FRAME // Restore FP registers. l.d $f31, 264($sp) @@ -806,9 +817,10 @@ END art_quick_deliver_exception * Called by managed code to create and deliver a NullPointerException */ .extern artThrowNullPointerExceptionFromCode -ENTRY art_quick_throw_null_pointer_exception -.Lart_quick_throw_null_pointer_exception_gp_set: - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_null_pointer_exception + // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK, + // even after clobbering any registers we don't need to preserve, such as $gp or $t0. 
+ SETUP_SAVE_EVERYTHING_FRAME dla $t9, artThrowNullPointerExceptionFromCode jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*) move $a0, rSELF # pass Thread::Current @@ -818,8 +830,10 @@ END art_quick_throw_null_pointer_exception * Call installed by a signal handler to create and deliver a NullPointerException */ .extern artThrowNullPointerExceptionFromSignal -ENTRY art_quick_throw_null_pointer_exception_from_signal - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING + SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP + # Retrieve the fault address from the padding where the signal handler stores it. + ld $a0, (__SIZEOF_POINTER__)($sp) dla $t9, artThrowNullPointerExceptionFromSignal jalr $zero, $t9 # artThrowNullPointerExceptionFromSignal(uinptr_t, Thread*) move $a1, rSELF # pass Thread::Current @@ -829,8 +843,8 @@ END art_quick_throw_null_pointer_exception * Called by managed code to create and deliver an ArithmeticException */ .extern artThrowDivZeroFromCode -ENTRY art_quick_throw_div_zero - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_div_zero + SETUP_SAVE_EVERYTHING_FRAME dla $t9, artThrowDivZeroFromCode jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*) move $a0, rSELF # pass Thread::Current @@ -841,9 +855,10 @@ END art_quick_throw_div_zero * ArrayIndexOutOfBoundsException */ .extern artThrowArrayBoundsFromCode -ENTRY art_quick_throw_array_bounds -.Lart_quick_throw_array_bounds_gp_set: - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_array_bounds + // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK, + // even after clobbering any registers we don't need to preserve, such as $gp or $t0. 
+ SETUP_SAVE_EVERYTHING_FRAME dla $t9, artThrowArrayBoundsFromCode jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*) move $a2, rSELF # pass Thread::Current @@ -854,9 +869,8 @@ END art_quick_throw_array_bounds * as if thrown from a call to String.charAt(). */ .extern artThrowStringBoundsFromCode -ENTRY art_quick_throw_string_bounds -.Lart_quick_throw_string_bounds_gp_set: - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME +ENTRY_NO_GP art_quick_throw_string_bounds + SETUP_SAVE_EVERYTHING_FRAME dla $t9, artThrowStringBoundsFromCode jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*) move $a2, rSELF # pass Thread::Current @@ -874,17 +888,6 @@ ENTRY art_quick_throw_stack_overflow END art_quick_throw_stack_overflow /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ - .extern artThrowNoSuchMethodFromCode -ENTRY art_quick_throw_no_such_method - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME - dla $t9, artThrowNoSuchMethodFromCode - jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*) - move $a1, rSELF # pass Thread::Current -END art_quick_throw_no_such_method - - /* * All generated callsites for interface invokes and invocation slow paths will load arguments * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the @@ -1208,18 +1211,20 @@ END art_quick_handle_fill_data * Entry from managed code that calls artLockObjectFromCode, may block for GC. 
*/ .extern artLockObjectFromCode -ENTRY art_quick_lock_object - beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set +ENTRY_NO_GP art_quick_lock_object + beq $a0, $zero, art_quick_throw_null_pointer_exception nop + .cpsetup $t9, $t8, art_quick_lock_object SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block jal artLockObjectFromCode # (Object* obj, Thread*) move $a1, rSELF # pass Thread::Current RETURN_IF_ZERO END art_quick_lock_object -ENTRY art_quick_lock_object_no_inline - beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set +ENTRY_NO_GP art_quick_lock_object_no_inline + beq $a0, $zero, art_quick_throw_null_pointer_exception nop + .cpsetup $t9, $t8, art_quick_lock_object_no_inline SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block jal artLockObjectFromCode # (Object* obj, Thread*) move $a1, rSELF # pass Thread::Current @@ -1230,18 +1235,20 @@ END art_quick_lock_object_no_inline * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. 
*/ .extern artUnlockObjectFromCode -ENTRY art_quick_unlock_object - beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set +ENTRY_NO_GP art_quick_unlock_object + beq $a0, $zero, art_quick_throw_null_pointer_exception nop + .cpsetup $t9, $t8, art_quick_unlock_object SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC jal artUnlockObjectFromCode # (Object* obj, Thread*) move $a1, rSELF # pass Thread::Current RETURN_IF_ZERO END art_quick_unlock_object -ENTRY art_quick_unlock_object_no_inline - beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set +ENTRY_NO_GP art_quick_unlock_object_no_inline + beq $a0, $zero, art_quick_throw_null_pointer_exception nop + .cpsetup $t9, $t8, art_quick_unlock_object_no_inline SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC jal artUnlockObjectFromCode # (Object* obj, Thread*) move $a1, rSELF # pass Thread::Current @@ -1360,7 +1367,7 @@ END art_quick_check_cast ENTRY art_quick_aput_obj_with_null_and_bound_check bne $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set nop - b .Lart_quick_throw_null_pointer_exception_gp_set + b art_quick_throw_null_pointer_exception nop END art_quick_aput_obj_with_null_and_bound_check @@ -1370,7 +1377,7 @@ ENTRY art_quick_aput_obj_with_bound_check bne $t1, $zero, .Lart_quick_aput_obj_gp_set nop move $a0, $a1 - b .Lart_quick_throw_array_bounds_gp_set + b art_quick_throw_array_bounds move $a1, $t0 END art_quick_aput_obj_with_bound_check diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S index 3e47209afb..14b01c59be 100644 --- a/runtime/arch/x86/asm_support_x86.S +++ b/runtime/arch/x86/asm_support_x86.S @@ -114,7 +114,7 @@ MACRO0(ALIGN_FUNCTION_ENTRY) .balign 16 END_MACRO -MACRO1(DEFINE_FUNCTION, c_name) +MACRO2(DEFINE_FUNCTION_CUSTOM_CFA, c_name, cfa_offset) FUNCTION_TYPE(SYMBOL(\c_name)) ASM_HIDDEN CALLVAR(c_name) .globl CALLVAR(c_name) @@ -122,7 +122,11 @@ 
MACRO1(DEFINE_FUNCTION, c_name) CALLVAR(c_name): CFI_STARTPROC // Ensure we get a sane starting CFA. - CFI_DEF_CFA(esp, 4) + CFI_DEF_CFA(esp, RAW_VAR(cfa_offset)) +END_MACRO + +MACRO1(DEFINE_FUNCTION, c_name) + DEFINE_FUNCTION_CUSTOM_CFA RAW_VAR(c_name), __SIZEOF_POINTER__ END_MACRO MACRO1(END_FUNCTION, c_name) diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc index c7af249c1c..a4d6bb4444 100644 --- a/runtime/arch/x86/fault_handler_x86.cc +++ b/runtime/arch/x86/fault_handler_x86.cc @@ -325,21 +325,15 @@ bool NullPointerHandler::Action(int, siginfo_t* sig, void* context) { // next instruction (this instruction + instruction size). The return address // is on the stack at the top address of the current frame. - // Push the return address onto the stack. + // Push the return address and fault address onto the stack. uintptr_t retaddr = reinterpret_cast<uintptr_t>(pc + instr_size); - uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - sizeof(uintptr_t)); - *next_sp = retaddr; + uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - 2 * sizeof(uintptr_t)); + next_sp[1] = retaddr; + next_sp[0] = reinterpret_cast<uintptr_t>(sig->si_addr); uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp); uc->CTX_EIP = reinterpret_cast<uintptr_t>( art_quick_throw_null_pointer_exception_from_signal); - // Pass the faulting address as the first argument of - // art_quick_throw_null_pointer_exception_from_signal. 
-#if defined(__x86_64__) - uc->CTX_RDI = reinterpret_cast<uintptr_t>(sig->si_addr); -#else - uc->CTX_EAX = reinterpret_cast<uintptr_t>(sig->si_addr); -#endif VLOG(signals) << "Generating null pointer exception"; return true; } diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 646a80c37d..879d49644b 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -224,10 +224,11 @@ END_MACRO /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveEverything) + * when EDI is already saved. */ -MACRO2(SETUP_SAVE_EVERYTHING_FRAME, got_reg, temp_reg) - // Save core registers. - PUSH edi +MACRO2(SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED, got_reg, temp_reg) + // Save core registers from highest to lowest to agree with core spills bitmap. + // EDI, or at least a placeholder for it, is already on the stack. PUSH esi PUSH ebp PUSH ebx @@ -264,6 +265,15 @@ MACRO2(SETUP_SAVE_EVERYTHING_FRAME, got_reg, temp_reg) #endif END_MACRO + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveEverything) + */ +MACRO2(SETUP_SAVE_EVERYTHING_FRAME, got_reg, temp_reg) + PUSH edi + SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED RAW_VAR(got_reg), RAW_VAR(temp_reg) +END_MACRO + MACRO0(RESTORE_SAVE_EVERYTHING_FRAME) // Restore FPRs. Method and padding is still on the stack. 
movsd 16(%esp), %xmm0 @@ -317,10 +327,22 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) END_FUNCTION VAR(c_name) END_MACRO +MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name) + SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save all registers as basis for long jump context + // Outgoing argument set up + subl MACRO_LITERAL(12), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(12) + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + call CALLVAR(cxx_name) // cxx_name(Thread*) + UNREACHABLE + END_FUNCTION VAR(c_name) +END_MACRO + MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) DEFINE_FUNCTION VAR(c_name) SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context - mov %esp, %ecx // Outgoing argument set up subl MACRO_LITERAL(8), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(8) @@ -332,9 +354,9 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) END_FUNCTION VAR(c_name) END_MACRO -MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) +MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name) DEFINE_FUNCTION VAR(c_name) - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context + SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save all registers as basis for long jump context // Outgoing argument set up PUSH eax // alignment padding pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() @@ -349,17 +371,33 @@ END_MACRO /* * Called by managed code to create and deliver a NullPointerException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode /* * Call installed by a signal handler to create and deliver a NullPointerException. 
*/ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal +DEFINE_FUNCTION_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, 2 * __SIZEOF_POINTER__ + // Fault address and return address were saved by the fault handler. + // Save all registers as basis for long jump context; EDI will replace fault address later. + SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED ebx, ebx + // Retrieve fault address and save EDI. + movl (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)(%esp), %eax + movl %edi, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)(%esp) + CFI_REL_OFFSET(%edi, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)) + // Outgoing argument set up + subl MACRO_LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + PUSH eax // pass arg1 + call SYMBOL(artThrowNullPointerExceptionFromSignal) // (addr, self) + UNREACHABLE +END_FUNCTION art_quick_throw_null_pointer_exception /* * Called by managed code to create and deliver an ArithmeticException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode /* * Called by managed code to create and deliver a StackOverflowError. @@ -373,21 +411,16 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFr ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode /* - * Called by managed code to create and deliver a NoSuchMethodError. - */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode - - /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds * index, arg2 holds limit. 
*/ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode /* * Called by managed code to create and deliver a StringIndexOutOfBoundsException * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode /* * All generated callsites for interface invokes and invocation slow paths will load arguments diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S index 0728f99763..af4a6c4f99 100644 --- a/runtime/arch/x86_64/asm_support_x86_64.S +++ b/runtime/arch/x86_64/asm_support_x86_64.S @@ -110,7 +110,7 @@ END_MACRO // TODO: we might need to use SYMBOL() here to add the underscore prefix // for mac builds. -MACRO1(DEFINE_FUNCTION, c_name) +MACRO2(DEFINE_FUNCTION_CUSTOM_CFA, c_name, cfa_offset) FUNCTION_TYPE(SYMBOL(\c_name)) ASM_HIDDEN CALLVAR(c_name) .globl CALLVAR(c_name) @@ -118,7 +118,11 @@ MACRO1(DEFINE_FUNCTION, c_name) CALLVAR(c_name): CFI_STARTPROC // Ensure we get a sane starting CFA. - CFI_DEF_CFA(rsp, 8) + CFI_DEF_CFA(rsp, RAW_VAR(cfa_offset)) +END_MACRO + +MACRO1(DEFINE_FUNCTION, c_name) + DEFINE_FUNCTION_CUSTOM_CFA RAW_VAR(c_name), __SIZEOF_POINTER__ END_MACRO MACRO1(END_FUNCTION, c_name) diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 5ea58af346..a11e4021b4 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -263,14 +263,15 @@ END_MACRO /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveEverything) + * when R15 is already saved. 
*/ -MACRO0(SETUP_SAVE_EVERYTHING_FRAME) +MACRO0(SETUP_SAVE_EVERYTHING_FRAME_R15_SAVED) #if defined(__APPLE__) int3 int3 #else // Save core registers from highest to lowest to agree with core spills bitmap. - PUSH r15 + // R15, or at least a placeholder for it, is already on the stack. PUSH r14 PUSH r13 PUSH r12 @@ -322,6 +323,15 @@ MACRO0(SETUP_SAVE_EVERYTHING_FRAME) #endif // __APPLE__ END_MACRO + /* + * Macro that sets up the callee save frame to conform with + * Runtime::CreateCalleeSaveMethod(kSaveEverything) + */ +MACRO0(SETUP_SAVE_EVERYTHING_FRAME) + PUSH r15 + SETUP_SAVE_EVERYTHING_FRAME_R15_SAVED +END_MACRO + MACRO0(RESTORE_SAVE_EVERYTHING_FRAME) // Restore FPRs. Method and padding is still on the stack. movq 16(%rsp), %xmm0 @@ -385,6 +395,16 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) END_FUNCTION VAR(c_name) END_MACRO +MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name) + DEFINE_FUNCTION VAR(c_name) + SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context + // Outgoing argument set up + movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current() + call CALLVAR(cxx_name) // cxx_name(Thread*) + UNREACHABLE + END_FUNCTION VAR(c_name) +END_MACRO + MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) DEFINE_FUNCTION VAR(c_name) SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context @@ -395,9 +415,9 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) END_FUNCTION VAR(c_name) END_MACRO -MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name) +MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name) DEFINE_FUNCTION VAR(c_name) - SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context + SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() call CALLVAR(cxx_name) // cxx_name(Thread*) @@ -408,17 +428,29 @@ END_MACRO /* * Called by 
managed code to create and deliver a NullPointerException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode /* * Call installed by a signal handler to create and deliver a NullPointerException. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_signal, artThrowNullPointerExceptionFromSignal +DEFINE_FUNCTION_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, 2 * __SIZEOF_POINTER__ + // Fault address and return address were saved by the fault handler. + // Save all registers as basis for long jump context; R15 will replace fault address later. + SETUP_SAVE_EVERYTHING_FRAME_R15_SAVED + // Retrieve fault address and save R15. + movq (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)(%rsp), %rdi + movq %r15, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)(%rsp) + CFI_REL_OFFSET(%r15, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)) + // Outgoing argument set up; RDI already contains the fault address. + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call SYMBOL(artThrowNullPointerExceptionFromSignal) // (addr, self) + UNREACHABLE +END_FUNCTION art_quick_throw_null_pointer_exception_from_signal /* * Called by managed code to create and deliver an ArithmeticException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode +NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode /* * Called by managed code to create and deliver a StackOverflowError. @@ -432,21 +464,16 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFr ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode /* - * Called by managed code to create and deliver a NoSuchMethodError. 
- */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode - - /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds * index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode /* * Called by managed code to create and deliver a StringIndexOutOfBoundsException * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode /* * All generated callsites for interface invokes and invocation slow paths will load arguments diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index 1659f3333b..9b4b38aa0f 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -24,6 +24,7 @@ #include "class_linker-inl.h" #include "common_throws.h" #include "dex_file.h" +#include "dex_file_annotations.h" #include "dex_file-inl.h" #include "gc_root-inl.h" #include "jit/profiling_info.h" @@ -347,7 +348,7 @@ inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) { if (dex_pc == DexFile::kDexNoIndex) { return IsNative() ? 
-2 : -1; } - return GetDexFile()->GetLineNumFromPC(this, dex_pc); + return annotations::GetLineNumFromPC(GetDexFile(), this, dex_pc); } inline const DexFile::ProtoId& ArtMethod::GetPrototype() { diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 1392399bde..193bea167f 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -25,6 +25,7 @@ #include "class_linker-inl.h" #include "debugger.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "dex_instruction.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/accounting/card_table-inl.h" @@ -33,8 +34,8 @@ #include "jit/jit_code_cache.h" #include "jit/profiling_info.h" #include "jni_internal.h" -#include "mirror/abstract_method.h" #include "mirror/class-inl.h" +#include "mirror/executable.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "mirror/string.h" @@ -51,9 +52,9 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Th ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) { - auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(jlr_method); - DCHECK(abstract_method != nullptr); - return abstract_method->GetArtMethod(); + auto* executable = soa.Decode<mirror::Executable*>(jlr_method); + DCHECK(executable != nullptr); + return executable->GetArtMethod(); } mirror::String* ArtMethod::GetNameAsString(Thread* self) { @@ -107,7 +108,7 @@ size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) { } bool ArtMethod::HasSameNameAndSignature(ArtMethod* other) { - ScopedAssertNoThreadSuspension ants(Thread::Current(), "HasSameNameAndSignature"); + ScopedAssertNoThreadSuspension ants("HasSameNameAndSignature"); const DexFile* dex_file = GetDexFile(); const DexFile::MethodId& mid = dex_file->GetMethodId(GetDexMethodIndex()); if (GetDexCache() == other->GetDexCache()) { @@ -349,8 +350,6 @@ bool ArtMethod::IsAnnotatedWith(jclass klass, uint32_t 
visibility) { ScopedObjectAccess soa(self); StackHandleScope<1> shs(self); - const DexFile& dex_file = GetDeclaringClass()->GetDexFile(); - mirror::Class* annotation = soa.Decode<mirror::Class*>(klass); DCHECK(annotation->IsAnnotation()); Handle<mirror::Class> annotation_handle(shs.NewHandle(annotation)); @@ -358,7 +357,7 @@ bool ArtMethod::IsAnnotatedWith(jclass klass, uint32_t visibility) { // Note: Resolves any method annotations' classes as a side-effect. // -- This seems allowed by the spec since it says we can preload any classes // referenced by another classes's constant pool table. - return dex_file.IsMethodAnnotationPresent(this, annotation_handle, visibility); + return annotations::IsMethodAnnotationPresent(this, annotation_handle, visibility); } bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params) { diff --git a/runtime/art_method.h b/runtime/art_method.h index 8051a1fc7e..b1baccded9 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -33,6 +33,7 @@ namespace art { +template<class T> class Handle; union JValue; class OatQuickMethodHeader; class ProfilingInfo; diff --git a/runtime/asm_support.h b/runtime/asm_support.h index f4addf72a0..b8f72725c6 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -89,20 +89,20 @@ ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET, ADD_TEST_EQ(THREAD_SELF_OFFSET, art::Thread::SelfOffset<POINTER_SIZE>().Int32Value()) -// Offset of field Thread::tlsPtr_.thread_local_objects. -#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 199 * __SIZEOF_POINTER__) -ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET, - art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value()) // Offset of field Thread::tlsPtr_.thread_local_pos. 
-#define THREAD_LOCAL_POS_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__) +#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 198 * __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET, art::Thread::ThreadLocalPosOffset<POINTER_SIZE>().Int32Value()) // Offset of field Thread::tlsPtr_.thread_local_end. #define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__) ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET, art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value()) +// Offset of field Thread::tlsPtr_.thread_local_objects. +#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__) +ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET, + art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value()) // Offset of field Thread::tlsPtr_.mterp_current_ibase. -#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__) +#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__) ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET, art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value()) // Offset of field Thread::tlsPtr_.mterp_default_ibase. diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h index e48eca9a2d..8d1c982f3d 100644 --- a/runtime/base/allocator.h +++ b/runtime/base/allocator.h @@ -116,7 +116,8 @@ class TrackingAllocatorImpl : public std::allocator<T> { // Used internally by STL data structures. template <class U> - TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc ATTRIBUTE_UNUSED) noexcept {} + TrackingAllocatorImpl( // NOLINT, implicit + const TrackingAllocatorImpl<U, kTag>& alloc ATTRIBUTE_UNUSED) noexcept {} // Used internally by STL data structures. 
TrackingAllocatorImpl() noexcept { diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h index 3fad96b39b..31dbb36821 100644 --- a/runtime/base/arena_allocator.h +++ b/runtime/base/arena_allocator.h @@ -258,9 +258,9 @@ class MemMapArena FINAL : public Arena { class ArenaPool { public: - ArenaPool(bool use_malloc = true, - bool low_4gb = false, - const char* name = "LinearAlloc"); + explicit ArenaPool(bool use_malloc = true, + bool low_4gb = false, + const char* name = "LinearAlloc"); ~ArenaPool(); Arena* AllocArena(size_t size) REQUIRES(!lock_); void FreeArenaChain(Arena* first) REQUIRES(!lock_); diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h index 68cacd57e6..2c8aa2880a 100644 --- a/runtime/base/arena_containers.h +++ b/runtime/base/arena_containers.h @@ -132,7 +132,7 @@ class ArenaAllocatorAdapter<void> : private ArenaAllocatorAdapterKind { arena_allocator_(arena_allocator) { } template <typename U> - ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) + ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit : ArenaAllocatorAdapterKind(other), arena_allocator_(other.arena_allocator_) { } @@ -168,7 +168,7 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind { arena_allocator_(arena_allocator) { } template <typename U> - ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) + ArenaAllocatorAdapter(const ArenaAllocatorAdapter<U>& other) // NOLINT, implicit : ArenaAllocatorAdapterKind(other), arena_allocator_(other.arena_allocator_) { } diff --git a/compiler/utils/array_ref.h b/runtime/base/array_ref.h index 8dc9ab4a5e..00b9bad6bf 100644 --- a/compiler/utils/array_ref.h +++ b/runtime/base/array_ref.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef ART_COMPILER_UTILS_ARRAY_REF_H_ -#define ART_COMPILER_UTILS_ARRAY_REF_H_ +#ifndef ART_RUNTIME_BASE_ARRAY_REF_H_ +#define ART_RUNTIME_BASE_ARRAY_REF_H_ #include <type_traits> #include <vector> @@ -197,4 +197,4 @@ bool operator!=(const ArrayRef<T>& lhs, const ArrayRef<T>& rhs) { } // namespace art -#endif // ART_COMPILER_UTILS_ARRAY_REF_H_ +#endif // ART_RUNTIME_BASE_ARRAY_REF_H_ diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index fec918b681..43c38c4363 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -48,6 +48,7 @@ Mutex* Locks::mem_maps_lock_ = nullptr; Mutex* Locks::modify_ldt_lock_ = nullptr; MutatorMutex* Locks::mutator_lock_ = nullptr; Mutex* Locks::profiler_lock_ = nullptr; +Mutex* Locks::verifier_deps_lock_ = nullptr; ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr; Mutex* Locks::host_dlopen_handles_lock_ = nullptr; Mutex* Locks::reference_processor_lock_ = nullptr; @@ -947,6 +948,7 @@ void Locks::Init() { DCHECK(deoptimization_lock_ != nullptr); DCHECK(heap_bitmap_lock_ != nullptr); DCHECK(oat_file_manager_lock_ != nullptr); + DCHECK(verifier_deps_lock_ != nullptr); DCHECK(host_dlopen_handles_lock_ != nullptr); DCHECK(intern_table_lock_ != nullptr); DCHECK(jni_libraries_lock_ != nullptr); @@ -1035,6 +1037,10 @@ void Locks::Init() { DCHECK(oat_file_manager_lock_ == nullptr); oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level); + UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock); + DCHECK(verifier_deps_lock_ == nullptr); + verifier_deps_lock_ = new Mutex("verifier deps lock", current_lock_level); + UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock); DCHECK(host_dlopen_handles_lock_ == nullptr); host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level); diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index d0dc8864b3..8af9fa5c46 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -83,6 +83,7 @@ enum LockLevel { kInternTableLock, 
kOatFileSecondaryLookupLock, kHostDlOpenHandlesLock, + kVerifierDepsLock, kOatFileManagerLock, kTracingUniqueMethodsLock, kTracingStreamingLock, @@ -650,8 +651,11 @@ class Locks { // Guards opened oat files in OatFileManager. static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_); + // Guards verifier dependency collection in VerifierDeps. + static Mutex* verifier_deps_lock_ ACQUIRED_AFTER(oat_file_manager_lock_); + // Guards dlopen_handles_ in DlOpenOatFile. - static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(oat_file_manager_lock_); + static Mutex* host_dlopen_handles_lock_ ACQUIRED_AFTER(verifier_deps_lock_); // Guards intern table. static Mutex* intern_table_lock_ ACQUIRED_AFTER(host_dlopen_handles_lock_); diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h index bd19d00544..7964705993 100644 --- a/runtime/base/scoped_arena_containers.h +++ b/runtime/base/scoped_arena_containers.h @@ -87,7 +87,7 @@ class ScopedArenaAllocatorAdapter<void> arena_stack_(arena_allocator->arena_stack_) { } template <typename U> - ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) + ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit : DebugStackReference(other), DebugStackIndirectTopRef(other), ArenaAllocatorAdapterKind(other), @@ -130,7 +130,7 @@ class ScopedArenaAllocatorAdapter arena_stack_(arena_allocator->arena_stack_) { } template <typename U> - ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) + ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other) // NOLINT, implicit : DebugStackReference(other), DebugStackIndirectTopRef(other), ArenaAllocatorAdapterKind(other), diff --git a/compiler/utils/transform_array_ref.h b/runtime/base/transform_array_ref.h index a6da34fb40..b432f86d77 100644 --- a/compiler/utils/transform_array_ref.h +++ b/runtime/base/transform_array_ref.h @@ -14,13 +14,13 @@ * limitations 
under the License. */ -#ifndef ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_ -#define ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_ +#ifndef ART_RUNTIME_BASE_TRANSFORM_ARRAY_REF_H_ +#define ART_RUNTIME_BASE_TRANSFORM_ARRAY_REF_H_ #include <type_traits> -#include "utils/array_ref.h" -#include "utils/transform_iterator.h" +#include "base/array_ref.h" +#include "base/transform_iterator.h" namespace art { @@ -72,7 +72,7 @@ class TransformArrayRef { template <typename OtherBT, typename = typename std::enable_if<std::is_same<BaseType, const OtherBT>::value>::type> - TransformArrayRef(const TransformArrayRef<OtherBT, Function>& other) + TransformArrayRef(const TransformArrayRef<OtherBT, Function>& other) // NOLINT, implicit : TransformArrayRef(other.base(), other.GetFunction()) { } // Assignment operators. @@ -193,4 +193,4 @@ TransformArrayRef<const typename Container::value_type, Function> MakeTransformA } // namespace art -#endif // ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_ +#endif // ART_RUNTIME_BASE_TRANSFORM_ARRAY_REF_H_ diff --git a/compiler/utils/transform_array_ref_test.cc b/runtime/base/transform_array_ref_test.cc index 8d71fd7179..494dbb29aa 100644 --- a/compiler/utils/transform_array_ref_test.cc +++ b/runtime/base/transform_array_ref_test.cc @@ -19,7 +19,7 @@ #include "gtest/gtest.h" -#include "utils/transform_array_ref.h" +#include "base/transform_array_ref.h" namespace art { diff --git a/compiler/utils/transform_iterator.h b/runtime/base/transform_iterator.h index 3bc9046408..f1a8a52ceb 100644 --- a/compiler/utils/transform_iterator.h +++ b/runtime/base/transform_iterator.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_ -#define ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_ +#ifndef ART_RUNTIME_BASE_TRANSFORM_ITERATOR_H_ +#define ART_RUNTIME_BASE_TRANSFORM_ITERATOR_H_ #include <iterator> #include <type_traits> @@ -62,7 +62,7 @@ class TransformIterator { : data_(base, fn) { } template <typename OtherBI> - TransformIterator(const TransformIterator<OtherBI, Function>& other) + TransformIterator(const TransformIterator<OtherBI, Function>& other) // NOLINT, implicit : data_(other.base(), other.GetFunction()) { } @@ -175,4 +175,4 @@ auto MakeTransformRange(BaseRange& range, Function f) { } // namespace art -#endif // ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_ +#endif // ART_RUNTIME_BASE_TRANSFORM_ITERATOR_H_ diff --git a/compiler/utils/transform_iterator_test.cc b/runtime/base/transform_iterator_test.cc index 57ff0a62ac..a85dda8958 100644 --- a/compiler/utils/transform_iterator_test.cc +++ b/runtime/base/transform_iterator_test.cc @@ -22,7 +22,7 @@ #include "gtest/gtest.h" -#include "utils/transform_iterator.h" +#include "base/transform_iterator.h" namespace art { diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 4d252e1be9..845e39aa85 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -1533,7 +1533,7 @@ static std::unique_ptr<const DexFile> OpenOatDexFile(const OatFile* oat_file, bool ClassLinker::OpenImageDexFiles(gc::space::ImageSpace* space, std::vector<std::unique_ptr<const DexFile>>* out_dex_files, std::string* error_msg) { - ScopedAssertNoThreadSuspension nts(Thread::Current(), __FUNCTION__); + ScopedAssertNoThreadSuspension nts(__FUNCTION__); const ImageHeader& header = space->GetImageHeader(); mirror::Object* dex_caches_object = header.GetImageRoot(ImageHeader::kDexCaches); DCHECK(dex_caches_object != nullptr); @@ -1923,7 +1923,7 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor) { ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // Not safe to have thread suspension when we 
are holding a lock. if (self != nullptr) { - ScopedAssertNoThreadSuspension nts(self, __FUNCTION__); + ScopedAssertNoThreadSuspension nts(__FUNCTION__); VisitClassesInternal(visitor); } else { VisitClassesInternal(visitor); @@ -1965,9 +1965,8 @@ class GetClassInToObjectArray : public ClassVisitor { void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) { // TODO: it may be possible to avoid secondary storage if we iterate over dex caches. The problem // is avoiding duplicates. - Thread* const self = Thread::Current(); if (!kMovingClasses) { - ScopedAssertNoThreadSuspension nts(self, __FUNCTION__); + ScopedAssertNoThreadSuspension nts(__FUNCTION__); GetClassesInToVector accumulator; VisitClasses(&accumulator); for (mirror::Class* klass : accumulator.classes_) { @@ -1976,6 +1975,7 @@ void ClassLinker::VisitClassesWithoutClassesLock(ClassVisitor* visitor) { } } } else { + Thread* const self = Thread::Current(); StackHandleScope<1> hs(self); auto classes = hs.NewHandle<mirror::ObjectArray<mirror::Class>>(nullptr); // We size the array assuming classes won't be added to the class table during the visit. 
@@ -2242,7 +2242,7 @@ typedef std::pair<const DexFile*, const DexFile::ClassDef*> ClassPathEntry; ClassPathEntry FindInClassPath(const char* descriptor, size_t hash, const std::vector<const DexFile*>& class_path) { for (const DexFile* dex_file : class_path) { - const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor, hash); + const DexFile::ClassDef* dex_class_def = OatDexFile::FindClassDef(*dex_file, descriptor, hash); if (dex_class_def != nullptr) { return ClassPathEntry(dex_file, dex_class_def); } @@ -2343,7 +2343,8 @@ bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable& for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) { const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>( long_array->GetWithoutChecks(j))); - const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash); + const DexFile::ClassDef* dex_class_def = + OatDexFile::FindClassDef(*cp_dex_file, descriptor, hash); if (dex_class_def != nullptr) { mirror::Class* klass = DefineClass(self, descriptor, @@ -3047,7 +3048,7 @@ void ClassLinker::LoadClassMembers(Thread* self, { // Note: We cannot have thread suspension until the field and method arrays are setup or else // Class::VisitFieldRoots may miss some fields or methods. - ScopedAssertNoThreadSuspension nts(self, __FUNCTION__); + ScopedAssertNoThreadSuspension nts(__FUNCTION__); // Load static fields. // We allow duplicate definitions of the same field in a class_data_item // but ignore the repeated indexes here, b/21868015. @@ -3113,7 +3114,7 @@ void ClassLinker::LoadClassMembers(Thread* self, // TODO These should really use the iterators. 
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) { ArtMethod* method = klass->GetDirectMethodUnchecked(i, image_pointer_size_); - LoadMethod(self, dex_file, it, klass, method); + LoadMethod(dex_file, it, klass, method); LinkCode(method, oat_class, class_def_method_index); uint32_t it_method_index = it.GetMemberIndex(); if (last_dex_method_index == it_method_index) { @@ -3128,7 +3129,7 @@ void ClassLinker::LoadClassMembers(Thread* self, } for (size_t i = 0; it.HasNextVirtualMethod(); i++, it.Next()) { ArtMethod* method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_); - LoadMethod(self, dex_file, it, klass, method); + LoadMethod(dex_file, it, klass, method); DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i); LinkCode(method, oat_class, class_def_method_index); class_def_method_index++; @@ -3149,8 +3150,7 @@ void ClassLinker::LoadField(const ClassDataItemIterator& it, dst->SetAccessFlags(it.GetFieldAccessFlags()); } -void ClassLinker::LoadMethod(Thread* self, - const DexFile& dex_file, +void ClassLinker::LoadMethod(const DexFile& dex_file, const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtMethod* dst) { @@ -3158,7 +3158,7 @@ void ClassLinker::LoadMethod(Thread* self, const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx); const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_); - ScopedAssertNoThreadSuspension ants(self, "LoadMethod"); + ScopedAssertNoThreadSuspension ants("LoadMethod"); dst->SetDexMethodIndex(dex_method_idx); dst->SetDeclaringClass(klass.Get()); dst->SetCodeItemOffset(it.GetMethodCodeItemOffset()); @@ -3692,7 +3692,7 @@ void ClassLinker::AddImageClassesToClassTable(std::vector<gc::space::ImageSpace* mirror::ClassLoader* class_loader) { Thread* self = Thread::Current(); WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); - ScopedAssertNoThreadSuspension ants(self, "Moving image classes to class table"); + ScopedAssertNoThreadSuspension ants("Moving image 
classes to class table"); ClassTable* const class_table = InsertClassTableForClassLoader(class_loader); @@ -3747,7 +3747,7 @@ void ClassLinker::MoveClassTableToPreZygote() { } mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) { - ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup"); + ScopedAssertNoThreadSuspension ants("Image class lookup"); std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector = GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces()); for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) { @@ -4639,8 +4639,11 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, } } - EncodedStaticFieldValueIterator value_it(dex_file, &dex_cache, &class_loader, - this, *dex_class_def); + annotations::RuntimeEncodedStaticFieldValueIterator value_it(dex_file, + &dex_cache, + &class_loader, + this, + *dex_class_def); const uint8_t* class_data = dex_file.GetClassData(*dex_class_def); ClassDataItemIterator field_it(dex_file, class_data); if (value_it.HasNext()) { @@ -6505,7 +6508,7 @@ bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class> size_t new_ifcount; { - ScopedAssertNoThreadSuspension nts(self, "Copying mirror::Class*'s for FillIfTable"); + ScopedAssertNoThreadSuspension nts("Copying mirror::Class*'s for FillIfTable"); std::vector<mirror::Class*> to_add; for (size_t i = 0; i < num_interfaces; i++) { mirror::Class* interface = have_interfaces ? 
interfaces->Get(i) : @@ -8268,7 +8271,7 @@ void ClassLinker::CleanupClassLoaders() { std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) { ScopedTrace trace(__PRETTY_FUNCTION__); ScopedObjectAccess soa(Thread::Current()); - ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__); + ScopedAssertNoThreadSuspension ants(__FUNCTION__); std::set<DexCacheResolvedClasses> ret; VLOG(class_linker) << "Collecting resolved classes"; const uint64_t start_time = NanoTime(); @@ -8342,7 +8345,7 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys( Thread* const self = Thread::Current(); std::unordered_map<std::string, const DexFile*> location_to_dex_file; ScopedObjectAccess soa(self); - ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__); + ScopedAssertNoThreadSuspension ants(__FUNCTION__); ReaderMutexLock mu(self, *DexLock()); for (const ClassLinker::DexCacheData& data : GetDexCachesData()) { if (!self->IsJWeakCleared(data.weak_root)) { diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 5e4ae03782..0a46e2ebed 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -769,8 +769,7 @@ class ClassLinker { void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtField* dst) REQUIRES_SHARED(Locks::mutator_lock_); - void LoadMethod(Thread* self, - const DexFile& dex_file, + void LoadMethod(const DexFile& dex_file, const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtMethod* dst) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 5e0ee6fe23..65ce600027 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -28,7 +28,6 @@ #include "experimental_flags.h" #include "entrypoints/entrypoint_utils-inl.h" #include "gc/heap.h" -#include "mirror/abstract_method.h" #include "mirror/accessible_object.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" 
@@ -697,24 +696,18 @@ struct FieldOffsets : public CheckOffsets<mirror::Field> { struct ExecutableOffsets : public CheckOffsets<mirror::Executable> { ExecutableOffsets() : CheckOffsets<mirror::Executable>( false, "Ljava/lang/reflect/Executable;") { + addOffset(OFFSETOF_MEMBER(mirror::Executable, access_flags_), "accessFlags"); + addOffset(OFFSETOF_MEMBER(mirror::Executable, art_method_), "artMethod"); + addOffset(OFFSETOF_MEMBER(mirror::Executable, declaring_class_), "declaringClass"); + addOffset(OFFSETOF_MEMBER(mirror::Executable, declaring_class_of_overridden_method_), + "declaringClassOfOverriddenMethod"); + addOffset(OFFSETOF_MEMBER(mirror::Executable, dex_method_index_), "dexMethodIndex"); addOffset(OFFSETOF_MEMBER(mirror::Executable, has_real_parameter_data_), "hasRealParameterData"); addOffset(OFFSETOF_MEMBER(mirror::Executable, parameters_), "parameters"); }; }; -struct AbstractMethodOffsets : public CheckOffsets<mirror::AbstractMethod> { - AbstractMethodOffsets() : CheckOffsets<mirror::AbstractMethod>( - false, "Ljava/lang/reflect/AbstractMethod;") { - addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, access_flags_), "accessFlags"); - addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, art_method_), "artMethod"); - addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_), "declaringClass"); - addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, declaring_class_of_overridden_method_), - "declaringClassOfOverriddenMethod"); - addOffset(OFFSETOF_MEMBER(mirror::AbstractMethod, dex_method_index_), "dexMethodIndex"); - }; -}; - // C++ fields must exactly match the fields in the Java classes. If this fails, // reorder the fields in the C++ class. Managed class fields are ordered by // ClassLinker::LinkFields. 
@@ -733,7 +726,6 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) { EXPECT_TRUE(AccessibleObjectOffsets().Check()); EXPECT_TRUE(FieldOffsets().Check()); EXPECT_TRUE(ExecutableOffsets().Check()); - EXPECT_TRUE(AbstractMethodOffsets().Check()); } TEST_F(ClassLinkerTest, FindClassNonexistent) { diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index dba0a81125..72349527d1 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -409,15 +409,6 @@ void CommonRuntimeTestImpl::TearDown() { TearDownAndroidData(android_data_, true); dalvik_cache_.clear(); - // icu4c has a fixed 10-element array "gCommonICUDataArray". - // If we run > 10 tests, we fill that array and u_setCommonData fails. - // There's a function to clear the array, but it's not public... - typedef void (*IcuCleanupFn)(); - void* sym = dlsym(RTLD_DEFAULT, "u_cleanup_" U_ICU_VERSION_SHORT); - CHECK(sym != nullptr) << dlerror(); - IcuCleanupFn icu_cleanup_fn = reinterpret_cast<IcuCleanupFn>(sym); - (*icu_cleanup_fn)(); - Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption after the test } @@ -591,9 +582,9 @@ std::string CommonRuntimeTestImpl::GetCoreFileLocation(const char* suffix) { if (IsHost()) { const char* host_dir = getenv("ANDROID_HOST_OUT"); CHECK(host_dir != nullptr); - location = StringPrintf("%s/framework/core.%s", host_dir, suffix); + location = StringPrintf("%s/framework/core-npic.%s", host_dir, suffix); } else { - location = StringPrintf("/data/art-test/core.%s", suffix); + location = StringPrintf("/data/art-test/core-npic.%s", suffix); } return location; diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 77362a51be..1e4c7725b7 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -357,16 +357,6 @@ void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece ThrowException("Ljava/lang/NoSuchMethodError;", c, msg.str().c_str()); } -void 
ThrowNoSuchMethodError(uint32_t method_idx) { - ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr); - mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); - const DexFile& dex_file = *dex_cache->GetDexFile(); - std::ostringstream msg; - msg << "No method '" << PrettyMethod(method_idx, dex_file, true) << "'"; - ThrowException("Ljava/lang/NoSuchMethodError;", - method->GetDeclaringClass(), msg.str().c_str()); -} - // NullPointerException void ThrowNullPointerExceptionForFieldAccess(ArtField* field, bool is_read) { diff --git a/runtime/common_throws.h b/runtime/common_throws.h index ab25543ec6..945dc2daba 100644 --- a/runtime/common_throws.h +++ b/runtime/common_throws.h @@ -178,9 +178,6 @@ void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece const Signature& signature) REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR; -void ThrowNoSuchMethodError(uint32_t method_idx) - REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR; - // NullPointerException void ThrowNullPointerExceptionForFieldAccess(ArtField* field, diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h index ee797e0712..00dedef1e7 100644 --- a/runtime/compiler_callbacks.h +++ b/runtime/compiler_callbacks.h @@ -25,6 +25,7 @@ namespace art { namespace verifier { class MethodVerifier; +class VerifierDeps; } // namespace verifier @@ -45,6 +46,8 @@ class CompilerCallbacks { // done so. Return false if relocating in this way would be problematic. 
virtual bool IsRelocationPossible() = 0; + virtual verifier::VerifierDeps* GetVerifierDeps() const = 0; + bool IsBootImage() { return mode_ == CallbackMode::kCompileBootImage; } diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 9f3c2aa89b..6ed44fc5c4 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -28,6 +28,7 @@ #include "class_linker.h" #include "class_linker-inl.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "dex_instruction.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/accounting/card_table-inl.h" @@ -1760,22 +1761,32 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje return error; } - mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error); - if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { + Thread* self = Thread::Current(); + StackHandleScope<2> hs(self); + MutableHandle<mirror::Object> + o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error))); + if ((!is_static && o.Get() == nullptr) || error != JDWP::ERR_NONE) { return JDWP::ERR_INVALID_OBJECT; } ArtField* f = FromFieldId(field_id); mirror::Class* receiver_class = c; - if (receiver_class == nullptr && o != nullptr) { + if (receiver_class == nullptr && o.Get() != nullptr) { receiver_class = o->GetClass(); } + // TODO: should we give up now if receiver_class is null? if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) { LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class); return JDWP::ERR_INVALID_FIELDID; } + // Ensure the field's class is initialized. 
+ Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass())); + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) { + LOG(WARNING) << "Not able to initialize class for SetValues: " << PrettyClass(klass.Get()); + } + // The RI only enforces the static/non-static mismatch in one direction. // TODO: should we change the tests and check both? if (is_static) { @@ -1789,10 +1800,10 @@ static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::Obje } } if (f->IsStatic()) { - o = f->GetDeclaringClass(); + o.Assign(f->GetDeclaringClass()); } - JValue field_value(GetArtFieldValue(f, o)); + JValue field_value(GetArtFieldValue(f, o.Get())); JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); Dbg::OutputJValue(tag, &field_value, pReply); return JDWP::ERR_NONE; @@ -1882,12 +1893,21 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId uint64_t value, int width, bool is_static) REQUIRES_SHARED(Locks::mutator_lock_) { JDWP::JdwpError error; - mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error); - if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) { + Thread* self = Thread::Current(); + StackHandleScope<2> hs(self); + MutableHandle<mirror::Object> + o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error))); + if ((!is_static && o.Get() == nullptr) || error != JDWP::ERR_NONE) { return JDWP::ERR_INVALID_OBJECT; } ArtField* f = FromFieldId(field_id); + // Ensure the field's class is initialized. + Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass())); + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) { + LOG(WARNING) << "Not able to initialize class for SetValues: " << PrettyClass(klass.Get()); + } + // The RI only enforces the static/non-static mismatch in one direction. // TODO: should we change the tests and check both? 
if (is_static) { @@ -1901,9 +1921,9 @@ static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId } } if (f->IsStatic()) { - o = f->GetDeclaringClass(); + o.Assign(f->GetDeclaringClass()); } - return SetArtFieldValue(f, o, value, width); + return SetArtFieldValue(f, o.Get(), value, width); } JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, @@ -1986,7 +2006,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p if (error != JDWP::ERR_NONE) { return JDWP::ERR_INVALID_OBJECT; } - ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup"); + ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroup"); // Okay, so it's an object, but is it actually a thread? DecodeThread(soa, thread_id, &error); if (error == JDWP::ERR_THREAD_NOT_ALIVE) { @@ -2036,7 +2056,7 @@ JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::Ex if (error != JDWP::ERR_NONE) { return error; } - ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName"); + ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName"); ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); CHECK(f != nullptr); mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group)); @@ -2055,7 +2075,7 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP:: } mirror::Object* parent; { - ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent"); + ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent"); ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent); CHECK(f != nullptr); parent = f->GetObject(thread_group); @@ -3694,8 +3714,8 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache(); method = m; if (dex_cache != nullptr) { - const 
DexFile& dex_file = *dex_cache->GetDexFile(); - line_number = dex_file.GetLineNumFromPC(m, GetDexPc()); + const DexFile* dex_file = dex_cache->GetDexFile(); + line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc()); } } } diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 76cd348d8e..03223b0d3c 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -27,8 +27,6 @@ #include <memory> #include <sstream> -#include "art_field-inl.h" -#include "art_method-inl.h" #include "base/enums.h" #include "base/file_magic.h" #include "base/hash_map.h" @@ -37,18 +35,13 @@ #include "base/stringprintf.h" #include "base/systrace.h" #include "base/unix_file/fd_file.h" -#include "class_linker-inl.h" #include "dex_file-inl.h" #include "dex_file_verifier.h" #include "globals.h" -#include "handle_scope-inl.h" #include "jvalue.h" #include "leb128.h" -#include "mirror/field.h" -#include "mirror/method.h" -#include "mirror/string.h" +#include "oat_file.h" #include "os.h" -#include "reflection.h" #include "safe_map.h" #include "thread.h" #include "type_lookup_table.h" @@ -510,16 +503,6 @@ DexFile::DexFile(const uint8_t* base, size_t size, oat_dex_file_(oat_dex_file) { CHECK(begin_ != nullptr) << GetLocation(); CHECK_GT(size_, 0U) << GetLocation(); - const uint8_t* lookup_data = (oat_dex_file != nullptr) - ? 
oat_dex_file->GetLookupTableData() - : nullptr; - if (lookup_data != nullptr) { - if (lookup_data + TypeLookupTable::RawDataLength(*this) > oat_dex_file->GetOatFile()->End()) { - LOG(WARNING) << "found truncated lookup table in " << GetLocation(); - } else { - lookup_table_.reset(TypeLookupTable::Open(lookup_data, *this)); - } - } } DexFile::~DexFile() { @@ -579,33 +562,12 @@ uint32_t DexFile::Header::GetVersion() const { return atoi(version); } -const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor, size_t hash) const { - DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash); - if (LIKELY(lookup_table_ != nullptr)) { - const uint32_t class_def_idx = lookup_table_->Lookup(descriptor, hash); - return (class_def_idx != DexFile::kDexNoIndex) ? &GetClassDef(class_def_idx) : nullptr; - } - +const DexFile::ClassDef* DexFile::FindClassDef(uint16_t type_idx) const { + size_t num_class_defs = NumClassDefs(); // Fast path for rare no class defs case. - const uint32_t num_class_defs = NumClassDefs(); if (num_class_defs == 0) { return nullptr; } - const TypeId* type_id = FindTypeId(descriptor); - if (type_id != nullptr) { - uint16_t type_idx = GetIndexForTypeId(*type_id); - for (size_t i = 0; i < num_class_defs; ++i) { - const ClassDef& class_def = GetClassDef(i); - if (class_def.class_idx_ == type_idx) { - return &class_def; - } - } - } - return nullptr; -} - -const DexFile::ClassDef* DexFile::FindClassDef(uint16_t type_idx) const { - size_t num_class_defs = NumClassDefs(); for (size_t i = 0; i < num_class_defs; ++i) { const ClassDef& class_def = GetClassDef(i); if (class_def.class_idx_ == type_idx) { @@ -796,10 +758,6 @@ const DexFile::ProtoId* DexFile::FindProtoId(uint16_t return_type_idx, return nullptr; } -void DexFile::CreateTypeLookupTable(uint8_t* storage) const { - lookup_table_.reset(TypeLookupTable::Create(*this, storage)); -} - // Given a signature place the type ids into the given vector bool DexFile::CreateTypeList(const StringPiece& signature, 
uint16_t* return_type_idx, std::vector<uint16_t>* param_type_idxs) const { @@ -864,22 +822,6 @@ const Signature DexFile::CreateSignature(const StringPiece& signature) const { return Signature(this, *proto_id); } -int32_t DexFile::GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const { - // For native method, lineno should be -2 to indicate it is native. Note that - // "line number == -2" is how libcore tells from StackTraceElement. - if (method->GetCodeItemOffset() == 0) { - return -2; - } - - const CodeItem* code_item = GetCodeItem(method->GetCodeItemOffset()); - DCHECK(code_item != nullptr) << PrettyMethod(method) << " " << GetLocation(); - - // A method with no line number info should return -1 - LineNumFromPcContext context(rel_pc, -1); - DecodeDebugPositionInfo(code_item, LineNumForPcCb, &context); - return context.line_num_; -} - int32_t DexFile::FindTryItem(const CodeItem &code_item, uint32_t address) { // Note: Signed type is important for max and min. int32_t min = 0; @@ -1186,7 +1128,7 @@ std::string DexFile::GetDexCanonicalLocation(const char* dex_location) { } // Read a signed integer. "zwidth" is the zero-based byte count. -static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth) { +int32_t DexFile::ReadSignedInt(const uint8_t* ptr, int zwidth) { int32_t val = 0; for (int i = zwidth; i >= 0; --i) { val = ((uint32_t)val >> 8) | (((int32_t)*ptr++) << 24); @@ -1197,7 +1139,7 @@ static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth) { // Read an unsigned integer. "zwidth" is the zero-based byte count, // "fill_on_right" indicates which side we want to zero-fill from. 
-static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right) { +uint32_t DexFile::ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right) { uint32_t val = 0; for (int i = zwidth; i >= 0; --i) { val = (val >> 8) | (((uint32_t)*ptr++) << 24); @@ -1209,7 +1151,7 @@ static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_rig } // Read a signed long. "zwidth" is the zero-based byte count. -static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth) { +int64_t DexFile::ReadSignedLong(const uint8_t* ptr, int zwidth) { int64_t val = 0; for (int i = zwidth; i >= 0; --i) { val = ((uint64_t)val >> 8) | (((int64_t)*ptr++) << 56); @@ -1220,7 +1162,7 @@ static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth) { // Read an unsigned long. "zwidth" is the zero-based byte count, // "fill_on_right" indicates which side we want to zero-fill from. -static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right) { +uint64_t DexFile::ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right) { uint64_t val = 0; for (int i = zwidth; i >= 0; --i) { val = (val >> 8) | (((uint64_t)*ptr++) << 56); @@ -1233,1150 +1175,6 @@ static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_ri // Checks that visibility is as expected. Includes special behavior for M and // before to allow runtime and build visibility when expecting runtime. 
-static bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) { - if (expected == DexFile::kDexVisibilityRuntime) { - int32_t sdk_version = Runtime::Current()->GetTargetSdkVersion(); - if (sdk_version > 0 && sdk_version <= 23) { - return actual == DexFile::kDexVisibilityRuntime || actual == DexFile::kDexVisibilityBuild; - } - } - return actual == expected; -} - -const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForField(ArtField* field) const { - mirror::Class* klass = field->GetDeclaringClass(); - const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef()); - if (annotations_dir == nullptr) { - return nullptr; - } - const FieldAnnotationsItem* field_annotations = GetFieldAnnotations(annotations_dir); - if (field_annotations == nullptr) { - return nullptr; - } - uint32_t field_index = field->GetDexFieldIndex(); - uint32_t field_count = annotations_dir->fields_size_; - for (uint32_t i = 0; i < field_count; ++i) { - if (field_annotations[i].field_idx_ == field_index) { - return GetFieldAnnotationSetItem(field_annotations[i]); - } - } - return nullptr; -} - -mirror::Object* DexFile::GetAnnotationForField(ArtField* field, - Handle<mirror::Class> annotation_class) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); - if (annotation_set == nullptr) { - return nullptr; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); - return GetAnnotationObjectFromAnnotationSet( - field_class, annotation_set, kDexVisibilityRuntime, annotation_class); -} - -mirror::ObjectArray<mirror::Object>* DexFile::GetAnnotationsForField(ArtField* field) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); - return ProcessAnnotationSet(field_class, annotation_set, 
kDexVisibilityRuntime); -} - -mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForField(ArtField* field) - const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); - if (annotation_set == nullptr) { - return nullptr; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); - return GetSignatureValue(field_class, annotation_set); -} - -bool DexFile::IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) - const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); - if (annotation_set == nullptr) { - return false; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); - const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet( - field_class, annotation_set, kDexVisibilityRuntime, annotation_class); - return annotation_item != nullptr; -} - -const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForMethod(ArtMethod* method) const { - mirror::Class* klass = method->GetDeclaringClass(); - const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef()); - if (annotations_dir == nullptr) { - return nullptr; - } - const MethodAnnotationsItem* method_annotations = GetMethodAnnotations(annotations_dir); - if (method_annotations == nullptr) { - return nullptr; - } - uint32_t method_index = method->GetDexMethodIndex(); - uint32_t method_count = annotations_dir->methods_size_; - for (uint32_t i = 0; i < method_count; ++i) { - if (method_annotations[i].method_idx_ == method_index) { - return GetMethodAnnotationSetItem(method_annotations[i]); - } - } - return nullptr; -} - -const DexFile::ParameterAnnotationsItem* DexFile::FindAnnotationsItemForMethod(ArtMethod* method) - const { - mirror::Class* klass = method->GetDeclaringClass(); - const AnnotationsDirectoryItem* annotations_dir = 
GetAnnotationsDirectory(*klass->GetClassDef()); - if (annotations_dir == nullptr) { - return nullptr; - } - const ParameterAnnotationsItem* parameter_annotations = GetParameterAnnotations(annotations_dir); - if (parameter_annotations == nullptr) { - return nullptr; - } - uint32_t method_index = method->GetDexMethodIndex(); - uint32_t parameter_count = annotations_dir->parameters_size_; - for (uint32_t i = 0; i < parameter_count; ++i) { - if (parameter_annotations[i].method_idx_ == method_index) { - return ¶meter_annotations[i]; - } - } - return nullptr; -} - -mirror::Object* DexFile::GetAnnotationDefaultValue(ArtMethod* method) const { - mirror::Class* klass = method->GetDeclaringClass(); - const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef()); - if (annotations_dir == nullptr) { - return nullptr; - } - const AnnotationSetItem* annotation_set = GetClassAnnotationSet(annotations_dir); - if (annotation_set == nullptr) { - return nullptr; - } - const AnnotationItem* annotation_item = SearchAnnotationSet(annotation_set, - "Ldalvik/annotation/AnnotationDefault;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "value"); - if (annotation == nullptr) { - return nullptr; - } - uint8_t header_byte = *(annotation++); - if ((header_byte & kDexAnnotationValueTypeMask) != kDexAnnotationAnnotation) { - return nullptr; - } - annotation = SearchEncodedAnnotation(annotation, method->GetName()); - if (annotation == nullptr) { - return nullptr; - } - AnnotationValue annotation_value; - StackHandleScope<2> hs(Thread::Current()); - Handle<mirror::Class> h_klass(hs.NewHandle(klass)); - PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - Handle<mirror::Class> return_type(hs.NewHandle( - method->GetReturnType(true /* resolve */, pointer_size))); - if (!ProcessAnnotationValue(h_klass, 
&annotation, &annotation_value, return_type, kAllObjects)) { - return nullptr; - } - return annotation_value.value_.GetL(); -} - -mirror::Object* DexFile::GetAnnotationForMethod(ArtMethod* method, - Handle<mirror::Class> annotation_class) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); - if (annotation_set == nullptr) { - return nullptr; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetAnnotationObjectFromAnnotationSet(method_class, annotation_set, - kDexVisibilityRuntime, annotation_class); -} - -mirror::ObjectArray<mirror::Object>* DexFile::GetAnnotationsForMethod(ArtMethod* method) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return ProcessAnnotationSet(method_class, annotation_set, kDexVisibilityRuntime); -} - -mirror::ObjectArray<mirror::Class>* DexFile::GetExceptionTypesForMethod(ArtMethod* method) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); - if (annotation_set == nullptr) { - return nullptr; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetThrowsValue(method_class, annotation_set); -} - -mirror::ObjectArray<mirror::Object>* DexFile::GetParameterAnnotations(ArtMethod* method) const { - const ParameterAnnotationsItem* parameter_annotations = FindAnnotationsItemForMethod(method); - if (parameter_annotations == nullptr) { - return nullptr; - } - const AnnotationSetRefList* set_ref_list = - GetParameterAnnotationSetRefList(parameter_annotations); - if (set_ref_list == nullptr) { - return nullptr; - } - uint32_t size = set_ref_list->size_; - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> 
method_class(hs.NewHandle(method->GetDeclaringClass())); - return ProcessAnnotationSetRefList(method_class, set_ref_list, size); -} - -mirror::Object* DexFile::GetAnnotationForMethodParameter(ArtMethod* method, - uint32_t parameter_idx, - Handle<mirror::Class> annotation_class) - const { - const ParameterAnnotationsItem* parameter_annotations = FindAnnotationsItemForMethod(method); - if (parameter_annotations == nullptr) { - return nullptr; - } - const AnnotationSetRefList* set_ref_list = - GetParameterAnnotationSetRefList(parameter_annotations); - if (set_ref_list == nullptr) { - return nullptr; - } - - if (parameter_idx >= set_ref_list->size_) { - return nullptr; - } - const AnnotationSetRefItem* annotation_set_ref = &set_ref_list->list_[parameter_idx]; - const AnnotationSetItem* annotation_set = GetSetRefItemItem(annotation_set_ref); - - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetAnnotationObjectFromAnnotationSet(method_class, - annotation_set, - kDexVisibilityRuntime, - annotation_class); -} - -mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForMethod(ArtMethod* method) - const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); - if (annotation_set == nullptr) { - return nullptr; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetSignatureValue(method_class, annotation_set); -} - -bool DexFile::IsMethodAnnotationPresent(ArtMethod* method, - Handle<mirror::Class> annotation_class, - uint32_t visibility /* = kDexVisibilityRuntime */) - const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); - if (annotation_set == nullptr) { - return false; - } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - const AnnotationItem* 
annotation_item = GetAnnotationItemFromAnnotationSet(method_class, - annotation_set, - visibility, - annotation_class); - return annotation_item != nullptr; -} - -const DexFile::AnnotationSetItem* DexFile::FindAnnotationSetForClass(Handle<mirror::Class> klass) - const { - const AnnotationsDirectoryItem* annotations_dir = GetAnnotationsDirectory(*klass->GetClassDef()); - if (annotations_dir == nullptr) { - return nullptr; - } - return GetClassAnnotationSet(annotations_dir); -} - -mirror::Object* DexFile::GetAnnotationForClass(Handle<mirror::Class> klass, - Handle<mirror::Class> annotation_class) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return nullptr; - } - return GetAnnotationObjectFromAnnotationSet(klass, annotation_set, kDexVisibilityRuntime, - annotation_class); -} - -mirror::ObjectArray<mirror::Object>* DexFile::GetAnnotationsForClass(Handle<mirror::Class> klass) - const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - return ProcessAnnotationSet(klass, annotation_set, kDexVisibilityRuntime); -} - -mirror::ObjectArray<mirror::Class>* DexFile::GetDeclaredClasses(Handle<mirror::Class> klass) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return nullptr; - } - const AnnotationItem* annotation_item = SearchAnnotationSet( - annotation_set, "Ldalvik/annotation/MemberClasses;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - StackHandleScope<1> hs(Thread::Current()); - mirror::Class* class_class = mirror::Class::GetJavaLangClass(); - Handle<mirror::Class> class_array_class(hs.NewHandle( - Runtime::Current()->GetClassLinker()->FindArrayClass(hs.Self(), &class_class))); - if (class_array_class.Get() == nullptr) { - return nullptr; - } - mirror::Object* obj = GetAnnotationValue( - klass, annotation_item, "value", class_array_class, 
kDexAnnotationArray); - if (obj == nullptr) { - return nullptr; - } - return obj->AsObjectArray<mirror::Class>(); -} - -mirror::Class* DexFile::GetDeclaringClass(Handle<mirror::Class> klass) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return nullptr; - } - const AnnotationItem* annotation_item = SearchAnnotationSet( - annotation_set, "Ldalvik/annotation/EnclosingClass;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - mirror::Object* obj = GetAnnotationValue(klass, - annotation_item, - "value", - ScopedNullHandle<mirror::Class>(), - kDexAnnotationType); - if (obj == nullptr) { - return nullptr; - } - return obj->AsClass(); -} - -mirror::Class* DexFile::GetEnclosingClass(Handle<mirror::Class> klass) const { - mirror::Class* declaring_class = GetDeclaringClass(klass); - if (declaring_class != nullptr) { - return declaring_class; - } - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return nullptr; - } - const AnnotationItem* annotation_item = SearchAnnotationSet( - annotation_set, "Ldalvik/annotation/EnclosingMethod;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "value"); - if (annotation == nullptr) { - return nullptr; - } - AnnotationValue annotation_value; - if (!ProcessAnnotationValue(klass, - &annotation, - &annotation_value, - ScopedNullHandle<mirror::Class>(), - kAllRaw)) { - return nullptr; - } - if (annotation_value.type_ != kDexAnnotationMethod) { - return nullptr; - } - StackHandleScope<2> hs(Thread::Current()); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); - ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType( - 
klass->GetDexFile(), annotation_value.value_.GetI(), dex_cache, class_loader); - if (method == nullptr) { - return nullptr; - } - return method->GetDeclaringClass(); -} - -mirror::Object* DexFile::GetEnclosingMethod(Handle<mirror::Class> klass) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return nullptr; - } - const AnnotationItem* annotation_item = SearchAnnotationSet( - annotation_set, "Ldalvik/annotation/EnclosingMethod;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - return GetAnnotationValue( - klass, annotation_item, "value", ScopedNullHandle<mirror::Class>(), kDexAnnotationMethod); -} - -bool DexFile::GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return false; - } - const AnnotationItem* annotation_item = SearchAnnotationSet( - annotation_set, "Ldalvik/annotation/InnerClass;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return false; - } - const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "name"); - if (annotation == nullptr) { - return false; - } - AnnotationValue annotation_value; - if (!ProcessAnnotationValue(klass, - &annotation, - &annotation_value, - ScopedNullHandle<mirror::Class>(), - kAllObjects)) { - return false; - } - if (annotation_value.type_ != kDexAnnotationNull && - annotation_value.type_ != kDexAnnotationString) { - return false; - } - *name = down_cast<mirror::String*>(annotation_value.value_.GetL()); - return true; -} - -bool DexFile::GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return false; - } - const AnnotationItem* annotation_item = SearchAnnotationSet( - annotation_set, 
"Ldalvik/annotation/InnerClass;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return false; - } - const uint8_t* annotation = SearchEncodedAnnotation(annotation_item->annotation_, "accessFlags"); - if (annotation == nullptr) { - return false; - } - AnnotationValue annotation_value; - if (!ProcessAnnotationValue(klass, - &annotation, - &annotation_value, - ScopedNullHandle<mirror::Class>(), - kAllRaw)) { - return false; - } - if (annotation_value.type_ != kDexAnnotationInt) { - return false; - } - *flags = annotation_value.value_.GetI(); - return true; -} - -mirror::ObjectArray<mirror::String>* DexFile::GetSignatureAnnotationForClass( - Handle<mirror::Class> klass) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return nullptr; - } - return GetSignatureValue(klass, annotation_set); -} - -bool DexFile::IsClassAnnotationPresent(Handle<mirror::Class> klass, - Handle<mirror::Class> annotation_class) const { - const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - if (annotation_set == nullptr) { - return false; - } - const AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet( - klass, annotation_set, kDexVisibilityRuntime, annotation_class); - return annotation_item != nullptr; -} - -mirror::Object* DexFile::CreateAnnotationMember(Handle<mirror::Class> klass, - Handle<mirror::Class> annotation_class, const uint8_t** annotation) const { - Thread* self = Thread::Current(); - ScopedObjectAccessUnchecked soa(self); - StackHandleScope<5> hs(self); - uint32_t element_name_index = DecodeUnsignedLeb128(annotation); - const char* name = StringDataByIdx(element_name_index); - Handle<mirror::String> string_name( - hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, name))); - - PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - ArtMethod* annotation_method = - 
annotation_class->FindDeclaredVirtualMethodByName(name, pointer_size); - if (annotation_method == nullptr) { - return nullptr; - } - Handle<mirror::Class> method_return(hs.NewHandle( - annotation_method->GetReturnType(true /* resolve */, pointer_size))); - - AnnotationValue annotation_value; - if (!ProcessAnnotationValue(klass, annotation, &annotation_value, method_return, kAllObjects)) { - return nullptr; - } - Handle<mirror::Object> value_object(hs.NewHandle(annotation_value.value_.GetL())); - - mirror::Class* annotation_member_class = - WellKnownClasses::ToClass(WellKnownClasses::libcore_reflect_AnnotationMember); - Handle<mirror::Object> new_member(hs.NewHandle(annotation_member_class->AllocObject(self))); - mirror::Method* method_obj_ptr; - DCHECK(!Runtime::Current()->IsActiveTransaction()); - if (pointer_size == PointerSize::k64) { - method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k64, false>( - self, annotation_method); - } else { - method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k32, false>( - self, annotation_method); - } - Handle<mirror::Method> method_object(hs.NewHandle(method_obj_ptr)); - - if (new_member.Get() == nullptr || string_name.Get() == nullptr || - method_object.Get() == nullptr || method_return.Get() == nullptr) { - LOG(ERROR) << StringPrintf("Failed creating annotation element (m=%p n=%p a=%p r=%p", - new_member.Get(), string_name.Get(), method_object.Get(), method_return.Get()); - return nullptr; - } - - JValue result; - ArtMethod* annotation_member_init = - soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init); - uint32_t args[5] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_member.Get())), - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(string_name.Get())), - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value_object.Get())), - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method_return.Get())), - 
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method_object.Get())) - }; - annotation_member_init->Invoke(self, args, sizeof(args), &result, "VLLLL"); - if (self->IsExceptionPending()) { - LOG(INFO) << "Exception in AnnotationMember.<init>"; - return nullptr; - } - - return new_member.Get(); -} - -const DexFile::AnnotationItem* DexFile::GetAnnotationItemFromAnnotationSet( - Handle<mirror::Class> klass, const AnnotationSetItem* annotation_set, uint32_t visibility, - Handle<mirror::Class> annotation_class) const { - for (uint32_t i = 0; i < annotation_set->size_; ++i) { - const AnnotationItem* annotation_item = GetAnnotationItem(annotation_set, i); - if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) { - continue; - } - const uint8_t* annotation = annotation_item->annotation_; - uint32_t type_index = DecodeUnsignedLeb128(&annotation); - mirror::Class* resolved_class = Runtime::Current()->GetClassLinker()->ResolveType( - klass->GetDexFile(), type_index, klass.Get()); - if (resolved_class == nullptr) { - std::string temp; - LOG(WARNING) << StringPrintf("Unable to resolve %s annotation class %d", - klass->GetDescriptor(&temp), type_index); - CHECK(Thread::Current()->IsExceptionPending()); - Thread::Current()->ClearException(); - continue; - } - if (resolved_class == annotation_class.Get()) { - return annotation_item; - } - } - - return nullptr; -} - -mirror::Object* DexFile::GetAnnotationObjectFromAnnotationSet(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set, uint32_t visibility, - Handle<mirror::Class> annotation_class) const { - const AnnotationItem* annotation_item = - GetAnnotationItemFromAnnotationSet(klass, annotation_set, visibility, annotation_class); - if (annotation_item == nullptr) { - return nullptr; - } - const uint8_t* annotation = annotation_item->annotation_; - return ProcessEncodedAnnotation(klass, &annotation); -} - -mirror::Object* DexFile::GetAnnotationValue(Handle<mirror::Class> klass, - const 
AnnotationItem* annotation_item, const char* annotation_name, - Handle<mirror::Class> array_class, uint32_t expected_type) const { - const uint8_t* annotation = - SearchEncodedAnnotation(annotation_item->annotation_, annotation_name); - if (annotation == nullptr) { - return nullptr; - } - AnnotationValue annotation_value; - if (!ProcessAnnotationValue(klass, &annotation, &annotation_value, array_class, kAllObjects)) { - return nullptr; - } - if (annotation_value.type_ != expected_type) { - return nullptr; - } - return annotation_value.value_.GetL(); -} - -mirror::ObjectArray<mirror::String>* DexFile::GetSignatureValue(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set) const { - StackHandleScope<1> hs(Thread::Current()); - const AnnotationItem* annotation_item = - SearchAnnotationSet(annotation_set, "Ldalvik/annotation/Signature;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - mirror::Class* string_class = mirror::String::GetJavaLangString(); - Handle<mirror::Class> string_array_class(hs.NewHandle( - Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class))); - if (string_array_class.Get() == nullptr) { - return nullptr; - } - mirror::Object* obj = - GetAnnotationValue(klass, annotation_item, "value", string_array_class, kDexAnnotationArray); - if (obj == nullptr) { - return nullptr; - } - return obj->AsObjectArray<mirror::String>(); -} - -mirror::ObjectArray<mirror::Class>* DexFile::GetThrowsValue(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set) const { - StackHandleScope<1> hs(Thread::Current()); - const AnnotationItem* annotation_item = - SearchAnnotationSet(annotation_set, "Ldalvik/annotation/Throws;", kDexVisibilitySystem); - if (annotation_item == nullptr) { - return nullptr; - } - mirror::Class* class_class = mirror::Class::GetJavaLangClass(); - Handle<mirror::Class> class_array_class(hs.NewHandle( - 
Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class))); - if (class_array_class.Get() == nullptr) { - return nullptr; - } - mirror::Object* obj = - GetAnnotationValue(klass, annotation_item, "value", class_array_class, kDexAnnotationArray); - if (obj == nullptr) { - return nullptr; - } - return obj->AsObjectArray<mirror::Class>(); -} - -mirror::ObjectArray<mirror::Object>* DexFile::ProcessAnnotationSet(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set, uint32_t visibility) const { - Thread* self = Thread::Current(); - ScopedObjectAccessUnchecked soa(self); - StackHandleScope<2> hs(self); - Handle<mirror::Class> annotation_array_class(hs.NewHandle( - soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array))); - if (annotation_set == nullptr) { - return mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), 0); - } - - uint32_t size = annotation_set->size_; - Handle<mirror::ObjectArray<mirror::Object>> result(hs.NewHandle( - mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), size))); - if (result.Get() == nullptr) { - return nullptr; - } - - uint32_t dest_index = 0; - for (uint32_t i = 0; i < size; ++i) { - const AnnotationItem* annotation_item = GetAnnotationItem(annotation_set, i); - // Note that we do not use IsVisibilityCompatible here because older code - // was correct for this case. 
- if (annotation_item->visibility_ != visibility) { - continue; - } - const uint8_t* annotation = annotation_item->annotation_; - mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation); - if (annotation_obj != nullptr) { - result->SetWithoutChecks<false>(dest_index, annotation_obj); - ++dest_index; - } else if (self->IsExceptionPending()) { - return nullptr; - } - } - - if (dest_index == size) { - return result.Get(); - } - - mirror::ObjectArray<mirror::Object>* trimmed_result = - mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index); - if (trimmed_result == nullptr) { - return nullptr; - } - - for (uint32_t i = 0; i < dest_index; ++i) { - mirror::Object* obj = result->GetWithoutChecks(i); - trimmed_result->SetWithoutChecks<false>(i, obj); - } - - return trimmed_result; -} - -mirror::ObjectArray<mirror::Object>* DexFile::ProcessAnnotationSetRefList( - Handle<mirror::Class> klass, const AnnotationSetRefList* set_ref_list, uint32_t size) const { - Thread* self = Thread::Current(); - ScopedObjectAccessUnchecked soa(self); - StackHandleScope<1> hs(self); - mirror::Class* annotation_array_class = - soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array); - mirror::Class* annotation_array_array_class = - Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class); - if (annotation_array_array_class == nullptr) { - return nullptr; - } - Handle<mirror::ObjectArray<mirror::Object>> annotation_array_array(hs.NewHandle( - mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_array_class, size))); - if (annotation_array_array.Get() == nullptr) { - LOG(ERROR) << "Annotation set ref array allocation failed"; - return nullptr; - } - for (uint32_t index = 0; index < size; ++index) { - const AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index]; - const AnnotationSetItem* set_item = GetSetRefItemItem(set_ref_item); - mirror::Object* annotation_set 
= ProcessAnnotationSet(klass, set_item, kDexVisibilityRuntime); - if (annotation_set == nullptr) { - return nullptr; - } - annotation_array_array->SetWithoutChecks<false>(index, annotation_set); - } - return annotation_array_array.Get(); -} - -bool DexFile::ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr, - AnnotationValue* annotation_value, Handle<mirror::Class> array_class, - DexFile::AnnotationResultStyle result_style) const { - Thread* self = Thread::Current(); - mirror::Object* element_object = nullptr; - bool set_object = false; - Primitive::Type primitive_type = Primitive::kPrimVoid; - const uint8_t* annotation = *annotation_ptr; - uint8_t header_byte = *(annotation++); - uint8_t value_type = header_byte & kDexAnnotationValueTypeMask; - uint8_t value_arg = header_byte >> kDexAnnotationValueArgShift; - int32_t width = value_arg + 1; - annotation_value->type_ = value_type; - - switch (value_type) { - case kDexAnnotationByte: - annotation_value->value_.SetB(static_cast<int8_t>(ReadSignedInt(annotation, value_arg))); - primitive_type = Primitive::kPrimByte; - break; - case kDexAnnotationShort: - annotation_value->value_.SetS(static_cast<int16_t>(ReadSignedInt(annotation, value_arg))); - primitive_type = Primitive::kPrimShort; - break; - case kDexAnnotationChar: - annotation_value->value_.SetC(static_cast<uint16_t>(ReadUnsignedInt(annotation, value_arg, - false))); - primitive_type = Primitive::kPrimChar; - break; - case kDexAnnotationInt: - annotation_value->value_.SetI(ReadSignedInt(annotation, value_arg)); - primitive_type = Primitive::kPrimInt; - break; - case kDexAnnotationLong: - annotation_value->value_.SetJ(ReadSignedLong(annotation, value_arg)); - primitive_type = Primitive::kPrimLong; - break; - case kDexAnnotationFloat: - annotation_value->value_.SetI(ReadUnsignedInt(annotation, value_arg, true)); - primitive_type = Primitive::kPrimFloat; - break; - case kDexAnnotationDouble: - 
annotation_value->value_.SetJ(ReadUnsignedLong(annotation, value_arg, true)); - primitive_type = Primitive::kPrimDouble; - break; - case kDexAnnotationBoolean: - annotation_value->value_.SetZ(value_arg != 0); - primitive_type = Primitive::kPrimBoolean; - width = 0; - break; - case kDexAnnotationString: { - uint32_t index = ReadUnsignedInt(annotation, value_arg, false); - if (result_style == kAllRaw) { - annotation_value->value_.SetI(index); - } else { - StackHandleScope<1> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - element_object = Runtime::Current()->GetClassLinker()->ResolveString( - klass->GetDexFile(), index, dex_cache); - set_object = true; - if (element_object == nullptr) { - return false; - } - } - break; - } - case kDexAnnotationType: { - uint32_t index = ReadUnsignedInt(annotation, value_arg, false); - if (result_style == kAllRaw) { - annotation_value->value_.SetI(index); - } else { - element_object = Runtime::Current()->GetClassLinker()->ResolveType( - klass->GetDexFile(), index, klass.Get()); - set_object = true; - if (element_object == nullptr) { - CHECK(self->IsExceptionPending()); - if (result_style == kAllObjects) { - const char* msg = StringByTypeIdx(index); - self->ThrowNewWrappedException("Ljava/lang/TypeNotPresentException;", msg); - element_object = self->GetException(); - self->ClearException(); - } else { - return false; - } - } - } - break; - } - case kDexAnnotationMethod: { - uint32_t index = ReadUnsignedInt(annotation, value_arg, false); - if (result_style == kAllRaw) { - annotation_value->value_.SetI(index); - } else { - StackHandleScope<2> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType( - klass->GetDexFile(), index, dex_cache, class_loader); - if 
(method == nullptr) { - return false; - } - PointerSize pointer_size = class_linker->GetImagePointerSize(); - set_object = true; - DCHECK(!Runtime::Current()->IsActiveTransaction()); - if (method->IsConstructor()) { - if (pointer_size == PointerSize::k64) { - element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k64, - false>(self, method); - } else { - element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k32, - false>(self, method); - } - } else { - if (pointer_size == PointerSize::k64) { - element_object = mirror::Method::CreateFromArtMethod<PointerSize::k64, - false>(self, method); - } else { - element_object = mirror::Method::CreateFromArtMethod<PointerSize::k32, - false>(self, method); - } - } - if (element_object == nullptr) { - return false; - } - } - break; - } - case kDexAnnotationField: { - uint32_t index = ReadUnsignedInt(annotation, value_arg, false); - if (result_style == kAllRaw) { - annotation_value->value_.SetI(index); - } else { - StackHandleScope<2> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); - ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS( - klass->GetDexFile(), index, dex_cache, class_loader); - if (field == nullptr) { - return false; - } - set_object = true; - PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - if (pointer_size == PointerSize::k64) { - element_object = mirror::Field::CreateFromArtField<PointerSize::k64>(self, field, true); - } else { - element_object = mirror::Field::CreateFromArtField<PointerSize::k32>(self, field, true); - } - if (element_object == nullptr) { - return false; - } - } - break; - } - case kDexAnnotationEnum: { - uint32_t index = ReadUnsignedInt(annotation, value_arg, false); - if (result_style == kAllRaw) { - annotation_value->value_.SetI(index); - } else { - StackHandleScope<3> hs(self); - 
Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); - ArtField* enum_field = Runtime::Current()->GetClassLinker()->ResolveField( - klass->GetDexFile(), index, dex_cache, class_loader, true); - if (enum_field == nullptr) { - return false; - } else { - Handle<mirror::Class> field_class(hs.NewHandle(enum_field->GetDeclaringClass())); - Runtime::Current()->GetClassLinker()->EnsureInitialized(self, field_class, true, true); - element_object = enum_field->GetObject(field_class.Get()); - set_object = true; - } - } - break; - } - case kDexAnnotationArray: - if (result_style == kAllRaw || array_class.Get() == nullptr) { - return false; - } else { - ScopedObjectAccessUnchecked soa(self); - StackHandleScope<2> hs(self); - uint32_t size = DecodeUnsignedLeb128(&annotation); - Handle<mirror::Class> component_type(hs.NewHandle(array_class->GetComponentType())); - Handle<mirror::Array> new_array(hs.NewHandle(mirror::Array::Alloc<true>( - self, array_class.Get(), size, array_class->GetComponentSizeShift(), - Runtime::Current()->GetHeap()->GetCurrentAllocator()))); - if (new_array.Get() == nullptr) { - LOG(ERROR) << "Annotation element array allocation failed with size " << size; - return false; - } - AnnotationValue new_annotation_value; - for (uint32_t i = 0; i < size; ++i) { - if (!ProcessAnnotationValue(klass, &annotation, &new_annotation_value, component_type, - kPrimitivesOrObjects)) { - return false; - } - if (!component_type->IsPrimitive()) { - mirror::Object* obj = new_annotation_value.value_.GetL(); - new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks<false>(i, obj); - } else { - switch (new_annotation_value.type_) { - case kDexAnnotationByte: - new_array->AsByteArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetB()); - break; - case kDexAnnotationShort: - new_array->AsShortArray()->SetWithoutChecks<false>( - i, 
new_annotation_value.value_.GetS()); - break; - case kDexAnnotationChar: - new_array->AsCharArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetC()); - break; - case kDexAnnotationInt: - new_array->AsIntArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetI()); - break; - case kDexAnnotationLong: - new_array->AsLongArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetJ()); - break; - case kDexAnnotationFloat: - new_array->AsFloatArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetF()); - break; - case kDexAnnotationDouble: - new_array->AsDoubleArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetD()); - break; - case kDexAnnotationBoolean: - new_array->AsBooleanArray()->SetWithoutChecks<false>( - i, new_annotation_value.value_.GetZ()); - break; - default: - LOG(FATAL) << "Found invalid annotation value type while building annotation array"; - return false; - } - } - } - element_object = new_array.Get(); - set_object = true; - width = 0; - } - break; - case kDexAnnotationAnnotation: - if (result_style == kAllRaw) { - return false; - } - element_object = ProcessEncodedAnnotation(klass, &annotation); - if (element_object == nullptr) { - return false; - } - set_object = true; - width = 0; - break; - case kDexAnnotationNull: - if (result_style == kAllRaw) { - annotation_value->value_.SetI(0); - } else { - CHECK(element_object == nullptr); - set_object = true; - } - width = 0; - break; - default: - LOG(ERROR) << StringPrintf("Bad annotation element value type 0x%02x", value_type); - return false; - } - - annotation += width; - *annotation_ptr = annotation; - - if (result_style == kAllObjects && primitive_type != Primitive::kPrimVoid) { - element_object = BoxPrimitive(primitive_type, annotation_value->value_); - set_object = true; - } - - if (set_object) { - annotation_value->value_.SetL(element_object); - } - - return true; -} - -mirror::Object* 
DexFile::ProcessEncodedAnnotation(Handle<mirror::Class> klass, - const uint8_t** annotation) const { - uint32_t type_index = DecodeUnsignedLeb128(annotation); - uint32_t size = DecodeUnsignedLeb128(annotation); - - Thread* self = Thread::Current(); - ScopedObjectAccessUnchecked soa(self); - StackHandleScope<2> hs(self); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - Handle<mirror::Class> annotation_class(hs.NewHandle( - class_linker->ResolveType(klass->GetDexFile(), type_index, klass.Get()))); - if (annotation_class.Get() == nullptr) { - LOG(INFO) << "Unable to resolve " << PrettyClass(klass.Get()) << " annotation class " - << type_index; - DCHECK(Thread::Current()->IsExceptionPending()); - Thread::Current()->ClearException(); - return nullptr; - } - - mirror::Class* annotation_member_class = - soa.Decode<mirror::Class*>(WellKnownClasses::libcore_reflect_AnnotationMember); - mirror::Class* annotation_member_array_class = - class_linker->FindArrayClass(self, &annotation_member_class); - if (annotation_member_array_class == nullptr) { - return nullptr; - } - mirror::ObjectArray<mirror::Object>* element_array = nullptr; - if (size > 0) { - element_array = - mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size); - if (element_array == nullptr) { - LOG(ERROR) << "Failed to allocate annotation member array (" << size << " elements)"; - return nullptr; - } - } - - Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array)); - for (uint32_t i = 0; i < size; ++i) { - mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation); - if (new_member == nullptr) { - return nullptr; - } - h_element_array->SetWithoutChecks<false>(i, new_member); - } - - JValue result; - ArtMethod* create_annotation_method = - soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation); - uint32_t args[2] = { 
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(annotation_class.Get())), - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_element_array.Get())) }; - create_annotation_method->Invoke(self, args, sizeof(args), &result, "LLL"); - if (self->IsExceptionPending()) { - LOG(INFO) << "Exception in AnnotationFactory.createAnnotation"; - return nullptr; - } - - return result.GetL(); -} - -const DexFile::AnnotationItem* DexFile::SearchAnnotationSet(const AnnotationSetItem* annotation_set, - const char* descriptor, uint32_t visibility) const { - const AnnotationItem* result = nullptr; - for (uint32_t i = 0; i < annotation_set->size_; ++i) { - const AnnotationItem* annotation_item = GetAnnotationItem(annotation_set, i); - if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) { - continue; - } - const uint8_t* annotation = annotation_item->annotation_; - uint32_t type_index = DecodeUnsignedLeb128(&annotation); - - if (strcmp(descriptor, StringByTypeIdx(type_index)) == 0) { - result = annotation_item; - break; - } - } - return result; -} - -const uint8_t* DexFile::SearchEncodedAnnotation(const uint8_t* annotation, const char* name) const { - DecodeUnsignedLeb128(&annotation); // unused type_index - uint32_t size = DecodeUnsignedLeb128(&annotation); - - while (size != 0) { - uint32_t element_name_index = DecodeUnsignedLeb128(&annotation); - const char* element_name = GetStringData(GetStringId(element_name_index)); - if (strcmp(name, element_name) == 0) { - return annotation; - } - SkipAnnotationValue(&annotation); - size--; - } - return nullptr; -} - -bool DexFile::SkipAnnotationValue(const uint8_t** annotation_ptr) const { - const uint8_t* annotation = *annotation_ptr; - uint8_t header_byte = *(annotation++); - uint8_t value_type = header_byte & kDexAnnotationValueTypeMask; - uint8_t value_arg = header_byte >> kDexAnnotationValueArgShift; - int32_t width = value_arg + 1; - - switch (value_type) { - case kDexAnnotationByte: - case kDexAnnotationShort: - 
case kDexAnnotationChar: - case kDexAnnotationInt: - case kDexAnnotationLong: - case kDexAnnotationFloat: - case kDexAnnotationDouble: - case kDexAnnotationString: - case kDexAnnotationType: - case kDexAnnotationMethod: - case kDexAnnotationField: - case kDexAnnotationEnum: - break; - case kDexAnnotationArray: - { - uint32_t size = DecodeUnsignedLeb128(&annotation); - while (size--) { - if (!SkipAnnotationValue(&annotation)) { - return false; - } - } - width = 0; - break; - } - case kDexAnnotationAnnotation: - { - DecodeUnsignedLeb128(&annotation); // unused type_index - uint32_t size = DecodeUnsignedLeb128(&annotation); - while (size--) { - DecodeUnsignedLeb128(&annotation); // unused element_name_index - if (!SkipAnnotationValue(&annotation)) { - return false; - } - } - width = 0; - break; - } - case kDexAnnotationBoolean: - case kDexAnnotationNull: - width = 0; - break; - default: - LOG(FATAL) << StringPrintf("Bad annotation element value byte 0x%02x", value_type); - return false; - } - - annotation += width; - *annotation_ptr = annotation; - return true; -} - std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) { os << StringPrintf("[DexFile: %s dex-checksum=%08x location-checksum=%08x %p-%p]", dex_file.GetLocation().c_str(), @@ -2460,50 +1258,13 @@ void ClassDataItemIterator::ReadClassDataMethod() { } } -EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator( - const DexFile& dex_file, - const DexFile::ClassDef& class_def) - : EncodedStaticFieldValueIterator(dex_file, - nullptr, - nullptr, - nullptr, - class_def, - -1, - kByte) { -} - -EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator( - const DexFile& dex_file, - Handle<mirror::DexCache>* dex_cache, - Handle<mirror::ClassLoader>* class_loader, - ClassLinker* linker, - const DexFile::ClassDef& class_def) - : EncodedStaticFieldValueIterator(dex_file, - dex_cache, class_loader, - linker, - class_def, - -1, - kByte) { - DCHECK(dex_cache_ != nullptr); - DCHECK(class_loader_ 
!= nullptr); -} - -EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator( - const DexFile& dex_file, - Handle<mirror::DexCache>* dex_cache, - Handle<mirror::ClassLoader>* class_loader, - ClassLinker* linker, - const DexFile::ClassDef& class_def, - size_t pos, - ValueType type) +EncodedStaticFieldValueIterator::EncodedStaticFieldValueIterator(const DexFile& dex_file, + const DexFile::ClassDef& class_def) : dex_file_(dex_file), - dex_cache_(dex_cache), - class_loader_(class_loader), - linker_(linker), array_size_(), - pos_(pos), - type_(type) { - ptr_ = dex_file.GetEncodedStaticFieldValuesArray(class_def); + pos_(-1), + type_(kByte) { + ptr_ = dex_file_.GetEncodedStaticFieldValuesArray(class_def); if (ptr_ == nullptr) { array_size_ = 0; } else { @@ -2529,32 +1290,32 @@ void EncodedStaticFieldValueIterator::Next() { width = 0; break; case kByte: - jval_.i = ReadSignedInt(ptr_, value_arg); + jval_.i = DexFile::ReadSignedInt(ptr_, value_arg); CHECK(IsInt<8>(jval_.i)); break; case kShort: - jval_.i = ReadSignedInt(ptr_, value_arg); + jval_.i = DexFile::ReadSignedInt(ptr_, value_arg); CHECK(IsInt<16>(jval_.i)); break; case kChar: - jval_.i = ReadUnsignedInt(ptr_, value_arg, false); + jval_.i = DexFile::ReadUnsignedInt(ptr_, value_arg, false); CHECK(IsUint<16>(jval_.i)); break; case kInt: - jval_.i = ReadSignedInt(ptr_, value_arg); + jval_.i = DexFile::ReadSignedInt(ptr_, value_arg); break; case kLong: - jval_.j = ReadSignedLong(ptr_, value_arg); + jval_.j = DexFile::ReadSignedLong(ptr_, value_arg); break; case kFloat: - jval_.i = ReadUnsignedInt(ptr_, value_arg, true); + jval_.i = DexFile::ReadUnsignedInt(ptr_, value_arg, true); break; case kDouble: - jval_.j = ReadUnsignedLong(ptr_, value_arg, true); + jval_.j = DexFile::ReadUnsignedLong(ptr_, value_arg, true); break; case kString: case kType: - jval_.i = ReadUnsignedInt(ptr_, value_arg, false); + jval_.i = DexFile::ReadUnsignedInt(ptr_, value_arg, false); break; case kField: case kMethod: @@ -2574,38 +1335,6 
@@ void EncodedStaticFieldValueIterator::Next() { ptr_ += width; } -template<bool kTransactionActive> -void EncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const { - DCHECK(dex_cache_ != nullptr); - DCHECK(class_loader_ != nullptr); - switch (type_) { - case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); - break; - case kByte: field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break; - case kShort: field->SetShort<kTransactionActive>(field->GetDeclaringClass(), jval_.s); break; - case kChar: field->SetChar<kTransactionActive>(field->GetDeclaringClass(), jval_.c); break; - case kInt: field->SetInt<kTransactionActive>(field->GetDeclaringClass(), jval_.i); break; - case kLong: field->SetLong<kTransactionActive>(field->GetDeclaringClass(), jval_.j); break; - case kFloat: field->SetFloat<kTransactionActive>(field->GetDeclaringClass(), jval_.f); break; - case kDouble: field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break; - case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break; - case kString: { - mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_); - field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved); - break; - } - case kType: { - mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, *dex_cache_, - *class_loader_); - field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved); - break; - } - default: UNIMPLEMENTED(FATAL) << ": type " << type_; - } -} -template void EncodedStaticFieldValueIterator::ReadValueToField<true>(ArtField* field) const; -template void EncodedStaticFieldValueIterator::ReadValueToField<false>(ArtField* field) const; - CatchHandlerIterator::CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address) { handler_.address_ = -1; int32_t offset = -1; diff --git a/runtime/dex_file.h b/runtime/dex_file.h index 
23676bdbf7..97c2596de0 100644 --- a/runtime/dex_file.h +++ b/runtime/dex_file.h @@ -27,27 +27,14 @@ #include "globals.h" #include "invoke_type.h" #include "jni.h" -#include "mirror/object_array.h" #include "modifiers.h" #include "utf.h" namespace art { -// TODO: remove dependencies on mirror classes, primarily by moving -// EncodedStaticFieldValueIterator to its own file. -namespace mirror { - class ClassLoader; - class DexCache; -} // namespace mirror -class ArtField; -class ArtMethod; -class ClassLinker; -template <class Key, class Value, class EmptyFn, class HashFn, class Pred, class Alloc> -class HashMap; class MemMap; class OatDexFile; class Signature; -template<class T> class Handle; class StringPiece; class TypeLookupTable; class ZipArchive; @@ -402,6 +389,8 @@ class DexFile { kAllRaw }; + struct AnnotationValue; + // Returns the checksum of a file for comparison with GetLocationChecksum(). // For .dex files, this is the header checksum. // For zip files, this is the classes.dex zip entry CRC32 checksum. @@ -675,10 +664,6 @@ class DexFile { // Returns the class descriptor string of a class definition. const char* GetClassDescriptor(const ClassDef& class_def) const; - // Looks up a class definition by its class descriptor. Hash must be - // ComputeModifiedUtf8Hash(descriptor). - const ClassDef* FindClassDef(const char* descriptor, size_t hash) const; - // Looks up a class definition by its type index. 
const ClassDef* FindClassDef(uint16_t type_idx) const; @@ -934,110 +919,6 @@ class DexFile { return reinterpret_cast<const AnnotationSetItem*>(begin_ + offset); } - const AnnotationSetItem* FindAnnotationSetForField(ArtField* field) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) - const REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) const - REQUIRES_SHARED(Locks::mutator_lock_); - bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) const - REQUIRES_SHARED(Locks::mutator_lock_); - - const AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) const - REQUIRES_SHARED(Locks::mutator_lock_); - const ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) - const REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method, - uint32_t parameter_idx, - Handle<mirror::Class> annotation_class) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) const - 
REQUIRES_SHARED(Locks::mutator_lock_); - bool IsMethodAnnotationPresent(ArtMethod* method, - Handle<mirror::Class> annotation_class, - uint32_t visibility = kDexVisibilityRuntime) const - REQUIRES_SHARED(Locks::mutator_lock_); - - const AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass, - Handle<mirror::Class> annotation_class) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) const - REQUIRES_SHARED(Locks::mutator_lock_); - bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const - REQUIRES_SHARED(Locks::mutator_lock_); - bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) - const REQUIRES_SHARED(Locks::mutator_lock_); - bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) - const REQUIRES_SHARED(Locks::mutator_lock_); - - mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, - Handle<mirror::Class> annotation_class, - const uint8_t** annotation) const - REQUIRES_SHARED(Locks::mutator_lock_); - const AnnotationItem* GetAnnotationItemFromAnnotationSet(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set, - uint32_t visibility, 
- Handle<mirror::Class> annotation_class) - const REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationObjectFromAnnotationSet(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set, - uint32_t visibility, - Handle<mirror::Class> annotation_class) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass, - const AnnotationItem* annotation_item, - const char* annotation_name, - Handle<mirror::Class> array_class, - uint32_t expected_type) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set) - const REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(Handle<mirror::Class> klass, - const AnnotationSetItem* annotation_set, - uint32_t visibility) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(Handle<mirror::Class> klass, - const AnnotationSetRefList* set_ref_list, uint32_t size) const - REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, - const uint8_t** annotation) const - REQUIRES_SHARED(Locks::mutator_lock_); - const AnnotationItem* SearchAnnotationSet(const AnnotationSetItem* annotation_set, - const char* descriptor, uint32_t visibility) const - REQUIRES_SHARED(Locks::mutator_lock_); - const uint8_t* SearchEncodedAnnotation(const uint8_t* annotation, const char* name) const - REQUIRES_SHARED(Locks::mutator_lock_); - bool SkipAnnotationValue(const uint8_t** annotation_ptr) const - REQUIRES_SHARED(Locks::mutator_lock_); - // Debug info opcodes and constants enum { DBG_END_SEQUENCE = 0x00, @@ -1064,17 +945,6 @@ class 
DexFile { DISALLOW_COPY_AND_ASSIGN(LineNumFromPcContext); }; - // Determine the source file line number based on the program counter. - // "pc" is an offset, in 16-bit units, from the start of the method's code. - // - // Returns -1 if no match was found (possibly because the source files were - // compiled without "-g", so no line number information is present). - // Returns -2 for native methods (as expected in exception traces). - // - // This is used by runtime; therefore use art::Method not art::DexFile::Method. - int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const - REQUIRES_SHARED(Locks::mutator_lock_); - // Returns false if there is no debugging information or if it cannot be decoded. bool DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx, DexDebugNewLocalCb local_cb, void* context) const; @@ -1134,11 +1004,11 @@ class DexFile { return oat_dex_file_; } - TypeLookupTable* GetTypeLookupTable() const { - return lookup_table_.get(); - } - - void CreateTypeLookupTable(uint8_t* storage = nullptr) const; + // Utility methods for reading integral values from a buffer. + static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth); + static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right); + static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth); + static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right); private: // Opens a .dex file @@ -1204,13 +1074,6 @@ class DexFile { // whether the string contains the separator character. static bool IsMultiDexLocation(const char* location); - struct AnnotationValue; - - bool ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr, - AnnotationValue* annotation_value, Handle<mirror::Class> return_class, - DexFile::AnnotationResultStyle result_style) const - REQUIRES_SHARED(Locks::mutator_lock_); - // The base address of the memory mapping. 
const uint8_t* const begin_; @@ -1253,7 +1116,6 @@ class DexFile { // pointer to the OatDexFile it was loaded from. Otherwise oat_dex_file_ is // null. const OatDexFile* oat_dex_file_; - mutable std::unique_ptr<TypeLookupTable> lookup_table_; friend class DexFileVerifierTest; ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor @@ -1514,22 +1376,9 @@ class ClassDataItemIterator { class EncodedStaticFieldValueIterator { public: - // A constructor for static tools. You cannot call - // ReadValueToField() for an object created by this. EncodedStaticFieldValueIterator(const DexFile& dex_file, const DexFile::ClassDef& class_def); - // A constructor meant to be called from runtime code. - EncodedStaticFieldValueIterator(const DexFile& dex_file, - Handle<mirror::DexCache>* dex_cache, - Handle<mirror::ClassLoader>* class_loader, - ClassLinker* linker, - const DexFile::ClassDef& class_def) - REQUIRES_SHARED(Locks::mutator_lock_); - - template<bool kTransactionActive> - void ReadValueToField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_); - bool HasNext() const { return pos_ < array_size_; } void Next(); @@ -1556,27 +1405,18 @@ class EncodedStaticFieldValueIterator { ValueType GetValueType() const { return type_; } const jvalue& GetJavaValue() const { return jval_; } - private: - EncodedStaticFieldValueIterator(const DexFile& dex_file, - Handle<mirror::DexCache>* dex_cache, - Handle<mirror::ClassLoader>* class_loader, - ClassLinker* linker, - const DexFile::ClassDef& class_def, - size_t pos, - ValueType type); - + protected: static constexpr uint8_t kEncodedValueTypeMask = 0x1f; // 0b11111 static constexpr uint8_t kEncodedValueArgShift = 5; const DexFile& dex_file_; - Handle<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects. - Handle<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types. - ClassLinker* linker_; // Linker to resolve literal objects. size_t array_size_; // Size of array. 
size_t pos_; // Current position. const uint8_t* ptr_; // Pointer into encoded data array. ValueType type_; // Type of current encoded value. jvalue jval_; // Value of current encoded value. + + private: DISALLOW_IMPLICIT_CONSTRUCTORS(EncodedStaticFieldValueIterator); }; std::ostream& operator<<(std::ostream& os, const EncodedStaticFieldValueIterator::ValueType& code); diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc new file mode 100644 index 0000000000..c6c87fdf36 --- /dev/null +++ b/runtime/dex_file_annotations.cc @@ -0,0 +1,1303 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dex_file_annotations.h" + +#include <stdlib.h> + +#include "art_field-inl.h" +#include "art_method-inl.h" +#include "class_linker-inl.h" +#include "dex_file-inl.h" +#include "jvalue.h" +#include "mirror/field.h" +#include "mirror/method.h" +#include "reflection.h" +#include "thread.h" + +namespace art { + +struct DexFile::AnnotationValue { + JValue value_; + uint8_t type_; +}; + +namespace { +mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, + Handle<mirror::Class> annotation_class, + const uint8_t** annotation) + REQUIRES_SHARED(Locks::mutator_lock_); + +bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) { + if (expected == DexFile::kDexVisibilityRuntime) { + int32_t sdk_version = Runtime::Current()->GetTargetSdkVersion(); + if (sdk_version > 0 && sdk_version <= 23) { + return actual == DexFile::kDexVisibilityRuntime || actual == DexFile::kDexVisibilityBuild; + } + } + return actual == expected; +} + +const DexFile::AnnotationSetItem* FindAnnotationSetForField(ArtField* field) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile* dex_file = field->GetDexFile(); + mirror::Class* klass = field->GetDeclaringClass(); + const DexFile::AnnotationsDirectoryItem* annotations_dir = + dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + if (annotations_dir == nullptr) { + return nullptr; + } + const DexFile::FieldAnnotationsItem* field_annotations = + dex_file->GetFieldAnnotations(annotations_dir); + if (field_annotations == nullptr) { + return nullptr; + } + uint32_t field_index = field->GetDexFieldIndex(); + uint32_t field_count = annotations_dir->fields_size_; + for (uint32_t i = 0; i < field_count; ++i) { + if (field_annotations[i].field_idx_ == field_index) { + return dex_file->GetFieldAnnotationSetItem(field_annotations[i]); + } + } + return nullptr; +} + +const DexFile::AnnotationItem* SearchAnnotationSet(const DexFile& dex_file, + const DexFile::AnnotationSetItem* annotation_set, + const char* descriptor, 
+ uint32_t visibility) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile::AnnotationItem* result = nullptr; + for (uint32_t i = 0; i < annotation_set->size_; ++i) { + const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i); + if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) { + continue; + } + const uint8_t* annotation = annotation_item->annotation_; + uint32_t type_index = DecodeUnsignedLeb128(&annotation); + + if (strcmp(descriptor, dex_file.StringByTypeIdx(type_index)) == 0) { + result = annotation_item; + break; + } + } + return result; +} + +bool SkipAnnotationValue(const DexFile& dex_file, const uint8_t** annotation_ptr) + REQUIRES_SHARED(Locks::mutator_lock_) { + const uint8_t* annotation = *annotation_ptr; + uint8_t header_byte = *(annotation++); + uint8_t value_type = header_byte & DexFile::kDexAnnotationValueTypeMask; + uint8_t value_arg = header_byte >> DexFile::kDexAnnotationValueArgShift; + int32_t width = value_arg + 1; + + switch (value_type) { + case DexFile::kDexAnnotationByte: + case DexFile::kDexAnnotationShort: + case DexFile::kDexAnnotationChar: + case DexFile::kDexAnnotationInt: + case DexFile::kDexAnnotationLong: + case DexFile::kDexAnnotationFloat: + case DexFile::kDexAnnotationDouble: + case DexFile::kDexAnnotationString: + case DexFile::kDexAnnotationType: + case DexFile::kDexAnnotationMethod: + case DexFile::kDexAnnotationField: + case DexFile::kDexAnnotationEnum: + break; + case DexFile::kDexAnnotationArray: + { + uint32_t size = DecodeUnsignedLeb128(&annotation); + while (size--) { + if (!SkipAnnotationValue(dex_file, &annotation)) { + return false; + } + } + width = 0; + break; + } + case DexFile::kDexAnnotationAnnotation: + { + DecodeUnsignedLeb128(&annotation); // unused type_index + uint32_t size = DecodeUnsignedLeb128(&annotation); + while (size--) { + DecodeUnsignedLeb128(&annotation); // unused element_name_index + if (!SkipAnnotationValue(dex_file, 
&annotation)) { + return false; + } + } + width = 0; + break; + } + case DexFile::kDexAnnotationBoolean: + case DexFile::kDexAnnotationNull: + width = 0; + break; + default: + LOG(FATAL) << StringPrintf("Bad annotation element value byte 0x%02x", value_type); + return false; + } + + annotation += width; + *annotation_ptr = annotation; + return true; +} + +const uint8_t* SearchEncodedAnnotation(const DexFile& dex_file, + const uint8_t* annotation, + const char* name) + REQUIRES_SHARED(Locks::mutator_lock_) { + DecodeUnsignedLeb128(&annotation); // unused type_index + uint32_t size = DecodeUnsignedLeb128(&annotation); + + while (size != 0) { + uint32_t element_name_index = DecodeUnsignedLeb128(&annotation); + const char* element_name = dex_file.GetStringData(dex_file.GetStringId(element_name_index)); + if (strcmp(name, element_name) == 0) { + return annotation; + } + SkipAnnotationValue(dex_file, &annotation); + size--; + } + return nullptr; +} + +const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile* dex_file = method->GetDexFile(); + mirror::Class* klass = method->GetDeclaringClass(); + const DexFile::AnnotationsDirectoryItem* annotations_dir = + dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + if (annotations_dir == nullptr) { + return nullptr; + } + const DexFile::MethodAnnotationsItem* method_annotations = + dex_file->GetMethodAnnotations(annotations_dir); + if (method_annotations == nullptr) { + return nullptr; + } + uint32_t method_index = method->GetDexMethodIndex(); + uint32_t method_count = annotations_dir->methods_size_; + for (uint32_t i = 0; i < method_count; ++i) { + if (method_annotations[i].method_idx_ == method_index) { + return dex_file->GetMethodAnnotationSetItem(method_annotations[i]); + } + } + return nullptr; +} + +const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) { + const 
DexFile* dex_file = method->GetDexFile(); + mirror::Class* klass = method->GetDeclaringClass(); + const DexFile::AnnotationsDirectoryItem* annotations_dir = + dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + if (annotations_dir == nullptr) { + return nullptr; + } + const DexFile::ParameterAnnotationsItem* parameter_annotations = + dex_file->GetParameterAnnotations(annotations_dir); + if (parameter_annotations == nullptr) { + return nullptr; + } + uint32_t method_index = method->GetDexMethodIndex(); + uint32_t parameter_count = annotations_dir->parameters_size_; + for (uint32_t i = 0; i < parameter_count; ++i) { + if (parameter_annotations[i].method_idx_ == method_index) { + return &parameter_annotations[i]; + } + } + return nullptr; +} + +const DexFile::AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + const DexFile::AnnotationsDirectoryItem* annotations_dir = + dex_file.GetAnnotationsDirectory(*klass->GetClassDef()); + if (annotations_dir == nullptr) { + return nullptr; + } + return dex_file.GetClassAnnotationSet(annotations_dir); +} + +mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint8_t** annotation) + REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t type_index = DecodeUnsignedLeb128(annotation); + uint32_t size = DecodeUnsignedLeb128(annotation); + + Thread* self = Thread::Current(); + ScopedObjectAccessUnchecked soa(self); + StackHandleScope<2> hs(self); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Handle<mirror::Class> annotation_class(hs.NewHandle( + class_linker->ResolveType(klass->GetDexFile(), type_index, klass.Get()))); + if (annotation_class.Get() == nullptr) { + LOG(INFO) << "Unable to resolve " << PrettyClass(klass.Get()) << " annotation class " + << type_index; + DCHECK(Thread::Current()->IsExceptionPending()); + Thread::Current()->ClearException(); + return nullptr; + }
+ + mirror::Class* annotation_member_class = + soa.Decode<mirror::Class*>(WellKnownClasses::libcore_reflect_AnnotationMember); + mirror::Class* annotation_member_array_class = + class_linker->FindArrayClass(self, &annotation_member_class); + if (annotation_member_array_class == nullptr) { + return nullptr; + } + mirror::ObjectArray<mirror::Object>* element_array = nullptr; + if (size > 0) { + element_array = + mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size); + if (element_array == nullptr) { + LOG(ERROR) << "Failed to allocate annotation member array (" << size << " elements)"; + return nullptr; + } + } + + Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array)); + for (uint32_t i = 0; i < size; ++i) { + mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation); + if (new_member == nullptr) { + return nullptr; + } + h_element_array->SetWithoutChecks<false>(i, new_member); + } + + JValue result; + ArtMethod* create_annotation_method = + soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation); + uint32_t args[2] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(annotation_class.Get())), + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_element_array.Get())) }; + create_annotation_method->Invoke(self, args, sizeof(args), &result, "LLL"); + if (self->IsExceptionPending()) { + LOG(INFO) << "Exception in AnnotationFactory.createAnnotation"; + return nullptr; + } + + return result.GetL(); +} + +bool ProcessAnnotationValue(Handle<mirror::Class> klass, + const uint8_t** annotation_ptr, + DexFile::AnnotationValue* annotation_value, + Handle<mirror::Class> array_class, + DexFile::AnnotationResultStyle result_style) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + Thread* self = Thread::Current(); + mirror::Object* element_object = nullptr; + bool set_object = false; + Primitive::Type 
primitive_type = Primitive::kPrimVoid; + const uint8_t* annotation = *annotation_ptr; + uint8_t header_byte = *(annotation++); + uint8_t value_type = header_byte & DexFile::kDexAnnotationValueTypeMask; + uint8_t value_arg = header_byte >> DexFile::kDexAnnotationValueArgShift; + int32_t width = value_arg + 1; + annotation_value->type_ = value_type; + + switch (value_type) { + case DexFile::kDexAnnotationByte: + annotation_value->value_.SetB( + static_cast<int8_t>(DexFile::ReadSignedInt(annotation, value_arg))); + primitive_type = Primitive::kPrimByte; + break; + case DexFile::kDexAnnotationShort: + annotation_value->value_.SetS( + static_cast<int16_t>(DexFile::ReadSignedInt(annotation, value_arg))); + primitive_type = Primitive::kPrimShort; + break; + case DexFile::kDexAnnotationChar: + annotation_value->value_.SetC( + static_cast<uint16_t>(DexFile::ReadUnsignedInt(annotation, value_arg, false))); + primitive_type = Primitive::kPrimChar; + break; + case DexFile::kDexAnnotationInt: + annotation_value->value_.SetI(DexFile::ReadSignedInt(annotation, value_arg)); + primitive_type = Primitive::kPrimInt; + break; + case DexFile::kDexAnnotationLong: + annotation_value->value_.SetJ(DexFile::ReadSignedLong(annotation, value_arg)); + primitive_type = Primitive::kPrimLong; + break; + case DexFile::kDexAnnotationFloat: + annotation_value->value_.SetI(DexFile::ReadUnsignedInt(annotation, value_arg, true)); + primitive_type = Primitive::kPrimFloat; + break; + case DexFile::kDexAnnotationDouble: + annotation_value->value_.SetJ(DexFile::ReadUnsignedLong(annotation, value_arg, true)); + primitive_type = Primitive::kPrimDouble; + break; + case DexFile::kDexAnnotationBoolean: + annotation_value->value_.SetZ(value_arg != 0); + primitive_type = Primitive::kPrimBoolean; + width = 0; + break; + case DexFile::kDexAnnotationString: { + uint32_t index = DexFile::ReadUnsignedInt(annotation, value_arg, false); + if (result_style == DexFile::kAllRaw) { + annotation_value->value_.SetI(index); + 
} else { + StackHandleScope<1> hs(self); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); + element_object = Runtime::Current()->GetClassLinker()->ResolveString( + klass->GetDexFile(), index, dex_cache); + set_object = true; + if (element_object == nullptr) { + return false; + } + } + break; + } + case DexFile::kDexAnnotationType: { + uint32_t index = DexFile::ReadUnsignedInt(annotation, value_arg, false); + if (result_style == DexFile::kAllRaw) { + annotation_value->value_.SetI(index); + } else { + element_object = Runtime::Current()->GetClassLinker()->ResolveType( + klass->GetDexFile(), index, klass.Get()); + set_object = true; + if (element_object == nullptr) { + CHECK(self->IsExceptionPending()); + if (result_style == DexFile::kAllObjects) { + const char* msg = dex_file.StringByTypeIdx(index); + self->ThrowNewWrappedException("Ljava/lang/TypeNotPresentException;", msg); + element_object = self->GetException(); + self->ClearException(); + } else { + return false; + } + } + } + break; + } + case DexFile::kDexAnnotationMethod: { + uint32_t index = DexFile::ReadUnsignedInt(annotation, value_arg, false); + if (result_style == DexFile::kAllRaw) { + annotation_value->value_.SetI(index); + } else { + StackHandleScope<2> hs(self); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType( + klass->GetDexFile(), index, dex_cache, class_loader); + if (method == nullptr) { + return false; + } + PointerSize pointer_size = class_linker->GetImagePointerSize(); + set_object = true; + DCHECK(!Runtime::Current()->IsActiveTransaction()); + if (method->IsConstructor()) { + if (pointer_size == PointerSize::k64) { + element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k64, + false>(self, method); + } else { + 
element_object = mirror::Constructor::CreateFromArtMethod<PointerSize::k32, + false>(self, method); + } + } else { + if (pointer_size == PointerSize::k64) { + element_object = mirror::Method::CreateFromArtMethod<PointerSize::k64, + false>(self, method); + } else { + element_object = mirror::Method::CreateFromArtMethod<PointerSize::k32, + false>(self, method); + } + } + if (element_object == nullptr) { + return false; + } + } + break; + } + case DexFile::kDexAnnotationField: { + uint32_t index = DexFile::ReadUnsignedInt(annotation, value_arg, false); + if (result_style == DexFile::kAllRaw) { + annotation_value->value_.SetI(index); + } else { + StackHandleScope<2> hs(self); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); + ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS( + klass->GetDexFile(), index, dex_cache, class_loader); + if (field == nullptr) { + return false; + } + set_object = true; + PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + if (pointer_size == PointerSize::k64) { + element_object = mirror::Field::CreateFromArtField<PointerSize::k64>(self, field, true); + } else { + element_object = mirror::Field::CreateFromArtField<PointerSize::k32>(self, field, true); + } + if (element_object == nullptr) { + return false; + } + } + break; + } + case DexFile::kDexAnnotationEnum: { + uint32_t index = DexFile::ReadUnsignedInt(annotation, value_arg, false); + if (result_style == DexFile::kAllRaw) { + annotation_value->value_.SetI(index); + } else { + StackHandleScope<3> hs(self); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); + ArtField* enum_field = Runtime::Current()->GetClassLinker()->ResolveField( + klass->GetDexFile(), index, dex_cache, class_loader, true); + if (enum_field == nullptr) 
{ + return false; + } else { + Handle<mirror::Class> field_class(hs.NewHandle(enum_field->GetDeclaringClass())); + Runtime::Current()->GetClassLinker()->EnsureInitialized(self, field_class, true, true); + element_object = enum_field->GetObject(field_class.Get()); + set_object = true; + } + } + break; + } + case DexFile::kDexAnnotationArray: + if (result_style == DexFile::kAllRaw || array_class.Get() == nullptr) { + return false; + } else { + ScopedObjectAccessUnchecked soa(self); + StackHandleScope<2> hs(self); + uint32_t size = DecodeUnsignedLeb128(&annotation); + Handle<mirror::Class> component_type(hs.NewHandle(array_class->GetComponentType())); + Handle<mirror::Array> new_array(hs.NewHandle(mirror::Array::Alloc<true>( + self, array_class.Get(), size, array_class->GetComponentSizeShift(), + Runtime::Current()->GetHeap()->GetCurrentAllocator()))); + if (new_array.Get() == nullptr) { + LOG(ERROR) << "Annotation element array allocation failed with size " << size; + return false; + } + DexFile::AnnotationValue new_annotation_value; + for (uint32_t i = 0; i < size; ++i) { + if (!ProcessAnnotationValue(klass, &annotation, &new_annotation_value, + component_type, DexFile::kPrimitivesOrObjects)) { + return false; + } + if (!component_type->IsPrimitive()) { + mirror::Object* obj = new_annotation_value.value_.GetL(); + new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks<false>(i, obj); + } else { + switch (new_annotation_value.type_) { + case DexFile::kDexAnnotationByte: + new_array->AsByteArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetB()); + break; + case DexFile::kDexAnnotationShort: + new_array->AsShortArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetS()); + break; + case DexFile::kDexAnnotationChar: + new_array->AsCharArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetC()); + break; + case DexFile::kDexAnnotationInt: + new_array->AsIntArray()->SetWithoutChecks<false>( + i, 
new_annotation_value.value_.GetI()); + break; + case DexFile::kDexAnnotationLong: + new_array->AsLongArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetJ()); + break; + case DexFile::kDexAnnotationFloat: + new_array->AsFloatArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetF()); + break; + case DexFile::kDexAnnotationDouble: + new_array->AsDoubleArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetD()); + break; + case DexFile::kDexAnnotationBoolean: + new_array->AsBooleanArray()->SetWithoutChecks<false>( + i, new_annotation_value.value_.GetZ()); + break; + default: + LOG(FATAL) << "Found invalid annotation value type while building annotation array"; + return false; + } + } + } + element_object = new_array.Get(); + set_object = true; + width = 0; + } + break; + case DexFile::kDexAnnotationAnnotation: + if (result_style == DexFile::kAllRaw) { + return false; + } + element_object = ProcessEncodedAnnotation(klass, &annotation); + if (element_object == nullptr) { + return false; + } + set_object = true; + width = 0; + break; + case DexFile::kDexAnnotationNull: + if (result_style == DexFile::kAllRaw) { + annotation_value->value_.SetI(0); + } else { + CHECK(element_object == nullptr); + set_object = true; + } + width = 0; + break; + default: + LOG(ERROR) << StringPrintf("Bad annotation element value type 0x%02x", value_type); + return false; + } + + annotation += width; + *annotation_ptr = annotation; + + if (result_style == DexFile::kAllObjects && primitive_type != Primitive::kPrimVoid) { + element_object = BoxPrimitive(primitive_type, annotation_value->value_); + set_object = true; + } + + if (set_object) { + annotation_value->value_.SetL(element_object); + } + + return true; +} + +mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, + Handle<mirror::Class> annotation_class, + const uint8_t** annotation) { + const DexFile& dex_file = klass->GetDexFile(); + Thread* self = Thread::Current(); + 
ScopedObjectAccessUnchecked soa(self); + StackHandleScope<5> hs(self); + uint32_t element_name_index = DecodeUnsignedLeb128(annotation); + const char* name = dex_file.StringDataByIdx(element_name_index); + Handle<mirror::String> string_name( + hs.NewHandle(mirror::String::AllocFromModifiedUtf8(self, name))); + + PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + ArtMethod* annotation_method = + annotation_class->FindDeclaredVirtualMethodByName(name, pointer_size); + if (annotation_method == nullptr) { + return nullptr; + } + Handle<mirror::Class> method_return(hs.NewHandle( + annotation_method->GetReturnType(true /* resolve */, pointer_size))); + + DexFile::AnnotationValue annotation_value; + if (!ProcessAnnotationValue(klass, annotation, &annotation_value, method_return, + DexFile::kAllObjects)) { + return nullptr; + } + Handle<mirror::Object> value_object(hs.NewHandle(annotation_value.value_.GetL())); + + mirror::Class* annotation_member_class = + WellKnownClasses::ToClass(WellKnownClasses::libcore_reflect_AnnotationMember); + Handle<mirror::Object> new_member(hs.NewHandle(annotation_member_class->AllocObject(self))); + mirror::Method* method_obj_ptr; + DCHECK(!Runtime::Current()->IsActiveTransaction()); + if (pointer_size == PointerSize::k64) { + method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k64, false>( + self, annotation_method); + } else { + method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k32, false>( + self, annotation_method); + } + Handle<mirror::Method> method_object(hs.NewHandle(method_obj_ptr)); + + if (new_member.Get() == nullptr || string_name.Get() == nullptr || + method_object.Get() == nullptr || method_return.Get() == nullptr) { + LOG(ERROR) << StringPrintf("Failed creating annotation element (m=%p n=%p a=%p r=%p", + new_member.Get(), string_name.Get(), method_object.Get(), method_return.Get()); + return nullptr; + } + + JValue result; + ArtMethod* annotation_member_init = 
+ soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init); + uint32_t args[5] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_member.Get())), + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(string_name.Get())), + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value_object.Get())), + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method_return.Get())), + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(method_object.Get())) + }; + annotation_member_init->Invoke(self, args, sizeof(args), &result, "VLLLL"); + if (self->IsExceptionPending()) { + LOG(INFO) << "Exception in AnnotationMember.<init>"; + return nullptr; + } + + return new_member.Get(); +} + +const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet( + Handle<mirror::Class> klass, + const DexFile::AnnotationSetItem* annotation_set, + uint32_t visibility, + Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + for (uint32_t i = 0; i < annotation_set->size_; ++i) { + const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i); + if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) { + continue; + } + const uint8_t* annotation = annotation_item->annotation_; + uint32_t type_index = DecodeUnsignedLeb128(&annotation); + mirror::Class* resolved_class = Runtime::Current()->GetClassLinker()->ResolveType( + klass->GetDexFile(), type_index, klass.Get()); + if (resolved_class == nullptr) { + std::string temp; + LOG(WARNING) << StringPrintf("Unable to resolve %s annotation class %d", + klass->GetDescriptor(&temp), type_index); + CHECK(Thread::Current()->IsExceptionPending()); + Thread::Current()->ClearException(); + continue; + } + if (resolved_class == annotation_class.Get()) { + return annotation_item; + } + } + + return nullptr; +} + +mirror::Object* GetAnnotationObjectFromAnnotationSet( + Handle<mirror::Class> klass, + const 
DexFile::AnnotationSetItem* annotation_set, + uint32_t visibility, + Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile::AnnotationItem* annotation_item = + GetAnnotationItemFromAnnotationSet(klass, annotation_set, visibility, annotation_class); + if (annotation_item == nullptr) { + return nullptr; + } + const uint8_t* annotation = annotation_item->annotation_; + return ProcessEncodedAnnotation(klass, &annotation); +} + +mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass, + const DexFile::AnnotationItem* annotation_item, + const char* annotation_name, + Handle<mirror::Class> array_class, + uint32_t expected_type) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + const uint8_t* annotation = + SearchEncodedAnnotation(dex_file, annotation_item->annotation_, annotation_name); + if (annotation == nullptr) { + return nullptr; + } + DexFile::AnnotationValue annotation_value; + if (!ProcessAnnotationValue(klass, &annotation, &annotation_value, array_class, + DexFile::kAllObjects)) { + return nullptr; + } + if (annotation_value.type_ != expected_type) { + return nullptr; + } + return annotation_value.value_.GetL(); +} + +mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass, + const DexFile::AnnotationSetItem* annotation_set) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + StackHandleScope<1> hs(Thread::Current()); + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Signature;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + mirror::Class* string_class = mirror::String::GetJavaLangString(); + Handle<mirror::Class> string_array_class(hs.NewHandle( + Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &string_class))); + if (string_array_class.Get() == nullptr) { + return 
nullptr; + } + mirror::Object* obj = + GetAnnotationValue(klass, annotation_item, "value", string_array_class, + DexFile::kDexAnnotationArray); + if (obj == nullptr) { + return nullptr; + } + return obj->AsObjectArray<mirror::String>(); +} + +mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass, + const DexFile::AnnotationSetItem* annotation_set) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + StackHandleScope<1> hs(Thread::Current()); + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Throws;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + mirror::Class* class_class = mirror::Class::GetJavaLangClass(); + Handle<mirror::Class> class_array_class(hs.NewHandle( + Runtime::Current()->GetClassLinker()->FindArrayClass(Thread::Current(), &class_class))); + if (class_array_class.Get() == nullptr) { + return nullptr; + } + mirror::Object* obj = + GetAnnotationValue(klass, annotation_item, "value", class_array_class, + DexFile::kDexAnnotationArray); + if (obj == nullptr) { + return nullptr; + } + return obj->AsObjectArray<mirror::Class>(); +} + +mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet( + Handle<mirror::Class> klass, + const DexFile::AnnotationSetItem* annotation_set, + uint32_t visibility) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + Thread* self = Thread::Current(); + ScopedObjectAccessUnchecked soa(self); + StackHandleScope<2> hs(self); + Handle<mirror::Class> annotation_array_class(hs.NewHandle( + soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array))); + if (annotation_set == nullptr) { + return mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), 0); + } + + uint32_t size = annotation_set->size_; + Handle<mirror::ObjectArray<mirror::Object>> result(hs.NewHandle( + 
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), size))); + if (result.Get() == nullptr) { + return nullptr; + } + + uint32_t dest_index = 0; + for (uint32_t i = 0; i < size; ++i) { + const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i); + // Note that we do not use IsVisibilityCompatible here because older code + // was correct for this case. + if (annotation_item->visibility_ != visibility) { + continue; + } + const uint8_t* annotation = annotation_item->annotation_; + mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation); + if (annotation_obj != nullptr) { + result->SetWithoutChecks<false>(dest_index, annotation_obj); + ++dest_index; + } else if (self->IsExceptionPending()) { + return nullptr; + } + } + + if (dest_index == size) { + return result.Get(); + } + + mirror::ObjectArray<mirror::Object>* trimmed_result = + mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index); + if (trimmed_result == nullptr) { + return nullptr; + } + + for (uint32_t i = 0; i < dest_index; ++i) { + mirror::Object* obj = result->GetWithoutChecks(i); + trimmed_result->SetWithoutChecks<false>(i, obj); + } + + return trimmed_result; +} + +mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList( + Handle<mirror::Class> klass, + const DexFile::AnnotationSetRefList* set_ref_list, + uint32_t size) + REQUIRES_SHARED(Locks::mutator_lock_) { + const DexFile& dex_file = klass->GetDexFile(); + Thread* self = Thread::Current(); + ScopedObjectAccessUnchecked soa(self); + StackHandleScope<1> hs(self); + mirror::Class* annotation_array_class = + soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array); + mirror::Class* annotation_array_array_class = + Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class); + if (annotation_array_array_class == nullptr) { + return nullptr; + } + 
Handle<mirror::ObjectArray<mirror::Object>> annotation_array_array(hs.NewHandle( + mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_array_class, size))); + if (annotation_array_array.Get() == nullptr) { + LOG(ERROR) << "Annotation set ref array allocation failed"; + return nullptr; + } + for (uint32_t index = 0; index < size; ++index) { + const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index]; + const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item); + mirror::Object* annotation_set = ProcessAnnotationSet(klass, set_item, + DexFile::kDexVisibilityRuntime); + if (annotation_set == nullptr) { + return nullptr; + } + annotation_array_array->SetWithoutChecks<false>(index, annotation_set); + } + return annotation_array_array.Get(); +} +} // namespace + +namespace annotations { + +mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); + if (annotation_set == nullptr) { + return nullptr; + } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + return GetAnnotationObjectFromAnnotationSet(field_class, annotation_set, + DexFile::kDexVisibilityRuntime, annotation_class); +} + +mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + return ProcessAnnotationSet(field_class, annotation_set, DexFile::kDexVisibilityRuntime); +} + +mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); + if (annotation_set == nullptr) { + return nullptr; + } + StackHandleScope<1> 
hs(Thread::Current()); + Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + return GetSignatureValue(field_class, annotation_set); +} + +bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); + if (annotation_set == nullptr) { + return false; + } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet( + field_class, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class); + return annotation_item != nullptr; +} + +mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) { + const DexFile* dex_file = method->GetDexFile(); + mirror::Class* klass = method->GetDeclaringClass(); + const DexFile::AnnotationsDirectoryItem* annotations_dir = + dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + if (annotations_dir == nullptr) { + return nullptr; + } + const DexFile::AnnotationSetItem* annotation_set = + dex_file->GetClassAnnotationSet(annotations_dir); + if (annotation_set == nullptr) { + return nullptr; + } + const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(*dex_file, annotation_set, + "Ldalvik/annotation/AnnotationDefault;", DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + const uint8_t* annotation = + SearchEncodedAnnotation(*dex_file, annotation_item->annotation_, "value"); + if (annotation == nullptr) { + return nullptr; + } + uint8_t header_byte = *(annotation++); + if ((header_byte & DexFile::kDexAnnotationValueTypeMask) != DexFile::kDexAnnotationAnnotation) { + return nullptr; + } + annotation = SearchEncodedAnnotation(*dex_file, annotation, method->GetName()); + if (annotation == nullptr) { + return nullptr; + } + DexFile::AnnotationValue annotation_value; + StackHandleScope<2> 
hs(Thread::Current()); + Handle<mirror::Class> h_klass(hs.NewHandle(klass)); + PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + Handle<mirror::Class> return_type(hs.NewHandle( + method->GetReturnType(true /* resolve */, pointer_size))); + if (!ProcessAnnotationValue(h_klass, &annotation, &annotation_value, return_type, + DexFile::kAllObjects)) { + return nullptr; + } + return annotation_value.value_.GetL(); +} + +mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); + if (annotation_set == nullptr) { + return nullptr; + } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + return GetAnnotationObjectFromAnnotationSet(method_class, annotation_set, + DexFile::kDexVisibilityRuntime, annotation_class); +} + +mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + return ProcessAnnotationSet(method_class, annotation_set, DexFile::kDexVisibilityRuntime); +} + +mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); + if (annotation_set == nullptr) { + return nullptr; + } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + return GetThrowsValue(method_class, annotation_set); +} + +mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) { + const DexFile* dex_file = method->GetDexFile(); + const DexFile::ParameterAnnotationsItem* parameter_annotations = + 
FindAnnotationsItemForMethod(method); + if (parameter_annotations == nullptr) { + return nullptr; + } + const DexFile::AnnotationSetRefList* set_ref_list = + dex_file->GetParameterAnnotationSetRefList(parameter_annotations); + if (set_ref_list == nullptr) { + return nullptr; + } + uint32_t size = set_ref_list->size_; + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + return ProcessAnnotationSetRefList(method_class, set_ref_list, size); +} + +mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method, + uint32_t parameter_idx, + Handle<mirror::Class> annotation_class) { + const DexFile* dex_file = method->GetDexFile(); + const DexFile::ParameterAnnotationsItem* parameter_annotations = + FindAnnotationsItemForMethod(method); + if (parameter_annotations == nullptr) { + return nullptr; + } + const DexFile::AnnotationSetRefList* set_ref_list = + dex_file->GetParameterAnnotationSetRefList(parameter_annotations); + if (set_ref_list == nullptr) { + return nullptr; + } + if (parameter_idx >= set_ref_list->size_) { + return nullptr; + } + const DexFile::AnnotationSetRefItem* annotation_set_ref = &set_ref_list->list_[parameter_idx]; + const DexFile::AnnotationSetItem* annotation_set = + dex_file->GetSetRefItemItem(annotation_set_ref); + + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + return GetAnnotationObjectFromAnnotationSet(method_class, + annotation_set, + DexFile::kDexVisibilityRuntime, + annotation_class); +} + +mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); + if (annotation_set == nullptr) { + return nullptr; + } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + return GetSignatureValue(method_class, 
annotation_set); +} + +bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class, + uint32_t visibility /* = DexFile::kDexVisibilityRuntime */) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); + if (annotation_set == nullptr) { + return false; + } + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); + const DexFile::AnnotationItem* annotation_item = + GetAnnotationItemFromAnnotationSet(method_class, annotation_set, visibility, + annotation_class); + return annotation_item != nullptr; +} + +mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass, + Handle<mirror::Class> annotation_class) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return nullptr; + } + return GetAnnotationObjectFromAnnotationSet(klass, annotation_set, DexFile::kDexVisibilityRuntime, + annotation_class); +} + +mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + return ProcessAnnotationSet(klass, annotation_set, DexFile::kDexVisibilityRuntime); +} + +mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) { + const DexFile& dex_file = klass->GetDexFile(); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return nullptr; + } + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/MemberClasses;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + StackHandleScope<1> hs(Thread::Current()); + mirror::Class* class_class = mirror::Class::GetJavaLangClass(); + Handle<mirror::Class> class_array_class(hs.NewHandle( + 
Runtime::Current()->GetClassLinker()->FindArrayClass(hs.Self(), &class_class))); + if (class_array_class.Get() == nullptr) { + return nullptr; + } + mirror::Object* obj = + GetAnnotationValue(klass, annotation_item, "value", class_array_class, + DexFile::kDexAnnotationArray); + if (obj == nullptr) { + return nullptr; + } + return obj->AsObjectArray<mirror::Class>(); +} + +mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) { + const DexFile& dex_file = klass->GetDexFile(); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return nullptr; + } + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingClass;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + mirror::Object* obj = GetAnnotationValue(klass, annotation_item, "value", + ScopedNullHandle<mirror::Class>(), + DexFile::kDexAnnotationType); + if (obj == nullptr) { + return nullptr; + } + return obj->AsClass(); +} + +mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) { + const DexFile& dex_file = klass->GetDexFile(); + mirror::Class* declaring_class = GetDeclaringClass(klass); + if (declaring_class != nullptr) { + return declaring_class; + } + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return nullptr; + } + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingMethod;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + const uint8_t* annotation = + SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "value"); + if (annotation == nullptr) { + return nullptr; + } + DexFile::AnnotationValue annotation_value; + if (!ProcessAnnotationValue(klass, &annotation, &annotation_value, + 
ScopedNullHandle<mirror::Class>(), DexFile::kAllRaw)) { + return nullptr; + } + if (annotation_value.type_ != DexFile::kDexAnnotationMethod) { + return nullptr; + } + StackHandleScope<2> hs(Thread::Current()); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); + Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); + ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType( + klass->GetDexFile(), annotation_value.value_.GetI(), dex_cache, class_loader); + if (method == nullptr) { + return nullptr; + } + return method->GetDeclaringClass(); +} + +mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) { + const DexFile& dex_file = klass->GetDexFile(); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return nullptr; + } + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingMethod;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return nullptr; + } + return GetAnnotationValue(klass, annotation_item, "value", ScopedNullHandle<mirror::Class>(), + DexFile::kDexAnnotationMethod); +} + +bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) { + const DexFile& dex_file = klass->GetDexFile(); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return false; + } + const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet( + dex_file, annotation_set, "Ldalvik/annotation/InnerClass;", DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return false; + } + const uint8_t* annotation = + SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "name"); + if (annotation == nullptr) { + return false; + } + DexFile::AnnotationValue annotation_value; + if (!ProcessAnnotationValue(klass, &annotation, 
&annotation_value, + ScopedNullHandle<mirror::Class>(), + DexFile::kAllObjects)) { + return false; + } + if (annotation_value.type_ != DexFile::kDexAnnotationNull && + annotation_value.type_ != DexFile::kDexAnnotationString) { + return false; + } + *name = down_cast<mirror::String*>(annotation_value.value_.GetL()); + return true; +} + +bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) { + const DexFile& dex_file = klass->GetDexFile(); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return false; + } + const DexFile::AnnotationItem* annotation_item = + SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/InnerClass;", + DexFile::kDexVisibilitySystem); + if (annotation_item == nullptr) { + return false; + } + const uint8_t* annotation = + SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "accessFlags"); + if (annotation == nullptr) { + return false; + } + DexFile::AnnotationValue annotation_value; + if (!ProcessAnnotationValue(klass, &annotation, &annotation_value, + ScopedNullHandle<mirror::Class>(), DexFile::kAllRaw)) { + return false; + } + if (annotation_value.type_ != DexFile::kDexAnnotationInt) { + return false; + } + *flags = annotation_value.value_.GetI(); + return true; +} + +mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return nullptr; + } + return GetSignatureValue(klass, annotation_set); +} + +bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) { + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + if (annotation_set == nullptr) { + return false; + } + const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet( + klass, annotation_set, 
DexFile::kDexVisibilityRuntime, annotation_class); + return annotation_item != nullptr; +} + +int32_t GetLineNumFromPC(const DexFile* dex_file, ArtMethod* method, uint32_t rel_pc) { + // For native method, lineno should be -2 to indicate it is native. Note that + // "line number == -2" is how libcore tells from StackTraceElement. + if (method->GetCodeItemOffset() == 0) { + return -2; + } + + const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); + DCHECK(code_item != nullptr) << PrettyMethod(method) << " " << dex_file->GetLocation(); + + // A method with no line number info should return -1 + DexFile::LineNumFromPcContext context(rel_pc, -1); + dex_file->DecodeDebugPositionInfo(code_item, DexFile::LineNumForPcCb, &context); + return context.line_num_; +} + +template<bool kTransactionActive> +void RuntimeEncodedStaticFieldValueIterator::ReadValueToField(ArtField* field) const { + DCHECK(dex_cache_ != nullptr); + DCHECK(class_loader_ != nullptr); + switch (type_) { + case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); + break; + case kByte: field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break; + case kShort: field->SetShort<kTransactionActive>(field->GetDeclaringClass(), jval_.s); break; + case kChar: field->SetChar<kTransactionActive>(field->GetDeclaringClass(), jval_.c); break; + case kInt: field->SetInt<kTransactionActive>(field->GetDeclaringClass(), jval_.i); break; + case kLong: field->SetLong<kTransactionActive>(field->GetDeclaringClass(), jval_.j); break; + case kFloat: field->SetFloat<kTransactionActive>(field->GetDeclaringClass(), jval_.f); break; + case kDouble: field->SetDouble<kTransactionActive>(field->GetDeclaringClass(), jval_.d); break; + case kNull: field->SetObject<kTransactionActive>(field->GetDeclaringClass(), nullptr); break; + case kString: { + mirror::String* resolved = linker_->ResolveString(dex_file_, jval_.i, *dex_cache_); + 
field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved); + break; + } + case kType: { + mirror::Class* resolved = linker_->ResolveType(dex_file_, jval_.i, *dex_cache_, + *class_loader_); + field->SetObject<kTransactionActive>(field->GetDeclaringClass(), resolved); + break; + } + default: UNIMPLEMENTED(FATAL) << ": type " << type_; + } +} +template +void RuntimeEncodedStaticFieldValueIterator::ReadValueToField<true>(ArtField* field) const; +template +void RuntimeEncodedStaticFieldValueIterator::ReadValueToField<false>(ArtField* field) const; + +} // namespace annotations + +} // namespace art diff --git a/runtime/dex_file_annotations.h b/runtime/dex_file_annotations.h new file mode 100644 index 0000000000..7b4e8564b0 --- /dev/null +++ b/runtime/dex_file_annotations.h @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_DEX_FILE_ANNOTATIONS_H_ +#define ART_RUNTIME_DEX_FILE_ANNOTATIONS_H_ + +#include "dex_file.h" + +#include "mirror/object_array.h" + +namespace art { + +namespace mirror { + class ClassLoader; + class DexCache; +} // namespace mirror +class ArtField; +class ArtMethod; +class ClassLinker; + +namespace annotations { + +// Field annotations. 
+mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) + REQUIRES_SHARED(Locks::mutator_lock_); +bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_); + +// Method annotations. +mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method, + uint32_t parameter_idx, + Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); +bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class, + uint32_t visibility = DexFile::kDexVisibilityRuntime) + REQUIRES_SHARED(Locks::mutator_lock_); + +// Class annotations. 
+mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass, + Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); +bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) + REQUIRES_SHARED(Locks::mutator_lock_); +bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) + REQUIRES_SHARED(Locks::mutator_lock_); +mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) + REQUIRES_SHARED(Locks::mutator_lock_); +bool IsClassAnnotationPresent(Handle<mirror::Class> klass, + Handle<mirror::Class> annotation_class) + REQUIRES_SHARED(Locks::mutator_lock_); + +// Map back from a PC to the line number in a method. +int32_t GetLineNumFromPC(const DexFile* dex_file, ArtMethod* method, uint32_t rel_pc) + REQUIRES_SHARED(Locks::mutator_lock_); + +// Annotations iterator. +class RuntimeEncodedStaticFieldValueIterator : public EncodedStaticFieldValueIterator { + public: + // A constructor meant to be called from runtime code. 
+ RuntimeEncodedStaticFieldValueIterator(const DexFile& dex_file, + Handle<mirror::DexCache>* dex_cache, + Handle<mirror::ClassLoader>* class_loader, + ClassLinker* linker, + const DexFile::ClassDef& class_def) + REQUIRES_SHARED(Locks::mutator_lock_) + : EncodedStaticFieldValueIterator(dex_file, class_def), + dex_cache_(dex_cache), + class_loader_(class_loader), + linker_(linker) { + } + + template<bool kTransactionActive> + void ReadValueToField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_); + + private: + Handle<mirror::DexCache>* const dex_cache_; // Dex cache to resolve literal objects. + Handle<mirror::ClassLoader>* const class_loader_; // ClassLoader to resolve types. + ClassLinker* linker_; // Linker to resolve literal objects. + DISALLOW_IMPLICIT_CONSTRUCTORS(RuntimeEncodedStaticFieldValueIterator); +}; + +} // namespace annotations + +} // namespace art + +#endif // ART_RUNTIME_DEX_FILE_ANNOTATIONS_H_ diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index d03a9d80eb..14110c24ba 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -48,7 +48,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, // This method is being used by artQuickResolutionTrampoline, before it sets up // the passed parameters in a GC friendly way. Therefore we must never be // suspended while executing it. 
- ScopedAssertNoThreadSuspension sants(Thread::Current(), __FUNCTION__); + ScopedAssertNoThreadSuspension sants(__FUNCTION__); uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth); InvokeType invoke_type = static_cast<InvokeType>( @@ -120,8 +120,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, return inlined_method; } -inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) - REQUIRES_SHARED(Locks::mutator_lock_) { +inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) { return GetCalleeSaveMethodCaller( self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */); } @@ -130,7 +129,8 @@ template <const bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, ArtMethod* method, - Thread* self, bool* slow_path) { + Thread* self, + bool* slow_path) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); PointerSize pointer_size = class_linker->GetImagePointerSize(); mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size); @@ -363,7 +363,7 @@ template<FindFieldType type, bool access_check> inline ArtField* FindFieldFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self, - size_t expected_size) REQUIRES(!Roles::uninterruptible_) { + size_t expected_size) { bool is_primitive; bool is_set; bool is_static; @@ -444,8 +444,7 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx, return resolved_field; } else { StackHandleScope<1> hs(self); - Handle<mirror::Class> h_class(hs.NewHandle(fields_class)); - if (LIKELY(class_linker->EnsureInitialized(self, h_class, true, true))) { + if (LIKELY(class_linker->EnsureInitialized(self, hs.NewHandle(fields_class), true, true))) { // Otherwise let's ensure the class is initialized before resolving the field. 
return resolved_field; } @@ -479,8 +478,10 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite); #undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL template<InvokeType type, bool access_check> -inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_object, - ArtMethod* referrer, Thread* self) { +inline ArtMethod* FindMethodFromCode(uint32_t method_idx, + mirror::Object** this_object, + ArtMethod* referrer, + Thread* self) { ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, referrer); if (resolved_method == nullptr) { @@ -554,8 +555,11 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ // that will actually not be what we want in some cases where there are miranda methods or // defaults. What we actually need is a GetContainingClass that says which classes virtuals // this method is coming from. - mirror::Class* referring_class = referrer->GetDeclaringClass(); - uint16_t method_type_idx = referring_class->GetDexFile().GetMethodId(method_idx).class_idx_; + StackHandleScope<2> hs2(self); + HandleWrapper<mirror::Object> h_this(hs2.NewHandleWrapper(this_object)); + Handle<mirror::Class> h_referring_class(hs2.NewHandle(referrer->GetDeclaringClass())); + const uint16_t method_type_idx = + h_referring_class->GetDexFile().GetMethodId(method_idx).class_idx_; mirror::Class* method_reference_class = class_linker->ResolveType(method_type_idx, referrer); if (UNLIKELY(method_reference_class == nullptr)) { // Bad type idx. @@ -566,8 +570,8 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ // referenced class in the bytecode, we use its super class. Otherwise, we throw // a NoSuchMethodError. 
mirror::Class* super_class = nullptr; - if (method_reference_class->IsAssignableFrom(referring_class)) { - super_class = referring_class->GetSuperClass(); + if (method_reference_class->IsAssignableFrom(h_referring_class.Get())) { + super_class = h_referring_class->GetSuperClass(); } uint16_t vtable_index = resolved_method->GetMethodIndex(); if (access_check) { @@ -587,10 +591,10 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ } else { // It is an interface. if (access_check) { - if (!method_reference_class->IsAssignableFrom((*this_object)->GetClass())) { + if (!method_reference_class->IsAssignableFrom(h_this->GetClass())) { ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(resolved_method, method_reference_class, - *this_object, + h_this.Get(), referrer); return nullptr; // Failure. } @@ -605,6 +609,7 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ } return result; } + UNREACHABLE(); } case kInterface: { uint32_t imt_index = resolved_method->GetImtIndex(); @@ -661,6 +666,7 @@ EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface); // Fast path field resolution that can't initialize classes or throw exceptions. inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size) { + ScopedAssertNoThreadSuspension ants(__FUNCTION__); ArtField* resolved_field = referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize); @@ -713,6 +719,7 @@ inline ArtField* FindFieldFast(uint32_t field_idx, ArtMethod* referrer, FindFiel // Fast path method resolution that can't throw exceptions. 
inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check, InvokeType type) { + ScopedAssertNoThreadSuspension ants(__FUNCTION__); if (UNLIKELY(this_object == nullptr && type != kStatic)) { return nullptr; } diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 4056ec53a4..fd9ffbd04d 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -43,7 +43,7 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, ArtMethod* referrer, Thread* self, bool access_check) - REQUIRES_SHARED(Locks::mutator_lock_) { + REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) { if (UNLIKELY(component_count < 0)) { ThrowNegativeArraySizeException(component_count); return nullptr; // Failure @@ -120,19 +120,19 @@ mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, heap->GetCurrentAllocator()); } -void CheckReferenceResult(mirror::Object* o, Thread* self) { - if (o == nullptr) { +void CheckReferenceResult(Handle<mirror::Object> o, Thread* self) { + if (o.Get() == nullptr) { return; } // Make sure that the result is an instance of the type this method was expected to return. - mirror::Class* return_type = self->GetCurrentMethod(nullptr)->GetReturnType(true /* resolve */, - kRuntimePointerSize); + ArtMethod* method = self->GetCurrentMethod(nullptr); + mirror::Class* return_type = method->GetReturnType(true /* resolve */, kRuntimePointerSize); if (!o->InstanceOf(return_type)) { Runtime::Current()->GetJavaVM()->JniAbortF(nullptr, "attempt to return an instance of %s from %s", - PrettyTypeOf(o).c_str(), - PrettyMethod(self->GetCurrentMethod(nullptr)).c_str()); + PrettyTypeOf(o.Get()).c_str(), + PrettyMethod(method).c_str()); } } @@ -186,12 +186,11 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons // Do nothing. 
return zero; } else { - StackHandleScope<1> hs(soa.Self()); - auto h_interface_method(hs.NewHandle(soa.Decode<mirror::Method*>(interface_method_jobj))); + ArtMethod* interface_method = + soa.Decode<mirror::Method*>(interface_method_jobj)->GetArtMethod(); // This can cause thread suspension. PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); - mirror::Class* result_type = - h_interface_method->GetArtMethod()->GetReturnType(true /* resolve */, pointer_size); + mirror::Class* result_type = interface_method->GetReturnType(true /* resolve */, pointer_size); mirror::Object* result_ref = soa.Decode<mirror::Object*>(result); JValue result_unboxed; if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) { @@ -205,26 +204,29 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons // a UndeclaredThrowableException. mirror::Throwable* exception = soa.Self()->GetException(); if (exception->IsCheckedException()) { - mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj); - mirror::Class* proxy_class = rcvr->GetClass(); - mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj); - ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface( - interface_method->GetArtMethod(), kRuntimePointerSize); - auto virtual_methods = proxy_class->GetVirtualMethodsSlice(kRuntimePointerSize); - size_t num_virtuals = proxy_class->NumVirtualMethods(); - size_t method_size = ArtMethod::Size(kRuntimePointerSize); - // Rely on the fact that the methods are contiguous to determine the index of the method in - // the slice. 
- int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) - - reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size; - CHECK_LT(throws_index, static_cast<int>(num_virtuals)); - mirror::ObjectArray<mirror::Class>* declared_exceptions = - proxy_class->GetThrows()->Get(throws_index); - mirror::Class* exception_class = exception->GetClass(); bool declares_exception = false; - for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { - mirror::Class* declared_exception = declared_exceptions->Get(i); - declares_exception = declared_exception->IsAssignableFrom(exception_class); + { + ScopedAssertNoThreadSuspension ants(__FUNCTION__); + mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj); + mirror::Class* proxy_class = rcvr->GetClass(); + mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj); + ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface( + interface_method->GetArtMethod(), kRuntimePointerSize); + auto virtual_methods = proxy_class->GetVirtualMethodsSlice(kRuntimePointerSize); + size_t num_virtuals = proxy_class->NumVirtualMethods(); + size_t method_size = ArtMethod::Size(kRuntimePointerSize); + // Rely on the fact that the methods are contiguous to determine the index of the method in + // the slice. 
+ int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) - + reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size; + CHECK_LT(throws_index, static_cast<int>(num_virtuals)); + mirror::ObjectArray<mirror::Class>* declared_exceptions = + proxy_class->GetThrows()->Get(throws_index); + mirror::Class* exception_class = exception->GetClass(); + for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { + mirror::Class* declared_exception = declared_exceptions->Get(i); + declares_exception = declared_exception->IsAssignableFrom(exception_class); + } } if (!declares_exception) { soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;", @@ -260,6 +262,7 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, Runtime::CalleeSaveType type, bool do_caller_check) REQUIRES_SHARED(Locks::mutator_lock_) { + ScopedAssertNoThreadSuspension ants(__FUNCTION__); DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type)); const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type); diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index f88e81dc61..20c8401111 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -24,6 +24,7 @@ #include "base/mutex.h" #include "dex_instruction.h" #include "gc/allocator_type.h" +#include "handle.h" #include "invoke_type.h" #include "jvalue.h" #include "runtime.h" @@ -46,13 +47,16 @@ class Thread; template <const bool kAccessCheck> ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx, ArtMethod* method, - Thread* self, bool* slow_path) - REQUIRES_SHARED(Locks::mutator_lock_); + Thread* self, + bool* slow_path) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass, Thread* self, bool* slow_path) - REQUIRES_SHARED(Locks::mutator_lock_); + 
REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. @@ -63,21 +67,25 @@ ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Given the context of a calling Method and a resolved class, create an instance. template <bool kInstrumented> ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass, Thread* self, gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Given the context of a calling Method and an initialized class, create an instance. template <bool kInstrumented> -ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass, - Thread* self, - gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); +ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized( + mirror::Class* klass, + Thread* self, + gc::AllocatorType allocator_type) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); template <bool kAccessCheck> @@ -85,7 +93,8 @@ ALWAYS_INLINE inline mirror::Class* CheckArrayAlloc(uint32_t type_idx, int32_t component_count, ArtMethod* method, bool* slow_path) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If // it cannot be resolved, throw an error. If it can, use it to create an array. 
@@ -97,7 +106,8 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); template <bool kAccessCheck, bool kInstrumented> ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, @@ -105,13 +115,15 @@ ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* kl ArtMethod* method, Thread* self, gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self, bool access_check, gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, int32_t component_count, @@ -119,7 +131,8 @@ extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, Thread* self, bool access_check, gc::AllocatorType allocator_type) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Type of find field operation for fast and slow case. 
enum FindFieldType { @@ -134,54 +147,76 @@ enum FindFieldType { }; template<FindFieldType type, bool access_check> -inline ArtField* FindFieldFromCode( - uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size) - REQUIRES_SHARED(Locks::mutator_lock_); +inline ArtField* FindFieldFromCode(uint32_t field_idx, + ArtMethod* referrer, + Thread* self, + size_t expected_size) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); template<InvokeType type, bool access_check> -inline ArtMethod* FindMethodFromCode( - uint32_t method_idx, mirror::Object** this_object, ArtMethod* referrer, Thread* self) - REQUIRES_SHARED(Locks::mutator_lock_); +inline ArtMethod* FindMethodFromCode(uint32_t method_idx, + mirror::Object** this_object, + ArtMethod* referrer, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // Fast path field resolution that can't initialize classes or throw exceptions. -inline ArtField* FindFieldFast( - uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size) +inline ArtField* FindFieldFast(uint32_t field_idx, + ArtMethod* referrer, + FindFieldType type, + size_t expected_size) REQUIRES_SHARED(Locks::mutator_lock_); // Fast path method resolution that can't throw exceptions. 
-inline ArtMethod* FindMethodFast( - uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check, - InvokeType type) +inline ArtMethod* FindMethodFast(uint32_t method_idx, + mirror::Object* this_object, + ArtMethod* referrer, + bool access_check, + InvokeType type) REQUIRES_SHARED(Locks::mutator_lock_); -inline mirror::Class* ResolveVerifyAndClinit( - uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access) - REQUIRES_SHARED(Locks::mutator_lock_); +inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, + ArtMethod* referrer, + Thread* self, + bool can_run_clinit, + bool verify_access) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); // TODO: annotalysis disabled as monitor semantics are maintained in Java code. 
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - NO_THREAD_SAFETY_ANALYSIS; + NO_THREAD_SAFETY_ANALYSIS REQUIRES(!Roles::uninterruptible_); -void CheckReferenceResult(mirror::Object* o, Thread* self) - REQUIRES_SHARED(Locks::mutator_lock_); +void CheckReferenceResult(Handle<mirror::Object> o, Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty, jobject rcvr_jobj, jobject interface_art_method_jobj, std::vector<jvalue>& args) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) - REQUIRES_SHARED(Locks::mutator_lock_); + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); template <typename INT_TYPE, typename FLOAT_TYPE> inline INT_TYPE art_float_to_integral(FLOAT_TYPE f); ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp, Runtime::CalleeSaveType type, - bool do_caller_check = false); + bool do_caller_check = false) + REQUIRES_SHARED(Locks::mutator_lock_); + +ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type) + REQUIRES_SHARED(Locks::mutator_lock_); } // namespace art diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h index 86fb8818ec..cfa5325e45 100644 --- a/runtime/entrypoints/quick/quick_default_externs.h +++ b/runtime/entrypoints/quick/quick_default_externs.h @@ -118,7 +118,6 @@ extern "C" void art_quick_test_suspend(); extern "C" void art_quick_deliver_exception(art::mirror::Object*); extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit); extern "C" void art_quick_throw_div_zero(); -extern "C" void art_quick_throw_no_such_method(int32_t method_idx); extern "C" void art_quick_throw_null_pointer_exception(); extern "C" 
void art_quick_throw_null_pointer_exception_from_signal(uintptr_t address); extern "C" void art_quick_throw_stack_overflow(void*); diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h index 2a206c286a..1ee1f818b6 100644 --- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h +++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h @@ -113,7 +113,6 @@ void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) qpoints->pDeliverException = art_quick_deliver_exception; qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; qpoints->pThrowStringBounds = art_quick_throw_string_bounds; diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.cc b/runtime/entrypoints/quick/quick_entrypoints_enum.cc index 7b80af6477..81f152b6fd 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_enum.cc +++ b/runtime/entrypoints/quick/quick_entrypoints_enum.cc @@ -71,4 +71,55 @@ bool EntrypointRequiresStackMap(QuickEntrypointEnum trampoline) { } } +bool EntrypointCanTriggerGC(QuickEntrypointEnum entrypoint) { + switch (entrypoint) { + // Listed in the same order as in quick_entrypoints_list.h. 
+ case kQuickCmpgDouble: + case kQuickCmpgFloat: + case kQuickCmplDouble: + case kQuickCmplFloat: + case kQuickCos: + case kQuickSin: + case kQuickAcos: + case kQuickAsin: + case kQuickAtan: + case kQuickAtan2: + case kQuickCbrt: + case kQuickCosh: + case kQuickExp: + case kQuickExpm1: + case kQuickHypot: + case kQuickLog: + case kQuickLog10: + case kQuickNextAfter: + case kQuickSinh: + case kQuickTan: + case kQuickTanh: + case kQuickFmod: + case kQuickL2d: + case kQuickFmodf: + case kQuickL2f: + case kQuickD2iz: + case kQuickF2iz: + case kQuickIdivmod: + case kQuickD2l: + case kQuickF2l: + case kQuickLdiv: + case kQuickLmod: + case kQuickLmul: + case kQuickShlLong: + case kQuickShrLong: + case kQuickUshrLong: + return false; + + /* Used by mips for 64bit volatile load/stores. */ + case kQuickA64Load: + case kQuickA64Store: + return false; + + default: + return true; + } +} + } // namespace art diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h index 7674873731..abf2c34744 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_enum.h +++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h @@ -63,6 +63,7 @@ template <> inline void CheckEntrypointTypes<kQuick ## name, __VA_ARGS__>() {}; #undef ENTRYPOINT_ENUM bool EntrypointRequiresStackMap(QuickEntrypointEnum trampoline); +bool EntrypointCanTriggerGC(QuickEntrypointEnum entrypoint); } // namespace art diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index 74c928ad8d..e4029191d6 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -139,7 +139,6 @@ V(DeliverException, void, mirror::Object*) \ V(ThrowArrayBounds, void, int32_t, int32_t) \ V(ThrowDivZero, void, void) \ - V(ThrowNoSuchMethod, void, int32_t) \ V(ThrowNullPointer, void, void) \ V(ThrowStackOverflow, void, void*) \ V(ThrowStringBounds, void, int32_t, 
int32_t) \ diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 64f19afccd..76b545652d 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -114,7 +114,8 @@ extern void JniMethodFastEnd(uint32_t saved_local_ref_cookie, Thread* self) { PopLocalReferences(saved_local_ref_cookie, self); } -extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject locked, +extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, + jobject locked, Thread* self) { GoToRunnable(self); UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. @@ -135,13 +136,17 @@ static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result, PopLocalReferences(saved_local_ref_cookie, self); // Process result. if (UNLIKELY(self->GetJniEnv()->check_jni)) { - CheckReferenceResult(o, self); + // CheckReferenceResult can resolve types. 
+ StackHandleScope<1> hs(self); + HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&o)); + CheckReferenceResult(h_obj, self); } VerifyObject(o); return o; } -extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie, +extern mirror::Object* JniMethodEndWithReference(jobject result, + uint32_t saved_local_ref_cookie, Thread* self) { GoToRunnable(self); return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self); @@ -149,7 +154,8 @@ extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, uint32_t saved_local_ref_cookie, - jobject locked, Thread* self) { + jobject locked, + Thread* self) { GoToRunnable(self); UnlockJniSynchronizedMethod(locked, self); return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self); diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index 67cae8a7d9..a205b17f1b 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -101,13 +101,6 @@ extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self) self->QuickDeliverException(); } -extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self) - REQUIRES_SHARED(Locks::mutator_lock_) { - ScopedQuickEntrypointChecks sqec(self); - ThrowNoSuchMethodError(method_idx); - self->QuickDeliverException(); -} - extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type, Thread* self) diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 3043c83668..3c6f807d64 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -1961,8 +1961,12 @@ 
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** // Run the visitor and update sp. BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp); - visitor.VisitArguments(); - visitor.FinalizeHandleScope(self); + { + ScopedAssertNoThreadSuspension sants(__FUNCTION__); + visitor.VisitArguments(); + // FinalizeHandleScope pushes the handle scope on the thread. + visitor.FinalizeHandleScope(self); + } // Fix up managed-stack things in Thread. self->SetTopOfStack(sp); diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc index b102334418..03254ab8d4 100644 --- a/runtime/entrypoints_order_test.cc +++ b/runtime/entrypoints_order_test.cc @@ -122,10 +122,10 @@ class EntrypointsOrderTest : public CommonRuntimeTest { // Skip across the entrypoints structures. - EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_objects, sizeof(void*)); - EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, thread_local_pos, sizeof(size_t)); + EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*)); - EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, mterp_current_ibase, sizeof(void*)); + EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*)); + EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(size_t)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*)); EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*)); @@ -288,8 +288,7 @@ class EntrypointsOrderTest : public CommonRuntimeTest { EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeliverException, pThrowArrayBounds, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, 
pThrowArrayBounds, pThrowDivZero, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, sizeof(void*)); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNullPointer, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pThrowStringBounds, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStringBounds, pDeoptimize, sizeof(void*)); diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h index 7faa2dc7e3..54d2c35b7c 100644 --- a/runtime/experimental_flags.h +++ b/runtime/experimental_flags.h @@ -31,7 +31,8 @@ struct ExperimentalFlags { }; constexpr ExperimentalFlags() : value_(0x0000) {} - constexpr ExperimentalFlags(decltype(kNone) t) : value_(static_cast<uint32_t>(t)) {} + constexpr ExperimentalFlags(decltype(kNone) t) // NOLINT, implicit + : value_(static_cast<uint32_t>(t)) {} constexpr operator decltype(kNone)() const { return static_cast<decltype(kNone)>(value_); diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index e534369ffb..2750fea751 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -568,7 +568,11 @@ void ConcurrentCopying::MarkingPhase() { if (kVerboseMode) { LOG(INFO) << "GC MarkingPhase"; } - CHECK(weak_ref_access_enabled_); + Thread* self = Thread::Current(); + if (kIsDebugBuild) { + MutexLock mu(self, *Locks::thread_list_lock_); + CHECK(weak_ref_access_enabled_); + } // Scan immune spaces. 
// Update all the fields in the immune spaces first without graying the objects so that we @@ -627,7 +631,6 @@ void ConcurrentCopying::MarkingPhase() { Runtime::Current()->VisitNonThreadRoots(this); } - Thread* self = Thread::Current(); { TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings()); // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The @@ -695,7 +698,10 @@ void ConcurrentCopying::MarkingPhase() { CheckEmptyMarkStack(); } - CHECK(weak_ref_access_enabled_); + if (kIsDebugBuild) { + MutexLock mu(self, *Locks::thread_list_lock_); + CHECK(weak_ref_access_enabled_); + } if (kVerboseMode) { LOG(INFO) << "GC end of MarkingPhase"; } @@ -705,11 +711,10 @@ void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) { if (kVerboseMode) { LOG(INFO) << "ReenableWeakRefAccess"; } - weak_ref_access_enabled_.StoreRelaxed(true); // This is for new threads. - QuasiAtomic::ThreadFenceForConstructor(); // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access. { MutexLock mu(self, *Locks::thread_list_lock_); + weak_ref_access_enabled_ = true; // This is for new threads. std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); for (Thread* thread : thread_list) { thread->SetWeakRefAccessEnabled(true); @@ -744,12 +749,30 @@ class ConcurrentCopying::DisableMarkingCheckpoint : public Closure { ConcurrentCopying* const concurrent_copying_; }; +class ConcurrentCopying::DisableMarkingCallback : public Closure { + public: + explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying) + : concurrent_copying_(concurrent_copying) { + } + + void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) { + // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint() + // to avoid a race with ThreadList::Register(). 
+ CHECK(concurrent_copying_->is_marking_); + concurrent_copying_->is_marking_ = false; + } + + private: + ConcurrentCopying* const concurrent_copying_; +}; + void ConcurrentCopying::IssueDisableMarkingCheckpoint() { Thread* self = Thread::Current(); DisableMarkingCheckpoint check_point(this); ThreadList* thread_list = Runtime::Current()->GetThreadList(); gc_barrier_->Init(self, 0); - size_t barrier_count = thread_list->RunCheckpoint(&check_point); + DisableMarkingCallback dmc(this); + size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc); // If there are no threads to wait which implies that all the checkpoint functions are finished, // then no need to release the mutator lock. if (barrier_count == 0) { @@ -765,13 +788,9 @@ void ConcurrentCopying::IssueDisableMarkingCheckpoint() { } void ConcurrentCopying::DisableMarking() { - // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the - // thread-local flags so that a new thread starting up will get the correct is_marking flag. - is_marking_ = false; - QuasiAtomic::ThreadFenceForConstructor(); - // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are - // still in the middle of a read barrier which may have a from-space ref cached in a local - // variable. + // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and + // to ensure no threads are still in the middle of a read barrier which may have a from-space ref + // cached in a local variable. 
IssueDisableMarkingCheckpoint(); if (kUseTableLookupReadBarrier) { heap_->rb_table_->ClearAll(); @@ -1158,12 +1177,13 @@ class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure { const bool disable_weak_ref_access_; }; -void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) { +void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, + Closure* checkpoint_callback) { Thread* self = Thread::Current(); RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access); ThreadList* thread_list = Runtime::Current()->GetThreadList(); gc_barrier_->Init(self, 0); - size_t barrier_count = thread_list->RunCheckpoint(&check_point); + size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback); // If there are no threads to wait which implys that all the checkpoint functions are finished, // then no need to release the mutator lock. if (barrier_count == 0) { @@ -1213,7 +1233,7 @@ bool ConcurrentCopying::ProcessMarkStackOnce() { MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); if (mark_stack_mode == kMarkStackModeThreadLocal) { // Process the thread-local mark stacks and the GC mark stack. - count += ProcessThreadLocalMarkStacks(false); + count += ProcessThreadLocalMarkStacks(false, nullptr); while (!gc_mark_stack_->IsEmpty()) { mirror::Object* to_ref = gc_mark_stack_->PopBack(); ProcessMarkStackRef(to_ref); @@ -1265,9 +1285,10 @@ bool ConcurrentCopying::ProcessMarkStackOnce() { return count == 0; } -size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) { +size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, + Closure* checkpoint_callback) { // Run a checkpoint to collect all thread local mark stacks and iterate over them all. 
- RevokeThreadLocalMarkStacks(disable_weak_ref_access); + RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback); size_t count = 0; std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks; { @@ -1360,6 +1381,23 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { } } +class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure { + public: + explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying) + : concurrent_copying_(concurrent_copying) { + } + + void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) { + // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint() + // to avoid a deadlock b/31500969. + CHECK(concurrent_copying_->weak_ref_access_enabled_); + concurrent_copying_->weak_ref_access_enabled_ = false; + } + + private: + ConcurrentCopying* const concurrent_copying_; +}; + void ConcurrentCopying::SwitchToSharedMarkStackMode() { Thread* self = Thread::Current(); CHECK(thread_running_gc_ != nullptr); @@ -1369,12 +1407,10 @@ void ConcurrentCopying::SwitchToSharedMarkStackMode() { CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), static_cast<uint32_t>(kMarkStackModeThreadLocal)); mark_stack_mode_.StoreRelaxed(kMarkStackModeShared); - CHECK(weak_ref_access_enabled_.LoadRelaxed()); - weak_ref_access_enabled_.StoreRelaxed(false); - QuasiAtomic::ThreadFenceForConstructor(); + DisableWeakRefAccessCallback dwrac(this); // Process the thread local mark stacks one last time after switching to the shared mark stack // mode and disable weak ref accesses. 
- ProcessThreadLocalMarkStacks(true); + ProcessThreadLocalMarkStacks(true, &dwrac); if (kVerboseMode) { LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; } @@ -1403,7 +1439,7 @@ void ConcurrentCopying::CheckEmptyMarkStack() { MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); if (mark_stack_mode == kMarkStackModeThreadLocal) { // Thread-local mark stack mode. - RevokeThreadLocalMarkStacks(false); + RevokeThreadLocalMarkStacks(false, nullptr); MutexLock mu(Thread::Current(), mark_stack_lock_); if (!revoked_mark_stacks_.empty()) { for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) { @@ -2043,13 +2079,23 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) { } DCHECK(to_ref != nullptr); + // Copy the object excluding the lock word since that is handled in the loop. + to_ref->SetClass(from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()); + const size_t kObjectHeaderSize = sizeof(mirror::Object); + DCHECK_GE(obj_size, kObjectHeaderSize); + static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) + + sizeof(LockWord), + "Object header size does not match"); + // Memcpy can tear for words since it may do byte copy. It is only safe to do this since the + // object in the from space is immutable other than the lock word. b/31423258 + memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize, + reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize, + obj_size - kObjectHeaderSize); + // Attempt to install the forward pointer. This is in a loop as the // lock word atomic write can fail. while (true) { - // Copy the object. TODO: copy only the lockword in the second iteration and on? - memcpy(to_ref, from_ref, obj_size); - - LockWord old_lock_word = to_ref->GetLockWord(false); + LockWord old_lock_word = from_ref->GetLockWord(false); if (old_lock_word.GetState() == LockWord::kForwardingAddress) { // Lost the race. 
Another thread (either GC or mutator) stored @@ -2093,6 +2139,8 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) { return to_ref; } + // Copy the old lock word over since we did not copy it yet. + to_ref->SetLockWord(old_lock_word, false); // Set the gray ptr. if (kUseBakerReadBarrier) { to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr()); diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h index 1ef0aeac24..81ffbc5f2c 100644 --- a/runtime/gc/collector/concurrent_copying.h +++ b/runtime/gc/collector/concurrent_copying.h @@ -34,6 +34,7 @@ #include <vector> namespace art { +class Closure; class RootInfo; namespace gc { @@ -61,9 +62,9 @@ class ConcurrentCopying : public GarbageCollector { // pages. static constexpr bool kGrayDirtyImmuneObjects = true; - ConcurrentCopying(Heap* heap, - const std::string& name_prefix = "", - bool measure_read_barrier_slow_path = false); + explicit ConcurrentCopying(Heap* heap, + const std::string& name_prefix = "", + bool measure_read_barrier_slow_path = false); ~ConcurrentCopying(); virtual void RunPhases() OVERRIDE @@ -120,8 +121,8 @@ class ConcurrentCopying : public GarbageCollector { Barrier& GetBarrier() { return *gc_barrier_; } - bool IsWeakRefAccessEnabled() { - return weak_ref_access_enabled_.LoadRelaxed(); + bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) { + return weak_ref_access_enabled_; } void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); @@ -161,9 +162,9 @@ class ConcurrentCopying : public GarbageCollector { void VerifyGrayImmuneObjects() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); - size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) + size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); - void RevokeThreadLocalMarkStacks(bool 
disable_weak_ref_access) + void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback) REQUIRES_SHARED(Locks::mutator_lock_); void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_); @@ -269,7 +270,7 @@ class ConcurrentCopying : public GarbageCollector { // without a lock. Other threads won't access the mark stack. }; Atomic<MarkStackMode> mark_stack_mode_; - Atomic<bool> weak_ref_access_enabled_; + bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_); // How many objects and bytes we moved. Used for accounting. Atomic<size_t> bytes_moved_; @@ -311,7 +312,9 @@ class ConcurrentCopying : public GarbageCollector { class AssertToSpaceInvariantRefsVisitor; class ClearBlackPtrsVisitor; class ComputeUnevacFromSpaceLiveRatioVisitor; + class DisableMarkingCallback; class DisableMarkingCheckpoint; + class DisableWeakRefAccessCallback; class FlipCallback; class GrayImmuneObjectVisitor; class ImmuneSpaceScanObjVisitor; diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h index 6afe876710..4ffa254bb0 100644 --- a/runtime/gc/collector/garbage_collector.h +++ b/runtime/gc/collector/garbage_collector.h @@ -44,7 +44,7 @@ class Heap; namespace collector { struct ObjectBytePair { - ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0) + explicit ObjectBytePair(uint64_t num_objects = 0, int64_t num_bytes = 0) : objects(num_objects), bytes(num_bytes) {} void Add(const ObjectBytePair& other) { objects += other.objects; diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 600aff19dd..cb5226b7b6 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -1011,7 +1011,7 @@ void Heap::VisitObjects(ObjectCallback callback, void* arg) { DecrementDisableMovingGC(self); } else { // GCs can move objects, so don't allow this. 
- ScopedAssertNoThreadSuspension ants(self, "Visiting objects"); + ScopedAssertNoThreadSuspension ants("Visiting objects"); DCHECK(region_space_ == nullptr); VisitObjectsInternal(callback, arg); } diff --git a/runtime/globals.h b/runtime/globals.h index 691bf55d42..28534e4708 100644 --- a/runtime/globals.h +++ b/runtime/globals.h @@ -85,9 +85,9 @@ static constexpr bool kIsTargetLinux = false; # endif #endif -// Are additional statically-linked ART host binaries (dex2oats, -// oatdumps, etc.) built and available? -#if !defined(ART_TARGET) && defined(ART_BUILD_HOST_STATIC) +// Additional statically-linked ART binaries (dex2oats, oatdumps, etc.) are +// always available on the host +#if !defined(ART_TARGET) static constexpr bool kHostStaticBuildEnabled = true; #else static constexpr bool kHostStaticBuildEnabled = false; diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 388561b012..ff433890a9 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -966,7 +966,7 @@ void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread, ArtMethod* callee) const { // We cannot have thread suspension since that would cause the this_object parameter to // potentially become a dangling pointer. An alternative could be to put it in a handle instead. 
- ScopedAssertNoThreadSuspension ants(thread, __FUNCTION__); + ScopedAssertNoThreadSuspension ants(__FUNCTION__); for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) { if (listener != nullptr) { listener->InvokeVirtualOrInterface(thread, this_object, caller, dex_pc, callee); diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 9d76685fb5..814adf7132 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -147,8 +147,7 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr jit::Jit* jit = Runtime::Current()->GetJit(); if (jit != nullptr) { if (type == kVirtual || type == kInterface) { - jit->InvokeVirtualOrInterface( - self, receiver, sf_method, shadow_frame.GetDexPC(), called_method); + jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method); } jit->AddSamples(self, sf_method, 1, /*with_backedges*/false); } @@ -195,7 +194,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, jit::Jit* jit = Runtime::Current()->GetJit(); if (jit != nullptr) { jit->InvokeVirtualOrInterface( - self, receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method); + receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method); jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false); } instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index c6144086a8..d505aea10a 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -402,7 +402,7 @@ void UnstartedRuntime::UnstartedClassGetEnclosingClass( if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { result->SetL(nullptr); } - 
result->SetL(klass->GetDexFile().GetEnclosingClass(klass)); + result->SetL(annotations::GetEnclosingClass(klass)); } void UnstartedRuntime::UnstartedClassGetInnerClassFlags( diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index cff23541b3..d984f45253 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -431,7 +431,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread, const uint8_t* native_pc = nullptr; { - ScopedAssertNoThreadSuspension sts(thread, "Holding OSR method"); + ScopedAssertNoThreadSuspension sts("Holding OSR method"); const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method); if (osr_method == nullptr) { // No osr method yet, just return to the interpreter. @@ -683,12 +683,11 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) { } } -void Jit::InvokeVirtualOrInterface(Thread* thread, - mirror::Object* this_object, +void Jit::InvokeVirtualOrInterface(mirror::Object* this_object, ArtMethod* caller, uint32_t dex_pc, ArtMethod* callee ATTRIBUTE_UNUSED) { - ScopedAssertNoThreadSuspension ants(thread, __FUNCTION__); + ScopedAssertNoThreadSuspension ants(__FUNCTION__); DCHECK(this_object != nullptr); ProfilingInfo* info = caller->GetProfilingInfo(kRuntimePointerSize); if (info != nullptr) { diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 417a185853..35656cd6f6 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -30,6 +30,7 @@ namespace art { class ArtMethod; +class ClassLinker; struct RuntimeArgumentMap; union JValue; @@ -108,8 +109,7 @@ class Jit { void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges) REQUIRES_SHARED(Locks::mutator_lock_); - void InvokeVirtualOrInterface(Thread* thread, - mirror::Object* this_object, + void InvokeVirtualOrInterface(mirror::Object* this_object, ArtMethod* caller, uint32_t dex_pc, ArtMethod* callee) diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index a434442d93..d9cb1c66a4 100644 --- 
a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -375,7 +375,7 @@ class JNI { CHECK_NON_NULL_ARGUMENT(mid); ScopedObjectAccess soa(env); ArtMethod* m = soa.DecodeMethod(mid); - mirror::AbstractMethod* method; + mirror::Executable* method; DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); DCHECK(!Runtime::Current()->IsActiveTransaction()); if (m->IsConstructor()) { diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h deleted file mode 100644 index 9c2061387e..0000000000 --- a/runtime/mirror/abstract_method.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (C) 2015 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_ -#define ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_ - -#include "executable.h" -#include "gc_root.h" -#include "object.h" -#include "object_callbacks.h" -#include "read_barrier_option.h" - -namespace art { - -struct AbstractMethodOffsets; -class ArtMethod; - -namespace mirror { - -// C++ mirror of java.lang.reflect.AbstractMethod. -class MANAGED AbstractMethod : public Executable { - public: - // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod. 
- template <PointerSize kPointerSize, bool kTransactionActive> - bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) - REQUIRES(!Roles::uninterruptible_); - - ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_); - // Only used by the image writer. - template <bool kTransactionActive = false> - void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); - mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_); - - private: - static MemberOffset ArtMethodOffset() { - return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, art_method_)); - } - static MemberOffset DeclaringClassOffset() { - return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_)); - } - static MemberOffset DeclaringClassOfOverriddenMethodOffset() { - return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, declaring_class_of_overridden_method_)); - } - static MemberOffset AccessFlagsOffset() { - return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, access_flags_)); - } - static MemberOffset DexMethodIndexOffset() { - return MemberOffset(OFFSETOF_MEMBER(AbstractMethod, dex_method_index_)); - } - - HeapReference<mirror::Class> declaring_class_; - HeapReference<mirror::Class> declaring_class_of_overridden_method_; - uint64_t art_method_; - uint32_t access_flags_; - uint32_t dex_method_index_; - - friend struct art::AbstractMethodOffsets; // for verifying offset information - DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod); -}; - -} // namespace mirror -} // namespace art - -#endif // ART_RUNTIME_MIRROR_ABSTRACT_METHOD_H_ diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index c979c28138..f21baed70f 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -23,6 +23,7 @@ #include "class-inl.h" #include "dex_cache.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "gc/accounting/card_table-inl.h" #include "handle_scope-inl.h" #include "method.h" @@ -784,7 +785,7 @@ ArtField* 
Class::FindStaticField(Thread* self, } // Though GetDirectInterface() should not cause thread suspension when called // from here, it takes a Handle as an argument, so we need to wrap `k`. - ScopedAssertNoThreadSuspension ants(self, __FUNCTION__); + ScopedAssertNoThreadSuspension ants(__FUNCTION__); StackHandleScope<1> hs(self); Handle<mirror::Class> h_k(hs.NewHandle(k)); // Is this field in any of this class' interfaces? @@ -1214,7 +1215,7 @@ int32_t Class::GetInnerClassFlags(Handle<Class> h_this, int32_t default_value) { return default_value; } uint32_t flags; - if (!h_this->GetDexFile().GetInnerClassFlags(h_this, &flags)) { + if (!annotations::GetInnerClassFlags(h_this, &flags)) { return default_value; } return flags; diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/executable.cc index b4dce583e1..33ebd817d1 100644 --- a/runtime/mirror/abstract_method.cc +++ b/runtime/mirror/executable.cc @@ -14,15 +14,14 @@ * limitations under the License. */ -#include "abstract_method.h" - #include "art_method-inl.h" +#include "executable.h" namespace art { namespace mirror { template <PointerSize kPointerSize, bool kTransactionActive> -bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) { +bool Executable::CreateFromArtMethod(ArtMethod* method) { auto* interface_method = method->GetInterfaceMethodIfProxy(kPointerSize); SetArtMethod<kTransactionActive>(method); SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass()); @@ -33,28 +32,28 @@ bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) { return true; } -template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, false>( +template bool Executable::CreateFromArtMethod<PointerSize::k32, false>( ArtMethod* method); -template bool AbstractMethod::CreateFromArtMethod<PointerSize::k32, true>( +template bool Executable::CreateFromArtMethod<PointerSize::k32, true>( ArtMethod* method); -template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, false>( 
+template bool Executable::CreateFromArtMethod<PointerSize::k64, false>( ArtMethod* method); -template bool AbstractMethod::CreateFromArtMethod<PointerSize::k64, true>( +template bool Executable::CreateFromArtMethod<PointerSize::k64, true>( ArtMethod* method); -ArtMethod* AbstractMethod::GetArtMethod() { +ArtMethod* Executable::GetArtMethod() { return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset())); } template <bool kTransactionActive> -void AbstractMethod::SetArtMethod(ArtMethod* method) { +void Executable::SetArtMethod(ArtMethod* method) { SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method)); } -template void AbstractMethod::SetArtMethod<false>(ArtMethod* method); -template void AbstractMethod::SetArtMethod<true>(ArtMethod* method); +template void Executable::SetArtMethod<false>(ArtMethod* method); +template void Executable::SetArtMethod<true>(ArtMethod* method); -mirror::Class* AbstractMethod::GetDeclaringClass() { +mirror::Class* Executable::GetDeclaringClass() { return GetFieldObject<mirror::Class>(DeclaringClassOffset()); } diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h index 232fce8693..6c465f6bbb 100644 --- a/runtime/mirror/executable.h +++ b/runtime/mirror/executable.h @@ -32,9 +32,42 @@ namespace mirror { // C++ mirror of java.lang.reflect.Executable. class MANAGED Executable : public AccessibleObject { + public: + // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod. + template <PointerSize kPointerSize, bool kTransactionActive> + bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Roles::uninterruptible_); + + ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_); + // Only used by the image writer. 
+ template <bool kTransactionActive = false> + void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); + mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_); + private: uint16_t has_real_parameter_data_; + HeapReference<mirror::Class> declaring_class_; + HeapReference<mirror::Class> declaring_class_of_overridden_method_; HeapReference<mirror::Array> parameters_; + uint64_t art_method_; + uint32_t access_flags_; + uint32_t dex_method_index_; + + static MemberOffset ArtMethodOffset() { + return MemberOffset(OFFSETOF_MEMBER(Executable, art_method_)); + } + static MemberOffset DeclaringClassOffset() { + return MemberOffset(OFFSETOF_MEMBER(Executable, declaring_class_)); + } + static MemberOffset DeclaringClassOfOverriddenMethodOffset() { + return MemberOffset(OFFSETOF_MEMBER(Executable, declaring_class_of_overridden_method_)); + } + static MemberOffset AccessFlagsOffset() { + return MemberOffset(OFFSETOF_MEMBER(Executable, access_flags_)); + } + static MemberOffset DexMethodIndexOffset() { + return MemberOffset(OFFSETOF_MEMBER(Executable, dex_method_index_)); + } friend struct art::ExecutableOffsets; // for verifying offset information DISALLOW_IMPLICIT_CONSTRUCTORS(Executable); diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc index ef16719f27..71bac7e3d6 100644 --- a/runtime/mirror/method.cc +++ b/runtime/mirror/method.cc @@ -56,7 +56,7 @@ Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) { DCHECK(!method->IsConstructor()) << PrettyMethod(method); auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self)); if (LIKELY(ret != nullptr)) { - static_cast<AbstractMethod*>(ret)-> + static_cast<Executable*>(ret)-> CreateFromArtMethod<kPointerSize, kTransactionActive>(method); } return ret; @@ -108,7 +108,7 @@ Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) { DCHECK(method->IsConstructor()) << PrettyMethod(method); auto* ret = 
down_cast<Constructor*>(StaticClass()->AllocObject(self)); if (LIKELY(ret != nullptr)) { - static_cast<AbstractMethod*>(ret)-> + static_cast<Executable*>(ret)-> CreateFromArtMethod<kPointerSize, kTransactionActive>(method); } return ret; diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h index 6881991736..205ea7a050 100644 --- a/runtime/mirror/method.h +++ b/runtime/mirror/method.h @@ -17,8 +17,8 @@ #ifndef ART_RUNTIME_MIRROR_METHOD_H_ #define ART_RUNTIME_MIRROR_METHOD_H_ -#include "abstract_method.h" #include "gc_root.h" +#include "executable.h" namespace art { namespace mirror { @@ -26,7 +26,7 @@ namespace mirror { class Class; // C++ mirror of java.lang.reflect.Method. -class MANAGED Method : public AbstractMethod { +class MANAGED Method : public Executable { public: template <PointerSize kPointerSize, bool kTransactionActive> static Method* CreateFromArtMethod(Thread* self, ArtMethod* method) @@ -58,7 +58,7 @@ class MANAGED Method : public AbstractMethod { }; // C++ mirror of java.lang.reflect.Constructor. 
-class MANAGED Constructor: public AbstractMethod { +class MANAGED Constructor: public Executable { public: template <PointerSize kPointerSize, bool kTransactionActive> static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method) diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h index 583cfc3c75..f4a358018e 100644 --- a/runtime/mirror/object_reference.h +++ b/runtime/mirror/object_reference.h @@ -55,7 +55,7 @@ class MANAGED ObjectReference { } protected: - ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr) + explicit ObjectReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) : reference_(Compress(mirror_ptr)) { } @@ -87,7 +87,7 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr return HeapReference<MirrorType>(mirror_ptr); } private: - HeapReference<MirrorType>(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) + explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) : ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {} }; @@ -104,7 +104,7 @@ class MANAGED CompressedReference : public mirror::ObjectReference<false, Mirror } private: - CompressedReference<MirrorType>(MirrorType* p) REQUIRES_SHARED(Locks::mutator_lock_) + explicit CompressedReference(MirrorType* p) REQUIRES_SHARED(Locks::mutator_lock_) : mirror::ObjectReference<false, MirrorType>(p) {} }; diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index b2349fc23c..384de34909 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -270,7 +270,8 @@ static jclass DexFile_defineClassNative(JNIEnv* env, const std::string descriptor(DotToDescriptor(class_name.c_str())); const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str())); for (auto& dex_file : dex_files) { - const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor.c_str(), 
hash); + const DexFile::ClassDef* dex_class_def = + OatDexFile::FindClassDef(*dex_file, descriptor.c_str(), hash); if (dex_class_def != nullptr) { ScopedObjectAccess soa(env); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc index 1761799242..94933bce32 100644 --- a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc +++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc @@ -23,6 +23,7 @@ #include "mem_map.h" #include "mirror/class_loader.h" #include "mirror/object-inl.h" +#include "oat_file.h" #include "scoped_thread_state_change.h" #include "ScopedUtfChars.h" @@ -140,7 +141,8 @@ static jclass InMemoryDexClassLoader_DexData_findClass( const char* class_descriptor = descriptor.c_str(); const size_t hash = ComputeModifiedUtf8Hash(class_descriptor); const DexFile* dex_file = CookieToDexFile(cookie); - const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(class_descriptor, hash); + const DexFile::ClassDef* dex_class_def = + OatDexFile::FindClassDef(*dex_file, class_descriptor, hash); if (dex_class_def != nullptr) { ScopedObjectAccess soa(env); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index d89a334f32..af9b68f9a7 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -23,6 +23,7 @@ #include "class_linker.h" #include "common_throws.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "jni_internal.h" #include "nth_caller_visitor.h" #include "mirror/class-inl.h" @@ -454,7 +455,7 @@ static jobject Class_getDeclaredAnnotation(JNIEnv* env, jobject javaThis, jclass } Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationClass))); return soa.AddLocalReference<jobject>( - 
klass->GetDexFile().GetAnnotationForClass(klass, annotation_class)); + annotations::GetAnnotationForClass(klass, annotation_class)); } static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis) { @@ -469,7 +470,7 @@ static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis) mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0); return soa.AddLocalReference<jobjectArray>(empty_array); } - return soa.AddLocalReference<jobjectArray>(klass->GetDexFile().GetAnnotationsForClass(klass)); + return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForClass(klass)); } static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) { @@ -478,7 +479,7 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) { Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis))); mirror::ObjectArray<mirror::Class>* classes = nullptr; if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) { - classes = klass->GetDexFile().GetDeclaredClasses(klass); + classes = annotations::GetDeclaredClasses(klass); } if (classes == nullptr) { // Return an empty array instead of a null pointer. 
@@ -506,7 +507,7 @@ static jclass Class_getEnclosingClass(JNIEnv* env, jobject javaThis) { if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } - return soa.AddLocalReference<jclass>(klass->GetDexFile().GetEnclosingClass(klass)); + return soa.AddLocalReference<jclass>(annotations::GetEnclosingClass(klass)); } static jobject Class_getEnclosingConstructorNative(JNIEnv* env, jobject javaThis) { @@ -516,7 +517,7 @@ static jobject Class_getEnclosingConstructorNative(JNIEnv* env, jobject javaThis if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } - mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass); + mirror::Object* method = annotations::GetEnclosingMethod(klass); if (method != nullptr) { if (method->GetClass() == soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Constructor)) { @@ -533,7 +534,7 @@ static jobject Class_getEnclosingMethodNative(JNIEnv* env, jobject javaThis) { if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) { return nullptr; } - mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass); + mirror::Object* method = annotations::GetEnclosingMethod(klass); if (method != nullptr) { if (method->GetClass() == soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Method)) { @@ -558,7 +559,7 @@ static jstring Class_getInnerClassName(JNIEnv* env, jobject javaThis) { return nullptr; } mirror::String* class_name = nullptr; - if (!klass->GetDexFile().GetInnerClass(klass, &class_name)) { + if (!annotations::GetInnerClass(klass, &class_name)) { return nullptr; } return soa.AddLocalReference<jstring>(class_name); @@ -572,7 +573,7 @@ static jobjectArray Class_getSignatureAnnotation(JNIEnv* env, jobject javaThis) return nullptr; } return soa.AddLocalReference<jobjectArray>( - klass->GetDexFile().GetSignatureAnnotationForClass(klass)); + annotations::GetSignatureAnnotationForClass(klass)); } static jboolean Class_isAnonymousClass(JNIEnv* env, 
jobject javaThis) { @@ -583,7 +584,7 @@ static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) { return false; } mirror::String* class_name = nullptr; - if (!klass->GetDexFile().GetInnerClass(klass, &class_name)) { + if (!annotations::GetInnerClass(klass, &class_name)) { return false; } return class_name == nullptr; @@ -598,7 +599,7 @@ static jboolean Class_isDeclaredAnnotationPresent(JNIEnv* env, jobject javaThis, return false; } Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); - return klass->GetDexFile().IsClassAnnotationPresent(klass, annotation_class); + return annotations::IsClassAnnotationPresent(klass, annotation_class); } static jclass Class_getDeclaringClass(JNIEnv* env, jobject javaThis) { @@ -612,7 +613,7 @@ static jclass Class_getDeclaringClass(JNIEnv* env, jobject javaThis) { if (Class_isAnonymousClass(env, javaThis)) { return nullptr; } - return soa.AddLocalReference<jclass>(klass->GetDexFile().GetDeclaringClass(klass)); + return soa.AddLocalReference<jclass>(annotations::GetDeclaringClass(klass)); } static jobject Class_newInstance(JNIEnv* env, jobject javaThis) { @@ -668,8 +669,7 @@ static jobject Class_newInstance(JNIEnv* env, jobject javaThis) { caller.Assign(GetCallingClass(soa.Self(), 1)); } if (UNLIKELY(caller.Get() != nullptr && !VerifyAccess( - soa.Self(), receiver.Get(), declaring_class, constructor->GetAccessFlags(), - caller.Get()))) { + receiver.Get(), declaring_class, constructor->GetAccessFlags(), caller.Get()))) { soa.Self()->ThrowNewExceptionF( "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s", PrettyMethod(constructor).c_str(), PrettyClass(caller.Get()).c_str()); diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc index f699d6bd75..d001d0c0da 100644 --- a/runtime/native/java_lang_reflect_Constructor.cc +++ b/runtime/native/java_lang_reflect_Constructor.cc @@ -20,6 +20,7 @@ #include 
"base/enums.h" #include "class_linker.h" #include "class_linker-inl.h" +#include "dex_file_annotations.h" #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/method.h" @@ -35,7 +36,7 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod) ->GetInterfaceMethodIfProxy(kRuntimePointerSize); mirror::ObjectArray<mirror::Class>* result_array = - method->GetDexFile()->GetExceptionTypesForMethod(method); + annotations::GetExceptionTypesForMethod(method); if (result_array == nullptr) { // Return an empty array instead of a null pointer. mirror::Class* class_class = mirror::Class::GetJavaLangClass(); diff --git a/runtime/native/java_lang_reflect_AbstractMethod.cc b/runtime/native/java_lang_reflect_Executable.cc index 33e0daef1f..8fcf6aca08 100644 --- a/runtime/native/java_lang_reflect_AbstractMethod.cc +++ b/runtime/native/java_lang_reflect_Executable.cc @@ -14,9 +14,10 @@ * limitations under the License. 
*/ -#include "java_lang_reflect_AbstractMethod.h" +#include "java_lang_reflect_Executable.h" #include "art_method-inl.h" +#include "dex_file_annotations.h" #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" @@ -27,7 +28,7 @@ namespace art { -static jobjectArray AbstractMethod_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) { +static jobjectArray Executable_getDeclaredAnnotationsNative(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); if (method->GetDeclaringClass()->IsProxyClass()) { @@ -38,10 +39,10 @@ static jobjectArray AbstractMethod_getDeclaredAnnotations(JNIEnv* env, jobject j mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0); return soa.AddLocalReference<jobjectArray>(empty_array); } - return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetAnnotationsForMethod(method)); + return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForMethod(method)); } -static jobject AbstractMethod_getAnnotationNative(JNIEnv* env, +static jobject Executable_getAnnotationNative(JNIEnv* env, jobject javaMethod, jclass annotationType) { ScopedFastNativeObjectAccess soa(env); @@ -51,35 +52,32 @@ static jobject AbstractMethod_getAnnotationNative(JNIEnv* env, return nullptr; } else { Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); - return soa.AddLocalReference<jobject>( - method->GetDexFile()->GetAnnotationForMethod(method, klass)); + return soa.AddLocalReference<jobject>(annotations::GetAnnotationForMethod(method, klass)); } } -static jobjectArray AbstractMethod_getSignatureAnnotation(JNIEnv* env, jobject javaMethod) { +static jobjectArray Executable_getSignatureAnnotation(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); if 
(method->GetDeclaringClass()->IsProxyClass()) { return nullptr; } StackHandleScope<1> hs(soa.Self()); - return soa.AddLocalReference<jobjectArray>( - method->GetDexFile()->GetSignatureAnnotationForMethod(method)); + return soa.AddLocalReference<jobjectArray>(annotations::GetSignatureAnnotationForMethod(method)); } -static jobjectArray AbstractMethod_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) { +static jobjectArray Executable_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); if (method->IsProxyMethod()) { return nullptr; } else { - return soa.AddLocalReference<jobjectArray>( - method->GetDexFile()->GetParameterAnnotations(method)); + return soa.AddLocalReference<jobjectArray>(annotations::GetParameterAnnotations(method)); } } -static jboolean AbstractMethod_isAnnotationPresentNative(JNIEnv* env, +static jboolean Executable_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod, jclass annotationType) { ScopedFastNativeObjectAccess soa(env); @@ -89,21 +87,21 @@ static jboolean AbstractMethod_isAnnotationPresentNative(JNIEnv* env, } StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); - return method->GetDexFile()->IsMethodAnnotationPresent(method, klass); + return annotations::IsMethodAnnotationPresent(method, klass); } static JNINativeMethod gMethods[] = { - NATIVE_METHOD(AbstractMethod, getAnnotationNative, + NATIVE_METHOD(Executable, getAnnotationNative, "!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"), - NATIVE_METHOD(AbstractMethod, getDeclaredAnnotations, "!()[Ljava/lang/annotation/Annotation;"), - NATIVE_METHOD(AbstractMethod, getParameterAnnotationsNative, + NATIVE_METHOD(Executable, getDeclaredAnnotationsNative, "!()[Ljava/lang/annotation/Annotation;"), + NATIVE_METHOD(Executable, getParameterAnnotationsNative, 
"!()[[Ljava/lang/annotation/Annotation;"), - NATIVE_METHOD(AbstractMethod, getSignatureAnnotation, "!()[Ljava/lang/String;"), - NATIVE_METHOD(AbstractMethod, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"), + NATIVE_METHOD(Executable, getSignatureAnnotation, "!()[Ljava/lang/String;"), + NATIVE_METHOD(Executable, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"), }; -void register_java_lang_reflect_AbstractMethod(JNIEnv* env) { - REGISTER_NATIVE_METHODS("java/lang/reflect/AbstractMethod"); +void register_java_lang_reflect_Executable(JNIEnv* env) { + REGISTER_NATIVE_METHODS("java/lang/reflect/Executable"); } } // namespace art diff --git a/runtime/native/java_lang_reflect_AbstractMethod.h b/runtime/native/java_lang_reflect_Executable.h index 222e5a05d0..0cfed62e49 100644 --- a/runtime/native/java_lang_reflect_AbstractMethod.h +++ b/runtime/native/java_lang_reflect_Executable.h @@ -14,15 +14,15 @@ * limitations under the License. */ -#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_ -#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_ +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_EXECUTABLE_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_EXECUTABLE_H_ #include <jni.h> namespace art { -void register_java_lang_reflect_AbstractMethod(JNIEnv* env); +void register_java_lang_reflect_Executable(JNIEnv* env); } // namespace art -#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ABSTRACTMETHOD_H_ +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_EXECUTABLE_H_ diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index 5a4ced2fed..412445f0d0 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -20,6 +20,7 @@ #include "class_linker-inl.h" #include "common_throws.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/field.h" @@ -423,7 +424,7 @@ static jobject 
Field_getAnnotationNative(JNIEnv* env, jobject javaField, jclass return nullptr; } Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); - return soa.AddLocalReference<jobject>(field->GetDexFile()->GetAnnotationForField(field, klass)); + return soa.AddLocalReference<jobject>(annotations::GetAnnotationForField(field, klass)); } static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) { @@ -437,7 +438,7 @@ static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, 0); return soa.AddLocalReference<jobjectArray>(empty_array); } - return soa.AddLocalReference<jobjectArray>(field->GetDexFile()->GetAnnotationsForField(field)); + return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForField(field)); } static jobjectArray Field_getSignatureAnnotation(JNIEnv* env, jobject javaField) { @@ -446,8 +447,7 @@ static jobjectArray Field_getSignatureAnnotation(JNIEnv* env, jobject javaField) if (field->GetDeclaringClass()->IsProxyClass()) { return nullptr; } - return soa.AddLocalReference<jobjectArray>( - field->GetDexFile()->GetSignatureAnnotationForField(field)); + return soa.AddLocalReference<jobjectArray>(annotations::GetSignatureAnnotationForField(field)); } static jboolean Field_isAnnotationPresentNative(JNIEnv* env, jobject javaField, @@ -459,7 +459,7 @@ static jboolean Field_isAnnotationPresentNative(JNIEnv* env, jobject javaField, return false; } Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); - return field->GetDexFile()->IsFieldAnnotationPresent(field, klass); + return annotations::IsFieldAnnotationPresent(field, klass); } static JNINativeMethod gMethods[] = { diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index 3360f4170e..b8efb14903 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ 
b/runtime/native/java_lang_reflect_Method.cc @@ -20,6 +20,7 @@ #include "base/enums.h" #include "class_linker.h" #include "class_linker-inl.h" +#include "dex_file_annotations.h" #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" @@ -36,7 +37,7 @@ static jobject Method_getDefaultValue(JNIEnv* env, jobject javaMethod) { if (!method->GetDeclaringClass()->IsAnnotation()) { return nullptr; } - return soa.AddLocalReference<jobject>(method->GetDexFile()->GetAnnotationDefaultValue(method)); + return soa.AddLocalReference<jobject>(annotations::GetAnnotationDefaultValue(method)); } static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { @@ -58,7 +59,7 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self())); } else { mirror::ObjectArray<mirror::Class>* result_array = - method->GetDexFile()->GetExceptionTypesForMethod(method); + annotations::GetExceptionTypesForMethod(method); if (result_array == nullptr) { // Return an empty array instead of a null pointer mirror::Class* class_class = mirror::Class::GetJavaLangClass(); diff --git a/runtime/native/java_lang_reflect_Parameter.cc b/runtime/native/java_lang_reflect_Parameter.cc index 8fe3bb590e..c2a803c3cd 100644 --- a/runtime/native/java_lang_reflect_Parameter.cc +++ b/runtime/native/java_lang_reflect_Parameter.cc @@ -19,6 +19,7 @@ #include "art_method-inl.h" #include "common_throws.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "jni_internal.h" #include "scoped_fast_native_object_access.h" #include "utils.h" @@ -54,7 +55,7 @@ static jobject Parameter_getAnnotationNative(JNIEnv* env, StackHandleScope<1> hs(soa.Self()); Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType))); return soa.AddLocalReference<jobject>( - method->GetDexFile()->GetAnnotationForMethodParameter(method, parameterIndex, klass)); + 
annotations::GetAnnotationForMethodParameter(method, parameterIndex, klass)); } static JNINativeMethod gMethods[] = { diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h index 02081cbb60..9c777cc277 100644 --- a/runtime/noop_compiler_callbacks.h +++ b/runtime/noop_compiler_callbacks.h @@ -36,6 +36,8 @@ class NoopCompilerCallbacks FINAL : public CompilerCallbacks { // to disable the relocation since both deal with writing out the images directly. bool IsRelocationPossible() OVERRIDE { return false; } + verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return nullptr; } + private: DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks); }; diff --git a/runtime/oat.h b/runtime/oat.h index 35d0c92e84..12a8298771 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -171,7 +171,7 @@ std::ostream& operator<<(std::ostream& os, const OatClassType& rhs); class PACKED(4) OatMethodOffsets { public: - OatMethodOffsets(uint32_t code_offset = 0); + explicit OatMethodOffsets(uint32_t code_offset = 0); ~OatMethodOffsets(); diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 76b71a3271..ea692cdaae 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -49,6 +49,7 @@ #include "os.h" #include "runtime.h" #include "type_lookup_table.h" +#include "utf-inl.h" #include "utils.h" #include "utils/dex_cache_arrays_layout-inl.h" @@ -1204,7 +1205,21 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file, dex_file_pointer_(dex_file_pointer), lookup_table_data_(lookup_table_data), oat_class_offsets_pointer_(oat_class_offsets_pointer), - dex_cache_arrays_(dex_cache_arrays) {} + dex_cache_arrays_(dex_cache_arrays) { + // Initialize TypeLookupTable. + if (lookup_table_data_ != nullptr) { + // Peek the number of classes from the DexFile. 
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer_); + const uint32_t num_class_defs = dex_header->class_defs_size_; + if (lookup_table_data_ + TypeLookupTable::RawDataLength(num_class_defs) > GetOatFile()->End()) { + LOG(WARNING) << "found truncated lookup table in " << dex_file_location_; + } else { + lookup_table_.reset(TypeLookupTable::Open(dex_file_pointer_, + lookup_table_data_, + num_class_defs)); + } + } +} OatFile::OatDexFile::~OatDexFile() {} @@ -1273,6 +1288,28 @@ OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) con reinterpret_cast<const OatMethodOffsets*>(methods_pointer)); } +const DexFile::ClassDef* OatFile::OatDexFile::FindClassDef(const DexFile& dex_file, + const char* descriptor, + size_t hash) { + const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile(); + DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash); + if (LIKELY((oat_dex_file != nullptr) && (oat_dex_file->GetTypeLookupTable() != nullptr))) { + const uint32_t class_def_idx = oat_dex_file->GetTypeLookupTable()->Lookup(descriptor, hash); + return (class_def_idx != DexFile::kDexNoIndex) ? &dex_file.GetClassDef(class_def_idx) : nullptr; + } + // Fast path for rare no class defs case. 
+ const uint32_t num_class_defs = dex_file.NumClassDefs(); + if (num_class_defs == 0) { + return nullptr; + } + const DexFile::TypeId* type_id = dex_file.FindTypeId(descriptor); + if (type_id != nullptr) { + uint16_t type_idx = dex_file.GetIndexForTypeId(*type_id); + return dex_file.FindClassDef(type_idx); + } + return nullptr; +} + OatFile::OatClass::OatClass(const OatFile* oat_file, mirror::Class::Status status, OatClassType type, diff --git a/runtime/oat_file.h b/runtime/oat_file.h index a48791ee73..a61b941862 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -29,6 +29,8 @@ #include "mirror/class.h" #include "oat.h" #include "os.h" +#include "type_lookup_table.h" +#include "utf.h" #include "utils.h" #include "vdex_file.h" @@ -404,6 +406,16 @@ class OatDexFile FINAL { return dex_file_pointer_; } + // Looks up a class definition by its class descriptor. Hash must be + // ComputeModifiedUtf8Hash(descriptor). + static const DexFile::ClassDef* FindClassDef(const DexFile& dex_file, + const char* descriptor, + size_t hash); + + TypeLookupTable* GetTypeLookupTable() const { + return lookup_table_.get(); + } + ~OatDexFile(); private: @@ -424,6 +436,7 @@ class OatDexFile FINAL { const uint8_t* lookup_table_data_; const uint32_t* const oat_class_offsets_pointer_; uint8_t* const dex_cache_arrays_; + mutable std::unique_ptr<TypeLookupTable> lookup_table_; friend class OatFile; friend class OatFileBase; diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h index abddc6d7a0..ee5002f84a 100644 --- a/runtime/oat_quick_method_header.h +++ b/runtime/oat_quick_method_header.h @@ -30,11 +30,11 @@ class ArtMethod; // OatQuickMethodHeader precedes the raw code chunk generated by the compiler. 
class PACKED(4) OatQuickMethodHeader { public: - OatQuickMethodHeader(uint32_t vmap_table_offset = 0U, - uint32_t frame_size_in_bytes = 0U, - uint32_t core_spill_mask = 0U, - uint32_t fp_spill_mask = 0U, - uint32_t code_size = 0U); + explicit OatQuickMethodHeader(uint32_t vmap_table_offset = 0U, + uint32_t frame_size_in_bytes = 0U, + uint32_t core_spill_mask = 0U, + uint32_t fp_spill_mask = 0U, + uint32_t code_size = 0U); ~OatQuickMethodHeader(); diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h index 356e290843..2df2ced7f4 100644 --- a/runtime/quick/inline_method_analyser.h +++ b/runtime/quick/inline_method_analyser.h @@ -33,6 +33,7 @@ namespace art { namespace verifier { class MethodVerifier; } // namespace verifier +class ArtMethod; enum InlineMethodOpcode : uint16_t { kIntrinsicDoubleCvt, diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 55aba2befa..b3f29c28e4 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -160,8 +160,8 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) { LOG(INFO) << "Handler is upcall"; } if (handler_method_ != nullptr) { - const DexFile& dex_file = *handler_method_->GetDeclaringClass()->GetDexCache()->GetDexFile(); - int line_number = dex_file.GetLineNumFromPC(handler_method_, handler_dex_pc_); + const DexFile* dex_file = handler_method_->GetDeclaringClass()->GetDexCache()->GetDexFile(); + int line_number = annotations::GetLineNumFromPC(dex_file, handler_method_, handler_dex_pc_); LOG(INFO) << "Handler: " << PrettyMethod(handler_method_) << " (line: " << line_number << ")"; } } diff --git a/runtime/reflection.cc b/runtime/reflection.cc index f2af3da6e4..c69e98c8ee 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -24,8 +24,8 @@ #include "dex_file-inl.h" #include "indirect_reference_table-inl.h" #include "jni_internal.h" -#include "mirror/abstract_method.h" #include 
"mirror/class-inl.h" +#include "mirror/executable.h" #include "mirror/object_array-inl.h" #include "nth_caller_visitor.h" #include "scoped_thread_state_change.h" @@ -578,9 +578,9 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM return nullptr; } - auto* abstract_method = soa.Decode<mirror::AbstractMethod*>(javaMethod); - const bool accessible = abstract_method->IsAccessible(); - ArtMethod* m = abstract_method->GetArtMethod(); + auto* executable = soa.Decode<mirror::Executable*>(javaMethod); + const bool accessible = executable->IsAccessible(); + ArtMethod* m = executable->GetArtMethod(); mirror::Class* declaring_class = m->GetDeclaringClass(); if (UNLIKELY(!declaring_class->IsInitialized())) { @@ -625,8 +625,12 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM // If method is not set to be accessible, verify it can be accessed by the caller. mirror::Class* calling_class = nullptr; - if (!accessible && !VerifyAccess(soa.Self(), receiver, declaring_class, m->GetAccessFlags(), - &calling_class, num_frames)) { + if (!accessible && !VerifyAccess(soa.Self(), + receiver, + declaring_class, + m->GetAccessFlags(), + &calling_class, + num_frames)) { ThrowIllegalAccessException( StringPrintf("Class %s cannot access %s method %s of class %s", calling_class == nullptr ? 
"null" : PrettyClass(calling_class).c_str(), @@ -857,15 +861,17 @@ bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_cl return false; } *calling_class = klass; - return VerifyAccess(self, obj, declaring_class, access_flags, klass); + return VerifyAccess(obj, declaring_class, access_flags, klass); } -bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class, - uint32_t access_flags, mirror::Class* calling_class) { +bool VerifyAccess(mirror::Object* obj, + mirror::Class* declaring_class, + uint32_t access_flags, + mirror::Class* calling_class) { if (calling_class == declaring_class) { return true; } - ScopedAssertNoThreadSuspension sants(self, "verify-access"); + ScopedAssertNoThreadSuspension sants("verify-access"); if ((access_flags & kAccPrivate) != 0) { return false; } diff --git a/runtime/reflection.h b/runtime/reflection.h index 579c6b1c7d..208b533917 100644 --- a/runtime/reflection.h +++ b/runtime/reflection.h @@ -74,8 +74,10 @@ bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_cl REQUIRES_SHARED(Locks::mutator_lock_); // This version takes a known calling class. -bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class, - uint32_t access_flags, mirror::Class* calling_class) +bool VerifyAccess(mirror::Object* obj, + mirror::Class* declaring_class, + uint32_t access_flags, + mirror::Class* calling_class) REQUIRES_SHARED(Locks::mutator_lock_); // Get the calling class by using a stack visitor, may return null for unattached native threads. 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 97911d4e20..15e3b1cfc7 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -108,9 +108,9 @@ #include "native/java_lang_VMClassLoader.h" #include "native/java_lang_ref_FinalizerReference.h" #include "native/java_lang_ref_Reference.h" -#include "native/java_lang_reflect_AbstractMethod.h" #include "native/java_lang_reflect_Array.h" #include "native/java_lang_reflect_Constructor.h" +#include "native/java_lang_reflect_Executable.h" #include "native/java_lang_reflect_Field.h" #include "native/java_lang_reflect_Method.h" #include "native/java_lang_reflect_Parameter.h" @@ -639,11 +639,7 @@ bool Runtime::Start() { system_class_loader_ = CreateSystemClassLoader(this); - if (is_zygote_) { - if (!InitZygote()) { - return false; - } - } else { + if (!is_zygote_) { if (is_native_bridge_loaded_) { PreInitializeNativeBridge("."); } @@ -688,45 +684,6 @@ void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) { } } -// Do zygote-mode-only initialization. -bool Runtime::InitZygote() { -#ifdef __linux__ - // zygote goes into its own process group - setpgid(0, 0); - - // See storage config details at http://source.android.com/tech/storage/ - // Create private mount namespace shared by all children - if (unshare(CLONE_NEWNS) == -1) { - PLOG(ERROR) << "Failed to unshare()"; - return false; - } - - // Mark rootfs as being a slave so that changes from default - // namespace only flow into our children. - if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) { - PLOG(ERROR) << "Failed to mount() rootfs as MS_SLAVE"; - return false; - } - - // Create a staging tmpfs that is shared by our children; they will - // bind mount storage into their respective private namespaces, which - // are isolated from each other. 
- const char* target_base = getenv("EMULATED_STORAGE_TARGET"); - if (target_base != nullptr) { - if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV, - "uid=0,gid=1028,mode=0751") == -1) { - PLOG(ERROR) << "Failed to mount tmpfs to " << target_base; - return false; - } - } - - return true; -#else - UNIMPLEMENTED(FATAL); - return false; -#endif -} - void Runtime::InitNonZygoteOrPostFork( JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) { is_zygote_ = false; @@ -1421,9 +1378,9 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) { register_java_lang_DexCache(env); register_java_lang_Object(env); register_java_lang_ref_FinalizerReference(env); - register_java_lang_reflect_AbstractMethod(env); register_java_lang_reflect_Array(env); register_java_lang_reflect_Constructor(env); + register_java_lang_reflect_Executable(env); register_java_lang_reflect_Field(env); register_java_lang_reflect_Method(env); register_java_lang_reflect_Parameter(env); diff --git a/runtime/runtime.h b/runtime/runtime.h index 58068ebbf1..9e63564d58 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -455,7 +455,6 @@ class Runtime { bool UseJitCompilation() const; void PreZygoteFork(); - bool InitZygote(); void InitNonZygoteOrPostFork( JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa); diff --git a/runtime/thread.cc b/runtime/thread.cc index 43ef1cb22e..89403545fd 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -40,6 +40,7 @@ #include "class_linker-inl.h" #include "debugger.h" #include "dex_file-inl.h" +#include "dex_file_annotations.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "gc/accounting/card_table-inl.h" @@ -1388,8 +1389,8 @@ struct StackDumpVisitor : public StackVisitor { mirror::DexCache* dex_cache = c->GetDexCache(); int line_number = -1; if (dex_cache != nullptr) { // be tolerant of bad input - const DexFile& dex_file = 
*dex_cache->GetDexFile(); - line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false)); + const DexFile* dex_file = dex_cache->GetDexFile(); + line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false)); } if (line_number == last_line_number && last_method == m) { ++repetition_count; @@ -2548,7 +2549,6 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { QUICK_ENTRY_POINT_INFO(pDeliverException) QUICK_ENTRY_POINT_INFO(pThrowArrayBounds) QUICK_ENTRY_POINT_INFO(pThrowDivZero) - QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod) QUICK_ENTRY_POINT_INFO(pThrowNullPointer) QUICK_ENTRY_POINT_INFO(pThrowStackOverflow) QUICK_ENTRY_POINT_INFO(pDeoptimize) diff --git a/runtime/thread.h b/runtime/thread.h index d248123db5..016c2bc7ea 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -1363,12 +1363,12 @@ class Thread { instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr), stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr), frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0), - last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr), - thread_local_objects(0), thread_local_pos(nullptr), thread_local_end(nullptr), - mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr), - thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr), - nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr), - thread_local_mark_stack(nullptr) { + last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr), + thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr), + thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), + mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr), + thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr), + flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr) { 
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr); } @@ -1480,11 +1480,11 @@ class Thread { // Thread-local allocation pointer. uint8_t* thread_local_start; - size_t thread_local_objects; // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for // potentially better performance. uint8_t* thread_local_pos; uint8_t* thread_local_end; + size_t thread_local_objects; // Mterp jump table bases. void* mterp_current_ibase; @@ -1546,19 +1546,25 @@ class Thread { class SCOPED_CAPABILITY ScopedAssertNoThreadSuspension { public: - ScopedAssertNoThreadSuspension(Thread* self, const char* cause) ACQUIRE(Roles::uninterruptible_) - : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) { - } - ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) { - self_->EndAssertNoThreadSuspension(old_cause_); + ALWAYS_INLINE ScopedAssertNoThreadSuspension(const char* cause) ACQUIRE(Roles::uninterruptible_) { + if (kIsDebugBuild) { + self_ = Thread::Current(); + old_cause_ = self_->StartAssertNoThreadSuspension(cause); + } else { + Roles::uninterruptible_.Acquire(); // No-op. + } } - Thread* Self() { - return self_; + ALWAYS_INLINE ~ScopedAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) { + if (kIsDebugBuild) { + self_->EndAssertNoThreadSuspension(old_cause_); + } else { + Roles::uninterruptible_.Release(); // No-op. 
+ } } private: - Thread* const self_; - const char* const old_cause_; + Thread* self_; + const char* old_cause_; }; class ScopedStackedShadowFramePusher { diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index ab1f198646..5e6c8a40f1 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -284,7 +284,7 @@ static void ThreadSuspendSleep(useconds_t delay_us) { } } -size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) { +size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) { Thread* self = Thread::Current(); Locks::mutator_lock_->AssertNotExclusiveHeld(self); Locks::thread_list_lock_->AssertNotHeld(self); @@ -318,6 +318,10 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) { } } } + // Run the callback to be called inside this critical section. + if (callback != nullptr) { + callback->Run(self); + } } // Run the checkpoint on ourself while we wait for threads to suspend. diff --git a/runtime/thread_list.h b/runtime/thread_list.h index 5880085576..b455e31e4c 100644 --- a/runtime/thread_list.h +++ b/runtime/thread_list.h @@ -94,8 +94,10 @@ class ThreadList { // Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside // of the suspend check. Returns how many checkpoints that are expected to run, including for - // already suspended threads for b/24191051. - size_t RunCheckpoint(Closure* checkpoint_function) + // already suspended threads for b/24191051. Run the callback, if non-null, inside the + // thread_list_lock critical section after determining the runnable/suspended states of the + // threads. 
+ size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr) REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function) @@ -209,7 +211,7 @@ class ThreadList { // Helper for suspending all threads and class ScopedSuspendAll : public ValueObject { public: - ScopedSuspendAll(const char* cause, bool long_suspend = false) + explicit ScopedSuspendAll(const char* cause, bool long_suspend = false) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, diff --git a/runtime/type_lookup_table.cc b/runtime/type_lookup_table.cc index fc9faec5c1..56e9262573 100644 --- a/runtime/type_lookup_table.cc +++ b/runtime/type_lookup_table.cc @@ -38,14 +38,6 @@ TypeLookupTable::~TypeLookupTable() { } } -uint32_t TypeLookupTable::RawDataLength() const { - return RawDataLength(dex_file_); -} - -uint32_t TypeLookupTable::RawDataLength(const DexFile& dex_file) { - return RawDataLength(dex_file.NumClassDefs()); -} - uint32_t TypeLookupTable::RawDataLength(uint32_t num_class_defs) { return SupportedSize(num_class_defs) ? RoundUpToPowerOfTwo(num_class_defs) * sizeof(Entry) : 0u; } @@ -65,12 +57,15 @@ TypeLookupTable* TypeLookupTable::Create(const DexFile& dex_file, uint8_t* stora : nullptr; } -TypeLookupTable* TypeLookupTable::Open(const uint8_t* raw_data, const DexFile& dex_file) { - return new TypeLookupTable(raw_data, dex_file); +TypeLookupTable* TypeLookupTable::Open(const uint8_t* dex_file_pointer, + const uint8_t* raw_data, + uint32_t num_class_defs) { + return new TypeLookupTable(dex_file_pointer, raw_data, num_class_defs); } TypeLookupTable::TypeLookupTable(const DexFile& dex_file, uint8_t* storage) - : dex_file_(dex_file), + : dex_file_begin_(dex_file.Begin()), + raw_data_length_(RawDataLength(dex_file.NumClassDefs())), mask_(CalculateMask(dex_file.NumClassDefs())), entries_(storage != nullptr ? 
reinterpret_cast<Entry*>(storage) : new Entry[mask_ + 1]), owns_entries_(storage == nullptr) { @@ -106,9 +101,12 @@ TypeLookupTable::TypeLookupTable(const DexFile& dex_file, uint8_t* storage) } } -TypeLookupTable::TypeLookupTable(const uint8_t* raw_data, const DexFile& dex_file) - : dex_file_(dex_file), - mask_(CalculateMask(dex_file.NumClassDefs())), +TypeLookupTable::TypeLookupTable(const uint8_t* dex_file_pointer, + const uint8_t* raw_data, + uint32_t num_class_defs) + : dex_file_begin_(dex_file_pointer), + raw_data_length_(RawDataLength(num_class_defs)), + mask_(CalculateMask(num_class_defs)), entries_(reinterpret_cast<Entry*>(const_cast<uint8_t*>(raw_data))), owns_entries_(false) {} diff --git a/runtime/type_lookup_table.h b/runtime/type_lookup_table.h index d74d01de4d..9595743e15 100644 --- a/runtime/type_lookup_table.h +++ b/runtime/type_lookup_table.h @@ -62,8 +62,11 @@ class TypeLookupTable { // Method creates lookup table for dex file static TypeLookupTable* Create(const DexFile& dex_file, uint8_t* storage = nullptr); - // Method opens lookup table from binary data. Lookup table does not owns binary data. - static TypeLookupTable* Open(const uint8_t* raw_data, const DexFile& dex_file); + // Method opens lookup table from binary data. Lookups will traverse strings and other + // data contained in dex_file as well. Lookup table does not own raw_data or dex_file. + static TypeLookupTable* Open(const uint8_t* dex_file_pointer, + const uint8_t* raw_data, + uint32_t num_class_defs); // Method returns pointer to binary data of lookup table. Used by the oat writer. const uint8_t* RawData() const { @@ -71,10 +74,7 @@ class TypeLookupTable { } // Method returns length of binary data. Used by the oat writer. - uint32_t RawDataLength() const; - - // Method returns length of binary data for the specified dex file. 
- static uint32_t RawDataLength(const DexFile& dex_file); + uint32_t RawDataLength() const { return raw_data_length_; } // Method returns length of binary data for the specified number of class definitions. static uint32_t RawDataLength(uint32_t num_class_defs); @@ -119,10 +119,13 @@ class TypeLookupTable { explicit TypeLookupTable(const DexFile& dex_file, uint8_t* storage); // Construct from a dex file with existing data. - TypeLookupTable(const uint8_t* raw_data, const DexFile& dex_file); + TypeLookupTable(const uint8_t* dex_file_pointer, + const uint8_t* raw_data, + uint32_t num_class_defs); bool IsStringsEquals(const char* str, uint32_t str_offset) const { - const uint8_t* ptr = dex_file_.Begin() + str_offset; + const uint8_t* ptr = dex_file_begin_ + str_offset; + CHECK(dex_file_begin_ != nullptr); // Skip string length. DecodeUnsignedLeb128(&ptr); return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues( @@ -154,7 +157,8 @@ class TypeLookupTable { // Find the last entry in a chain. uint32_t FindLastEntryInBucket(uint32_t cur_pos) const; - const DexFile& dex_file_; + const uint8_t* dex_file_begin_; + const uint32_t raw_data_length_; const uint32_t mask_; std::unique_ptr<Entry[]> entries_; // owns_entries_ specifies if the lookup table owns the entries_ array. 
diff --git a/runtime/utils.cc b/runtime/utils.cc index d48edcfa55..6f10aaacaf 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -442,6 +442,12 @@ std::string PrettyJavaAccessFlags(uint32_t access_flags) { if ((access_flags & kAccStatic) != 0) { result += "static "; } + if ((access_flags & kAccAbstract) != 0) { + result += "abstract "; + } + if ((access_flags & kAccInterface) != 0) { + result += "interface "; + } if ((access_flags & kAccTransient) != 0) { result += "transient "; } diff --git a/runtime/verifier/method_resolution_kind.h b/runtime/verifier/method_resolution_kind.h new file mode 100644 index 0000000000..f72eb7af3a --- /dev/null +++ b/runtime/verifier/method_resolution_kind.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_VERIFIER_METHOD_RESOLUTION_KIND_H_ +#define ART_RUNTIME_VERIFIER_METHOD_RESOLUTION_KIND_H_ + +namespace art { +namespace verifier { + +// Values corresponding to the method resolution algorithms defined in mirror::Class. 
+enum MethodResolutionKind { + kDirectMethodResolution, + kVirtualMethodResolution, + kInterfaceMethodResolution, +}; + +} // namespace verifier +} // namespace art + +#endif // ART_RUNTIME_VERIFIER_METHOD_RESOLUTION_KIND_H_ diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 6b1170b98e..abd741cac4 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -37,6 +37,7 @@ #include "indenter.h" #include "intern_table.h" #include "leb128.h" +#include "method_resolution_kind.h" #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" @@ -47,6 +48,7 @@ #include "runtime.h" #include "scoped_thread_state_change.h" #include "utils.h" +#include "verifier_deps.h" #include "handle_scope-inl.h" namespace art { @@ -2189,7 +2191,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // We really do expect a reference here. Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object returns a non-reference type " << reg_type; - } else if (!return_type.IsAssignableFrom(reg_type)) { + } else if (!return_type.IsAssignableFrom(reg_type, this)) { if (reg_type.IsUnresolvedTypes() || return_type.IsUnresolvedTypes()) { Fail(VERIFY_ERROR_NO_CLASS) << " can't resolve returned type '" << return_type << "' or '" << reg_type << "'"; @@ -2198,7 +2200,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { // Check whether arrays are involved. They will show a valid class status, even // if their components are erroneous. 
if (reg_type.IsArrayTypes() && return_type.IsArrayTypes()) { - return_type.CanAssignArray(reg_type, reg_types_, class_loader_, &soft_error); + return_type.CanAssignArray(reg_type, reg_types_, class_loader_, this, &soft_error); if (soft_error) { Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "array with erroneous component type: " << reg_type << " vs " << return_type; @@ -2486,7 +2488,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { break; case Instruction::THROW: { const RegType& res_type = work_line_->GetRegisterType(this, inst->VRegA_11x()); - if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) { + if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type, this)) { if (res_type.IsUninitializedTypes()) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "thrown exception not initialized"; } else if (!res_type.IsReferenceTypes()) { @@ -2639,7 +2641,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { cast_type.HasClass() && // Could be conflict type, make sure it has a class. !cast_type.GetClass()->IsInterface() && (orig_type.IsZero() || - orig_type.IsStrictlyAssignableFrom(cast_type.Merge(orig_type, ®_types_)))) { + orig_type.IsStrictlyAssignableFrom( + cast_type.Merge(orig_type, ®_types_, this), this))) { RegisterLine* update_line = RegisterLine::Create(code_item_->registers_size_, this); if (inst->Opcode() == Instruction::IF_EQZ) { fallthrough_line.reset(update_line); @@ -3281,7 +3284,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } break; // Note: the following instructions encode offsets derived from class linking. - // As such they use Class*/Field*/AbstractMethod* as these offsets only have + // As such they use Class*/Field*/Executable* as these offsets only have // meaning if the class linking and resolution were successful. 
case Instruction::IGET_QUICK: VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true); @@ -3636,8 +3639,13 @@ const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) { return *result; } if (klass == nullptr && !result->IsUnresolvedTypes()) { - dex_cache_->SetResolvedType(class_idx, result->GetClass()); + klass = result->GetClass(); + dex_cache_->SetResolvedType(class_idx, klass); } + + // Record result of class resolution attempt. + VerifierDeps::MaybeRecordClassResolution(*dex_file_, class_idx, klass); + // Check if access is allowed. Unresolved types use xxxWithAccessCheck to // check at runtime if access is allowed and so pass here. If result is // primitive, skip the access check. @@ -3664,7 +3672,7 @@ const RegType& MethodVerifier::GetCaughtExceptionType() { common_super = ®_types_.JavaLangThrowable(false); } else { const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex()); - if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) { + if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception, this)) { DCHECK(!exception.IsUninitializedTypes()); // Comes from dex, shouldn't be uninit. if (exception.IsUnresolvedTypes()) { // We don't know enough about the type. Fail here and let runtime handle it. 
@@ -3679,9 +3687,10 @@ const RegType& MethodVerifier::GetCaughtExceptionType() { } else if (common_super->Equals(exception)) { // odd case, but nothing to do } else { - common_super = &common_super->Merge(exception, ®_types_); + common_super = &common_super->Merge(exception, ®_types_, this); if (FailOrAbort(this, - reg_types_.JavaLangThrowable(false).IsAssignableFrom(*common_super), + reg_types_.JavaLangThrowable(false).IsAssignableFrom( + *common_super, this), "java.lang.Throwable is not assignable-from common_super at ", work_insn_idx_)) { break; @@ -3701,6 +3710,20 @@ const RegType& MethodVerifier::GetCaughtExceptionType() { return *common_super; } +inline static MethodResolutionKind GetMethodResolutionKind( + MethodType method_type, bool is_interface) { + if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) { + return kDirectMethodResolution; + } else if (method_type == METHOD_INTERFACE) { + return kInterfaceMethodResolution; + } else if (method_type == METHOD_SUPER && is_interface) { + return kInterfaceMethodResolution; + } else { + DCHECK(method_type == METHOD_VIRTUAL || method_type == METHOD_SUPER); + return kVirtualMethodResolution; + } +} + ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess( uint32_t dex_method_idx, MethodType method_type) { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx); @@ -3718,6 +3741,7 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess( const RegType& referrer = GetDeclaringClass(); auto* cl = Runtime::Current()->GetClassLinker(); auto pointer_size = cl->GetImagePointerSize(); + MethodResolutionKind res_kind = GetMethodResolutionKind(method_type, klass->IsInterface()); ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx, pointer_size); bool stash_method = false; @@ -3725,35 +3749,44 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess( const char* name = dex_file_->GetMethodName(method_id); const Signature signature = dex_file_->GetMethodSignature(method_id); - 
if (method_type == METHOD_DIRECT || method_type == METHOD_STATIC) { + if (res_kind == kDirectMethodResolution) { res_method = klass->FindDirectMethod(name, signature, pointer_size); - } else if (method_type == METHOD_INTERFACE) { - res_method = klass->FindInterfaceMethod(name, signature, pointer_size); - } else if (method_type == METHOD_SUPER && klass->IsInterface()) { - res_method = klass->FindInterfaceMethod(name, signature, pointer_size); - } else { - DCHECK(method_type == METHOD_VIRTUAL || method_type == METHOD_SUPER); + } else if (res_kind == kVirtualMethodResolution) { res_method = klass->FindVirtualMethod(name, signature, pointer_size); + } else { + DCHECK_EQ(res_kind, kInterfaceMethodResolution); + res_method = klass->FindInterfaceMethod(name, signature, pointer_size); } + if (res_method != nullptr) { stash_method = true; } else { // If a virtual or interface method wasn't found with the expected type, look in // the direct methods. This can happen when the wrong invoke type is used or when // a class has changed, and will be flagged as an error in later checks. - if (method_type == METHOD_INTERFACE || - method_type == METHOD_VIRTUAL || - method_type == METHOD_SUPER) { + // Note that in this case, we do not put the resolved method in the Dex cache + // because it was not discovered using the expected type of method resolution. + if (res_kind != kDirectMethodResolution) { + // Record result of the initial resolution attempt. + VerifierDeps::MaybeRecordMethodResolution(*dex_file_, dex_method_idx, res_kind, nullptr); + // Change resolution type to 'direct' and try to resolve again. + res_kind = kDirectMethodResolution; res_method = klass->FindDirectMethod(name, signature, pointer_size); } - if (res_method == nullptr) { - Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method " - << PrettyDescriptor(klass) << "." << name - << " " << signature; - return nullptr; - } } } + + // Record result of method resolution attempt. 
+ VerifierDeps::MaybeRecordMethodResolution(*dex_file_, dex_method_idx, res_kind, res_method); + + if (res_method == nullptr) { + Fail(VERIFY_ERROR_NO_METHOD) << "couldn't find method " + << PrettyDescriptor(klass) << "." + << dex_file_->GetMethodName(method_id) << " " + << dex_file_->GetMethodSignature(method_id); + return nullptr; + } + // Make sure calls to constructors are "direct". There are additional restrictions but we don't // enforce them here. if (res_method->IsConstructor() && method_type != METHOD_DIRECT) { @@ -3897,7 +3930,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator( dex_file_->StringByTypeIdx(class_idx), false); } - if (!res_method_class->IsAssignableFrom(adjusted_type)) { + if (!res_method_class->IsAssignableFrom(adjusted_type, this)) { Fail(adjusted_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT) @@ -4029,12 +4062,15 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs( // has a vtable entry for the target method. Or the target is on a interface. if (method_type == METHOD_SUPER) { uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_; - mirror::Class* reference_class = dex_cache_->GetResolvedType(class_idx); - if (reference_class == nullptr) { + const RegType& reference_type = reg_types_.FromDescriptor( + GetClassLoader(), + dex_file_->StringByTypeIdx(class_idx), + false); + if (reference_type.IsUnresolvedTypes()) { Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Unable to find referenced class from invoke-super"; return nullptr; } - if (reference_class->IsInterface()) { + if (reference_type.GetClass()->IsInterface()) { // TODO Can we verify anything else. 
if (class_idx == class_def_.class_idx_) { Fail(VERIFY_ERROR_CLASS_CHANGE) << "Cannot invoke-super on self as interface"; @@ -4046,12 +4082,12 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs( Fail(VERIFY_ERROR_NO_CLASS) << "Unable to resolve the full class of 'this' used in an" << "interface invoke-super"; return nullptr; - } else if (!reference_class->IsAssignableFrom(GetDeclaringClass().GetClass())) { + } else if (!reference_type.IsStrictlyAssignableFrom(GetDeclaringClass(), this)) { Fail(VERIFY_ERROR_CLASS_CHANGE) << "invoke-super in " << PrettyClass(GetDeclaringClass().GetClass()) << " in method " << PrettyMethod(dex_method_idx_, *dex_file_) << " to method " << PrettyMethod(method_idx, *dex_file_) << " references " - << "non-super-interface type " << PrettyClass(reference_class); + << "non-super-interface type " << PrettyClass(reference_type.GetClass()); return nullptr; } } else { @@ -4062,7 +4098,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs( << " to super " << PrettyMethod(res_method); return nullptr; } - if (!reference_class->IsAssignableFrom(GetDeclaringClass().GetClass()) || + if (!reference_type.IsStrictlyAssignableFrom(GetDeclaringClass(), this) || (res_method->GetMethodIndex() >= super.GetClass()->GetVTableLength())) { Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from " << PrettyMethod(dex_method_idx_, *dex_file_) @@ -4177,7 +4213,7 @@ ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst, std::string temp; const RegType& res_method_class = FromClass(klass->GetDescriptor(&temp), klass, klass->CannotBeAssignedFromOtherTypes()); - if (!res_method_class.IsAssignableFrom(actual_arg_type)) { + if (!res_method_class.IsAssignableFrom(actual_arg_type, this)) { Fail(actual_arg_type.IsUninitializedTypes() // Just overcautious - should have never ? VERIFY_ERROR_BAD_CLASS_HARD // quickened this. 
: actual_arg_type.IsUnresolvedTypes() @@ -4466,8 +4502,11 @@ ArtField* MethodVerifier::GetStaticField(int field_idx) { return nullptr; // Can't resolve Class so no more to do here, will do checking at runtime. } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_, - class_loader_); + ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_, class_loader_); + + // Record result of the field resolution attempt. + VerifierDeps::MaybeRecordFieldResolution(*dex_file_, field_idx, field); + if (field == nullptr) { VLOG(verifier) << "Unable to resolve static field " << field_idx << " (" << dex_file_->GetFieldName(field_id) << ") in " @@ -4501,8 +4540,11 @@ ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_id return nullptr; // Can't resolve Class so no more to do here } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_, - class_loader_); + ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_, class_loader_); + + // Record result of the field resolution attempt. + VerifierDeps::MaybeRecordFieldResolution(*dex_file_, field_idx, field); + if (field == nullptr) { VLOG(verifier) << "Unable to resolve instance field " << field_idx << " (" << dex_file_->GetFieldName(field_id) << ") in " @@ -4536,7 +4578,7 @@ ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_id << " of " << PrettyMethod(dex_method_idx_, *dex_file_); return nullptr; } - } else if (!field_klass.IsAssignableFrom(obj_type)) { + } else if (!field_klass.IsAssignableFrom(obj_type, this)) { // Trying to access C1.field1 using reference of type C2, which is neither C1 or a sub-class // of C1. 
For resolution to occur the declared class of the field must be compatible with // obj_type, we've discovered this wasn't so, so report the field didn't exist. @@ -4643,7 +4685,7 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& if (is_primitive) { VerifyPrimitivePut(*field_type, insn_type, vregA); } else { - if (!insn_type.IsAssignableFrom(*field_type)) { + if (!insn_type.IsAssignableFrom(*field_type, this)) { // If the field type is not a reference, this is a global failure rather than // a class change failure as the instructions and the descriptors for the type // should have been consistent within the same file at compile time. @@ -4675,7 +4717,7 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& return; } } else { - if (!insn_type.IsAssignableFrom(*field_type)) { + if (!insn_type.IsAssignableFrom(*field_type, this)) { // If the field type is not a reference, this is a global failure rather than // a class change failure as the instructions and the descriptors for the type // should have been consistent within the same file at compile time. 
@@ -4806,7 +4848,7 @@ void MethodVerifier::VerifyQuickFieldAccess(const Instruction* inst, const RegTy return; } } else { - if (!insn_type.IsAssignableFrom(*field_type)) { + if (!insn_type.IsAssignableFrom(*field_type, this)) { Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field) << " to be compatible with type '" << insn_type << "' but found type '" << *field_type @@ -4832,7 +4874,7 @@ void MethodVerifier::VerifyQuickFieldAccess(const Instruction* inst, const RegTy return; } } else { - if (!insn_type.IsAssignableFrom(*field_type)) { + if (!insn_type.IsAssignableFrom(*field_type, this)) { Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field) << " to be compatible with type '" << insn_type << "' but found type '" << *field_type diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index c4b1c6eef6..eb8b7a639d 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -180,6 +180,11 @@ class MethodVerifier { uint8_t EncodePcToReferenceMapData() const; + const DexFile& GetDexFile() const { + DCHECK(dex_file_ != nullptr); + return *dex_file_; + } + uint32_t DexFileVersion() const { return dex_file_->GetVersion(); } @@ -353,7 +358,8 @@ class MethodVerifier { * (3) Iterate through the method, checking type safety and looking * for code flow problems. 
*/ - static FailureData VerifyMethod(Thread* self, uint32_t method_idx, + static FailureData VerifyMethod(Thread* self, + uint32_t method_idx, const DexFile* dex_file, Handle<mirror::DexCache> dex_cache, Handle<mirror::ClassLoader> class_loader, @@ -842,6 +848,7 @@ class MethodVerifier { MethodVerifier* link_; friend class art::Thread; + friend class VerifierDepsTest; DISALLOW_COPY_AND_ASSIGN(MethodVerifier); }; diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h index 861db3cf8c..d93aaa193c 100644 --- a/runtime/verifier/reg_type-inl.h +++ b/runtime/verifier/reg_type-inl.h @@ -22,6 +22,8 @@ #include "base/casts.h" #include "base/scoped_arena_allocator.h" #include "mirror/class.h" +#include "method_verifier.h" +#include "verifier_deps.h" namespace art { namespace verifier { @@ -62,7 +64,10 @@ inline bool RegType::IsConstantBoolean() const { } } -inline bool RegType::AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict) { +inline bool RegType::AssignableFrom(const RegType& lhs, + const RegType& rhs, + bool strict, + MethodVerifier* verifier) { if (lhs.Equals(rhs)) { return true; } else { @@ -104,10 +109,15 @@ inline bool RegType::AssignableFrom(const RegType& lhs, const RegType& rhs, bool return true; } else if (lhs.IsJavaLangObjectArray()) { return rhs.IsObjectArrayTypes(); // All reference arrays may be assigned to Object[] - } else if (lhs.HasClass() && rhs.HasClass() && - lhs.GetClass()->IsAssignableFrom(rhs.GetClass())) { - // We're assignable from the Class point-of-view. - return true; + } else if (lhs.HasClass() && rhs.HasClass()) { + // Test assignability from the Class point-of-view. + bool result = lhs.GetClass()->IsAssignableFrom(rhs.GetClass()); + // Record assignability dependency. The `verifier` is null during unit tests. 
+ if (verifier != nullptr) { + VerifierDeps::MaybeRecordAssignability( + verifier->GetDexFile(), lhs.GetClass(), rhs.GetClass(), strict, result); + } + return result; } else { // Unresolved types are only assignable for null and equality. return false; @@ -116,12 +126,12 @@ inline bool RegType::AssignableFrom(const RegType& lhs, const RegType& rhs, bool } } -inline bool RegType::IsAssignableFrom(const RegType& src) const { - return AssignableFrom(*this, src, false); +inline bool RegType::IsAssignableFrom(const RegType& src, MethodVerifier* verifier) const { + return AssignableFrom(*this, src, false, verifier); } -inline bool RegType::IsStrictlyAssignableFrom(const RegType& src) const { - return AssignableFrom(*this, src, true); +inline bool RegType::IsStrictlyAssignableFrom(const RegType& src, MethodVerifier* verifier) const { + return AssignableFrom(*this, src, true, verifier); } inline const DoubleHiType* DoubleHiType::GetInstance() { diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index 5c1996949f..3bc2acc1f3 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -21,6 +21,7 @@ #include "base/casts.h" #include "class_linker-inl.h" #include "dex_file-inl.h" +#include "method_verifier.h" #include "mirror/class.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" @@ -575,7 +576,9 @@ static const RegType& SelectNonConstant(const RegType& a, const RegType& b) { return a.IsConstantTypes() ? b : a; } -const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const { +const RegType& RegType::Merge(const RegType& incoming_type, + RegTypeCache* reg_types, + MethodVerifier* verifier) const { DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller // Perform pointer equality tests for undefined and conflict to avoid virtual method dispatch. 
const UndefinedType& undefined = reg_types->Undefined(); @@ -696,13 +699,21 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty // have two sub-classes and don't know how to merge. Create a new string-based unresolved // type that reflects our lack of knowledge and that allows the rest of the unresolved // mechanics to continue. - return reg_types->FromUnresolvedMerge(*this, incoming_type); + return reg_types->FromUnresolvedMerge(*this, incoming_type, verifier); } else { // Two reference types, compute Join mirror::Class* c1 = GetClass(); mirror::Class* c2 = incoming_type.GetClass(); DCHECK(c1 != nullptr && !c1->IsPrimitive()); DCHECK(c2 != nullptr && !c2->IsPrimitive()); mirror::Class* join_class = ClassJoin(c1, c2); + // Record the dependency that both `c1` and `c2` are assignable to `join_class`. + // The `verifier` is null during unit tests. + if (verifier != nullptr) { + VerifierDeps::MaybeRecordAssignability( + verifier->GetDexFile(), join_class, c1, true /* strict */, true /* is_assignable */); + VerifierDeps::MaybeRecordAssignability( + verifier->GetDexFile(), join_class, c2, true /* strict */, true /* is_assignable */); + } if (c1 == join_class && !IsPreciseReference()) { return *this; } else if (c2 == join_class && !incoming_type.IsPreciseReference()) { @@ -873,8 +884,11 @@ std::ostream& operator<<(std::ostream& os, const RegType& rhs) { return os; } -bool RegType::CanAssignArray(const RegType& src, RegTypeCache& reg_types, - Handle<mirror::ClassLoader> class_loader, bool* soft_error) const { +bool RegType::CanAssignArray(const RegType& src, + RegTypeCache& reg_types, + Handle<mirror::ClassLoader> class_loader, + MethodVerifier* verifier, + bool* soft_error) const { if (!IsArrayTypes() || !src.IsArrayTypes()) { *soft_error = false; return false; @@ -891,7 +905,7 @@ bool RegType::CanAssignArray(const RegType& src, RegTypeCache& reg_types, const RegType& cmp1 = reg_types.GetComponentType(*this, class_loader.Get()); const 
RegType& cmp2 = reg_types.GetComponentType(src, class_loader.Get()); - if (cmp1.IsAssignableFrom(cmp2)) { + if (cmp1.IsAssignableFrom(cmp2, verifier)) { return true; } if (cmp1.IsUnresolvedTypes()) { @@ -914,7 +928,7 @@ bool RegType::CanAssignArray(const RegType& src, RegTypeCache& reg_types, *soft_error = false; return false; } - return cmp1.CanAssignArray(cmp2, reg_types, class_loader, soft_error); + return cmp1.CanAssignArray(cmp2, reg_types, class_loader, verifier, soft_error); } diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index c3ed77a962..9170bb1a68 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -43,7 +43,9 @@ class ScopedArenaAllocator; namespace verifier { +class MethodVerifier; class RegTypeCache; + /* * RegType holds information about the "type" of data held in a register. */ @@ -210,7 +212,7 @@ class RegType { // Note: Object and interface types may always be assigned to one another, see // comment on // ClassJoin. - bool IsAssignableFrom(const RegType& src) const + bool IsAssignableFrom(const RegType& src, MethodVerifier* verifier) const REQUIRES_SHARED(Locks::mutator_lock_); // Can this array type potentially be assigned by src. @@ -220,14 +222,17 @@ class RegType { // will be set to true iff the assignment test failure should be treated as a soft-error, i.e., // when both array types have the same 'depth' and the 'final' component types may be assignable // (both are reference types). - bool CanAssignArray(const RegType& src, RegTypeCache& reg_types, - Handle<mirror::ClassLoader> class_loader, bool* soft_error) const + bool CanAssignArray(const RegType& src, + RegTypeCache& reg_types, + Handle<mirror::ClassLoader> class_loader, + MethodVerifier* verifier, + bool* soft_error) const REQUIRES_SHARED(Locks::mutator_lock_); // Can this type be assigned by src? Variant of IsAssignableFrom that doesn't // allow assignment to // an interface from an Object. 
- bool IsStrictlyAssignableFrom(const RegType& src) const + bool IsStrictlyAssignableFrom(const RegType& src, MethodVerifier* verifier) const REQUIRES_SHARED(Locks::mutator_lock_); // Are these RegTypes the same? @@ -235,36 +240,21 @@ class RegType { // Compute the merge of this register from one edge (path) with incoming_type // from another. - const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const + const RegType& Merge(const RegType& incoming_type, + RegTypeCache* reg_types, + MethodVerifier* verifier) const REQUIRES_SHARED(Locks::mutator_lock_); // Same as above, but also handles the case where incoming_type == this. - const RegType& SafeMerge(const RegType& incoming_type, RegTypeCache* reg_types) const + const RegType& SafeMerge(const RegType& incoming_type, + RegTypeCache* reg_types, + MethodVerifier* verifier) const REQUIRES_SHARED(Locks::mutator_lock_) { if (Equals(incoming_type)) { return *this; } - return Merge(incoming_type, reg_types); + return Merge(incoming_type, reg_types, verifier); } - /* - * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is - * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of - * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J - * is the deepest (lowest upper bound) parent of S and T). - * - * This operation applies for regular classes and arrays, however, for interface types there - * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial - * order by introducing sets of types, however, the only operation permissible on an interface is - * invoke-interface. 
In the tradition of Java verifiers [1] we defer the verification of interface - * types until an invoke-interface call on the interface typed reference at runtime and allow - * the perversion of Object being assignable to an interface type (note, however, that we don't - * allow assignment of Object or Interface to any concrete class and are therefore type safe). - * - * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy - */ - static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t) - REQUIRES_SHARED(Locks::mutator_lock_); - virtual ~RegType() {} void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const @@ -298,7 +288,29 @@ class RegType { friend class RegTypeCache; private: - static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict) + /* + * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is + * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of + * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J + * is the deepest (lowest upper bound) parent of S and T). + * + * This operation applies for regular classes and arrays, however, for interface types there + * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial + * order by introducing sets of types, however, the only operation permissible on an interface is + * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface + * types until an invoke-interface call on the interface typed reference at runtime and allow + * the perversion of Object being assignable to an interface type (note, however, that we don't + * allow assignment of Object or Interface to any concrete class and are therefore type safe). 
+ * + * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy + */ + static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t) + REQUIRES_SHARED(Locks::mutator_lock_); + + static bool AssignableFrom(const RegType& lhs, + const RegType& rhs, + bool strict, + MethodVerifier* verifier) REQUIRES_SHARED(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(RegType); diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index 4d4886e8aa..d0493e5f73 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -342,7 +342,9 @@ void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() { } } -const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) { +const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, + const RegType& right, + MethodVerifier* verifier) { ArenaBitVector types(&arena_, kDefaultArenaBitVectorBytes * kBitsPerByte, // Allocate at least 8 bytes. true); // Is expandable. @@ -383,7 +385,7 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT } // Merge the resolved parts. Left and right might be equal, so use SafeMerge. - const RegType& resolved_parts_merged = left_resolved->SafeMerge(*right_resolved, this); + const RegType& resolved_parts_merged = left_resolved->SafeMerge(*right_resolved, this, verifier); // If we get a conflict here, the merge result is a conflict, not an unresolved merge type. 
if (resolved_parts_merged.IsConflict()) { return Conflict(); diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h index 14d95092f6..df0fe3d041 100644 --- a/runtime/verifier/reg_type_cache.h +++ b/runtime/verifier/reg_type_cache.h @@ -75,7 +75,9 @@ class RegTypeCache { REQUIRES_SHARED(Locks::mutator_lock_); const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise) REQUIRES_SHARED(Locks::mutator_lock_); - const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right) + const RegType& FromUnresolvedMerge(const RegType& left, + const RegType& right, + MethodVerifier* verifier) REQUIRES_SHARED(Locks::mutator_lock_); const RegType& FromUnresolvedSuperClass(const RegType& child) REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index 42a74f88e1..f2411b56fd 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -79,8 +79,8 @@ TEST_F(RegTypeTest, Pairs) { EXPECT_FALSE(precise_lo.CheckWidePair(precise_const)); EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi)); // Test Merging. 
- EXPECT_TRUE((long_lo.Merge(precise_lo, &cache)).IsLongTypes()); - EXPECT_TRUE((long_hi.Merge(precise_hi, &cache)).IsLongHighTypes()); + EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier */ nullptr)).IsLongTypes()); + EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier */ nullptr)).IsLongHighTypes()); } TEST_F(RegTypeTest, Primitives) { @@ -427,7 +427,8 @@ TEST_F(RegTypeReferenceTest, Dump) { const RegType& resolved_ref = cache.JavaLangString(); const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10); const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12); - const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another); + const RegType& unresolved_merged = cache.FromUnresolvedMerge( + unresolved_ref, unresolved_ref_another, /* verifier */ nullptr); std::string expected = "Unresolved Reference: java.lang.DoesNotExist"; EXPECT_EQ(expected, unresolved_ref.Dump()); @@ -488,14 +489,14 @@ TEST_F(RegTypeReferenceTest, Merging) { RegTypeCache cache_new(true, allocator); const RegType& string = cache_new.JavaLangString(); const RegType& Object = cache_new.JavaLangObject(true); - EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject()); + EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier */ nullptr).IsJavaLangObject()); // Merge two unresolved types. 
const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true); EXPECT_TRUE(ref_type_0.IsUnresolvedReference()); const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true); EXPECT_FALSE(ref_type_0.Equals(ref_type_1)); - const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new); + const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsUnresolvedMergedReference()); RegType& merged_nonconst = const_cast<RegType&>(merged); @@ -518,22 +519,22 @@ TEST_F(RegTypeTest, MergingFloat) { const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false); { // float MERGE precise cst => float. - const RegType& merged = float_type.Merge(precise_cst, &cache_new); + const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsFloat()); } { // precise cst MERGE float => float. - const RegType& merged = precise_cst.Merge(float_type, &cache_new); + const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsFloat()); } { // float MERGE imprecise cst => float. - const RegType& merged = float_type.Merge(imprecise_cst, &cache_new); + const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsFloat()); } { // imprecise cst MERGE float => float. - const RegType& merged = imprecise_cst.Merge(float_type, &cache_new); + const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsFloat()); } } @@ -554,42 +555,46 @@ TEST_F(RegTypeTest, MergingLong) { const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false); { // lo MERGE precise cst lo => lo. 
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new); + const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // precise cst lo MERGE lo => lo. - const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new); + const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // lo MERGE imprecise cst lo => lo. - const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new); + const RegType& merged = long_lo_type.Merge( + imprecise_cst_lo, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // imprecise cst lo MERGE lo => lo. - const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new); + const RegType& merged = imprecise_cst_lo.Merge( + long_lo_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // hi MERGE precise cst hi => hi. - const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new); + const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } { // precise cst hi MERGE hi => hi. - const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new); + const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } { // hi MERGE imprecise cst hi => hi. - const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new); + const RegType& merged = long_hi_type.Merge( + imprecise_cst_hi, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } { // imprecise cst hi MERGE hi => hi. 
- const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new); + const RegType& merged = imprecise_cst_hi.Merge( + long_hi_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } } @@ -610,42 +615,50 @@ TEST_F(RegTypeTest, MergingDouble) { const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false); { // lo MERGE precise cst lo => lo. - const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new); + const RegType& merged = double_lo_type.Merge( + precise_cst_lo, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // precise cst lo MERGE lo => lo. - const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new); + const RegType& merged = precise_cst_lo.Merge( + double_lo_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // lo MERGE imprecise cst lo => lo. - const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new); + const RegType& merged = double_lo_type.Merge( + imprecise_cst_lo, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // imprecise cst lo MERGE lo => lo. - const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new); + const RegType& merged = imprecise_cst_lo.Merge( + double_lo_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // hi MERGE precise cst hi => hi. - const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new); + const RegType& merged = double_hi_type.Merge( + precise_cst_hi, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } { // precise cst hi MERGE hi => hi. - const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new); + const RegType& merged = precise_cst_hi.Merge( + double_hi_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } { // hi MERGE imprecise cst hi => hi. 
- const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new); + const RegType& merged = double_hi_type.Merge( + imprecise_cst_hi, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } { // imprecise cst hi MERGE hi => hi. - const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new); + const RegType& merged = imprecise_cst_hi.Merge( + double_hi_type, &cache_new, /* verifier */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } } diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h index d2f3485889..382314393f 100644 --- a/runtime/verifier/register_line-inl.h +++ b/runtime/verifier/register_line-inl.h @@ -131,7 +131,7 @@ inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t const RegType& check_type) { // Verify the src register type against the check type refining the type of the register const RegType& src_type = GetRegisterType(verifier, vsrc); - if (UNLIKELY(!check_type.IsAssignableFrom(src_type))) { + if (UNLIKELY(!check_type.IsAssignableFrom(src_type, verifier))) { enum VerifyError fail_type; if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) { // Hard fail if one of the types is primitive, since they are concretely known. 
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc index 71aa94ea4d..823336c3a7 100644 --- a/runtime/verifier/register_line.cc +++ b/runtime/verifier/register_line.cc @@ -73,7 +73,7 @@ bool RegisterLine::VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsr DCHECK(check_type1.CheckWidePair(check_type2)); // Verify the src register type against the check type refining the type of the register const RegType& src_type = GetRegisterType(verifier, vsrc); - if (!check_type1.IsAssignableFrom(src_type)) { + if (!check_type1.IsAssignableFrom(src_type, verifier)) { verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type << " but expected " << check_type1; return false; @@ -433,7 +433,8 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* if (line_[idx] != incoming_line->line_[idx]) { const RegType& incoming_reg_type = incoming_line->GetRegisterType(verifier, idx); const RegType& cur_type = GetRegisterType(verifier, idx); - const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier->GetRegTypeCache()); + const RegType& new_type = cur_type.Merge( + incoming_reg_type, verifier->GetRegTypeCache(), verifier); changed = changed || !cur_type.Equals(new_type); line_[idx] = new_type.GetId(); } diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc new file mode 100644 index 0000000000..350c838717 --- /dev/null +++ b/runtime/verifier/verifier_deps.cc @@ -0,0 +1,469 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "verifier_deps.h" + +#include "compiler_callbacks.h" +#include "leb128.h" +#include "mirror/class-inl.h" +#include "runtime.h" + +namespace art { +namespace verifier { + +VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files) { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + for (const DexFile* dex_file : dex_files) { + DCHECK(GetDexFileDeps(*dex_file) == nullptr); + std::unique_ptr<DexFileDeps> deps(new DexFileDeps()); + dex_deps_.emplace(dex_file, std::move(deps)); + } +} + +VerifierDeps::DexFileDeps* VerifierDeps::GetDexFileDeps(const DexFile& dex_file) { + auto it = dex_deps_.find(&dex_file); + return (it == dex_deps_.end()) ? 
nullptr : it->second.get(); +} + +template <typename T> +uint16_t VerifierDeps::GetAccessFlags(T* element) { + static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant"); + if (element == nullptr) { + return VerifierDeps::kUnresolvedMarker; + } else { + uint16_t access_flags = Low16Bits(element->GetAccessFlags()); + CHECK_NE(access_flags, VerifierDeps::kUnresolvedMarker); + return access_flags; + } +} + +template <typename T> +uint32_t VerifierDeps::GetDeclaringClassStringId(const DexFile& dex_file, T* element) { + static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant"); + if (element == nullptr) { + return VerifierDeps::kUnresolvedMarker; + } else { + std::string temp; + uint32_t string_id = GetIdFromString( + dex_file, element->GetDeclaringClass()->GetDescriptor(&temp)); + return string_id; + } +} + +uint32_t VerifierDeps::GetIdFromString(const DexFile& dex_file, const std::string& str) { + const DexFile::StringId* string_id = dex_file.FindStringId(str.c_str()); + if (string_id != nullptr) { + // String is in the DEX file. Return its ID. + return dex_file.GetIndexForStringId(*string_id); + } + + // String is not in the DEX file. Assign a new ID to it which is higher than + // the number of strings in the DEX file. 
+ + DexFileDeps* deps = GetDexFileDeps(dex_file); + DCHECK(deps != nullptr); + + uint32_t num_ids_in_dex = dex_file.NumStringIds(); + uint32_t num_extra_ids = deps->strings_.size(); + + for (size_t i = 0; i < num_extra_ids; ++i) { + if (deps->strings_[i] == str) { + return num_ids_in_dex + i; + } + } + + deps->strings_.push_back(str); + + uint32_t new_id = num_ids_in_dex + num_extra_ids; + CHECK_GE(new_id, num_ids_in_dex); // check for overflows + DCHECK_EQ(str, GetStringFromId(dex_file, new_id)); + + return new_id; +} + +std::string VerifierDeps::GetStringFromId(const DexFile& dex_file, uint32_t string_id) { + uint32_t num_ids_in_dex = dex_file.NumStringIds(); + if (string_id < num_ids_in_dex) { + return std::string(dex_file.StringDataByIdx(string_id)); + } else { + DexFileDeps* deps = GetDexFileDeps(dex_file); + DCHECK(deps != nullptr); + string_id -= num_ids_in_dex; + CHECK_LT(string_id, deps->strings_.size()); + return deps->strings_[string_id]; + } +} + +bool VerifierDeps::IsInClassPath(mirror::Class* klass) { + DCHECK(klass != nullptr); + + mirror::DexCache* dex_cache = klass->GetDexCache(); + if (dex_cache == nullptr) { + // This is a synthesized class, in this case always an array. They are not + // defined in the compiled DEX files and therefore are part of the classpath. + // We could avoid recording dependencies on arrays with component types in + // the compiled DEX files but we choose to record them anyway so as to + // record the access flags VM sets for array classes. + DCHECK(klass->IsArrayClass()) << PrettyDescriptor(klass); + return true; + } + + const DexFile* dex_file = dex_cache->GetDexFile(); + DCHECK(dex_file != nullptr); + + // Test if the `dex_deps_` contains an entry for `dex_file`. If not, the dex + // file was not registered as being compiled and we assume `klass` is in the + // classpath. 
+ return (GetDexFileDeps(*dex_file) == nullptr); +} + +void VerifierDeps::AddClassResolution(const DexFile& dex_file, + uint16_t type_idx, + mirror::Class* klass) { + DexFileDeps* dex_deps = GetDexFileDeps(dex_file); + if (dex_deps == nullptr) { + // This invocation is from verification of a dex file which is not being compiled. + return; + } + + if (klass != nullptr && !IsInClassPath(klass)) { + // Class resolved into one of the DEX files which are being compiled. + // This is not a classpath dependency. + return; + } + + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + dex_deps->classes_.emplace(ClassResolution(type_idx, GetAccessFlags(klass))); +} + +void VerifierDeps::AddFieldResolution(const DexFile& dex_file, + uint32_t field_idx, + ArtField* field) { + DexFileDeps* dex_deps = GetDexFileDeps(dex_file); + if (dex_deps == nullptr) { + // This invocation is from verification of a dex file which is not being compiled. + return; + } + + if (field != nullptr && !IsInClassPath(field->GetDeclaringClass())) { + // Field resolved into one of the DEX files which are being compiled. + // This is not a classpath dependency. + return; + } + + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + dex_deps->fields_.emplace(FieldResolution( + field_idx, GetAccessFlags(field), GetDeclaringClassStringId(dex_file, field))); +} + +void VerifierDeps::AddMethodResolution(const DexFile& dex_file, + uint32_t method_idx, + MethodResolutionKind resolution_kind, + ArtMethod* method) { + DexFileDeps* dex_deps = GetDexFileDeps(dex_file); + if (dex_deps == nullptr) { + // This invocation is from verification of a dex file which is not being compiled. + return; + } + + if (method != nullptr && !IsInClassPath(method->GetDeclaringClass())) { + // Method resolved into one of the DEX files which are being compiled. + // This is not a classpath dependency. 
+ return; + } + + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + MethodResolution method_tuple(method_idx, + GetAccessFlags(method), + GetDeclaringClassStringId(dex_file, method)); + if (resolution_kind == kDirectMethodResolution) { + dex_deps->direct_methods_.emplace(method_tuple); + } else if (resolution_kind == kVirtualMethodResolution) { + dex_deps->virtual_methods_.emplace(method_tuple); + } else { + DCHECK_EQ(resolution_kind, kInterfaceMethodResolution); + dex_deps->interface_methods_.emplace(method_tuple); + } +} + +void VerifierDeps::AddAssignability(const DexFile& dex_file, + mirror::Class* destination, + mirror::Class* source, + bool is_strict, + bool is_assignable) { + // Test that the method is only called on reference types. + // Note that concurrent verification of `destination` and `source` may have + // set their status to erroneous. However, the tests performed below rely + // merely on no issues with linking (valid access flags, superclass and + // implemented interfaces). If the class at any point reached the IsResolved + // status, the requirement holds. This is guaranteed by RegTypeCache::ResolveClass. + DCHECK(destination != nullptr && !destination->IsPrimitive()); + DCHECK(source != nullptr && !source->IsPrimitive()); + + if (destination == source || + destination->IsObjectClass() || + (!is_strict && destination->IsInterface())) { + // Cases when `destination` is trivially assignable from `source`. + DCHECK(is_assignable); + return; + } + + DCHECK_EQ(is_assignable, destination->IsAssignableFrom(source)); + + if (destination->IsArrayClass() && source->IsArrayClass()) { + // Both types are arrays. Break down to component types and add recursively. + // This helps filter out destinations from compiled DEX files (see below) + // and deduplicate entries with the same canonical component type. 
+ mirror::Class* destination_component = destination->GetComponentType(); + mirror::Class* source_component = source->GetComponentType(); + + // Only perform the optimization if both types are resolved which guarantees + // that they linked successfully, as required at the top of this method. + if (destination_component->IsResolved() && source_component->IsResolved()) { + AddAssignability(dex_file, + destination_component, + source_component, + /* is_strict */ true, + is_assignable); + return; + } + } + + DexFileDeps* dex_deps = GetDexFileDeps(dex_file); + if (dex_deps == nullptr) { + // This invocation is from verification of a DEX file which is not being compiled. + return; + } + + if (!IsInClassPath(destination) && !IsInClassPath(source)) { + // Both `destination` and `source` are defined in the compiled DEX files. + // No need to record a dependency. + return; + } + + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + + // Get string IDs for both descriptors and store in the appropriate set. 
+ + std::string temp1, temp2; + std::string destination_desc(destination->GetDescriptor(&temp1)); + std::string source_desc(source->GetDescriptor(&temp2)); + uint32_t destination_id = GetIdFromString(dex_file, destination_desc); + uint32_t source_id = GetIdFromString(dex_file, source_desc); + + if (is_assignable) { + dex_deps->assignable_types_.emplace(TypeAssignability(destination_id, source_id)); + } else { + dex_deps->unassignable_types_.emplace(TypeAssignability(destination_id, source_id)); + } +} + +static inline VerifierDeps* GetVerifierDepsSingleton() { + CompilerCallbacks* callbacks = Runtime::Current()->GetCompilerCallbacks(); + if (callbacks == nullptr) { + return nullptr; + } + return callbacks->GetVerifierDeps(); +} + +void VerifierDeps::MaybeRecordClassResolution(const DexFile& dex_file, + uint16_t type_idx, + mirror::Class* klass) { + VerifierDeps* singleton = GetVerifierDepsSingleton(); + if (singleton != nullptr) { + singleton->AddClassResolution(dex_file, type_idx, klass); + } +} + +void VerifierDeps::MaybeRecordFieldResolution(const DexFile& dex_file, + uint32_t field_idx, + ArtField* field) { + VerifierDeps* singleton = GetVerifierDepsSingleton(); + if (singleton != nullptr) { + singleton->AddFieldResolution(dex_file, field_idx, field); + } +} + +void VerifierDeps::MaybeRecordMethodResolution(const DexFile& dex_file, + uint32_t method_idx, + MethodResolutionKind resolution_kind, + ArtMethod* method) { + VerifierDeps* singleton = GetVerifierDepsSingleton(); + if (singleton != nullptr) { + singleton->AddMethodResolution(dex_file, method_idx, resolution_kind, method); + } +} + +void VerifierDeps::MaybeRecordAssignability(const DexFile& dex_file, + mirror::Class* destination, + mirror::Class* source, + bool is_strict, + bool is_assignable) { + VerifierDeps* singleton = GetVerifierDepsSingleton(); + if (singleton != nullptr) { + singleton->AddAssignability(dex_file, destination, source, is_strict, is_assignable); + } +} + +static inline uint32_t 
DecodeUint32WithOverflowCheck(const uint8_t** in, const uint8_t* end) { + CHECK_LT(*in, end); + return DecodeUnsignedLeb128(in); +} + +template<typename T1, typename T2> +static inline void EncodeTuple(std::vector<uint8_t>* out, const std::tuple<T1, T2>& t) { + EncodeUnsignedLeb128(out, std::get<0>(t)); + EncodeUnsignedLeb128(out, std::get<1>(t)); +} + +template<typename T1, typename T2> +static inline void DecodeTuple(const uint8_t** in, const uint8_t* end, std::tuple<T1, T2>* t) { + T1 v1 = static_cast<T1>(DecodeUint32WithOverflowCheck(in, end)); + T2 v2 = static_cast<T2>(DecodeUint32WithOverflowCheck(in, end)); + *t = std::make_tuple(v1, v2); +} + +template<typename T1, typename T2, typename T3> +static inline void EncodeTuple(std::vector<uint8_t>* out, const std::tuple<T1, T2, T3>& t) { + EncodeUnsignedLeb128(out, std::get<0>(t)); + EncodeUnsignedLeb128(out, std::get<1>(t)); + EncodeUnsignedLeb128(out, std::get<2>(t)); +} + +template<typename T1, typename T2, typename T3> +static inline void DecodeTuple(const uint8_t** in, const uint8_t* end, std::tuple<T1, T2, T3>* t) { + T1 v1 = static_cast<T1>(DecodeUint32WithOverflowCheck(in, end)); + T2 v2 = static_cast<T2>(DecodeUint32WithOverflowCheck(in, end)); + T3 v3 = static_cast<T2>(DecodeUint32WithOverflowCheck(in, end)); + *t = std::make_tuple(v1, v2, v3); +} + +template<typename T> +static inline void EncodeSet(std::vector<uint8_t>* out, const std::set<T>& set) { + EncodeUnsignedLeb128(out, set.size()); + for (const T& entry : set) { + EncodeTuple(out, entry); + } +} + +template<typename T> +static inline void DecodeSet(const uint8_t** in, const uint8_t* end, std::set<T>* set) { + DCHECK(set->empty()); + size_t num_entries = DecodeUint32WithOverflowCheck(in, end); + for (size_t i = 0; i < num_entries; ++i) { + T tuple; + DecodeTuple(in, end, &tuple); + set->emplace(tuple); + } +} + +static inline void EncodeStringVector(std::vector<uint8_t>* out, + const std::vector<std::string>& strings) { + 
EncodeUnsignedLeb128(out, strings.size()); + for (const std::string& str : strings) { + const uint8_t* data = reinterpret_cast<const uint8_t*>(str.c_str()); + size_t length = str.length() + 1; + out->insert(out->end(), data, data + length); + DCHECK_EQ(0u, out->back()); + } +} + +static inline void DecodeStringVector(const uint8_t** in, + const uint8_t* end, + std::vector<std::string>* strings) { + DCHECK(strings->empty()); + size_t num_strings = DecodeUint32WithOverflowCheck(in, end); + strings->reserve(num_strings); + for (size_t i = 0; i < num_strings; ++i) { + CHECK_LT(*in, end); + const char* string_start = reinterpret_cast<const char*>(*in); + strings->emplace_back(std::string(string_start)); + *in += strings->back().length() + 1; + } +} + +void VerifierDeps::Encode(std::vector<uint8_t>* buffer) const { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + for (auto& entry : dex_deps_) { + EncodeStringVector(buffer, entry.second->strings_); + EncodeSet(buffer, entry.second->assignable_types_); + EncodeSet(buffer, entry.second->unassignable_types_); + EncodeSet(buffer, entry.second->classes_); + EncodeSet(buffer, entry.second->fields_); + EncodeSet(buffer, entry.second->direct_methods_); + EncodeSet(buffer, entry.second->virtual_methods_); + EncodeSet(buffer, entry.second->interface_methods_); + } +} + +VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<uint8_t> data) + : VerifierDeps(dex_files) { + const uint8_t* data_start = data.data(); + const uint8_t* data_end = data_start + data.size(); + for (auto& entry : dex_deps_) { + DecodeStringVector(&data_start, data_end, &entry.second->strings_); + DecodeSet(&data_start, data_end, &entry.second->assignable_types_); + DecodeSet(&data_start, data_end, &entry.second->unassignable_types_); + DecodeSet(&data_start, data_end, &entry.second->classes_); + DecodeSet(&data_start, data_end, &entry.second->fields_); + DecodeSet(&data_start, data_end, 
&entry.second->direct_methods_); + DecodeSet(&data_start, data_end, &entry.second->virtual_methods_); + DecodeSet(&data_start, data_end, &entry.second->interface_methods_); + } + CHECK_LE(data_start, data_end); +} + +bool VerifierDeps::Equals(const VerifierDeps& rhs) const { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + + if (dex_deps_.size() != rhs.dex_deps_.size()) { + return false; + } + + auto lhs_it = dex_deps_.begin(); + auto rhs_it = rhs.dex_deps_.begin(); + + for (; (lhs_it != dex_deps_.end()) && (rhs_it != rhs.dex_deps_.end()); lhs_it++, rhs_it++) { + const DexFile* lhs_dex_file = lhs_it->first; + const DexFile* rhs_dex_file = rhs_it->first; + if (lhs_dex_file != rhs_dex_file) { + return false; + } + + DexFileDeps* lhs_deps = lhs_it->second.get(); + DexFileDeps* rhs_deps = rhs_it->second.get(); + if (!lhs_deps->Equals(*rhs_deps)) { + return false; + } + } + + DCHECK((lhs_it == dex_deps_.end()) && (rhs_it == rhs.dex_deps_.end())); + return true; +} + +bool VerifierDeps::DexFileDeps::Equals(const VerifierDeps::DexFileDeps& rhs) const { + return (strings_ == rhs.strings_) && + (assignable_types_ == rhs.assignable_types_) && + (unassignable_types_ == rhs.unassignable_types_) && + (classes_ == rhs.classes_) && + (fields_ == rhs.fields_) && + (direct_methods_ == rhs.direct_methods_) && + (virtual_methods_ == rhs.virtual_methods_) && + (interface_methods_ == rhs.interface_methods_); +} + +} // namespace verifier +} // namespace art diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h new file mode 100644 index 0000000000..dc8dfaf2f1 --- /dev/null +++ b/runtime/verifier/verifier_deps.h @@ -0,0 +1,248 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_VERIFIER_VERIFIER_DEPS_H_ +#define ART_RUNTIME_VERIFIER_VERIFIER_DEPS_H_ + +#include <map> +#include <set> +#include <vector> + +#include "art_field.h" +#include "art_method.h" +#include "base/array_ref.h" +#include "base/mutex.h" +#include "method_resolution_kind.h" +#include "os.h" + +namespace art { +namespace verifier { + +// Verification dependencies collector class used by the MethodVerifier to record +// resolution outcomes and type assignability tests of classes/methods/fields +// not present in the set of compiled DEX files, that is classes/methods/fields +// defined in the classpath. +// The compilation driver initializes the class and registers all DEX files +// which are being compiled. Classes defined in DEX files outside of this set +// (or synthesized classes without associated DEX files) are considered being +// in the classpath. +// During code-flow verification, the MethodVerifier informs the VerifierDeps +// singleton about the outcome of every resolution and assignability test, and +// the singleton records them if their outcome may change with changes in the +// classpath. +class VerifierDeps { + public: + explicit VerifierDeps(const std::vector<const DexFile*>& dex_files) + REQUIRES(!Locks::verifier_deps_lock_); + + // Record the outcome `klass` of resolving type `type_idx` from `dex_file`. + // If `klass` is null, the class is assumed unresolved. 
+ static void MaybeRecordClassResolution(const DexFile& dex_file, + uint16_t type_idx, + mirror::Class* klass) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + // Record the outcome `field` of resolving field `field_idx` from `dex_file`. + // If `field` is null, the field is assumed unresolved. + static void MaybeRecordFieldResolution(const DexFile& dex_file, + uint32_t field_idx, + ArtField* field) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + // Record the outcome `method` of resolving method `method_idx` from `dex_file` + // using `res_kind` kind of method resolution algorithm. If `method` is null, + // the method is assumed unresolved. + static void MaybeRecordMethodResolution(const DexFile& dex_file, + uint32_t method_idx, + MethodResolutionKind res_kind, + ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + // Record the outcome `is_assignable` of type assignability test from `source` + // to `destination` as defined by RegType::AssignableFrom. `dex_file` is the + // owner of the method for which MethodVerifier performed the assignability test. + static void MaybeRecordAssignability(const DexFile& dex_file, + mirror::Class* destination, + mirror::Class* source, + bool is_strict, + bool is_assignable) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + // Serialize the recorded dependencies and store the data into `buffer`. + void Encode(std::vector<uint8_t>* buffer) const + REQUIRES(!Locks::verifier_deps_lock_); + + private: + static constexpr uint16_t kUnresolvedMarker = static_cast<uint16_t>(-1); + + // Only used in tests to reconstruct the data structure from serialized data. 
+ VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<uint8_t> data) + REQUIRES(!Locks::verifier_deps_lock_); + + using ClassResolutionBase = std::tuple<uint32_t, uint16_t>; + struct ClassResolution : public ClassResolutionBase { + ClassResolution() = default; + ClassResolution(const ClassResolution&) = default; + ClassResolution(uint32_t type_idx, uint16_t access_flags) + : ClassResolutionBase(type_idx, access_flags) {} + + bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; } + uint32_t GetDexTypeIndex() const { return std::get<0>(*this); } + uint16_t GetAccessFlags() const { return std::get<1>(*this); } + }; + + using FieldResolutionBase = std::tuple<uint32_t, uint16_t, uint32_t>; + struct FieldResolution : public FieldResolutionBase { + FieldResolution() = default; + FieldResolution(const FieldResolution&) = default; + FieldResolution(uint32_t field_idx, uint16_t access_flags, uint32_t declaring_class_idx) + : FieldResolutionBase(field_idx, access_flags, declaring_class_idx) {} + + bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; } + uint32_t GetDexFieldIndex() const { return std::get<0>(*this); } + uint16_t GetAccessFlags() const { return std::get<1>(*this); } + uint32_t GetDeclaringClassIndex() const { return std::get<2>(*this); } + }; + + using MethodResolutionBase = std::tuple<uint32_t, uint16_t, uint32_t>; + struct MethodResolution : public MethodResolutionBase { + MethodResolution() = default; + MethodResolution(const MethodResolution&) = default; + MethodResolution(uint32_t method_idx, uint16_t access_flags, uint32_t declaring_class_idx) + : MethodResolutionBase(method_idx, access_flags, declaring_class_idx) {} + + bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; } + uint32_t GetDexMethodIndex() const { return std::get<0>(*this); } + uint16_t GetAccessFlags() const { return std::get<1>(*this); } + uint32_t GetDeclaringClassIndex() const { return std::get<2>(*this); } + }; + 
+ using TypeAssignabilityBase = std::tuple<uint32_t, uint32_t>; + struct TypeAssignability : public std::tuple<uint32_t, uint32_t> { + TypeAssignability() = default; + TypeAssignability(const TypeAssignability&) = default; + TypeAssignability(uint32_t destination_idx, uint32_t source_idx) + : TypeAssignabilityBase(destination_idx, source_idx) {} + + uint32_t GetDestination() const { return std::get<0>(*this); } + uint32_t GetSource() const { return std::get<1>(*this); } + }; + + // Data structure representing dependencies collected during verification of + // methods inside one DexFile. + struct DexFileDeps { + // Vector of strings which are not present in the corresponding DEX file. + // These are referred to with ids starting with `NumStringIds()` of that DexFile. + std::vector<std::string> strings_; + + // Set of class pairs recording the outcome of assignability test from one + // of the two types to the other. + std::set<TypeAssignability> assignable_types_; + std::set<TypeAssignability> unassignable_types_; + + // Sets of recorded class/field/method resolutions. + std::set<ClassResolution> classes_; + std::set<FieldResolution> fields_; + std::set<MethodResolution> direct_methods_; + std::set<MethodResolution> virtual_methods_; + std::set<MethodResolution> interface_methods_; + + bool Equals(const DexFileDeps& rhs) const; + }; + + // Finds the DexFileDep instance associated with `dex_file`, or nullptr if + // `dex_file` is not reported as being compiled. + // We disable thread safety analysis. The method only reads the key set of + // `dex_deps_` which stays constant after initialization. + DexFileDeps* GetDexFileDeps(const DexFile& dex_file) + NO_THREAD_SAFETY_ANALYSIS; + + // Returns true if `klass` is null or not defined in any of dex files which + // were reported as being compiled. + bool IsInClassPath(mirror::Class* klass) + REQUIRES_SHARED(Locks::mutator_lock_); + + // Returns the index of `str`. 
If it is defined in `dex_file_`, this is the dex + // string ID. If not, an ID is assigned to the string and cached in `strings_` + // of the corresponding DexFileDeps structure (either provided or inferred from + // `dex_file`). + uint32_t GetIdFromString(const DexFile& dex_file, const std::string& str) + REQUIRES(Locks::verifier_deps_lock_); + + // Returns the string represented by `id`. + std::string GetStringFromId(const DexFile& dex_file, uint32_t string_id) + REQUIRES(Locks::verifier_deps_lock_); + + // Returns the bytecode access flags of `element` (bottom 16 bits), or + // `kUnresolvedMarker` if `element` is null. + template <typename T> + uint16_t GetAccessFlags(T* element) + REQUIRES_SHARED(Locks::mutator_lock_); + + // Returns a string ID of the descriptor of the declaring class of `element`, + // or `kUnresolvedMarker` if `element` is null. + template <typename T> + uint32_t GetDeclaringClassStringId(const DexFile& dex_file, T* element) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(Locks::verifier_deps_lock_); + + void AddClassResolution(const DexFile& dex_file, + uint16_t type_idx, + mirror::Class* klass) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + void AddFieldResolution(const DexFile& dex_file, + uint32_t field_idx, + ArtField* field) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + void AddMethodResolution(const DexFile& dex_file, + uint32_t method_idx, + MethodResolutionKind res_kind, + ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + void AddAssignability(const DexFile& dex_file, + mirror::Class* destination, + mirror::Class* source, + bool is_strict, + bool is_assignable) + REQUIRES_SHARED(Locks::mutator_lock_) + REQUIRES(!Locks::verifier_deps_lock_); + + bool Equals(const VerifierDeps& rhs) const + REQUIRES(!Locks::verifier_deps_lock_); + + // Map from DexFiles into dependencies collected from verification of 
their methods. + std::map<const DexFile*, std::unique_ptr<DexFileDeps>> dex_deps_ + GUARDED_BY(Locks::verifier_deps_lock_); + + friend class VerifierDepsTest; + ART_FRIEND_TEST(VerifierDepsTest, StringToId); + ART_FRIEND_TEST(VerifierDepsTest, EncodeDecode); +}; + +} // namespace verifier +} // namespace art + +#endif // ART_RUNTIME_VERIFIER_VERIFIER_DEPS_H_ diff --git a/runtime/verifier/verifier_deps_test.cc b/runtime/verifier/verifier_deps_test.cc new file mode 100644 index 0000000000..bbaf59fef6 --- /dev/null +++ b/runtime/verifier/verifier_deps_test.cc @@ -0,0 +1,1060 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "verifier_deps.h" + +#include "class_linker.h" +#include "common_runtime_test.h" +#include "compiler_callbacks.h" +#include "dex_file.h" +#include "handle_scope-inl.h" +#include "method_verifier-inl.h" +#include "mirror/class_loader.h" +#include "runtime.h" +#include "thread.h" +#include "scoped_thread_state_change.h" + +namespace art { +namespace verifier { + +class VerifierDepsCompilerCallbacks : public CompilerCallbacks { + public: + explicit VerifierDepsCompilerCallbacks() + : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp), + deps_(nullptr) {} + + void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {} + void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {} + bool IsRelocationPossible() OVERRIDE { return false; } + + verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return deps_; } + void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; } + + private: + verifier::VerifierDeps* deps_; +}; + +class VerifierDepsTest : public CommonRuntimeTest { + public: + void SetUpRuntimeOptions(RuntimeOptions* options) { + CommonRuntimeTest::SetUpRuntimeOptions(options); + callbacks_.reset(new VerifierDepsCompilerCallbacks()); + } + + mirror::Class* FindClassByName(const std::string& name, ScopedObjectAccess* soa) + REQUIRES_SHARED(Locks::mutator_lock_) { + StackHandleScope<1> hs(Thread::Current()); + Handle<mirror::ClassLoader> class_loader_handle( + hs.NewHandle(soa->Decode<mirror::ClassLoader*>(class_loader_))); + mirror::Class* klass = class_linker_->FindClass(Thread::Current(), + name.c_str(), + class_loader_handle); + if (klass == nullptr) { + DCHECK(Thread::Current()->IsExceptionPending()); + Thread::Current()->ClearException(); + } + return klass; + } + + void SetVerifierDeps(const std::vector<const DexFile*>& dex_files) { + verifier_deps_.reset(new verifier::VerifierDeps(dex_files)); + VerifierDepsCompilerCallbacks* callbacks = + 
reinterpret_cast<VerifierDepsCompilerCallbacks*>(callbacks_.get()); + callbacks->SetVerifierDeps(verifier_deps_.get()); + } + + void LoadDexFile(ScopedObjectAccess* soa) REQUIRES_SHARED(Locks::mutator_lock_) { + class_loader_ = LoadDex("VerifierDeps"); + std::vector<const DexFile*> dex_files = GetDexFiles(class_loader_); + CHECK_EQ(dex_files.size(), 1u); + dex_file_ = dex_files.front(); + + SetVerifierDeps(dex_files); + + mirror::ClassLoader* loader = soa->Decode<mirror::ClassLoader*>(class_loader_); + class_linker_->RegisterDexFile(*dex_file_, loader); + + klass_Main_ = FindClassByName("LMain;", soa); + CHECK(klass_Main_ != nullptr); + } + + bool VerifyMethod(const std::string& method_name) { + ScopedObjectAccess soa(Thread::Current()); + LoadDexFile(&soa); + + StackHandleScope<2> hs(Thread::Current()); + Handle<mirror::ClassLoader> class_loader_handle( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_))); + Handle<mirror::DexCache> dex_cache_handle(hs.NewHandle(klass_Main_->GetDexCache())); + + const DexFile::ClassDef* class_def = klass_Main_->GetClassDef(); + const uint8_t* class_data = dex_file_->GetClassData(*class_def); + CHECK(class_data != nullptr); + + ClassDataItemIterator it(*dex_file_, class_data); + while (it.HasNextStaticField() || it.HasNextInstanceField()) { + it.Next(); + } + + ArtMethod* method = nullptr; + while (it.HasNextDirectMethod()) { + ArtMethod* resolved_method = class_linker_->ResolveMethod<ClassLinker::kNoICCECheckForCache>( + *dex_file_, + it.GetMemberIndex(), + dex_cache_handle, + class_loader_handle, + nullptr, + it.GetMethodInvokeType(*class_def)); + CHECK(resolved_method != nullptr); + if (method_name == resolved_method->GetName()) { + method = resolved_method; + break; + } + it.Next(); + } + CHECK(method != nullptr); + + MethodVerifier verifier(Thread::Current(), + dex_file_, + dex_cache_handle, + class_loader_handle, + *class_def, + it.GetMethodCodeItem(), + it.GetMemberIndex(), + method, + it.GetMethodAccessFlags(), 
+ true /* can_load_classes */, + true /* allow_soft_failures */, + true /* need_precise_constants */, + false /* verify to dump */, + true /* allow_thread_suspension */); + verifier.Verify(); + return !verifier.HasFailures(); + } + + void VerifyDexFile() { + std::string error_msg; + ScopedObjectAccess soa(Thread::Current()); + + LoadDexFile(&soa); + SetVerifierDeps({ dex_file_ }); + + for (size_t i = 0; i < dex_file_->NumClassDefs(); i++) { + const char* descriptor = dex_file_->GetClassDescriptor(dex_file_->GetClassDef(i)); + mirror::Class* klass = FindClassByName(descriptor, &soa); + if (klass != nullptr) { + MethodVerifier::VerifyClass(Thread::Current(), + klass, + nullptr, + true, + HardFailLogMode::kLogWarning, + &error_msg); + } + } + } + + bool TestAssignabilityRecording(const std::string& dst, + const std::string& src, + bool is_strict, + bool is_assignable) { + ScopedObjectAccess soa(Thread::Current()); + LoadDexFile(&soa); + mirror::Class* klass_dst = FindClassByName(dst, &soa); + DCHECK(klass_dst != nullptr); + mirror::Class* klass_src = FindClassByName(src, &soa); + DCHECK(klass_src != nullptr); + verifier_deps_->AddAssignability(*dex_file_, + klass_dst, + klass_src, + is_strict, + is_assignable); + return true; + } + + // Iterates over all assignability records and tries to find an entry which + // matches the expected destination/source pair. + bool HasAssignable(const std::string& expected_destination, + const std::string& expected_source, + bool expected_is_assignable) { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + for (auto& dex_dep : verifier_deps_->dex_deps_) { + const DexFile& dex_file = *dex_dep.first; + auto& storage = expected_is_assignable ? 
dex_dep.second->assignable_types_ + : dex_dep.second->unassignable_types_; + for (auto& entry : storage) { + std::string actual_destination = + verifier_deps_->GetStringFromId(dex_file, entry.GetDestination()); + std::string actual_source = verifier_deps_->GetStringFromId(dex_file, entry.GetSource()); + if ((expected_destination == actual_destination) && (expected_source == actual_source)) { + return true; + } + } + } + return false; + } + + // Iterates over all class resolution records, finds an entry which matches + // the given class descriptor and tests its properties. + bool HasClass(const std::string& expected_klass, + bool expected_resolved, + const std::string& expected_access_flags = "") { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + for (auto& dex_dep : verifier_deps_->dex_deps_) { + for (auto& entry : dex_dep.second->classes_) { + if (expected_resolved != entry.IsResolved()) { + continue; + } + + std::string actual_klass = dex_dep.first->StringByTypeIdx(entry.GetDexTypeIndex()); + if (expected_klass != actual_klass) { + continue; + } + + if (expected_resolved) { + // Test access flags. Note that PrettyJavaAccessFlags always appends + // a space after the modifiers. Add it to the expected access flags. + std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags()); + if (expected_access_flags + " " != actual_access_flags) { + continue; + } + } + + return true; + } + } + return false; + } + + // Iterates over all field resolution records, finds an entry which matches + // the given field class+name+type and tests its properties. 
+ bool HasField(const std::string& expected_klass, + const std::string& expected_name, + const std::string& expected_type, + bool expected_resolved, + const std::string& expected_access_flags = "", + const std::string& expected_decl_klass = "") { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + for (auto& dex_dep : verifier_deps_->dex_deps_) { + for (auto& entry : dex_dep.second->fields_) { + if (expected_resolved != entry.IsResolved()) { + continue; + } + + const DexFile::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex()); + + std::string actual_klass = dex_dep.first->StringByTypeIdx(field_id.class_idx_); + if (expected_klass != actual_klass) { + continue; + } + + std::string actual_name = dex_dep.first->StringDataByIdx(field_id.name_idx_); + if (expected_name != actual_name) { + continue; + } + + std::string actual_type = dex_dep.first->StringByTypeIdx(field_id.type_idx_); + if (expected_type != actual_type) { + continue; + } + + if (expected_resolved) { + // Test access flags. Note that PrettyJavaAccessFlags always appends + // a space after the modifiers. Add it to the expected access flags. + std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags()); + if (expected_access_flags + " " != actual_access_flags) { + continue; + } + + std::string actual_decl_klass = verifier_deps_->GetStringFromId( + *dex_dep.first, entry.GetDeclaringClassIndex()); + if (expected_decl_klass != actual_decl_klass) { + continue; + } + } + + return true; + } + } + return false; + } + + // Iterates over all method resolution records, finds an entry which matches + // the given field kind+class+name+signature and tests its properties. 
+ bool HasMethod(const std::string& expected_kind, + const std::string& expected_klass, + const std::string& expected_name, + const std::string& expected_signature, + bool expected_resolved, + const std::string& expected_access_flags = "", + const std::string& expected_decl_klass = "") { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + for (auto& dex_dep : verifier_deps_->dex_deps_) { + auto& storage = (expected_kind == "direct") ? dex_dep.second->direct_methods_ + : (expected_kind == "virtual") ? dex_dep.second->virtual_methods_ + : dex_dep.second->interface_methods_; + for (auto& entry : storage) { + if (expected_resolved != entry.IsResolved()) { + continue; + } + + const DexFile::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex()); + + std::string actual_klass = dex_dep.first->StringByTypeIdx(method_id.class_idx_); + if (expected_klass != actual_klass) { + continue; + } + + std::string actual_name = dex_dep.first->StringDataByIdx(method_id.name_idx_); + if (expected_name != actual_name) { + continue; + } + + std::string actual_signature = dex_dep.first->GetMethodSignature(method_id).ToString(); + if (expected_signature != actual_signature) { + continue; + } + + if (expected_resolved) { + // Test access flags. Note that PrettyJavaAccessFlags always appends + // a space after the modifiers. Add it to the expected access flags. 
+ std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags()); + if (expected_access_flags + " " != actual_access_flags) { + continue; + } + + std::string actual_decl_klass = verifier_deps_->GetStringFromId( + *dex_dep.first, entry.GetDeclaringClassIndex()); + if (expected_decl_klass != actual_decl_klass) { + continue; + } + } + + return true; + } + } + return false; + } + + size_t NumberOfCompiledDexFiles() { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + return verifier_deps_->dex_deps_.size(); + } + + size_t HasEachKindOfRecord() { + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + + bool has_strings = false; + bool has_assignability = false; + bool has_classes = false; + bool has_fields = false; + bool has_methods = false; + + for (auto& entry : verifier_deps_->dex_deps_) { + has_strings |= !entry.second->strings_.empty(); + has_assignability |= !entry.second->assignable_types_.empty(); + has_assignability |= !entry.second->unassignable_types_.empty(); + has_classes |= !entry.second->classes_.empty(); + has_fields |= !entry.second->fields_.empty(); + has_methods |= !entry.second->direct_methods_.empty(); + has_methods |= !entry.second->virtual_methods_.empty(); + has_methods |= !entry.second->interface_methods_.empty(); + } + + return has_strings && has_assignability && has_classes && has_fields && has_methods; + } + + std::unique_ptr<verifier::VerifierDeps> verifier_deps_; + const DexFile* dex_file_; + jobject class_loader_; + mirror::Class* klass_Main_; +}; + +TEST_F(VerifierDepsTest, StringToId) { + ScopedObjectAccess soa(Thread::Current()); + LoadDexFile(&soa); + + MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_); + + uint32_t id_Main1 = verifier_deps_->GetIdFromString(*dex_file_, "LMain;"); + ASSERT_LT(id_Main1, dex_file_->NumStringIds()); + ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*dex_file_, id_Main1)); + + uint32_t id_Main2 = verifier_deps_->GetIdFromString(*dex_file_, 
"LMain;"); + ASSERT_LT(id_Main2, dex_file_->NumStringIds()); + ASSERT_EQ("LMain;", verifier_deps_->GetStringFromId(*dex_file_, id_Main2)); + + uint32_t id_Lorem1 = verifier_deps_->GetIdFromString(*dex_file_, "Lorem ipsum"); + ASSERT_GE(id_Lorem1, dex_file_->NumStringIds()); + ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*dex_file_, id_Lorem1)); + + uint32_t id_Lorem2 = verifier_deps_->GetIdFromString(*dex_file_, "Lorem ipsum"); + ASSERT_GE(id_Lorem2, dex_file_->NumStringIds()); + ASSERT_EQ("Lorem ipsum", verifier_deps_->GetStringFromId(*dex_file_, id_Lorem2)); + + ASSERT_EQ(id_Main1, id_Main2); + ASSERT_EQ(id_Lorem1, id_Lorem2); + ASSERT_NE(id_Main1, id_Lorem1); +} + +TEST_F(VerifierDepsTest, Assignable_BothInBoot) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;", + /* src */ "Ljava/util/SimpleTimeZone;", + /* is_strict */ true, + /* is_assignable */ true)); + ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true)); +} + +TEST_F(VerifierDepsTest, Assignable_DestinationInBoot1) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/net/Socket;", + /* src */ "LMySSLSocket;", + /* is_strict */ true, + /* is_assignable */ true)); + ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "LMySSLSocket;", true)); +} + +TEST_F(VerifierDepsTest, Assignable_DestinationInBoot2) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;", + /* src */ "LMySimpleTimeZone;", + /* is_strict */ true, + /* is_assignable */ true)); + ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "LMySimpleTimeZone;", true)); +} + +TEST_F(VerifierDepsTest, Assignable_DestinationInBoot3) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/Collection;", + /* src */ "LMyThreadSet;", + /* is_strict */ true, + /* is_assignable */ true)); + ASSERT_TRUE(HasAssignable("Ljava/util/Collection;", "LMyThreadSet;", true)); +} + +TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) { + 
ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;", + /* src */ "[[Ljava/util/SimpleTimeZone;", + /* is_strict */ true, + /* is_assignable */ true)); + // If the component types of both arrays are resolved, we optimize the list of + // dependencies by recording a dependency on the component types. + ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[Ljava/util/SimpleTimeZone;", true)); + ASSERT_FALSE(HasAssignable("[Ljava/util/TimeZone;", "[Ljava/util/SimpleTimeZone;", true)); + ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true)); +} + +TEST_F(VerifierDepsTest, Assignable_BothArrays_Erroneous) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;", + /* src */ "[[LMyErroneousTimeZone;", + /* is_strict */ true, + /* is_assignable */ true)); + // If the component type of an array is erroneous, we record the dependency on + // the array type. + ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[LMyErroneousTimeZone;", true)); + ASSERT_TRUE(HasAssignable("[Ljava/util/TimeZone;", "[LMyErroneousTimeZone;", true)); + ASSERT_FALSE(HasAssignable("Ljava/util/TimeZone;", "LMyErroneousTimeZone;", true)); +} + + // We test that VerifierDeps does not try to optimize by storing assignability + // of the component types. This is due to the fact that the component type may + // be an erroneous class, even though the array type has resolved status. 
+ +TEST_F(VerifierDepsTest, Assignable_ArrayToInterface1) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/io/Serializable;", + /* src */ "[Ljava/util/TimeZone;", + /* is_strict */ true, + /* is_assignable */ true)); + ASSERT_TRUE(HasAssignable("Ljava/io/Serializable;", "[Ljava/util/TimeZone;", true)); +} + +TEST_F(VerifierDepsTest, Assignable_ArrayToInterface2) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/io/Serializable;", + /* src */ "[LMyThreadSet;", + /* is_strict */ true, + /* is_assignable */ true)); + ASSERT_TRUE(HasAssignable("Ljava/io/Serializable;", "[LMyThreadSet;", true)); +} + +TEST_F(VerifierDepsTest, NotAssignable_BothInBoot) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;", + /* src */ "Ljava/util/SimpleTimeZone;", + /* is_strict */ true, + /* is_assignable */ false)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false)); +} + +TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot1) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;", + /* src */ "LMySSLSocket;", + /* is_strict */ true, + /* is_assignable */ false)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LMySSLSocket;", false)); +} + +TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot2) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;", + /* src */ "LMySimpleTimeZone;", + /* is_strict */ true, + /* is_assignable */ false)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LMySimpleTimeZone;", false)); +} + +TEST_F(VerifierDepsTest, NotAssignable_BothArrays) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[Ljava/lang/Exception;", + /* src */ "[Ljava/util/SimpleTimeZone;", + /* is_strict */ true, + /* is_assignable */ false)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false)); +} + +TEST_F(VerifierDepsTest, ArgumentType_ResolvedClass) { + 
ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedClass")); + ASSERT_TRUE(HasClass("Ljava/lang/Thread;", true, "public")); +} + +TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) { + ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedReferenceArray")); + ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public final abstract")); +} + +TEST_F(VerifierDepsTest, ArgumentType_ResolvedPrimitiveArray) { + ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedPrimitiveArray")); + ASSERT_TRUE(HasClass("[B", true, "public final abstract")); +} + +TEST_F(VerifierDepsTest, ArgumentType_UnresolvedClass) { + ASSERT_TRUE(VerifyMethod("ArgumentType_UnresolvedClass")); + ASSERT_TRUE(HasClass("LUnresolvedClass;", false)); +} + +TEST_F(VerifierDepsTest, ArgumentType_UnresolvedSuper) { + ASSERT_TRUE(VerifyMethod("ArgumentType_UnresolvedSuper")); + ASSERT_TRUE(HasClass("LMySetWithUnresolvedSuper;", false)); +} + +TEST_F(VerifierDepsTest, ReturnType_Reference) { + ASSERT_TRUE(VerifyMethod("ReturnType_Reference")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/lang/IllegalStateException;", true)); +} + +TEST_F(VerifierDepsTest, ReturnType_Array) { + ASSERT_FALSE(VerifyMethod("ReturnType_Array")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/IllegalStateException;", false)); +} + +TEST_F(VerifierDepsTest, InvokeArgumentType) { + ASSERT_TRUE(VerifyMethod("InvokeArgumentType")); + ASSERT_TRUE(HasClass("Ljava/text/SimpleDateFormat;", true, "public")); + ASSERT_TRUE(HasClass("Ljava/util/SimpleTimeZone;", true, "public")); + ASSERT_TRUE(HasMethod("virtual", + "Ljava/text/SimpleDateFormat;", + "setTimeZone", + "(Ljava/util/TimeZone;)V", + true, + "public", + "Ljava/text/DateFormat;")); + ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true)); +} + +TEST_F(VerifierDepsTest, MergeTypes_RegisterLines) { + ASSERT_TRUE(VerifyMethod("MergeTypes_RegisterLines")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", 
"LMySocketTimeoutException;", true)); + ASSERT_TRUE(HasAssignable( + "Ljava/lang/Exception;", "Ljava/util/concurrent/TimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, MergeTypes_IfInstanceOf) { + ASSERT_TRUE(VerifyMethod("MergeTypes_IfInstanceOf")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/net/SocketTimeoutException;", true)); + ASSERT_TRUE(HasAssignable( + "Ljava/lang/Exception;", "Ljava/util/concurrent/TimeoutException;", true)); + ASSERT_TRUE(HasAssignable("Ljava/net/SocketTimeoutException;", "Ljava/lang/Exception;", false)); +} + +TEST_F(VerifierDepsTest, MergeTypes_Unresolved) { + ASSERT_TRUE(VerifyMethod("MergeTypes_Unresolved")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/net/SocketTimeoutException;", true)); + ASSERT_TRUE(HasAssignable( + "Ljava/lang/Exception;", "Ljava/util/concurrent/TimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, ConstClass_Resolved) { + ASSERT_TRUE(VerifyMethod("ConstClass_Resolved")); + ASSERT_TRUE(HasClass("Ljava/lang/IllegalStateException;", true, "public")); +} + +TEST_F(VerifierDepsTest, ConstClass_Unresolved) { + ASSERT_TRUE(VerifyMethod("ConstClass_Unresolved")); + ASSERT_TRUE(HasClass("LUnresolvedClass;", false)); +} + +TEST_F(VerifierDepsTest, CheckCast_Resolved) { + ASSERT_TRUE(VerifyMethod("CheckCast_Resolved")); + ASSERT_TRUE(HasClass("Ljava/lang/IllegalStateException;", true, "public")); +} + +TEST_F(VerifierDepsTest, CheckCast_Unresolved) { + ASSERT_TRUE(VerifyMethod("CheckCast_Unresolved")); + ASSERT_TRUE(HasClass("LUnresolvedClass;", false)); +} + +TEST_F(VerifierDepsTest, InstanceOf_Resolved) { + ASSERT_TRUE(VerifyMethod("InstanceOf_Resolved")); + ASSERT_TRUE(HasClass("Ljava/lang/IllegalStateException;", true, "public")); +} + +TEST_F(VerifierDepsTest, InstanceOf_Unresolved) { + ASSERT_TRUE(VerifyMethod("InstanceOf_Unresolved")); + ASSERT_TRUE(HasClass("LUnresolvedClass;", false)); +} + +TEST_F(VerifierDepsTest, NewInstance_Resolved) { + 
ASSERT_TRUE(VerifyMethod("NewInstance_Resolved")); + ASSERT_TRUE(HasClass("Ljava/lang/IllegalStateException;", true, "public")); +} + +TEST_F(VerifierDepsTest, NewInstance_Unresolved) { + ASSERT_TRUE(VerifyMethod("NewInstance_Unresolved")); + ASSERT_TRUE(HasClass("LUnresolvedClass;", false)); +} + +TEST_F(VerifierDepsTest, NewArray_Resolved) { + ASSERT_TRUE(VerifyMethod("NewArray_Resolved")); + ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public final abstract")); +} + +TEST_F(VerifierDepsTest, NewArray_Unresolved) { + ASSERT_TRUE(VerifyMethod("NewArray_Unresolved")); + ASSERT_TRUE(HasClass("[LUnresolvedClass;", false)); +} + +TEST_F(VerifierDepsTest, Throw) { + ASSERT_TRUE(VerifyMethod("Throw")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/lang/IllegalStateException;", true)); +} + +TEST_F(VerifierDepsTest, MoveException_Resolved) { + ASSERT_TRUE(VerifyMethod("MoveException_Resolved")); + ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public")); + ASSERT_TRUE(HasClass("Ljava/net/SocketTimeoutException;", true, "public")); + ASSERT_TRUE(HasClass("Ljava/util/zip/ZipException;", true, "public")); + + // Testing that all exception types are assignable to Throwable. + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/io/InterruptedIOException;", true)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/net/SocketTimeoutException;", true)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/util/zip/ZipException;", true)); + + // Testing that the merge type is assignable to Throwable. + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "Ljava/io/IOException;", true)); + + // Merging of exception types. 
+ ASSERT_TRUE(HasAssignable("Ljava/io/IOException;", "Ljava/io/InterruptedIOException;", true)); + ASSERT_TRUE(HasAssignable("Ljava/io/IOException;", "Ljava/util/zip/ZipException;", true)); + ASSERT_TRUE(HasAssignable( + "Ljava/io/InterruptedIOException;", "Ljava/net/SocketTimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, MoveException_Unresolved) { + ASSERT_FALSE(VerifyMethod("MoveException_Unresolved")); + ASSERT_TRUE(HasClass("LUnresolvedException;", false)); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInReferenced) { + ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInReferenced")); + ASSERT_TRUE(HasClass("Ljava/lang/System;", true, "public final")); + ASSERT_TRUE(HasField("Ljava/lang/System;", + "out", + "Ljava/io/PrintStream;", + true, + "public final static", + "Ljava/lang/System;")); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInSuperclass1) { + ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInSuperclass1")); + ASSERT_TRUE(HasClass("Ljava/util/SimpleTimeZone;", true, "public")); + ASSERT_TRUE(HasField( + "Ljava/util/SimpleTimeZone;", "LONG", "I", true, "public final static", "Ljava/util/TimeZone;")); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInSuperclass2) { + ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInSuperclass2")); + ASSERT_TRUE(HasField( + "LMySimpleTimeZone;", "SHORT", "I", true, "public final static", "Ljava/util/TimeZone;")); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface1) { + ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInInterface1")); + ASSERT_TRUE(HasClass("Ljavax/xml/transform/dom/DOMResult;", true, "public")); + ASSERT_TRUE(HasField("Ljavax/xml/transform/dom/DOMResult;", + "PI_ENABLE_OUTPUT_ESCAPING", + "Ljava/lang/String;", + true, + "public final static", + "Ljavax/xml/transform/Result;")); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface2) { + 
ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInInterface2")); + ASSERT_TRUE(HasField("LMyDOMResult;", + "PI_ENABLE_OUTPUT_ESCAPING", + "Ljava/lang/String;", + true, + "public final static", + "Ljavax/xml/transform/Result;")); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface3) { + ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInInterface3")); + ASSERT_TRUE(HasField("LMyResult;", + "PI_ENABLE_OUTPUT_ESCAPING", + "Ljava/lang/String;", + true, + "public final static", + "Ljavax/xml/transform/Result;")); +} + +TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface4) { + ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInInterface4")); + ASSERT_TRUE(HasField("LMyDocument;", + "ELEMENT_NODE", + "S", + true, + "public final static", + "Lorg/w3c/dom/Node;")); +} + +TEST_F(VerifierDepsTest, StaticField_Unresolved_ReferrerInBoot) { + ASSERT_TRUE(VerifyMethod("StaticField_Unresolved_ReferrerInBoot")); + ASSERT_TRUE(HasClass("Ljava/util/TimeZone;", true, "public abstract")); + ASSERT_TRUE(HasField("Ljava/util/TimeZone;", "x", "I", false)); +} + +TEST_F(VerifierDepsTest, StaticField_Unresolved_ReferrerInDex) { + ASSERT_TRUE(VerifyMethod("StaticField_Unresolved_ReferrerInDex")); + ASSERT_TRUE(HasField("LMyThreadSet;", "x", "I", false)); +} + +TEST_F(VerifierDepsTest, InstanceField_Resolved_DeclaredInReferenced) { + ASSERT_TRUE(VerifyMethod("InstanceField_Resolved_DeclaredInReferenced")); + ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public")); + ASSERT_TRUE(HasField("Ljava/io/InterruptedIOException;", + "bytesTransferred", + "I", + true, + "public", + "Ljava/io/InterruptedIOException;")); + ASSERT_TRUE(HasAssignable( + "Ljava/io/InterruptedIOException;", "LMySocketTimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, InstanceField_Resolved_DeclaredInSuperclass1) { + ASSERT_TRUE(VerifyMethod("InstanceField_Resolved_DeclaredInSuperclass1")); + 
ASSERT_TRUE(HasClass("Ljava/net/SocketTimeoutException;", true, "public")); + ASSERT_TRUE(HasField("Ljava/net/SocketTimeoutException;", + "bytesTransferred", + "I", + true, + "public", + "Ljava/io/InterruptedIOException;")); + ASSERT_TRUE(HasAssignable( + "Ljava/io/InterruptedIOException;", "LMySocketTimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, InstanceField_Resolved_DeclaredInSuperclass2) { + ASSERT_TRUE(VerifyMethod("InstanceField_Resolved_DeclaredInSuperclass2")); + ASSERT_TRUE(HasField("LMySocketTimeoutException;", + "bytesTransferred", + "I", + true, + "public", + "Ljava/io/InterruptedIOException;")); + ASSERT_TRUE(HasAssignable( + "Ljava/io/InterruptedIOException;", "LMySocketTimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, InstanceField_Unresolved_ReferrerInBoot) { + ASSERT_TRUE(VerifyMethod("InstanceField_Unresolved_ReferrerInBoot")); + ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public")); + ASSERT_TRUE(HasField("Ljava/io/InterruptedIOException;", "x", "I", false)); +} + +TEST_F(VerifierDepsTest, InstanceField_Unresolved_ReferrerInDex) { + ASSERT_TRUE(VerifyMethod("InstanceField_Unresolved_ReferrerInDex")); + ASSERT_TRUE(HasField("LMyThreadSet;", "x", "I", false)); +} + +TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInReferenced) { + ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInReferenced")); + ASSERT_TRUE(HasClass("Ljava/net/Socket;", true, "public")); + ASSERT_TRUE(HasMethod("direct", + "Ljava/net/Socket;", + "setSocketImplFactory", + "(Ljava/net/SocketImplFactory;)V", + true, + "public static", + "Ljava/net/Socket;")); +} + +TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass1) { + ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInSuperclass1")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasMethod("direct", + "Ljavax/net/ssl/SSLSocket;", + "setSocketImplFactory", + "(Ljava/net/SocketImplFactory;)V", + true, + 
"public static", + "Ljava/net/Socket;")); +} + +TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass2) { + ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInSuperclass2")); + ASSERT_TRUE(HasMethod("direct", + "LMySSLSocket;", + "setSocketImplFactory", + "(Ljava/net/SocketImplFactory;)V", + true, + "public static", + "Ljava/net/Socket;")); +} + +TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface1) { + ASSERT_TRUE(VerifyMethod("InvokeStatic_DeclaredInInterface1")); + ASSERT_TRUE(HasClass("Ljava/util/Map$Entry;", true, "public abstract interface")); + ASSERT_TRUE(HasMethod("direct", + "Ljava/util/Map$Entry;", + "comparingByKey", + "()Ljava/util/Comparator;", + true, + "public static", + "Ljava/util/Map$Entry;")); +} + +TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface2) { + ASSERT_FALSE(VerifyMethod("InvokeStatic_DeclaredInInterface2")); + ASSERT_TRUE(HasClass("Ljava/util/AbstractMap$SimpleEntry;", true, "public")); + ASSERT_TRUE(HasMethod("direct", + "Ljava/util/AbstractMap$SimpleEntry;", + "comparingByKey", + "()Ljava/util/Comparator;", + false)); +} + +TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) { + ASSERT_FALSE(VerifyMethod("InvokeStatic_Unresolved1")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) { + ASSERT_FALSE(VerifyMethod("InvokeStatic_Unresolved2")); + ASSERT_TRUE(HasMethod("direct", "LMySSLSocket;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) { + ASSERT_TRUE(VerifyMethod("InvokeDirect_Resolved_DeclaredInReferenced")); + ASSERT_TRUE(HasClass("Ljava/net/Socket;", true, "public")); + ASSERT_TRUE(HasMethod( + "direct", "Ljava/net/Socket;", "<init>", "()V", true, "public", "Ljava/net/Socket;")); +} + +TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass1) { + 
ASSERT_FALSE(VerifyMethod("InvokeDirect_Resolved_DeclaredInSuperclass1")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasMethod("direct", + "Ljavax/net/ssl/SSLSocket;", + "checkOldImpl", + "()V", + true, + "private", + "Ljava/net/Socket;")); +} + +TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass2) { + ASSERT_FALSE(VerifyMethod("InvokeDirect_Resolved_DeclaredInSuperclass2")); + ASSERT_TRUE(HasMethod( + "direct", "LMySSLSocket;", "checkOldImpl", "()V", true, "private", "Ljava/net/Socket;")); +} + +TEST_F(VerifierDepsTest, InvokeDirect_Unresolved1) { + ASSERT_FALSE(VerifyMethod("InvokeDirect_Unresolved1")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) { + ASSERT_FALSE(VerifyMethod("InvokeDirect_Unresolved2")); + ASSERT_TRUE(HasMethod("direct", "LMySSLSocket;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) { + ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInReferenced")); + ASSERT_TRUE(HasClass("Ljava/lang/Throwable;", true, "public")); + ASSERT_TRUE(HasMethod("virtual", + "Ljava/lang/Throwable;", + "getMessage", + "()Ljava/lang/String;", + true, + "public", + "Ljava/lang/Throwable;")); + // Type dependency on `this` argument. + ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "LMySocketTimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass1) { + ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInSuperclass1")); + ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public")); + ASSERT_TRUE(HasMethod("virtual", + "Ljava/io/InterruptedIOException;", + "getMessage", + "()Ljava/lang/String;", + true, + "public", + "Ljava/lang/Throwable;")); + // Type dependency on `this` argument. 
+ ASSERT_TRUE(HasAssignable("Ljava/lang/Throwable;", "LMySocketTimeoutException;", true)); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass2) { + ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInSuperclass2")); + ASSERT_TRUE(HasMethod("virtual", + "LMySocketTimeoutException;", + "getMessage", + "()Ljava/lang/String;", + true, + "public", + "Ljava/lang/Throwable;")); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperinterface) { + ASSERT_TRUE(VerifyMethod("InvokeVirtual_Resolved_DeclaredInSuperinterface")); + ASSERT_TRUE(HasMethod("virtual", + "LMyThreadSet;", + "size", + "()I", + true, + "public abstract", + "Ljava/util/Set;")); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved1) { + ASSERT_FALSE(VerifyMethod("InvokeVirtual_Unresolved1")); + ASSERT_TRUE(HasClass("Ljava/io/InterruptedIOException;", true, "public")); + ASSERT_TRUE(HasMethod("virtual", "Ljava/io/InterruptedIOException;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) { + ASSERT_FALSE(VerifyMethod("InvokeVirtual_Unresolved2")); + ASSERT_TRUE(HasMethod("virtual", "LMySocketTimeoutException;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeVirtual_ActuallyDirect) { + ASSERT_FALSE(VerifyMethod("InvokeVirtual_ActuallyDirect")); + ASSERT_TRUE(HasMethod("virtual", "LMyThread;", "activeCount", "()I", false)); + ASSERT_TRUE(HasMethod("direct", + "LMyThread;", + "activeCount", + "()I", + true, + "public static", + "Ljava/lang/Thread;")); +} + +TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) { + ASSERT_TRUE(VerifyMethod("InvokeInterface_Resolved_DeclaredInReferenced")); + ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public abstract interface")); + ASSERT_TRUE(HasMethod("interface", + "Ljava/lang/Runnable;", + "run", + "()V", + true, + "public abstract", + "Ljava/lang/Runnable;")); +} + +TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperclass) { + 
ASSERT_FALSE(VerifyMethod("InvokeInterface_Resolved_DeclaredInSuperclass")); + ASSERT_TRUE(HasMethod("interface", "LMyThread;", "join", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface1) { + ASSERT_FALSE(VerifyMethod("InvokeInterface_Resolved_DeclaredInSuperinterface1")); + ASSERT_TRUE(HasMethod("interface", + "LMyThreadSet;", + "run", + "()V", + true, + "public abstract", + "Ljava/lang/Runnable;")); +} + +TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface2) { + ASSERT_FALSE(VerifyMethod("InvokeInterface_Resolved_DeclaredInSuperinterface2")); + ASSERT_TRUE(HasMethod("interface", + "LMyThreadSet;", + "isEmpty", + "()Z", + true, + "public abstract", + "Ljava/util/Set;")); +} + +TEST_F(VerifierDepsTest, InvokeInterface_Unresolved1) { + ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved1")); + ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public abstract interface")); + ASSERT_TRUE(HasMethod("interface", "Ljava/lang/Runnable;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) { + ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved2")); + ASSERT_TRUE(HasMethod("interface", "LMyThreadSet;", "x", "()V", false)); +} + +TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) { + ASSERT_TRUE(VerifyMethod("InvokeSuper_ThisAssignable")); + ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public abstract interface")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Runnable;", "LMain;", true)); + ASSERT_TRUE(HasMethod("interface", + "Ljava/lang/Runnable;", + "run", + "()V", + true, + "public abstract", + "Ljava/lang/Runnable;")); +} + +TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) { + ASSERT_FALSE(VerifyMethod("InvokeSuper_ThisNotAssignable")); + ASSERT_TRUE(HasClass("Ljava/lang/Integer;", true, "public final")); + ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "LMain;", false)); + ASSERT_TRUE(HasMethod( + "virtual", "Ljava/lang/Integer;", "intValue", "()I", 
true, "public", "Ljava/lang/Integer;")); +} + +TEST_F(VerifierDepsTest, EncodeDecode) { + VerifyDexFile(); + + ASSERT_EQ(1u, NumberOfCompiledDexFiles()); + ASSERT_TRUE(HasEachKindOfRecord()); + + std::vector<uint8_t> buffer; + verifier_deps_->Encode(&buffer); + ASSERT_FALSE(buffer.empty()); + + VerifierDeps decoded_deps({ dex_file_ }, ArrayRef<uint8_t>(buffer)); + ASSERT_TRUE(verifier_deps_->Equals(decoded_deps)); +} + +} // namespace verifier +} // namespace art diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index 5f5fbc89f9..16c7f773c8 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -48,8 +48,8 @@ jclass WellKnownClasses::java_lang_IllegalAccessError; jclass WellKnownClasses::java_lang_NoClassDefFoundError; jclass WellKnownClasses::java_lang_Object; jclass WellKnownClasses::java_lang_OutOfMemoryError; -jclass WellKnownClasses::java_lang_reflect_AbstractMethod; jclass WellKnownClasses::java_lang_reflect_Constructor; +jclass WellKnownClasses::java_lang_reflect_Executable; jclass WellKnownClasses::java_lang_reflect_Field; jclass WellKnownClasses::java_lang_reflect_Method; jclass WellKnownClasses::java_lang_reflect_Proxy; @@ -154,7 +154,7 @@ jfieldID WellKnownClasses::java_lang_Throwable_detailMessage; jfieldID WellKnownClasses::java_lang_Throwable_stackTrace; jfieldID WellKnownClasses::java_lang_Throwable_stackState; jfieldID WellKnownClasses::java_lang_Throwable_suppressedExceptions; -jfieldID WellKnownClasses::java_lang_reflect_AbstractMethod_artMethod; +jfieldID WellKnownClasses::java_lang_reflect_Executable_artMethod; jfieldID WellKnownClasses::java_lang_reflect_Proxy_h; jfieldID WellKnownClasses::java_nio_DirectByteBuffer_capacity; jfieldID WellKnownClasses::java_nio_DirectByteBuffer_effectiveDirectAddress; @@ -237,8 +237,8 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_ExceptionInInitializerError = CacheClass(env, "java/lang/ExceptionInInitializerError"); java_lang_IllegalAccessError = 
CacheClass(env, "java/lang/IllegalAccessError"); java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError"); - java_lang_reflect_AbstractMethod = CacheClass(env, "java/lang/reflect/AbstractMethod"); java_lang_reflect_Constructor = CacheClass(env, "java/lang/reflect/Constructor"); + java_lang_reflect_Executable = CacheClass(env, "java/lang/reflect/Executable"); java_lang_reflect_Field = CacheClass(env, "java/lang/reflect/Field"); java_lang_reflect_Method = CacheClass(env, "java/lang/reflect/Method"); java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy"); @@ -362,7 +362,7 @@ void WellKnownClasses::Init(JNIEnv* env) { java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", "[Ljava/lang/StackTraceElement;"); java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "backtrace", "Ljava/lang/Object;"); java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;"); - java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "J"); + java_lang_reflect_Executable_artMethod = CacheField(env, java_lang_reflect_Executable, false, "artMethod", "J"); java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;"); java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I"); java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "address", "J"); diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h index ce710ffa29..b4d179c4df 100644 --- a/runtime/well_known_classes.h +++ b/runtime/well_known_classes.h @@ -59,8 +59,8 @@ struct WellKnownClasses { static jclass java_lang_NoClassDefFoundError; static jclass java_lang_Object; static jclass java_lang_OutOfMemoryError; - static jclass 
java_lang_reflect_AbstractMethod; static jclass java_lang_reflect_Constructor; + static jclass java_lang_reflect_Executable; static jclass java_lang_reflect_Field; static jclass java_lang_reflect_Method; static jclass java_lang_reflect_Proxy; @@ -148,7 +148,7 @@ struct WellKnownClasses { static jfieldID dalvik_system_DexPathList_dexElements; static jfieldID dalvik_system_DexPathList__Element_dexFile; static jfieldID dalvik_system_PathClassLoader_pathList; - static jfieldID java_lang_reflect_AbstractMethod_artMethod; + static jfieldID java_lang_reflect_Executable_artMethod; static jfieldID java_lang_reflect_Proxy_h; static jfieldID java_lang_Thread_daemon; static jfieldID java_lang_Thread_group; diff --git a/test/003-omnibus-opcodes/build b/test/003-omnibus-opcodes/build index 56e87844c0..dba3549b1a 100644 --- a/test/003-omnibus-opcodes/build +++ b/test/003-omnibus-opcodes/build @@ -26,6 +26,11 @@ if [ ${USE_JACK} = "true" ]; then jar cf classes.jill.jar -C classes . ${JACK} --import classes.jill.jar --output-dex . else - ${DX} -JXmx256m --debug --dex --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --output=classes.dex classes fi -zip $TEST_NAME.jar classes.dex +fi + +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex +fi diff --git a/test/005-annotations/build b/test/005-annotations/build index 93bee507df..216843d619 100644 --- a/test/005-annotations/build +++ b/test/005-annotations/build @@ -32,7 +32,11 @@ if [ ${USE_JACK} = "true" ]; then jar cf classes.jill.jar -C classes . ${JACK} --import classes.jill.jar --output-dex . 
else - ${DX} -JXmx256m --debug --dex --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --output=classes.dex classes + fi fi -zip $TEST_NAME.jar classes.dex +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex +fi diff --git a/test/023-many-interfaces/build b/test/023-many-interfaces/build index 3bb6747c17..b4b5bd4c4a 100644 --- a/test/023-many-interfaces/build +++ b/test/023-many-interfaces/build @@ -29,6 +29,8 @@ else ${JAVAC} -d classes src/*.java # dx needs more memory for that test so do not pass Xmx option here. - ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes - zip $TEST_NAME.jar classes.dex + if [ ${NEED_DEX} = "true" ]; then + ${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes + zip $TEST_NAME.jar classes.dex + fi fi diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java index 346e13d110..39e69a3066 100644 --- a/test/031-class-attributes/src/ClassAttrs.java +++ b/test/031-class-attributes/src/ClassAttrs.java @@ -1,9 +1,9 @@ import otherpackage.OtherPackageClass; import java.io.Serializable; -import java.lang.reflect.AbstractMethod; import java.lang.reflect.AccessibleObject; import java.lang.reflect.Constructor; +import java.lang.reflect.Executable; import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -223,7 +223,7 @@ public class ClassAttrs { try { Class<?> c = obj.getClass(); if (c == Method.class || c == Constructor.class) { - c = AbstractMethod.class; + c = Executable.class; } method = c.getDeclaredMethod("getSignatureAttribute"); method.setAccessible(true); diff --git a/test/056-const-string-jumbo/build b/test/056-const-string-jumbo/build index ae42519b8f..5344ac38eb 100644 --- a/test/056-const-string-jumbo/build +++ b/test/056-const-string-jumbo/build @@ -45,7 +45,11 @@ else mkdir classes ${JAVAC} -d classes src/*.java - ${DX} 
-JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes + fi fi -zip $TEST_NAME.jar classes.dex +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex +fi diff --git a/test/111-unresolvable-exception/build b/test/111-unresolvable-exception/build index 58ac26d836..cf19f60d51 100644 --- a/test/111-unresolvable-exception/build +++ b/test/111-unresolvable-exception/build @@ -25,6 +25,11 @@ if [ ${USE_JACK} = "true" ]; then jar cf classes.jill.jar -C classes . ${JACK} --import classes.jill.jar --output-dex . else - ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes + fi +fi + +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex fi -zip $TEST_NAME.jar classes.dex diff --git a/test/113-multidex/build b/test/113-multidex/build index 4557ccd22a..b980e501be 100644 --- a/test/113-multidex/build +++ b/test/113-multidex/build @@ -37,10 +37,15 @@ if [ ${USE_JACK} = "true" ]; then mv classes.dex classes2.dex mv classes-1.dex classes.dex else - # All except Main - ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + # All except Main + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes - # Only Main - ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2 + # Only Main + ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2 + fi +fi + +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex classes2.dex fi -zip $TEST_NAME.jar classes.dex classes2.dex diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run index fb0b967a2e..9290dd3cf4 100644 --- 
a/test/115-native-bridge/run +++ b/test/115-native-bridge/run @@ -18,6 +18,8 @@ ARGS=${@} # Use libnativebridgetest as a native bridge, start NativeBridgeMain (Main is JniTest main file). LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/') +# Trim all but the last entry in LIBPATH, which will be nativetest[64] +LIBPATH=${LIBPATH##*:} ln -sf ${LIBPATH}/libnativebridgetest.so . touch libarttest.so touch libarttestd.so diff --git a/test/124-missing-classes/build b/test/124-missing-classes/build index 0a340a26d6..ea45cd27e5 100644 --- a/test/124-missing-classes/build +++ b/test/124-missing-classes/build @@ -30,6 +30,11 @@ if [ ${USE_JACK} = "true" ]; then jar cf classes.jill.jar -C classes . ${JACK} --import classes.jill.jar --output-dex . else - ${DX} -JXmx256m --debug --dex --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + ${DX} -JXmx256m --debug --dex --output=classes.dex classes + fi +fi + +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex fi -zip $TEST_NAME.jar classes.dex diff --git a/test/126-miranda-multidex/build b/test/126-miranda-multidex/build index 00b9ba0ac2..2a5e7daa12 100644 --- a/test/126-miranda-multidex/build +++ b/test/126-miranda-multidex/build @@ -37,10 +37,15 @@ if [ ${USE_JACK} = "true" ]; then mv classes.dex classes2.dex mv classes-1.dex classes.dex else - # All except Main - ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes + if [ ${NEED_DEX} = "true" ]; then + # All except Main + ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes - # Only Main - ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2 + # Only Main + ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2 + fi +fi + +if [ ${NEED_DEX} = "true" ]; then + zip $TEST_NAME.jar classes.dex classes2.dex fi -zip $TEST_NAME.jar classes.dex classes2.dex diff --git 
a/test/128-reg-spilling-on-implicit-nullcheck/expected.txt b/test/128-reg-spill-on-implicit-nullcheck/expected.txt index 9bdf658823..9bdf658823 100644 --- a/test/128-reg-spilling-on-implicit-nullcheck/expected.txt +++ b/test/128-reg-spill-on-implicit-nullcheck/expected.txt diff --git a/test/128-reg-spilling-on-implicit-nullcheck/info.txt b/test/128-reg-spill-on-implicit-nullcheck/info.txt index 18b2112268..18b2112268 100644 --- a/test/128-reg-spilling-on-implicit-nullcheck/info.txt +++ b/test/128-reg-spill-on-implicit-nullcheck/info.txt diff --git a/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java b/test/128-reg-spill-on-implicit-nullcheck/src/Main.java index 48276bfd9f..48276bfd9f 100644 --- a/test/128-reg-spilling-on-implicit-nullcheck/src/Main.java +++ b/test/128-reg-spill-on-implicit-nullcheck/src/Main.java diff --git a/test/130-hprof/src/Main.java b/test/130-hprof/src/Main.java index c145f27103..57be3a7d2f 100644 --- a/test/130-hprof/src/Main.java +++ b/test/130-hprof/src/Main.java @@ -125,7 +125,7 @@ public class Main { private static File getHprofConf() { // Use the java.library.path. It points to the lib directory. 
- File libDir = new File(System.getProperty("java.library.path")); + File libDir = new File(System.getProperty("java.library.path").split(":")[0]); return new File(new File(libDir.getParentFile(), "bin"), "hprof-conv"); } diff --git a/test/201-built-in-exception-detail-messages/expected.txt b/test/201-built-in-except-detail-messages/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/201-built-in-exception-detail-messages/expected.txt +++ b/test/201-built-in-except-detail-messages/expected.txt diff --git a/test/201-built-in-exception-detail-messages/info.txt b/test/201-built-in-except-detail-messages/info.txt index a01b7277de..a01b7277de 100644 --- a/test/201-built-in-exception-detail-messages/info.txt +++ b/test/201-built-in-except-detail-messages/info.txt diff --git a/test/201-built-in-exception-detail-messages/src/Main.java b/test/201-built-in-except-detail-messages/src/Main.java index dc58819924..dc58819924 100644 --- a/test/201-built-in-exception-detail-messages/src/Main.java +++ b/test/201-built-in-except-detail-messages/src/Main.java diff --git a/test/303-verification-stress/build b/test/303-verification-stress/build index 5ff73eccf7..b67eaf2261 100644 --- a/test/303-verification-stress/build +++ b/test/303-verification-stress/build @@ -29,6 +29,8 @@ else ${JAVAC} -d classes src/*.java # dx needs more memory for that test so do not pass Xmx option here. 
- ${DX} --debug --dex --output=classes.dex classes - zip $TEST_NAME.jar classes.dex + if [ ${NEED_DEX} = "true" ]; then + ${DX} --debug --dex --output=classes.dex classes + zip $TEST_NAME.jar classes.dex + fi fi diff --git a/test/439-npe/expected.txt b/test/439-npe/expected.txt index 34855ee91d..b4fd6bb002 100644 --- a/test/439-npe/expected.txt +++ b/test/439-npe/expected.txt @@ -52,3 +52,73 @@ $opt$noinline$getByteElement $opt$noinline$getBooleanElement $opt$noinline$getCharElement $opt$noinline$getShortElement +i0=4 +i1=8 +i2=12 +i3=16 +i4=20 +i5=24 +i6=28 +i7=32 +i8=36 +i9=40 +i10=44 +i11=48 +i12=52 +i13=56 +i14=44 +i15=57 +l0=84 +l1=88 +l2=92 +l3=96 +l4=100 +l5=104 +l6=108 +l7=112 +l8=116 +l9=120 +l10=124 +l11=128 +l12=132 +l13=136 +l14=104 +l15=146 +f0=164.0 +f1=168.0 +f2=172.0 +f3=176.0 +f4=180.0 +f5=184.0 +f6=188.0 +f7=192.0 +f8=196.0 +f9=200.0 +f10=204.0 +f11=208.0 +f12=212.0 +f13=216.0 +f14=164.0 +f15=55.5 +d0=244.0 +d1=248.0 +d2=252.0 +d3=256.0 +d4=260.0 +d5=264.0 +d6=268.0 +d7=272.0 +d8=276.0 +d9=280.0 +d10=284.0 +d11=288.0 +d12=292.0 +d13=296.0 +d14=224.0 +d15=75.125 +addInt=42 +addLong=111 +addFloat=0.5 +addDouble=0.125 +m=null +i=2 diff --git a/test/439-npe/src/Main.java b/test/439-npe/src/Main.java index 8f66da04de..bc044a44c4 100644 --- a/test/439-npe/src/Main.java +++ b/test/439-npe/src/Main.java @@ -634,12 +634,246 @@ public class Main { } catch (NullPointerException npe) { check(npe, thisLine += 6, methodLine += 5, "$opt$noinline$getShortElement"); } - } - static void check(NullPointerException npe, int mainLine, int medthodLine, String methodName) { + $opt$noinline$testRegisterRetrieval(); + } + + static void $opt$noinline$testRegisterRetrieval() { + Main[] array = $noinline$PrepareArray(); + int i0 = 0; + int i1 = 1; + int i2 = 2; + int i3 = 3; + int i4 = 4; + int i5 = 5; + int i6 = 6; + int i7 = 7; + int i8 = 8; + int i9 = 9; + int i10 = 10; + int i11 = 11; + int i12 = 12; + int i13 = 13; + int i14 = 14; + int i15 = 15; + long l0 = 20L; + long 
l1 = 21L; + long l2 = 22L; + long l3 = 23L; + long l4 = 24L; + long l5 = 25L; + long l6 = 26L; + long l7 = 27L; + long l8 = 28L; + long l9 = 29L; + long l10 = 30L; + long l11 = 31L; + long l12 = 32L; + long l13 = 33L; + long l14 = 34L; + long l15 = 35L; + float f0 = 40.0f; + float f1 = 41.0f; + float f2 = 42.0f; + float f3 = 43.0f; + float f4 = 44.0f; + float f5 = 45.0f; + float f6 = 46.0f; + float f7 = 47.0f; + float f8 = 48.0f; + float f9 = 49.0f; + float f10 = 50.0f; + float f11 = 51.0f; + float f12 = 52.0f; + float f13 = 53.0f; + float f14 = 54.0f; + float f15 = 55.0f; + double d0 = 60.0; + double d1 = 61.0; + double d2 = 62.0; + double d3 = 63.0; + double d4 = 64.0; + double d5 = 65.0; + double d6 = 66.0; + double d7 = 67.0; + double d8 = 68.0; + double d9 = 69.0; + double d10 = 70.0; + double d11 = 71.0; + double d12 = 72.0; + double d13 = 73.0; + double d14 = 74.0; + double d15 = 75.0; + int addInt = -1; + long addLong = -2L; + float addFloat = -3.0f; + double addDouble = -4.0; + Main m = null; + int i = 0; + try { + for (i = 0; i < array.length; ++i) { + m = array[i]; + // We have 16 ints, 16 longs, 16 floats, 16 doubles and a few helper variables here, + // none of them anonymous. Hopefully, all available physical registers will be allocated + // to these variables, so that when `m.intField` throws NPE during the third iteration, + // we will fully test retrieval of values from all physical registers. 
+ addInt = m.intField; + addLong = m.longField; + addFloat = m.floatField; + addDouble = m.doubleField; + i0 += i1; + i1 += i2; + i2 += i3; + i3 += i4; + i4 += i5; + i5 += i6; + i6 += i7; + i7 += i8; + i8 += i9; + i9 += i10; + i10 += i11; + i11 += i12; + i12 += i13; + i13 += i14; + i14 += i15; + i15 += addInt; + l0 += l1; + l1 += l2; + l2 += l3; + l3 += l4; + l4 += l5; + l5 += l6; + l6 += l7; + l7 += l8; + l8 += l9; + l9 += l10; + l10 += l11; + l11 += l12; + l12 += l13; + l13 += l14; + l14 += l15; + l15 += addLong; + f0 += f1; + f1 += f2; + f2 += f3; + f3 += f4; + f4 += f5; + f5 += f6; + f6 += f7; + f7 += f8; + f8 += f9; + f9 += f10; + f10 += f11; + f11 += f12; + f12 += f13; + f13 += f14; + f14 += f15; + f15 += addFloat; + d0 += d1; + d1 += d2; + d2 += d3; + d3 += d4; + d4 += d5; + d5 += d6; + d6 += d7; + d7 += d8; + d8 += d9; + d9 += d10; + d10 += d11; + d11 += d12; + d12 += d13; + d13 += d14; + d14 += d15; + d15 += addDouble; + } + } catch (NullPointerException npe) { + System.out.println("i0=" + i0); + System.out.println("i1=" + i1); + System.out.println("i2=" + i2); + System.out.println("i3=" + i3); + System.out.println("i4=" + i4); + System.out.println("i5=" + i5); + System.out.println("i6=" + i6); + System.out.println("i7=" + i7); + System.out.println("i8=" + i8); + System.out.println("i9=" + i9); + System.out.println("i10=" + i10); + System.out.println("i11=" + i11); + System.out.println("i12=" + i12); + System.out.println("i13=" + i13); + System.out.println("i14=" + i14); + System.out.println("i15=" + i15); + System.out.println("l0=" + l0); + System.out.println("l1=" + l1); + System.out.println("l2=" + l2); + System.out.println("l3=" + l3); + System.out.println("l4=" + l4); + System.out.println("l5=" + l5); + System.out.println("l6=" + l6); + System.out.println("l7=" + l7); + System.out.println("l8=" + l8); + System.out.println("l9=" + l9); + System.out.println("l10=" + l10); + System.out.println("l11=" + l11); + System.out.println("l12=" + l12); + 
System.out.println("l13=" + l13); + System.out.println("l14=" + l14); + System.out.println("l15=" + l15); + System.out.println("f0=" + f0); + System.out.println("f1=" + f1); + System.out.println("f2=" + f2); + System.out.println("f3=" + f3); + System.out.println("f4=" + f4); + System.out.println("f5=" + f5); + System.out.println("f6=" + f6); + System.out.println("f7=" + f7); + System.out.println("f8=" + f8); + System.out.println("f9=" + f9); + System.out.println("f10=" + f10); + System.out.println("f11=" + f11); + System.out.println("f12=" + f12); + System.out.println("f13=" + f13); + System.out.println("f14=" + f14); + System.out.println("f15=" + f15); + System.out.println("d0=" + d0); + System.out.println("d1=" + d1); + System.out.println("d2=" + d2); + System.out.println("d3=" + d3); + System.out.println("d4=" + d4); + System.out.println("d5=" + d5); + System.out.println("d6=" + d6); + System.out.println("d7=" + d7); + System.out.println("d8=" + d8); + System.out.println("d9=" + d9); + System.out.println("d10=" + d10); + System.out.println("d11=" + d11); + System.out.println("d12=" + d12); + System.out.println("d13=" + d13); + System.out.println("d14=" + d14); + System.out.println("d15=" + d15); + System.out.println("addInt=" + addInt); + System.out.println("addLong=" + addLong); + System.out.println("addFloat=" + addFloat); + System.out.println("addDouble=" + addDouble); + System.out.println("m=" + m); + System.out.println("i=" + i); + } + } + + static Main[] $noinline$PrepareArray() { + if (doThrow) { throw new Error(); } + Main[] array = new Main[] { new Main(), new Main(), null, new Main() }; + array[1].intField = 42; + array[1].longField = 111L; + array[1].floatField = 0.5f; + array[1].doubleField = 0.125; + return array; + } + + static void check(NullPointerException npe, int mainLine, int methodLine, String methodName) { System.out.println(methodName); StackTraceElement[] trace = npe.getStackTrace(); - checkElement(trace[0], "Main", methodName, 
"Main.java", medthodLine); + checkElement(trace[0], "Main", methodName, "Main.java", methodLine); checkElement(trace[1], "Main", "main", "Main.java", mainLine); } diff --git a/test/458-checker-instruction-simplification/expected.txt b/test/458-checker-instruct-simplification/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/458-checker-instruction-simplification/expected.txt +++ b/test/458-checker-instruct-simplification/expected.txt diff --git a/test/458-checker-instruction-simplification/info.txt b/test/458-checker-instruct-simplification/info.txt index 09da84b925..09da84b925 100644 --- a/test/458-checker-instruction-simplification/info.txt +++ b/test/458-checker-instruct-simplification/info.txt diff --git a/test/458-checker-instruction-simplification/smali/SmaliTests.smali b/test/458-checker-instruct-simplification/smali/SmaliTests.smali index 6845961f39..6845961f39 100644 --- a/test/458-checker-instruction-simplification/smali/SmaliTests.smali +++ b/test/458-checker-instruct-simplification/smali/SmaliTests.smali diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java index 5b1473523b..5b1473523b 100644 --- a/test/458-checker-instruction-simplification/src/Main.java +++ b/test/458-checker-instruct-simplification/src/Main.java diff --git a/test/462-checker-inlining-across-dex-files/expected.txt b/test/462-checker-inlining-dex-files/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/462-checker-inlining-across-dex-files/expected.txt +++ b/test/462-checker-inlining-dex-files/expected.txt diff --git a/test/462-checker-inlining-across-dex-files/info.txt b/test/462-checker-inlining-dex-files/info.txt index 57008c39e1..57008c39e1 100644 --- a/test/462-checker-inlining-across-dex-files/info.txt +++ b/test/462-checker-inlining-dex-files/info.txt diff --git a/test/462-checker-inlining-across-dex-files/multidex.jpp b/test/462-checker-inlining-dex-files/multidex.jpp index 
ae554566cb..ae554566cb 100644 --- a/test/462-checker-inlining-across-dex-files/multidex.jpp +++ b/test/462-checker-inlining-dex-files/multidex.jpp diff --git a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java b/test/462-checker-inlining-dex-files/src-multidex/OtherDex.java index 2056e2f2aa..2056e2f2aa 100644 --- a/test/462-checker-inlining-across-dex-files/src-multidex/OtherDex.java +++ b/test/462-checker-inlining-dex-files/src-multidex/OtherDex.java diff --git a/test/462-checker-inlining-across-dex-files/src/Main.java b/test/462-checker-inlining-dex-files/src/Main.java index c2bb479468..c2bb479468 100644 --- a/test/462-checker-inlining-across-dex-files/src/Main.java +++ b/test/462-checker-inlining-dex-files/src/Main.java diff --git a/test/468-checker-bool-simplifier-regression/expected.txt b/test/468-checker-bool-simplif-regression/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/468-checker-bool-simplifier-regression/expected.txt +++ b/test/468-checker-bool-simplif-regression/expected.txt diff --git a/test/468-checker-bool-simplifier-regression/info.txt b/test/468-checker-bool-simplif-regression/info.txt index 0a465846b1..0a465846b1 100644 --- a/test/468-checker-bool-simplifier-regression/info.txt +++ b/test/468-checker-bool-simplif-regression/info.txt diff --git a/test/468-checker-bool-simplifier-regression/smali/TestCase.smali b/test/468-checker-bool-simplif-regression/smali/TestCase.smali index 87ad21ead4..87ad21ead4 100644 --- a/test/468-checker-bool-simplifier-regression/smali/TestCase.smali +++ b/test/468-checker-bool-simplif-regression/smali/TestCase.smali diff --git a/test/468-checker-bool-simplifier-regression/src/Main.java b/test/468-checker-bool-simplif-regression/src/Main.java index 8fe05c7a8a..8fe05c7a8a 100644 --- a/test/468-checker-bool-simplifier-regression/src/Main.java +++ b/test/468-checker-bool-simplif-regression/src/Main.java diff --git a/test/477-long-to-float-conversion-precision/expected.txt 
b/test/477-long-2-float-convers-precision/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/477-long-to-float-conversion-precision/expected.txt +++ b/test/477-long-2-float-convers-precision/expected.txt diff --git a/test/477-long-to-float-conversion-precision/info.txt b/test/477-long-2-float-convers-precision/info.txt index 1e07cf3f0b..1e07cf3f0b 100644 --- a/test/477-long-to-float-conversion-precision/info.txt +++ b/test/477-long-2-float-convers-precision/info.txt diff --git a/test/477-long-to-float-conversion-precision/src/Main.java b/test/477-long-2-float-convers-precision/src/Main.java index 568bc04d6c..568bc04d6c 100644 --- a/test/477-long-to-float-conversion-precision/src/Main.java +++ b/test/477-long-2-float-convers-precision/src/Main.java diff --git a/test/496-checker-inlining-and-class-loader/expected.txt b/test/496-checker-inlining-class-loader/expected.txt index 312c28f8b0..312c28f8b0 100644 --- a/test/496-checker-inlining-and-class-loader/expected.txt +++ b/test/496-checker-inlining-class-loader/expected.txt diff --git a/test/496-checker-inlining-and-class-loader/info.txt b/test/496-checker-inlining-class-loader/info.txt index aa4b256207..aa4b256207 100644 --- a/test/496-checker-inlining-and-class-loader/info.txt +++ b/test/496-checker-inlining-class-loader/info.txt diff --git a/test/496-checker-inlining-and-class-loader/src/FirstSeenByMyClassLoader.java b/test/496-checker-inlining-class-loader/src/FirstSeenByMyClassLoader.java index e97b4e3391..e97b4e3391 100644 --- a/test/496-checker-inlining-and-class-loader/src/FirstSeenByMyClassLoader.java +++ b/test/496-checker-inlining-class-loader/src/FirstSeenByMyClassLoader.java diff --git a/test/496-checker-inlining-and-class-loader/src/Main.java b/test/496-checker-inlining-class-loader/src/Main.java index 15d4dc07bc..15d4dc07bc 100644 --- a/test/496-checker-inlining-and-class-loader/src/Main.java +++ b/test/496-checker-inlining-class-loader/src/Main.java diff --git 
a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java index 9435ef1def..3de900a3a9 100644 --- a/test/527-checker-array-access-split/src/Main.java +++ b/test/527-checker-array-access-split/src/Main.java @@ -101,7 +101,7 @@ public class Main { /// CHECK: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArrayGet [<<Address>>,<<Index>>] @@ -114,7 +114,7 @@ public class Main { /// CHECK: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArrayGet [<<Address>>,<<Index>>] public static int get(int array[], int index) { @@ -140,7 +140,7 @@ public class Main { /// CHECK: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address>>,<<Index>>,<<Arg>>] @@ -159,7 +159,7 @@ public class Main { /// CHECK: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address>>,<<Index>>,<<Arg>>] public static void set(int array[], int index, int value) { @@ -183,10 +183,10 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: 
<<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>] /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN$after_arch (after) @@ -194,7 +194,7 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK-NOT: IntermediateAddress @@ -214,10 +214,10 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>] /// CHECK-START-ARM: void Main.getSet(int[], int) GVN$after_arch (after) @@ -225,7 +225,7 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: 
<<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK-NOT: IntermediateAddress @@ -253,11 +253,11 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK: NewArray - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>] /// CHECK-START-ARM64: int[] Main.accrossGC(int[], int) GVN$after_arch (after) @@ -265,11 +265,11 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK: NewArray - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: ArraySet [<<Address2>>,<<Index>>,<<Add>>] @@ -287,11 +287,11 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet 
[<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK: NewArray - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>] /// CHECK-START-ARM: int[] Main.accrossGC(int[], int) GVN$after_arch (after) @@ -299,11 +299,11 @@ public class Main { /// CHECK-DAG: <<DataOffset:i\d+>> IntConstant /// CHECK: <<Array:l\d+>> NullCheck /// CHECK: <<Index:i\d+>> BoundsCheck - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK: NewArray - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: ArraySet [<<Address2>>,<<Index>>,<<Add>>] public static int[] accrossGC(int array[], int index) { @@ -343,10 +343,10 @@ public class Main { /// CHECK: <<Index:i\d+>> Phi /// CHECK: If // -------------- Loop - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>] /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN$after_arch (after) @@ -356,7 +356,7 @@ public class Main { /// CHECK: <<Index:i\d+>> Phi /// CHECK: If // -------------- Loop - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: 
<<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK-NOT: IntermediateAddress @@ -380,10 +380,10 @@ public class Main { /// CHECK: <<Index:i\d+>> Phi /// CHECK: If // -------------- Loop - /// CHECK: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: <<ArrayGet:i\d+>> ArrayGet [<<Address1>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] - /// CHECK: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-NEXT: ArraySet [<<Address2>>,<<Index>>,<<Add>>] /// CHECK-START-ARM: int Main.canMergeAfterBCE1() GVN$after_arch (after) @@ -393,7 +393,7 @@ public class Main { /// CHECK: <<Index:i\d+>> Phi /// CHECK: If // -------------- Loop - /// CHECK: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: <<ArrayGet:i\d+>> ArrayGet [<<Address>>,<<Index>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGet>>,<<Const1>>] /// CHECK-NOT: IntermediateAddress @@ -437,12 +437,12 @@ public class Main { /// CHECK: If // -------------- Loop /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>] - /// CHECK-DAG: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK-DAG: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address1>>,<<Index>>] - /// CHECK-DAG: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK-DAG: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address2>>,<<Index1>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>] - /// CHECK: <<Address3:l\d+>> 
IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address3:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>] /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN$after_arch (after) @@ -453,7 +453,7 @@ public class Main { /// CHECK: If // -------------- Loop /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>] - /// CHECK-DAG: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK-DAG: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address>>,<<Index>>] /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address>>,<<Index1>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>] @@ -486,12 +486,12 @@ public class Main { /// CHECK: If // -------------- Loop /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>] - /// CHECK-DAG: <<Address1:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK-DAG: <<Address1:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address1>>,<<Index>>] - /// CHECK-DAG: <<Address2:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK-DAG: <<Address2:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address2>>,<<Index1>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>] - /// CHECK: <<Address3:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK: <<Address3:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// CHECK: ArraySet [<<Address3>>,<<Index1>>,<<Add>>] /// CHECK-START-ARM: int Main.canMergeAfterBCE2() GVN$after_arch (after) @@ -502,7 +502,7 @@ public class Main { /// CHECK: If // -------------- Loop /// CHECK-DAG: <<Index1:i\d+>> Add [<<Index>>,<<Const1>>] - /// CHECK-DAG: <<Address:l\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] + /// CHECK-DAG: <<Address:i\d+>> IntermediateAddress [<<Array>>,<<DataOffset>>] /// 
CHECK-DAG: <<ArrayGetI:i\d+>> ArrayGet [<<Address>>,<<Index>>] /// CHECK-DAG: <<ArrayGetI1:i\d+>> ArrayGet [<<Address>>,<<Index1>>] /// CHECK: <<Add:i\d+>> Add [<<ArrayGetI>>,<<ArrayGetI1>>] diff --git a/test/530-checker-regression-reftype-final/expected.txt b/test/530-checker-regression-reftyp-final/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/530-checker-regression-reftype-final/expected.txt +++ b/test/530-checker-regression-reftyp-final/expected.txt diff --git a/test/530-checker-regression-reftype-final/info.txt b/test/530-checker-regression-reftyp-final/info.txt index 07789d6e9e..07789d6e9e 100644 --- a/test/530-checker-regression-reftype-final/info.txt +++ b/test/530-checker-regression-reftyp-final/info.txt diff --git a/test/530-checker-regression-reftype-final/smali/TestCase.smali b/test/530-checker-regression-reftyp-final/smali/TestCase.smali index 44facfca05..44facfca05 100644 --- a/test/530-checker-regression-reftype-final/smali/TestCase.smali +++ b/test/530-checker-regression-reftyp-final/smali/TestCase.smali diff --git a/test/530-checker-regression-reftype-final/src/Main.java b/test/530-checker-regression-reftyp-final/src/Main.java index f86b515cae..f86b515cae 100644 --- a/test/530-checker-regression-reftype-final/src/Main.java +++ b/test/530-checker-regression-reftyp-final/src/Main.java diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java index 04a12fa212..02c609ef7c 100644 --- a/test/538-checker-embed-constants/src/Main.java +++ b/test/538-checker-embed-constants/src/Main.java @@ -37,7 +37,7 @@ public class Main { } /// CHECK-START-ARM: int Main.and511(int) disassembly (after) - /// CHECK: movw {{r\d+}}, #511 + /// CHECK: mov {{r\d+}}, #511 /// CHECK: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static int and511(int arg) { @@ -61,7 +61,7 @@ public class Main { } /// CHECK-START-ARM: int Main.or511(int) disassembly (after) - /// CHECK: movw {{r\d+}}, #511 + /// CHECK: mov 
{{r\d+}}, #511 /// CHECK: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static int or511(int arg) { @@ -85,7 +85,7 @@ public class Main { } /// CHECK-START-ARM: int Main.xor511(int) disassembly (after) - /// CHECK: movw {{r\d+}}, #511 + /// CHECK: mov {{r\d+}}, #511 /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} public static int xor511(int arg) { @@ -114,7 +114,7 @@ public class Main { } /// CHECK-START-ARM: long Main.and511(long) disassembly (after) - /// CHECK: movw {{r\d+}}, #511 + /// CHECK: mov {{r\d+}}, #511 /// CHECK-NEXT: movs {{r\d+}}, #0 /// CHECK-NOT: and{{(\.w)?}} /// CHECK-NOT: bic{{(\.w)?}} @@ -166,7 +166,7 @@ public class Main { } /// CHECK-START-ARM: long Main.or511(long) disassembly (after) - /// CHECK: movw {{r\d+}}, #511 + /// CHECK: mov {{r\d+}}, #511 /// CHECK-NEXT: movs {{r\d+}}, #0 /// CHECK-NOT: orr{{(\.w)?}} /// CHECK-NOT: orn @@ -217,7 +217,7 @@ public class Main { } /// CHECK-START-ARM: long Main.xor511(long) disassembly (after) - /// CHECK: movw {{r\d+}}, #511 + /// CHECK: mov {{r\d+}}, #511 /// CHECK-NEXT: movs {{r\d+}}, #0 /// CHECK-NOT: eor{{(\.w)?}} /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} @@ -230,7 +230,7 @@ public class Main { /// CHECK-START-ARM: long Main.xorNot15(long) disassembly (after) /// CHECK-DAG: mvn {{r\d+}}, #15 - /// CHECK-DAG: mov.w {{r\d+}}, #-1 + /// CHECK-DAG: mov {{r\d+}}, #4294967295 /// CHECK-NOT: eor{{(\.w)?}} /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}} @@ -258,7 +258,7 @@ public class Main { /// CHECK-NOT: mov.w {{r\d+}}, #-268435456 /// CHECK-NOT: eor{{(\.w)?}} /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #15 - /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #-268435456 + /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #4026531840 /// CHECK-NOT: eor{{(\.w)?}} public static long xor0xf00000000000000f(long arg) { @@ -285,7 +285,7 @@ public class Main { /// CHECK-START-ARM: long Main.shl2(long) disassembly (after) /// CHECK: lsl{{s?|\.w}} 
<<oh:r\d+>>, {{r\d+}}, #2 - /// CHECK: orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #30 + /// CHECK: orr <<oh>>, <<low:r\d+>>, lsr #30 /// CHECK: lsl{{s?|\.w}} {{r\d+}}, <<low>>, #2 /// CHECK-START-ARM: long Main.shl2(long) disassembly (after) @@ -297,7 +297,7 @@ public class Main { /// CHECK-START-ARM: long Main.shl31(long) disassembly (after) /// CHECK: lsl{{s?|\.w}} <<oh:r\d+>>, {{r\d+}}, #31 - /// CHECK: orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #1 + /// CHECK: orr <<oh>>, <<low:r\d+>>, lsr #1 /// CHECK: lsl{{s?|\.w}} {{r\d+}}, <<low>>, #31 /// CHECK-START-ARM: long Main.shl31(long) disassembly (after) @@ -342,7 +342,7 @@ public class Main { /// CHECK-START-ARM: long Main.shr1(long) disassembly (after) /// CHECK: asrs{{(\.w)?}} {{r\d+}}, {{r\d+}}, #1 - /// CHECK: mov.w {{r\d+}}, {{r\d+}}, rrx + /// CHECK: rrx {{r\d+}}, {{r\d+}} /// CHECK-START-ARM: long Main.shr1(long) disassembly (after) /// CHECK-NOT: asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} @@ -353,7 +353,7 @@ public class Main { /// CHECK-START-ARM: long Main.shr2(long) disassembly (after) /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #2 - /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #30 + /// CHECK: orr <<ol>>, <<high:r\d+>>, lsl #30 /// CHECK-DAG: asr{{s?|\.w}} {{r\d+}}, <<high>>, #2 /// CHECK-START-ARM: long Main.shr2(long) disassembly (after) @@ -365,7 +365,7 @@ public class Main { /// CHECK-START-ARM: long Main.shr31(long) disassembly (after) /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #31 - /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1 + /// CHECK: orr <<ol>>, <<high:r\d+>>, lsl #1 /// CHECK: asr{{s?|\.w}} {{r\d+}}, <<high>>, #31 /// CHECK-START-ARM: long Main.shr31(long) disassembly (after) @@ -411,7 +411,7 @@ public class Main { /// CHECK-START-ARM: long Main.ushr1(long) disassembly (after) /// CHECK: lsrs{{|.w}} {{r\d+}}, {{r\d+}}, #1 - /// CHECK: mov.w {{r\d+}}, {{r\d+}}, rrx + /// CHECK: rrx {{r\d+}}, {{r\d+}} /// CHECK-START-ARM: long Main.ushr1(long) disassembly (after) /// 
CHECK-NOT: lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}} @@ -422,7 +422,7 @@ public class Main { /// CHECK-START-ARM: long Main.ushr2(long) disassembly (after) /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #2 - /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #30 + /// CHECK: orr <<ol>>, <<high:r\d+>>, lsl #30 /// CHECK-DAG: lsr{{s?|\.w}} {{r\d+}}, <<high>>, #2 /// CHECK-START-ARM: long Main.ushr2(long) disassembly (after) @@ -434,7 +434,7 @@ public class Main { /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after) /// CHECK: lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #31 - /// CHECK: orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1 + /// CHECK: orr <<ol>>, <<high:r\d+>>, lsl #1 /// CHECK: lsr{{s?|\.w}} {{r\d+}}, <<high>>, #31 /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after) @@ -508,10 +508,10 @@ public class Main { /// CHECK: <<ConstM1:j\d+>> LongConstant -1 /// CHECK: Add [<<Arg>>,<<ConstM1>>] /// CHECK-NEXT: subs r{{\d+}}, #1 - /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #-1 + /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #4294967295 /// CHECK: Sub [<<Arg>>,<<ConstM1>>] /// CHECK-NEXT: adds r{{\d+}}, #1 - /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #0 + /// CHECK-NEXT: adc r{{\d+}}, #0 public static long addM1(long arg) { return (arg + (-1)) | (arg - (-1)); @@ -542,10 +542,10 @@ public class Main { /// CHECK-NEXT: sbc r{{\d+}}, r{{\d+}}, #249561088 /// CHECK: Add [<<Arg>>,<<ConstD>>] // There may or may not be a MOV here. - /// CHECK: addw r{{\d+}}, r{{\d+}}, #4095 + /// CHECK: add r{{\d+}}, r{{\d+}}, #4095 /// CHECK: Add [<<Arg>>,<<ConstE>>] // There may or may not be a MOV here. 
- /// CHECK: subw r{{\d+}}, r{{\d+}}, #2051 + /// CHECK: sub r{{\d+}}, r{{\d+}}, #2051 /// CHECK: Add [<<Arg>>,<<ConstF>>] /// CHECK-NEXT: adds{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}} /// CHECK-NEXT: adc{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}} @@ -597,10 +597,10 @@ public class Main { /// CHECK-NEXT: adc r{{\d+}}, r{{\d+}}, #249561088 /// CHECK: Sub [<<Arg>>,<<ConstD>>] // There may or may not be a MOV here. - /// CHECK: subw r{{\d+}}, r{{\d+}}, #4095 + /// CHECK: sub r{{\d+}}, r{{\d+}}, #4095 /// CHECK: Sub [<<Arg>>,<<ConstE>>] // There may or may not be a MOV here. - /// CHECK: addw r{{\d+}}, r{{\d+}}, #2051 + /// CHECK: add r{{\d+}}, r{{\d+}}, #2051 /// CHECK: Sub [<<Arg>>,<<ConstF>>] /// CHECK-NEXT: subs{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}} /// CHECK-NEXT: sbc{{(\.w)?}} r{{\d+}}, r{{\d+}}, r{{\d+}} diff --git a/test/547-regression-trycatch-critical-edge/expected.txt b/test/547-regression-trycatch-critic-edge/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/547-regression-trycatch-critical-edge/expected.txt +++ b/test/547-regression-trycatch-critic-edge/expected.txt diff --git a/test/547-regression-trycatch-critical-edge/info.txt b/test/547-regression-trycatch-critic-edge/info.txt index dc798c0016..dc798c0016 100644 --- a/test/547-regression-trycatch-critical-edge/info.txt +++ b/test/547-regression-trycatch-critic-edge/info.txt diff --git a/test/547-regression-trycatch-critical-edge/smali/TestCase.smali b/test/547-regression-trycatch-critic-edge/smali/TestCase.smali index 53a3cc5b62..53a3cc5b62 100644 --- a/test/547-regression-trycatch-critical-edge/smali/TestCase.smali +++ b/test/547-regression-trycatch-critic-edge/smali/TestCase.smali diff --git a/test/547-regression-trycatch-critical-edge/src/Main.java b/test/547-regression-trycatch-critic-edge/src/Main.java index 8eddac3fea..8eddac3fea 100644 --- a/test/547-regression-trycatch-critical-edge/src/Main.java +++ b/test/547-regression-trycatch-critic-edge/src/Main.java diff --git 
a/test/557-checker-instruction-simplifier-ror/expected.txt b/test/557-checker-instruct-simplifier-ror/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/557-checker-instruction-simplifier-ror/expected.txt +++ b/test/557-checker-instruct-simplifier-ror/expected.txt diff --git a/test/557-checker-instruction-simplifier-ror/info.txt b/test/557-checker-instruct-simplifier-ror/info.txt index f9a86f8009..f9a86f8009 100644 --- a/test/557-checker-instruction-simplifier-ror/info.txt +++ b/test/557-checker-instruct-simplifier-ror/info.txt diff --git a/test/557-checker-instruction-simplifier-ror/src/Main.java b/test/557-checker-instruct-simplifier-ror/src/Main.java index 0e3d14518a..0e3d14518a 100644 --- a/test/557-checker-instruction-simplifier-ror/src/Main.java +++ b/test/557-checker-instruct-simplifier-ror/src/Main.java diff --git a/test/564-checker-negbitwise/src/Main.java b/test/564-checker-negbitwise/src/Main.java index ccb8ff4fdf..a047d215a5 100644 --- a/test/564-checker-negbitwise/src/Main.java +++ b/test/564-checker-negbitwise/src/Main.java @@ -74,7 +74,7 @@ public class Main { /// CHECK-NOT: And /// CHECK-START-ARM: int Main.$opt$noinline$notAnd(int, int) disassembly (after) - /// CHECK: bic.w r{{\d+}}, r{{\d+}}, r{{\d+}} + /// CHECK: bic r{{\d+}}, r{{\d+}}, r{{\d+}} public static int $opt$noinline$notAnd(int base, int mask) { if (doThrow) throw new Error(); @@ -124,7 +124,7 @@ public class Main { /// CHECK-NOT: Or /// CHECK-START-ARM: long Main.$opt$noinline$notOr(long, long) disassembly (after) - /// CHECK: orn.w r{{\d+}}, r{{\d+}}, r{{\d+}} + /// CHECK: orn r{{\d+}}, r{{\d+}}, r{{\d+}} public static long $opt$noinline$notOr(long base, long mask) { if (doThrow) throw new Error(); diff --git a/test/580-checker-string-factory-intrinsics/expected.txt b/test/580-checker-string-fact-intrinsics/expected.txt index 86e041dad6..86e041dad6 100644 --- a/test/580-checker-string-factory-intrinsics/expected.txt +++ b/test/580-checker-string-fact-intrinsics/expected.txt 
diff --git a/test/580-checker-string-factory-intrinsics/info.txt b/test/580-checker-string-fact-intrinsics/info.txt index 3d01a1964a..3d01a1964a 100644 --- a/test/580-checker-string-factory-intrinsics/info.txt +++ b/test/580-checker-string-fact-intrinsics/info.txt diff --git a/test/580-checker-string-factory-intrinsics/src/Main.java b/test/580-checker-string-fact-intrinsics/src/Main.java index a2e34bffd0..a2e34bffd0 100644 --- a/test/580-checker-string-factory-intrinsics/src/Main.java +++ b/test/580-checker-string-fact-intrinsics/src/Main.java diff --git a/test/588-checker-irreducible-lifetime-hole/expected.txt b/test/588-checker-irreducib-lifetime-hole/expected.txt index aab200982b..aab200982b 100644 --- a/test/588-checker-irreducible-lifetime-hole/expected.txt +++ b/test/588-checker-irreducib-lifetime-hole/expected.txt diff --git a/test/588-checker-irreducible-lifetime-hole/info.txt b/test/588-checker-irreducib-lifetime-hole/info.txt index a2861a9fd5..a2861a9fd5 100644 --- a/test/588-checker-irreducible-lifetime-hole/info.txt +++ b/test/588-checker-irreducib-lifetime-hole/info.txt diff --git a/test/588-checker-irreducible-lifetime-hole/smali/IrreducibleLoop.smali b/test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali index 186f0ab3e8..186f0ab3e8 100644 --- a/test/588-checker-irreducible-lifetime-hole/smali/IrreducibleLoop.smali +++ b/test/588-checker-irreducib-lifetime-hole/smali/IrreducibleLoop.smali diff --git a/test/588-checker-irreducible-lifetime-hole/src/Main.java b/test/588-checker-irreducib-lifetime-hole/src/Main.java index 98565b103a..98565b103a 100644 --- a/test/588-checker-irreducible-lifetime-hole/src/Main.java +++ b/test/588-checker-irreducib-lifetime-hole/src/Main.java diff --git a/test/590-checker-array-set-null-regression/expected.txt b/test/590-checker-arr-set-null-regression/expected.txt index b0aad4deb5..b0aad4deb5 100644 --- a/test/590-checker-array-set-null-regression/expected.txt +++ 
b/test/590-checker-arr-set-null-regression/expected.txt diff --git a/test/590-checker-array-set-null-regression/info.txt b/test/590-checker-arr-set-null-regression/info.txt index fe173a334d..fe173a334d 100644 --- a/test/590-checker-array-set-null-regression/info.txt +++ b/test/590-checker-arr-set-null-regression/info.txt diff --git a/test/590-checker-array-set-null-regression/src/Main.java b/test/590-checker-arr-set-null-regression/src/Main.java index 792ee4ecd6..792ee4ecd6 100644 --- a/test/590-checker-array-set-null-regression/src/Main.java +++ b/test/590-checker-arr-set-null-regression/src/Main.java diff --git a/test/593-checker-boolean-to-integral-conv/expected.txt b/test/593-checker-boolean-2-integral-conv/expected.txt index b0aad4deb5..b0aad4deb5 100644 --- a/test/593-checker-boolean-to-integral-conv/expected.txt +++ b/test/593-checker-boolean-2-integral-conv/expected.txt diff --git a/test/593-checker-boolean-to-integral-conv/info.txt b/test/593-checker-boolean-2-integral-conv/info.txt index 2d883c77ee..2d883c77ee 100644 --- a/test/593-checker-boolean-to-integral-conv/info.txt +++ b/test/593-checker-boolean-2-integral-conv/info.txt diff --git a/test/593-checker-boolean-to-integral-conv/src/Main.java b/test/593-checker-boolean-2-integral-conv/src/Main.java index b4c91c8db6..b4c91c8db6 100644 --- a/test/593-checker-boolean-to-integral-conv/src/Main.java +++ b/test/593-checker-boolean-2-integral-conv/src/Main.java diff --git a/test/593-checker-long-to-float-regression/expected.txt b/test/593-checker-long-2-float-regression/expected.txt index e69de29bb2..e69de29bb2 100644 --- a/test/593-checker-long-to-float-regression/expected.txt +++ b/test/593-checker-long-2-float-regression/expected.txt diff --git a/test/593-checker-long-to-float-regression/info.txt b/test/593-checker-long-2-float-regression/info.txt index 39402e9cdd..39402e9cdd 100644 --- a/test/593-checker-long-to-float-regression/info.txt +++ b/test/593-checker-long-2-float-regression/info.txt diff --git 
a/test/593-checker-long-to-float-regression/src/Main.java b/test/593-checker-long-2-float-regression/src/Main.java index 9c07f3d692..9c07f3d692 100644 --- a/test/593-checker-long-to-float-regression/src/Main.java +++ b/test/593-checker-long-2-float-regression/src/Main.java diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-gen/build index ccebbe4ac9..ccebbe4ac9 100755 --- a/test/961-default-iface-resolution-generated/build +++ b/test/961-default-iface-resolution-gen/build diff --git a/test/961-default-iface-resolution-generated/expected.txt b/test/961-default-iface-resolution-gen/expected.txt index 1ddd65d177..1ddd65d177 100644 --- a/test/961-default-iface-resolution-generated/expected.txt +++ b/test/961-default-iface-resolution-gen/expected.txt diff --git a/test/961-default-iface-resolution-generated/info.txt b/test/961-default-iface-resolution-gen/info.txt index 2cd2cc75b7..2cd2cc75b7 100644 --- a/test/961-default-iface-resolution-generated/info.txt +++ b/test/961-default-iface-resolution-gen/info.txt diff --git a/test/961-default-iface-resolution-generated/util-src/generate_java.py b/test/961-default-iface-resolution-gen/util-src/generate_java.py index a205cd6ce0..a205cd6ce0 100755 --- a/test/961-default-iface-resolution-generated/util-src/generate_java.py +++ b/test/961-default-iface-resolution-gen/util-src/generate_java.py diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-gen/build index ccebbe4ac9..ccebbe4ac9 100755 --- a/test/964-default-iface-init-generated/build +++ b/test/964-default-iface-init-gen/build diff --git a/test/964-default-iface-init-generated/expected.txt b/test/964-default-iface-init-gen/expected.txt index 1ddd65d177..1ddd65d177 100644 --- a/test/964-default-iface-init-generated/expected.txt +++ b/test/964-default-iface-init-gen/expected.txt diff --git a/test/964-default-iface-init-generated/info.txt b/test/964-default-iface-init-gen/info.txt index 
5805a86854..5805a86854 100644 --- a/test/964-default-iface-init-generated/info.txt +++ b/test/964-default-iface-init-gen/info.txt diff --git a/test/964-default-iface-init-generated/src/Displayer.java b/test/964-default-iface-init-gen/src/Displayer.java index 4be0ab2732..4be0ab2732 100644 --- a/test/964-default-iface-init-generated/src/Displayer.java +++ b/test/964-default-iface-init-gen/src/Displayer.java diff --git a/test/964-default-iface-init-generated/util-src/generate_java.py b/test/964-default-iface-init-gen/util-src/generate_java.py index b2df49f70e..b2df49f70e 100755 --- a/test/964-default-iface-init-generated/util-src/generate_java.py +++ b/test/964-default-iface-init-gen/util-src/generate_java.py diff --git a/test/968-default-partial-compile-generated/build b/test/968-default-partial-compile-gen/build index 1e9f8aadd5..1e9f8aadd5 100755 --- a/test/968-default-partial-compile-generated/build +++ b/test/968-default-partial-compile-gen/build diff --git a/test/968-default-partial-compile-generated/expected.txt b/test/968-default-partial-compile-gen/expected.txt index 1ddd65d177..1ddd65d177 100644 --- a/test/968-default-partial-compile-generated/expected.txt +++ b/test/968-default-partial-compile-gen/expected.txt diff --git a/test/968-default-partial-compile-generated/info.txt b/test/968-default-partial-compile-gen/info.txt index bc1c42816e..bc1c42816e 100644 --- a/test/968-default-partial-compile-generated/info.txt +++ b/test/968-default-partial-compile-gen/info.txt diff --git a/test/968-default-partial-compile-generated/util-src/generate_java.py b/test/968-default-partial-compile-gen/util-src/generate_java.py index 35290efe1d..35290efe1d 100755 --- a/test/968-default-partial-compile-generated/util-src/generate_java.py +++ b/test/968-default-partial-compile-gen/util-src/generate_java.py diff --git a/test/968-default-partial-compile-generated/util-src/generate_smali.py b/test/968-default-partial-compile-gen/util-src/generate_smali.py index 
9855bcf854..9855bcf854 100755 --- a/test/968-default-partial-compile-generated/util-src/generate_smali.py +++ b/test/968-default-partial-compile-gen/util-src/generate_smali.py diff --git a/test/970-iface-super-resolution-generated/build b/test/970-iface-super-resolution-gen/build index fd1b271c1c..fd1b271c1c 100755 --- a/test/970-iface-super-resolution-generated/build +++ b/test/970-iface-super-resolution-gen/build diff --git a/test/970-iface-super-resolution-generated/expected.txt b/test/970-iface-super-resolution-gen/expected.txt index 1ddd65d177..1ddd65d177 100644 --- a/test/970-iface-super-resolution-generated/expected.txt +++ b/test/970-iface-super-resolution-gen/expected.txt diff --git a/test/970-iface-super-resolution-generated/info.txt b/test/970-iface-super-resolution-gen/info.txt index 2cd2cc75b7..2cd2cc75b7 100644 --- a/test/970-iface-super-resolution-generated/info.txt +++ b/test/970-iface-super-resolution-gen/info.txt diff --git a/test/970-iface-super-resolution-generated/util-src/generate_java.py b/test/970-iface-super-resolution-gen/util-src/generate_java.py index c12f10d790..c12f10d790 100755 --- a/test/970-iface-super-resolution-generated/util-src/generate_java.py +++ b/test/970-iface-super-resolution-gen/util-src/generate_java.py diff --git a/test/970-iface-super-resolution-generated/util-src/generate_smali.py b/test/970-iface-super-resolution-gen/util-src/generate_smali.py index cb7b0fa4f2..cb7b0fa4f2 100755 --- a/test/970-iface-super-resolution-generated/util-src/generate_smali.py +++ b/test/970-iface-super-resolution-gen/util-src/generate_smali.py diff --git a/test/Android.bp b/test/Android.bp new file mode 100644 index 0000000000..ff408f41a7 --- /dev/null +++ b/test/Android.bp @@ -0,0 +1,357 @@ +// +// Copyright (C) 2016 The Android Open Source Project +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +art_cc_defaults { + name: "art_test_defaults", + host_supported: true, + target: { + android_arm: { + relative_install_path: "art/arm", + }, + android_arm64: { + relative_install_path: "art/arm64", + }, + android_mips: { + relative_install_path: "art/mips", + }, + android_mips64: { + relative_install_path: "art/mips64", + }, + android_x86: { + relative_install_path: "art/x86", + }, + android_x86_64: { + relative_install_path: "art/x86_64", + }, + darwin: { + enabled: false, + }, + }, +} + +art_cc_defaults { + name: "art_gtest_defaults", + test_per_src: true, + // These really are gtests, but the gtest library comes from libart-gtest.so + gtest: false, + defaults: [ + "art_defaults", + "art_debug_defaults", + "art_test_defaults", + ], + + shared_libs: [ + "libartd", + "libartd-disassembler", + "libvixld-arm", + "libvixld-arm64", + "libart-gtest", + + "libicuuc", + "libicui18n", + "libnativehelper", + ], + whole_static_libs: [ + "libsigchain", + ], + include_dirs: [ + "art", + "art/cmdline", + ], + + target: { + linux: { + ldflags: [ + // Allow jni_compiler_test to find Java_MyClassNatives_bar + // within itself using dlopen(NULL, ...). + // Mac OS linker doesn't understand --export-dynamic. 
+ "-Wl,--export-dynamic", + "-Wl,-u,Java_MyClassNatives_bar", + "-Wl,-u,Java_MyClassNatives_sbar", + ], + shared_libs: [ + "libziparchive", + "libz-host", + ], + host_ldlibs: [ + "-ldl", + "-lpthread", + ], + cflags: [ + // gtest issue + "-Wno-used-but-marked-unused", + "-Wno-deprecated", + "-Wno-missing-noreturn", + ], + }, + android: { + ldflags: [ + // Allow jni_compiler_test to find Java_MyClassNatives_bar + // within itself using dlopen(NULL, ...). + "-Wl,--export-dynamic", + "-Wl,-u,Java_MyClassNatives_bar", + "-Wl,-u,Java_MyClassNatives_sbar", + ], + shared_libs: [ + "libcutils", + "libdl", + "libz", + ], + cflags: [ + // gtest issue + "-Wno-used-but-marked-unused", + "-Wno-deprecated", + "-Wno-missing-noreturn", + ], + }, + }, +} + +art_cc_defaults { + name: "libart-gtest-defaults", + host_supported: true, + defaults: [ + "art_defaults", + "art_debug_defaults", + ], + shared_libs: [ + "libartd", + "libartd-compiler", + ], + static_libs: [ + "libgtest", + ], + target: { + android32: { + cflags: ["-DART_TARGET_NATIVETEST_DIR=/data/nativetest/art"], + }, + android64: { + cflags: ["-DART_TARGET_NATIVETEST_DIR=/data/nativetest64/art"], + }, + android: { + cflags: [ + // gtest issue + "-Wno-used-but-marked-unused", + "-Wno-deprecated", + "-Wno-missing-noreturn", + ], + }, + linux: { + cflags: [ + // gtest issue + "-Wno-used-but-marked-unused", + "-Wno-deprecated", + "-Wno-missing-noreturn", + ], + }, + darwin: { + enabled: false, + }, + }, +} + +art_cc_library { + name: "libart-gtest", + host_supported: true, + whole_static_libs: [ + "libart-compiler-gtest", + "libart-runtime-gtest", + "libgtest", + ], + shared_libs: [ + "libartd", + "libartd-compiler", + ], + target: { + android: { + shared_libs: [ + "libdl", + ], + }, + host: { + host_ldlibs: [ + "-ldl", + "-lpthread", + ], + }, + darwin: { + enabled: false, + }, + }, +} + +cc_defaults { + name: "libartagent-defaults", + defaults: [ + "art_defaults", + "art_test_defaults", + ], + shared_libs: [ + 
"libbacktrace", + "libnativehelper", + ], + target: { + android: { + shared_libs: ["libdl"], + }, + host: { + host_ldlibs: [ + "-ldl", + "-lpthread", + ], + }, + }, +} + +art_cc_test_library { + name: "libartagent", + srcs: ["900-hello-plugin/load_unload.cc"], + defaults: ["libartagent-defaults"], + shared_libs: ["libart"], +} + +art_cc_test_library { + name: "libartagentd", + srcs: ["900-hello-plugin/load_unload.cc"], + defaults: [ + "libartagent-defaults", + "art_debug_defaults", + ], + shared_libs: ["libartd"], +} + +art_cc_test_library { + name: "libtiagent", + defaults: ["libartagent-defaults"], + srcs: [ + "ti-agent/common_load.cc", + "901-hello-ti-agent/basics.cc", + ], + shared_libs: [ + "libart", + "libopenjdkjvmti", + ], +} + +art_cc_test_library { + name: "libtiagentd", + defaults: [ + "libartagent-defaults", + "art_debug_defaults", + ], + srcs: [ + "ti-agent/common_load.cc", + "901-hello-ti-agent/basics.cc", + ], + shared_libs: [ + "libartd", + "libopenjdkjvmtid", + ], +} + +cc_defaults { + name: "libarttest-defaults", + defaults: [ + "art_defaults", + "art_test_defaults", + ], + srcs: [ + "common/runtime_state.cc", + "common/stack_inspect.cc", + "004-JniTest/jni_test.cc", + "004-SignalTest/signaltest.cc", + "004-ReferenceMap/stack_walk_refmap_jni.cc", + "004-StackWalk/stack_walk_jni.cc", + "004-ThreadStress/thread_stress.cc", + "004-UnsafeTest/unsafe_test.cc", + "044-proxy/native_proxy.cc", + "051-thread/thread_test.cc", + "117-nopatchoat/nopatchoat.cc", + "1337-gc-coverage/gc_coverage.cc", + "136-daemon-jni-shutdown/daemon_jni_shutdown.cc", + "137-cfi/cfi.cc", + "139-register-natives/regnative.cc", + "141-class-unload/jni_unload.cc", + "148-multithread-gc-annotations/gc_coverage.cc", + "149-suspend-all-stress/suspend_all.cc", + "454-get-vreg/get_vreg_jni.cc", + "457-regs/regs_jni.cc", + "461-get-reference-vreg/get_reference_vreg_jni.cc", + "466-get-live-vreg/get_live_vreg_jni.cc", + "497-inlining-and-class-loader/clear_dex_cache.cc", + 
"543-env-long-ref/env_long_ref.cc", + "566-polymorphic-inlining/polymorphic_inline.cc", + "570-checker-osr/osr.cc", + "595-profile-saving/profile-saving.cc", + "596-app-images/app_images.cc", + "597-deopt-new-string/deopt.cc", + ], + shared_libs: [ + "libbacktrace", + "libnativehelper", + ], + target: { + android: { + shared_libs: ["libdl"], + }, + host: { + host_ldlibs: [ + "-ldl", + "-lpthread", + ], + }, + }, +} + +art_cc_test_library { + name: "libarttest", + defaults: ["libarttest-defaults"], + shared_libs: ["libart"], +} + +art_cc_test_library { + name: "libarttestd", + defaults: [ + "libarttest-defaults", + "art_debug_defaults", + ], + shared_libs: ["libartd"], +} + +art_cc_test_library { + name: "libnativebridgetest", + shared_libs: ["libart"], + defaults: [ + "art_defaults", + "art_debug_defaults", + "art_test_defaults", + ], + srcs: ["115-native-bridge/nativebridge.cc"], + target: { + android: { + shared_libs: ["libdl"], + }, + host: { + host_ldlibs: [ + "-ldl", + "-lpthread", + ], + }, + linux: { + host_ldlibs: ["-lrt"], + }, + }, +} diff --git a/test/Android.libartagent.mk b/test/Android.libartagent.mk deleted file mode 100644 index 729de3f7ae..0000000000 --- a/test/Android.libartagent.mk +++ /dev/null @@ -1,101 +0,0 @@ -# -# Copyright (C) 2016 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -LOCAL_PATH := $(call my-dir) - -include art/build/Android.common_build.mk - -LIBARTAGENT_COMMON_SRC_FILES := \ - 900-hello-plugin/load_unload.cc - -# $(1): target or host -# $(2): debug or <empty> -define build-libartagent - ifneq ($(1),target) - ifneq ($(1),host) - $$(error expected target or host for argument 1, received $(1)) - endif - endif - ifneq ($(2),debug) - ifneq ($(2),) - $$(error d or empty for argument 2, received $(2)) - endif - suffix := d - else - suffix := - endif - - art_target_or_host := $(1) - - include $(CLEAR_VARS) - LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) - LOCAL_MODULE := libartagent$$(suffix) - ifeq ($$(art_target_or_host),target) - LOCAL_MODULE_TAGS := tests - endif - LOCAL_SRC_FILES := $(LIBARTAGENT_COMMON_SRC_FILES) - LOCAL_SHARED_LIBRARIES += libart$$(suffix) libbacktrace libnativehelper - LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime - LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk - LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libartagent.mk - ifeq ($$(art_target_or_host),target) - $(call set-target-local-clang-vars) - ifeq ($$(suffix),d) - $(call set-target-local-cflags-vars,debug) - else - $(call set-target-local-cflags-vars,ndebug) - endif - LOCAL_SHARED_LIBRARIES += libdl - LOCAL_MULTILIB := both - LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32) - LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64) - LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH) - include $(BUILD_SHARED_LIBRARY) - else # host - LOCAL_CLANG := $(ART_HOST_CLANG) - LOCAL_CFLAGS := $(ART_HOST_CFLAGS) - LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) - ifeq ($$(suffix),d) - LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS) - LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS) - else - LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS) - LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS) - endif - LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread - LOCAL_IS_HOST_MODULE := true - LOCAL_MULTILIB := both - include 
$(BUILD_HOST_SHARED_LIBRARY) - endif - - # Clear locally used variables. - art_target_or_host := - suffix := -endef - -ifeq ($(ART_BUILD_TARGET),true) - $(eval $(call build-libartagent,target,)) - $(eval $(call build-libartagent,target,debug)) -endif -ifeq ($(ART_BUILD_HOST),true) - $(eval $(call build-libartagent,host,)) - $(eval $(call build-libartagent,host,debug)) -endif - -# Clear locally used variables. -LOCAL_PATH := -LIBARTAGENT_COMMON_SRC_FILES := diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk deleted file mode 100644 index ec5b7d23c0..0000000000 --- a/test/Android.libarttest.mk +++ /dev/null @@ -1,134 +0,0 @@ -# -# Copyright (C) 2011 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -LOCAL_PATH := $(call my-dir) - -include art/build/Android.common_build.mk - -LIBARTTEST_COMMON_SRC_FILES := \ - common/runtime_state.cc \ - common/stack_inspect.cc \ - 004-JniTest/jni_test.cc \ - 004-SignalTest/signaltest.cc \ - 004-ReferenceMap/stack_walk_refmap_jni.cc \ - 004-StackWalk/stack_walk_jni.cc \ - 004-ThreadStress/thread_stress.cc \ - 004-UnsafeTest/unsafe_test.cc \ - 044-proxy/native_proxy.cc \ - 051-thread/thread_test.cc \ - 117-nopatchoat/nopatchoat.cc \ - 1337-gc-coverage/gc_coverage.cc \ - 136-daemon-jni-shutdown/daemon_jni_shutdown.cc \ - 137-cfi/cfi.cc \ - 139-register-natives/regnative.cc \ - 141-class-unload/jni_unload.cc \ - 148-multithread-gc-annotations/gc_coverage.cc \ - 149-suspend-all-stress/suspend_all.cc \ - 454-get-vreg/get_vreg_jni.cc \ - 457-regs/regs_jni.cc \ - 461-get-reference-vreg/get_reference_vreg_jni.cc \ - 466-get-live-vreg/get_live_vreg_jni.cc \ - 497-inlining-and-class-loader/clear_dex_cache.cc \ - 543-env-long-ref/env_long_ref.cc \ - 566-polymorphic-inlining/polymorphic_inline.cc \ - 570-checker-osr/osr.cc \ - 595-profile-saving/profile-saving.cc \ - 596-app-images/app_images.cc \ - 597-deopt-new-string/deopt.cc - -ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so -ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so -ifdef TARGET_2ND_ARCH - ART_TARGET_LIBARTTEST_$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so - ART_TARGET_LIBARTTEST_$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttestd.so -endif - -# $(1): target or host -define build-libarttest - ifneq ($(1),target) - ifneq ($(1),host) - $$(error expected target or host for argument 1, received $(1)) - endif - endif - ifneq ($(2),debug) - ifneq ($(2),) - $$(error d or empty for argument 2, received $(2)) - endif - suffix := d - else - suffix := - endif - - 
art_target_or_host := $(1) - - include $(CLEAR_VARS) - LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) - LOCAL_MODULE := libarttest$$(suffix) - ifeq ($$(art_target_or_host),target) - LOCAL_MODULE_TAGS := tests - endif - LOCAL_SRC_FILES := $(LIBARTTEST_COMMON_SRC_FILES) - LOCAL_SHARED_LIBRARIES += libart$$(suffix) libbacktrace libnativehelper - LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime - LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk - LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libarttest.mk - ifeq ($$(art_target_or_host),target) - LOCAL_CLANG := $(ART_TARGET_CLANG) - ifeq ($$(suffix),d) - $(call set-target-local-cflags-vars,debug) - else - $(call set-target-local-cflags-vars,ndebug) - endif - LOCAL_SHARED_LIBRARIES += libdl - LOCAL_MULTILIB := both - LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32) - LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64) - LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH) - include $(BUILD_SHARED_LIBRARY) - else # host - LOCAL_CLANG := $(ART_HOST_CLANG) - LOCAL_CFLAGS := $(ART_HOST_CFLAGS) - LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) - ifeq ($$(suffix),d) - LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS) - LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS) - else - LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS) - LOCAL_ASFLAGS += $(ART_HOST_NON_DEBUG_ASFLAGS) - endif - LOCAL_LDLIBS := -ldl -lpthread - LOCAL_IS_HOST_MODULE := true - LOCAL_MULTILIB := both - include $(BUILD_HOST_SHARED_LIBRARY) - endif - - # Clear locally used variables. - art_target_or_host := - suffix := -endef - -ifeq ($(ART_BUILD_TARGET),true) - $(eval $(call build-libarttest,target,)) - $(eval $(call build-libarttest,target,debug)) -endif -ifeq ($(ART_BUILD_HOST),true) - $(eval $(call build-libarttest,host,)) - $(eval $(call build-libarttest,host,debug)) -endif - -# Clear locally used variables. 
-LOCAL_PATH := -LIBARTTEST_COMMON_SRC_FILES := diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk deleted file mode 100644 index aa83016d7b..0000000000 --- a/test/Android.libnativebridgetest.mk +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright (C) 2014 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -LOCAL_PATH := $(call my-dir) - -include art/build/Android.common_build.mk - -LIBNATIVEBRIDGETEST_COMMON_SRC_FILES := \ - 115-native-bridge/nativebridge.cc - -ART_TARGET_LIBNATIVEBRIDGETEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libnativebridgetest.so -ifdef TARGET_2ND_ARCH - ART_TARGET_LIBNATIVEBRIDGETEST_$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libnativebridgetest.so -endif - -# $(1): target or host -define build-libnativebridgetest - ifneq ($(1),target) - ifneq ($(1),host) - $$(error expected target or host for argument 1, received $(1)) - endif - endif - - art_target_or_host := $(1) - - include $(CLEAR_VARS) - LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) - LOCAL_MODULE := libnativebridgetest - ifeq ($$(art_target_or_host),target) - LOCAL_MODULE_TAGS := tests - endif - LOCAL_SRC_FILES := $(LIBNATIVEBRIDGETEST_COMMON_SRC_FILES) - LOCAL_SHARED_LIBRARIES += libartd - LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime - LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk - LOCAL_ADDITIONAL_DEPENDENCIES += 
$(LOCAL_PATH)/Android.libnativebridgetest.mk - ifeq ($$(art_target_or_host),target) - LOCAL_CLANG := $(ART_TARGET_CLANG) - $(call set-target-local-cflags-vars,debug) - LOCAL_SHARED_LIBRARIES += libdl - LOCAL_STATIC_LIBRARIES := libgtest - LOCAL_MULTILIB := both - LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32) - LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64) - LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH) - include $(BUILD_SHARED_LIBRARY) - else # host - LOCAL_CLANG := $(ART_HOST_CLANG) - LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS) - LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) $(ART_HOST_DEBUG_ASFLAGS) - LOCAL_SHARED_LIBRARIES += libcutils - LOCAL_LDLIBS := -ldl -lpthread - ifeq ($(HOST_OS),linux) - LOCAL_LDLIBS += -lrt - endif - LOCAL_IS_HOST_MODULE := true - LOCAL_MULTILIB := both - include $(BUILD_HOST_SHARED_LIBRARY) - endif - - # Clear locally used variables. - art_target_or_host := -endef - -ifeq ($(ART_BUILD_TARGET),true) - $(eval $(call build-libnativebridgetest,target)) -endif -ifeq ($(ART_BUILD_HOST),true) - $(eval $(call build-libnativebridgetest,host)) -endif - -# Clear locally used variables. -LOCAL_PATH := -LIBNATIVEBRIDGETEST_COMMON_SRC_FILES := diff --git a/test/Android.libtiagent.mk b/test/Android.libtiagent.mk deleted file mode 100644 index 626dc3b8b3..0000000000 --- a/test/Android.libtiagent.mk +++ /dev/null @@ -1,102 +0,0 @@ -# -# Copyright (C) 2016 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -LOCAL_PATH := $(call my-dir) - -include art/build/Android.common_build.mk - -LIBARTAGENT_COMMON_SRC_FILES := \ - ti-agent/common_load.cc \ - 901-hello-ti-agent/basics.cc - -# $(1): target or host -# $(2): debug or <empty> -define build-libtiagent - ifneq ($(1),target) - ifneq ($(1),host) - $$(error expected target or host for argument 1, received $(1)) - endif - endif - ifneq ($(2),debug) - ifneq ($(2),) - $$(error d or empty for argument 2, received $(2)) - endif - suffix := d - else - suffix := - endif - - art_target_or_host := $(1) - - include $(CLEAR_VARS) - LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) - LOCAL_MODULE := libtiagent$$(suffix) - ifeq ($$(art_target_or_host),target) - LOCAL_MODULE_TAGS := tests - endif - LOCAL_SRC_FILES := $(LIBARTAGENT_COMMON_SRC_FILES) - LOCAL_SHARED_LIBRARIES += libart$$(suffix) libbacktrace libnativehelper libopenjdkjvmti$$(suffix) - LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime art/test - LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk - LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.libtiagent.mk - ifeq ($$(art_target_or_host),target) - $(call set-target-local-clang-vars) - ifeq ($$(suffix),d) - $(call set-target-local-cflags-vars,debug) - else - $(call set-target-local-cflags-vars,ndebug) - endif - LOCAL_SHARED_LIBRARIES += libdl - LOCAL_MULTILIB := both - LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32) - LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64) - LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH) - include $(BUILD_SHARED_LIBRARY) - else # host - LOCAL_CLANG := $(ART_HOST_CLANG) - LOCAL_CFLAGS := $(ART_HOST_CFLAGS) - LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) - ifeq ($$(suffix),d) - LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS) - LOCAL_ASFLAGS += $(ART_HOST_DEBUG_ASFLAGS) - else - LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS) - LOCAL_ASFLAGS += 
$(ART_HOST_NON_DEBUG_ASFLAGS) - endif - LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread - LOCAL_IS_HOST_MODULE := true - LOCAL_MULTILIB := both - include $(BUILD_HOST_SHARED_LIBRARY) - endif - - # Clear locally used variables. - art_target_or_host := - suffix := -endef - -ifeq ($(ART_BUILD_TARGET),true) - $(eval $(call build-libtiagent,target,)) - $(eval $(call build-libtiagent,target,debug)) -endif -ifeq ($(ART_BUILD_HOST),true) - $(eval $(call build-libtiagent,host,)) - $(eval $(call build-libtiagent,host,debug)) -endif - -# Clear locally used variables. -LOCAL_PATH := -LIBARTAGENT_COMMON_SRC_FILES := diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index d6957fc39c..3bcea184b3 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -47,6 +47,14 @@ ifeq ($(ART_TEST_DEBUG_GC),true) ART_TEST_WITH_STRACE := true endif +ifeq ($(ART_TEST_BISECTION),true) + # Need to keep rebuilding the test to bisection search it. + ART_TEST_RUN_TEST_NO_PREBUILD := true + ART_TEST_RUN_TEST_PREBUILD := false + # Bisection search writes to standard output. + ART_TEST_QUIET := false +endif + # Helper to create individual build targets for tests. Must be called with $(eval). # $(1): the test number define define-build-art-run-test @@ -153,17 +161,17 @@ JNI_TYPES := checkjni ifeq ($(ART_TEST_JNI_FORCECOPY),true) JNI_TYPES += forcecopy endif -IMAGE_TYPES := image +IMAGE_TYPES := picimage ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true) IMAGE_TYPES += no-image endif ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true) - IMAGE_TYPES := multiimage + IMAGE_TYPES := multipicimage endif -ifeq ($(ART_TEST_PIC_IMAGE),true) - IMAGE_TYPES += picimage +ifeq ($(ART_TEST_NPIC_IMAGE),true) + IMAGE_TYPES += npicimage ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true) - IMAGE_TYPES := multipicimage + IMAGE_TYPES := multinpicimage endif endif PICTEST_TYPES := npictest @@ -270,11 +278,11 @@ TEST_ART_BROKEN_TARGET_TESTS := # Tests that require python3. 
TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \ 960-default-smali \ - 961-default-iface-resolution-generated \ - 964-default-iface-init-generated \ - 968-default-partial-compile-generated \ + 961-default-iface-resolution-gen \ + 964-default-iface-init-gen \ + 968-default-partial-compile-gen \ 969-iface-super \ - 970-iface-super-resolution-generated \ + 970-iface-super-resolution-gen \ 971-iface-super # Check if we have python3 to run our tests. @@ -354,13 +362,13 @@ TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := # Tests that are broken with GC stress. # * 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we # hope the second process got into the expected state. The slowness of gcstress makes this bad. -# * 961-default-iface-resolution-generated and 964-default-iface-init-generated are very long tests -# that often will take more than the timeout to run when gcstress is enabled. This is because -# gcstress slows down allocations significantly which these tests do a lot. +# * 961-default-iface-resolution-gen and 964-default-iface-init-genare very long tests that often +# will take more than the timeout to run when gcstress is enabled. This is because gcstress +# slows down allocations significantly which these tests do a lot. TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \ 137-cfi \ - 961-default-iface-resolution-generated \ - 964-default-iface-init-generated + 961-default-iface-resolution-gen \ + 964-default-iface-init-gen ifneq (,$(filter gcstress,$(GC_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ @@ -644,6 +652,38 @@ endif TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS := +# Tests incompatible with bisection bug search. Sorted by incompatibility reason. +# 000 through 595 do not compile anything. 089 tests a build failure. 018 through 137 +# run dalvikvm more than once. 115 and 088 assume they are always compiled. 
+# 055 tests performance which is degraded during bisecting. +TEST_ART_INCOMPATIBLE_BISECTION_SEARCH_RUN_TESTS := \ + 000-nop \ + 134-nodex2oat-nofallback \ + 147-stripped-dex-fallback \ + 595-profile-saving \ + \ + 089-many-methods \ + \ + 018-stack-overflow \ + 116-nodex2oat \ + 117-nopatchoat \ + 118-noimage-dex2oat \ + 119-noimage-patchoat \ + 126-miranda-multidex \ + 137-cfi \ + \ + 115-native-bridge \ + 088-monitor-verification \ + \ + 055-enum-performance + +ifeq ($(ART_TEST_BISECTION),true) + ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), \ + $(PREBUILD_TYPES),$(OPTIMIZING_COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), \ + $(TEST_ART_INCOMPATIBLE_BISECTION_SEARCH_RUN_TESTS),$(ALL_ADDRESS_SIZES)) +endif + # Clear variables ahead of appending to them when defining tests. $(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=)) $(foreach target, $(TARGET_TYPES), \ @@ -685,59 +725,59 @@ $(foreach target, $(TARGET_TYPES), \ TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS) # Also need libartagent. -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libartagent.so -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libartagentd.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libartagent) +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libartagentd) ifdef TARGET_2ND_ARCH -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libartagent.so -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libartagentd.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libartagent) +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libartagentd) endif # Also need libtiagent. 
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libtiagent.so -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libtiagentd.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libtiagent) +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libtiagentd) ifdef TARGET_2ND_ARCH -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libtiagent.so -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libtiagentd.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libtiagent) +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libtiagentd) endif # Also need libarttest. -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libarttest) +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libarttestd) ifdef TARGET_2ND_ARCH -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttest.so -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libarttestd.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libarttest) +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libarttestd) endif # Also need libnativebridgetest. -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libnativebridgetest.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_ARCH)_libnativebridgetest) ifdef TARGET_2ND_ARCH -TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_TEST_OUT)/$(TARGET_2ND_ARCH)/libnativebridgetest.so +TEST_ART_TARGET_SYNC_DEPS += $(OUT_DIR)/$(ART_TEST_LIST_device_$(TARGET_2ND_ARCH)_libnativebridgetest) endif # All tests require the host executables. 
The tests also depend on the core images, but on # specific version depending on the compiler. ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \ $(ART_HOST_EXECUTABLES) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libtiagent$(ART_HOST_SHLIB_EXTENSION) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libtiagentd$(ART_HOST_SHLIB_EXTENSION) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libartagent$(ART_HOST_SHLIB_EXTENSION) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libartagentd$(ART_HOST_SHLIB_EXTENSION) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \ - $(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagent) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagentd) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libarttest) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libarttestd) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libnativebridgetest) \ $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) ifneq ($(HOST_PREFER_32_BIT),true) ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \ - $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libtiagent$(ART_HOST_SHLIB_EXTENSION) \ - $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libtiagentd$(ART_HOST_SHLIB_EXTENSION) \ - $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libartagent$(ART_HOST_SHLIB_EXTENSION) \ - $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libartagentd$(ART_HOST_SHLIB_EXTENSION) \ - $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \ - $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \ - 
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtiagent) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtiagentd) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libartagent) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libartagentd) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libarttest) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libarttestd) \ + $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libnativebridgetest) \ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) @@ -762,6 +802,9 @@ define define-test-art-run-test ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true) run_test_options += --always-clean endif + ifeq ($(ART_TEST_BISECTION),true) + run_test_options += --bisection-search + endif ifeq ($(1),host) uc_host_or_target := HOST test_groups := ART_RUN_TEST_HOST_RULES @@ -896,6 +939,11 @@ define define-test-art-run-test ifeq ($(4),regalloc_gc) # Graph coloring tests share the image_suffix with optimizing tests. image_suffix := optimizing + else + ifeq ($(4),jit) + # JIT tests share the image_suffix with interpreter tests. + image_suffix := interpreter + endif endif ifeq ($(9),no-image) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES @@ -907,8 +955,9 @@ define define-test-art-run-test prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_$(13)) endif else - ifeq ($(9),image) + ifeq ($(9),npicimage) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES + run_test_options += --npic-image # Add the core dependency. 
ifeq ($(1),host) prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_$(13)) @@ -918,16 +967,15 @@ define define-test-art-run-test else ifeq ($(9),picimage) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES - run_test_options += --pic-image ifeq ($(1),host) prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_$(13)) else prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(13)) endif else - ifeq ($(9),multiimage) + ifeq ($(9),multinpicimage) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES - run_test_options += --multi-image + run_test_options += --npic-image --multi-image ifeq ($(1),host) prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13)) else @@ -936,7 +984,7 @@ define define-test-art-run-test else ifeq ($(9),multipicimage) test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES - run_test_options += --pic-image --multi-image + run_test_options += --multi-image ifeq ($(1),host) prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13)) else @@ -1147,10 +1195,4 @@ ALL_ADDRESS_SIZES := RUN_TYPES := DEBUGGABLE_TYPES := -MY_LOCAL_PATH := $(LOCAL_PATH) -include $(MY_LOCAL_PATH)/Android.libartagent.mk -include $(MY_LOCAL_PATH)/Android.libtiagent.mk -include $(MY_LOCAL_PATH)/Android.libarttest.mk -include $(MY_LOCAL_PATH)/Android.libnativebridgetest.mk -MY_LOCAL_PATH := LOCAL_PATH := diff --git a/test/VerifierDeps/Main.smali b/test/VerifierDeps/Main.smali new file mode 100644 index 0000000000..74c0d037be --- /dev/null +++ b/test/VerifierDeps/Main.smali @@ -0,0 +1,464 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMain; +.super LMyThreadSet; + +.method public static ArgumentType_ResolvedClass(Ljava/lang/Thread;)V + .registers 1 + return-void +.end method + +.method public static ArgumentType_ResolvedReferenceArray([Ljava/lang/Thread;)V + .registers 1 + return-void +.end method + +.method public static ArgumentType_ResolvedPrimitiveArray([B)V + .registers 1 + return-void +.end method + +.method public static ArgumentType_UnresolvedClass(LUnresolvedClass;)V + .registers 1 + return-void +.end method + +.method public static ArgumentType_UnresolvedSuper(LMySetWithUnresolvedSuper;)V + .registers 1 + return-void +.end method + +.method public static ReturnType_Reference(Ljava/lang/IllegalStateException;)Ljava/lang/Throwable; + .registers 1 + return-object p0 +.end method + +.method public static ReturnType_Array([Ljava/lang/IllegalStateException;)[Ljava/lang/Integer; + .registers 1 + return-object p0 +.end method + +.method public static InvokeArgumentType(Ljava/text/SimpleDateFormat;Ljava/util/SimpleTimeZone;)V + .registers 2 + invoke-virtual {p0, p1}, Ljava/text/SimpleDateFormat;->setTimeZone(Ljava/util/TimeZone;)V + return-void +.end method + +.method public static MergeTypes_RegisterLines(Z)Ljava/lang/Object; + .registers 2 + if-eqz p0, :else + + new-instance v0, LMySocketTimeoutException; + invoke-direct {v0}, LMySocketTimeoutException;-><init>()V + goto :merge + + :else + new-instance v0, Ljava/util/concurrent/TimeoutException; + invoke-direct {v0}, Ljava/util/concurrent/TimeoutException;-><init>()V + goto :merge + + :merge + return-object v0 +.end 
method + +.method public static MergeTypes_IfInstanceOf(Ljava/net/SocketTimeoutException;)V + .registers 2 + instance-of v0, p0, Ljava/util/concurrent/TimeoutException; + if-eqz v0, :else + return-void + :else + return-void +.end method + +.method public static MergeTypes_Unresolved(ZZLUnresolvedClassA;)Ljava/lang/Object; + .registers 5 + if-eqz p0, :else1 + + move-object v0, p2 + goto :merge + + :else1 + if-eqz p1, :else2 + + new-instance v0, Ljava/util/concurrent/TimeoutException; + invoke-direct {v0}, Ljava/util/concurrent/TimeoutException;-><init>()V + goto :merge + + :else2 + new-instance v0, Ljava/net/SocketTimeoutException; + invoke-direct {v0}, Ljava/net/SocketTimeoutException;-><init>()V + goto :merge + + :merge + return-object v0 +.end method + +.method public static ConstClass_Resolved()V + .registers 1 + const-class v0, Ljava/lang/IllegalStateException; + return-void +.end method + +.method public static ConstClass_Unresolved()V + .registers 1 + const-class v0, LUnresolvedClass; + return-void +.end method + +.method public static CheckCast_Resolved(Ljava/lang/Object;)V + .registers 1 + check-cast p0, Ljava/lang/IllegalStateException; + return-void +.end method + +.method public static CheckCast_Unresolved(Ljava/lang/Object;)V + .registers 1 + check-cast p0, LUnresolvedClass; + return-void +.end method + +.method public static InstanceOf_Resolved(Ljava/lang/Object;)Z + .registers 1 + instance-of p0, p0, Ljava/lang/IllegalStateException; + return p0 +.end method + +.method public static InstanceOf_Unresolved(Ljava/lang/Object;)Z + .registers 1 + instance-of p0, p0, LUnresolvedClass; + return p0 +.end method + +.method public static NewInstance_Resolved()V + .registers 1 + new-instance v0, Ljava/lang/IllegalStateException; + return-void +.end method + +.method public static NewInstance_Unresolved()V + .registers 1 + new-instance v0, LUnresolvedClass; + return-void +.end method + +.method public static NewArray_Resolved()V + .registers 1 + const/4 v0, 0x1 + 
new-array v0, v0, [Ljava/lang/IllegalStateException; + return-void +.end method + +.method public static NewArray_Unresolved()V + .registers 2 + const/4 v0, 0x1 + new-array v0, v0, [LUnresolvedClass; + return-void +.end method + +.method public static Throw(Ljava/lang/IllegalStateException;)V + .registers 2 + throw p0 +.end method + +.method public static MoveException_Resolved()Ljava/lang/Object; + .registers 1 + :try_start + invoke-static {}, Ljava/lang/System;->nanoTime()J + :try_end + .catch Ljava/net/SocketTimeoutException; {:try_start .. :try_end} :catch_block + .catch Ljava/io/InterruptedIOException; {:try_start .. :try_end} :catch_block + .catch Ljava/util/zip/ZipException; {:try_start .. :try_end} :catch_block + const/4 v0, 0x0 + return-object v0 + + :catch_block + move-exception v0 + return-object v0 +.end method + +.method public static MoveException_Unresolved()Ljava/lang/Object; + .registers 1 + :try_start + invoke-static {}, Ljava/lang/System;->nanoTime()J + :try_end + .catch LUnresolvedException; {:try_start .. 
:try_end} :catch_block + const/4 v0, 0x0 + return-object v0 + + :catch_block + move-exception v0 + return-object v0 +.end method + +.method public static StaticField_Resolved_DeclaredInReferenced()V + .registers 1 + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + return-void +.end method + +.method public static StaticField_Resolved_DeclaredInSuperclass1()V + .registers 1 + sget v0, Ljava/util/SimpleTimeZone;->LONG:I + return-void +.end method + +.method public static StaticField_Resolved_DeclaredInSuperclass2()V + .registers 1 + sget v0, LMySimpleTimeZone;->SHORT:I + return-void +.end method + +.method public static StaticField_Resolved_DeclaredInInterface1()V + .registers 1 + # Case 1: DOMResult implements Result + sget-object v0, Ljavax/xml/transform/dom/DOMResult;->PI_ENABLE_OUTPUT_ESCAPING:Ljava/lang/String; + return-void +.end method + +.method public static StaticField_Resolved_DeclaredInInterface2()V + .registers 1 + # Case 2: MyDOMResult extends DOMResult, DOMResult implements Result + sget-object v0, LMyDOMResult;->PI_ENABLE_OUTPUT_ESCAPING:Ljava/lang/String; + return-void +.end method + +.method public static StaticField_Resolved_DeclaredInInterface3()V + .registers 1 + # Case 3: MyResult implements Result + sget-object v0, LMyResult;->PI_ENABLE_OUTPUT_ESCAPING:Ljava/lang/String; + return-void +.end method + +.method public static StaticField_Resolved_DeclaredInInterface4()V + .registers 1 + # Case 4: MyDocument implements Document, Document extends Node + sget-short v0, LMyDocument;->ELEMENT_NODE:S + return-void +.end method + +.method public static StaticField_Unresolved_ReferrerInBoot()V + .registers 1 + sget v0, Ljava/util/TimeZone;->x:I + return-void +.end method + +.method public static StaticField_Unresolved_ReferrerInDex()V + .registers 1 + sget v0, LMyThreadSet;->x:I + return-void +.end method + +.method public static InstanceField_Resolved_DeclaredInReferenced(LMySocketTimeoutException;)V + .registers 1 + iget v0, p0, 
Ljava/io/InterruptedIOException;->bytesTransferred:I + return-void +.end method + +.method public static InstanceField_Resolved_DeclaredInSuperclass1(LMySocketTimeoutException;)V + .registers 1 + iget v0, p0, Ljava/net/SocketTimeoutException;->bytesTransferred:I + return-void +.end method + +.method public static InstanceField_Resolved_DeclaredInSuperclass2(LMySocketTimeoutException;)V + .registers 1 + iget v0, p0, LMySocketTimeoutException;->bytesTransferred:I + return-void +.end method + +.method public static InstanceField_Unresolved_ReferrerInBoot(LMySocketTimeoutException;)V + .registers 1 + iget v0, p0, Ljava/io/InterruptedIOException;->x:I + return-void +.end method + +.method public static InstanceField_Unresolved_ReferrerInDex(LMyThreadSet;)V + .registers 1 + iget v0, p0, LMyThreadSet;->x:I + return-void +.end method + +.method public static InvokeStatic_Resolved_DeclaredInReferenced()V + .registers 1 + const v0, 0x0 + invoke-static {v0}, Ljava/net/Socket;->setSocketImplFactory(Ljava/net/SocketImplFactory;)V + return-void +.end method + +.method public static InvokeStatic_Resolved_DeclaredInSuperclass1()V + .registers 1 + const v0, 0x0 + invoke-static {v0}, Ljavax/net/ssl/SSLSocket;->setSocketImplFactory(Ljava/net/SocketImplFactory;)V + return-void +.end method + +.method public static InvokeStatic_Resolved_DeclaredInSuperclass2()V + .registers 1 + const v0, 0x0 + invoke-static {v0}, LMySSLSocket;->setSocketImplFactory(Ljava/net/SocketImplFactory;)V + return-void +.end method + +.method public static InvokeStatic_DeclaredInInterface1()V + .registers 1 + invoke-static {}, Ljava/util/Map$Entry;->comparingByKey()Ljava/util/Comparator; + return-void +.end method + +.method public static InvokeStatic_DeclaredInInterface2()V + .registers 1 + # AbstractMap$SimpleEntry implements Map$Entry + # INVOKE_STATIC does not resolve to methods in superinterfaces. This will + # therefore result in an unresolved method. 
+ invoke-static {}, Ljava/util/AbstractMap$SimpleEntry;->comparingByKey()Ljava/util/Comparator; + return-void +.end method + +.method public static InvokeStatic_Unresolved1()V + .registers 1 + invoke-static {}, Ljavax/net/ssl/SSLSocket;->x()V + return-void +.end method + +.method public static InvokeStatic_Unresolved2()V + .registers 1 + invoke-static {}, LMySSLSocket;->x()V + return-void +.end method + +.method public static InvokeDirect_Resolved_DeclaredInReferenced()V + .registers 1 + new-instance v0, Ljava/net/Socket; + invoke-direct {v0}, Ljava/net/Socket;-><init>()V + return-void +.end method + +.method public static InvokeDirect_Resolved_DeclaredInSuperclass1(LMySSLSocket;)V + .registers 1 + invoke-direct {p0}, Ljavax/net/ssl/SSLSocket;->checkOldImpl()V + return-void +.end method + +.method public static InvokeDirect_Resolved_DeclaredInSuperclass2(LMySSLSocket;)V + .registers 1 + invoke-direct {p0}, LMySSLSocket;->checkOldImpl()V + return-void +.end method + +.method public static InvokeDirect_Unresolved1(LMySSLSocket;)V + .registers 1 + invoke-direct {p0}, Ljavax/net/ssl/SSLSocket;->x()V + return-void +.end method + +.method public static InvokeDirect_Unresolved2(LMySSLSocket;)V + .registers 1 + invoke-direct {p0}, LMySSLSocket;->x()V + return-void +.end method + +.method public static InvokeVirtual_Resolved_DeclaredInReferenced(LMySocketTimeoutException;)V + .registers 1 + invoke-virtual {p0}, Ljava/lang/Throwable;->getMessage()Ljava/lang/String; + return-void +.end method + +.method public static InvokeVirtual_Resolved_DeclaredInSuperclass1(LMySocketTimeoutException;)V + .registers 1 + invoke-virtual {p0}, Ljava/io/InterruptedIOException;->getMessage()Ljava/lang/String; + return-void +.end method + +.method public static InvokeVirtual_Resolved_DeclaredInSuperclass2(LMySocketTimeoutException;)V + .registers 1 + invoke-virtual {p0}, LMySocketTimeoutException;->getMessage()Ljava/lang/String; + return-void +.end method + +.method public static 
InvokeVirtual_Resolved_DeclaredInSuperinterface(LMyThreadSet;)V + .registers 1 + invoke-virtual {p0}, LMyThreadSet;->size()I + return-void +.end method + +.method public static InvokeVirtual_Unresolved1(LMySocketTimeoutException;)V + .registers 1 + invoke-virtual {p0}, Ljava/io/InterruptedIOException;->x()V + return-void +.end method + +.method public static InvokeVirtual_Unresolved2(LMySocketTimeoutException;)V + .registers 1 + invoke-virtual {p0}, LMySocketTimeoutException;->x()V + return-void +.end method + +.method public static InvokeVirtual_ActuallyDirect(LMyThread;)V + .registers 1 + invoke-virtual {p0}, LMyThread;->activeCount()I + return-void +.end method + +.method public static InvokeInterface_Resolved_DeclaredInReferenced(LMyThread;)V + .registers 1 + invoke-interface {p0}, Ljava/lang/Runnable;->run()V + return-void +.end method + +.method public static InvokeInterface_Resolved_DeclaredInSuperclass(LMyThread;)V + .registers 1 + # Method join() is declared in the superclass of MyThread. As such, it should + # be called with invoke-virtual and will not be resolved here. + invoke-interface {p0}, LMyThread;->join()V + return-void +.end method + +.method public static InvokeInterface_Resolved_DeclaredInSuperinterface1(LMyThreadSet;)V + .registers 1 + # Verification will fail because the referring class is not an interface. + invoke-interface {p0}, LMyThreadSet;->run()V + return-void +.end method + +.method public static InvokeInterface_Resolved_DeclaredInSuperinterface2(LMyThreadSet;)V + .registers 1 + # Verification will fail because the referring class is not an interface. 
+ invoke-interface {p0}, LMyThreadSet;->isEmpty()Z + return-void +.end method + +.method public static InvokeInterface_Unresolved1(LMyThread;)V + .registers 1 + invoke-interface {p0}, Ljava/lang/Runnable;->x()V + return-void +.end method + +.method public static InvokeInterface_Unresolved2(LMyThread;)V + .registers 1 + invoke-interface {p0}, LMyThreadSet;->x()V + return-void +.end method + +.method public static InvokeSuper_ThisAssignable(Ljava/lang/Thread;)V + .registers 1 + invoke-super {p0}, Ljava/lang/Runnable;->run()V + return-void +.end method + +.method public static InvokeSuper_ThisNotAssignable(Ljava/lang/Integer;)V + .registers 1 + invoke-super {p0}, Ljava/lang/Integer;->intValue()I + return-void +.end method diff --git a/test/VerifierDeps/MyDOMResult.smali b/test/VerifierDeps/MyDOMResult.smali new file mode 100644 index 0000000000..12f6243d26 --- /dev/null +++ b/test/VerifierDeps/MyDOMResult.smali @@ -0,0 +1,16 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public LMyDOMResult; +.super Ljavax/xml/transform/dom/DOMResult; diff --git a/test/VerifierDeps/MyDocument.smali b/test/VerifierDeps/MyDocument.smali new file mode 100644 index 0000000000..3ce042c2a7 --- /dev/null +++ b/test/VerifierDeps/MyDocument.smali @@ -0,0 +1,17 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMyDocument; +.super Ljava/lang/Object; +.implements Lorg/w3c/dom/Document; diff --git a/test/VerifierDeps/MyErroneousTimeZone.smali b/test/VerifierDeps/MyErroneousTimeZone.smali new file mode 100644 index 0000000000..5f23dd93a7 --- /dev/null +++ b/test/VerifierDeps/MyErroneousTimeZone.smali @@ -0,0 +1,22 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMyErroneousTimeZone; +.super LMySimpleTimeZone; + +# Class is erroneous because foo() is defined final in the superclass. 
+.method public foo()V + .registers 1 + return-void +.end method diff --git a/test/VerifierDeps/MyResult.smali b/test/VerifierDeps/MyResult.smali new file mode 100644 index 0000000000..e00e7501d7 --- /dev/null +++ b/test/VerifierDeps/MyResult.smali @@ -0,0 +1,17 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMyResult; +.super Ljava/lang/Object; +.implements Ljavax/xml/transform/Result; diff --git a/test/VerifierDeps/MySSLSocket.smali b/test/VerifierDeps/MySSLSocket.smali new file mode 100644 index 0000000000..dd30081695 --- /dev/null +++ b/test/VerifierDeps/MySSLSocket.smali @@ -0,0 +1,16 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public LMySSLSocket; +.super Ljavax/net/ssl/SSLSocket; diff --git a/test/VerifierDeps/MySimpleTimeZone.smali b/test/VerifierDeps/MySimpleTimeZone.smali new file mode 100644 index 0000000000..f7a1e05b2f --- /dev/null +++ b/test/VerifierDeps/MySimpleTimeZone.smali @@ -0,0 +1,24 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMySimpleTimeZone; +.super Ljava/util/SimpleTimeZone; +.implements Ljava/io/Serializable; + +# Define foo() as a final method. It is used by the MyErroneousTimeZone subclass +# to generate a linkage error. +.method public final foo()V + .registers 1 + return-void +.end method diff --git a/test/VerifierDeps/MySocketTimeoutException.smali b/test/VerifierDeps/MySocketTimeoutException.smali new file mode 100644 index 0000000000..50e076244e --- /dev/null +++ b/test/VerifierDeps/MySocketTimeoutException.smali @@ -0,0 +1,16 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMySocketTimeoutException; +.super Ljava/net/SocketTimeoutException; diff --git a/test/VerifierDeps/MyThread.smali b/test/VerifierDeps/MyThread.smali new file mode 100644 index 0000000000..7fdb254aee --- /dev/null +++ b/test/VerifierDeps/MyThread.smali @@ -0,0 +1,16 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMyThread; +.super Ljava/lang/Thread; diff --git a/test/VerifierDeps/MyThreadSet.smali b/test/VerifierDeps/MyThreadSet.smali new file mode 100644 index 0000000000..f331fcf81d --- /dev/null +++ b/test/VerifierDeps/MyThreadSet.smali @@ -0,0 +1,17 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public abstract LMyThreadSet; +.super Ljava/lang/Thread; +.implements Ljava/util/Set; diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index d12bd79b3a..a445f4d630 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -32,6 +32,7 @@ JIT="n" INVOKE_WITH="" ISA=x86 LIBRARY_DIRECTORY="lib" +TEST_DIRECTORY="nativetest" MAIN="" OPTIMIZE="y" PATCHOAT="" @@ -55,6 +56,8 @@ DEX_VERIFY="" USE_DEX2OAT_AND_PATCHOAT="y" INSTRUCTION_SET_FEATURES="" ARGS="" +EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS. +DRY_RUN="n" # if y prepare to run the test but don't run it. while true; do if [ "x$1" = "x--quiet" ]; then @@ -220,6 +223,7 @@ while true; do GDB_SERVER="gdbserver64" DALVIKVM="dalvikvm64" LIBRARY_DIRECTORY="lib64" + TEST_DIRECTORY="nativetest64" ARCHITECTURES_PATTERN="${ARCHITECTURES_64}" shift elif [ "x$1" = "x--pic-test" ]; then @@ -233,6 +237,12 @@ while true; do fi EXPERIMENTAL="$EXPERIMENTAL $2" shift 2 + elif [ "x$1" = "x--external-log-tags" ]; then + EXTERNAL_LOG_TAGS="y" + shift + elif [ "x$1" = "x--dry-run" ]; then + DRY_RUN="y" + shift elif expr "x$1" : "x--" >/dev/null 2>&1; then echo "unknown $0 option: $1" 1>&2 exit 1 @@ -479,7 +489,7 @@ if [ "$HOST" = "n" ]; then adb push $TEST_NAME-ex.jar $DEX_LOCATION >/dev/null 2>&1 fi - LD_LIBRARY_PATH=/data/art-test/$ISA + LD_LIBRARY_PATH=/data/$TEST_DIRECTORY/art/$ISA if [ "$ANDROID_ROOT" != "/system" ]; then # Current default installation is dalvikvm 64bits and dex2oat 32bits, # so we can only use LD_LIBRARY_PATH when testing on a local @@ -491,12 +501,14 @@ if [ "$HOST" = "n" ]; then # Create a script with the command. The command can get longer than the longest # allowed adb command and there is no way to get the exit status from a adb shell - # command. + # command. Dalvik cache is cleaned before running to make subsequent executions + # of the script follow the same runtime path. 
cmdline="cd $DEX_LOCATION && \ export ANDROID_DATA=$DEX_LOCATION && \ export ANDROID_ADDITIONAL_PUBLIC_LIBRARIES=$PUBLIC_LIBS && \ export DEX_LOCATION=$DEX_LOCATION && \ export ANDROID_ROOT=$ANDROID_ROOT && \ + rm -rf ${DEX_LOCATION}/dalvik-cache/ && \ mkdir -p ${mkdir_locations} && \ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \ export PATH=$ANDROID_ROOT/bin:$PATH && \ @@ -517,7 +529,9 @@ if [ "$HOST" = "n" ]; then adb push $cmdfile $DEX_LOCATION/cmdline.sh > /dev/null 2>&1 fi - adb shell sh $DEX_LOCATION/cmdline.sh + if [ "$DRY_RUN" != "y" ]; then + adb shell sh $DEX_LOCATION/cmdline.sh + fi rm -f $cmdfile else @@ -525,16 +539,18 @@ else # By default, and for prebuild dex2oat, we are interested in errors being logged. In dev mode # we want debug messages. - if [ "$DEV_MODE" = "y" ]; then - export ANDROID_LOG_TAGS='*:d' - else - export ANDROID_LOG_TAGS='*:e' + if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then + if [ "$DEV_MODE" = "y" ]; then + export ANDROID_LOG_TAGS='*:d' + else + export ANDROID_LOG_TAGS='*:e' + fi fi export ANDROID_DATA="$DEX_LOCATION" export ANDROID_ROOT="${ANDROID_ROOT}" - export LD_LIBRARY_PATH="${ANDROID_ROOT}/lib" - export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/lib" + export LD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}" + export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}" export PATH="$PATH:${ANDROID_ROOT}/bin" # Temporarily disable address space layout randomization (ASLR). @@ -582,15 +598,21 @@ else # For running, we must turn off logging when dex2oat or patchoat are missing. Otherwise we use # the same defaults as for prebuilt: everything when --dev, otherwise errors and above only. - if [ "$DEV_MODE" = "y" ]; then - export ANDROID_LOG_TAGS='*:d' - elif [ "$USE_DEX2OAT_AND_PATCHOAT" = "n" ]; then - # All tests would log the error of failing dex2oat/patchoat. Be silent here and only - # log fatal events. 
- export ANDROID_LOG_TAGS='*:s' - else - # We are interested in LOG(ERROR) output. - export ANDROID_LOG_TAGS='*:e' + if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then + if [ "$DEV_MODE" = "y" ]; then + export ANDROID_LOG_TAGS='*:d' + elif [ "$USE_DEX2OAT_AND_PATCHOAT" = "n" ]; then + # All tests would log the error of failing dex2oat/patchoat. Be silent here and only + # log fatal events. + export ANDROID_LOG_TAGS='*:s' + else + # We are interested in LOG(ERROR) output. + export ANDROID_LOG_TAGS='*:e' + fi + fi + + if [ "$DRY_RUN" = "y" ]; then + exit 0 fi if [ "$USE_GDB" = "y" ]; then diff --git a/test/run-test b/test/run-test index 4c294203b3..ae53f9ecc0 100755 --- a/test/run-test +++ b/test/run-test @@ -130,9 +130,10 @@ have_image="yes" pic_image_suffix="" multi_image_suffix="" android_root="/system" +bisection_search="no" # By default we will use optimizing. image_args="" -image_suffix="-optimizing" +image_suffix="" while true; do if [ "x$1" = "x--host" ]; then @@ -180,8 +181,8 @@ while true; do elif [ "x$1" = "x--no-image" ]; then have_image="no" shift - elif [ "x$1" = "x--pic-image" ]; then - pic_image_suffix="-pic" + elif [ "x$1" = "x--npic-image" ]; then + pic_image_suffix="-npic" shift elif [ "x$1" = "x--multi-image" ]; then multi_image_suffix="-multi" @@ -258,11 +259,10 @@ while true; do shift elif [ "x$1" = "x--jit" ]; then image_args="--jit" - image_suffix="-jit" + image_suffix="-interpreter" shift elif [ "x$1" = "x--optimizing" ]; then image_args="-Xcompiler-option --compiler-backend=Optimizing" - image_suffix="-optimizing" shift elif [ "x$1" = "x--no-verify" ]; then run_args="${run_args} --no-verify" @@ -348,6 +348,9 @@ while true; do shift run_args="${run_args} --instruction-set-features $1" shift + elif [ "x$1" = "x--bisection-search" ]; then + bisection_search="yes" + shift elif expr "x$1" : "x--" >/dev/null 2>&1; then echo "unknown $0 option: $1" 1>&2 usage="yes" @@ -470,10 +473,10 @@ elif [ "$runtime" = "art" ]; then if [ "$target_mode" = "no" ]; then 
guess_host_arch_name run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art" - run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}" + run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}:${ANDROID_HOST_OUT}/nativetest${suffix64}" else guess_target_arch_name - run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}:/system/lib${suffix64}" + run_args="${run_args} --runtime-option -Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}:/system/lib${suffix64}" run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art" fi if [ "$relocate" = "yes" ]; then @@ -520,6 +523,21 @@ if [ "$dev_mode" = "yes" -a "$quiet" = "yes" ]; then usage="yes" fi +if [ "$bisection_search" = "yes" -a "$prebuild_mode" = "yes" ]; then + err_echo "--bisection-search and --prebuild are mutually exclusive" + usage="yes" +fi + +if [ "$bisection_search" = "yes" -a "$have_dex2oat" = "no" ]; then + err_echo "--bisection-search and --no-dex2oat are mutually exclusive" + usage="yes" +fi + +if [ "$bisection_search" = "yes" -a "$have_patchoat" = "no" ]; then + err_echo "--bisection-search and --no-patchoat are mutually exclusive" + usage="yes" +fi + if [ "$usage" = "no" ]; then if [ "x$1" = "x" -o "x$1" = "x-" ]; then test_dir=`basename "$oldwd"` @@ -540,6 +558,13 @@ if [ "$usage" = "no" ]; then shift fi +# For building with javac and dx always use Java 7. The dx compiler +# only support byte codes from Java 7 or earlier (class file major +# version 51 or lower). +if [ "$USE_JACK" != "true" ] && [ "$NEED_DEX" = "true" ]; then + export JAVAC="${JAVAC} -source 1.7 -target 1.7" +fi + if [ "$usage" = "yes" ]; then prog=`basename $prog` ( @@ -561,8 +586,8 @@ if [ "$usage" = "yes" ]; then echo " --gdb Run under gdb; incompatible with some tests." 
echo " --gdb-arg Pass an option to gdb." echo " --build-only Build test files only (off by default)." - echo " --build-with-javac-dx Build test files with javac and dx (on by default)." - echo " --build-with-jack Build test files with jack and jill (off by default)." + echo " --build-with-javac-dx Build test files with javac and dx (off by default)." + echo " --build-with-jack Build test files with jack and jill (on by default)." echo " --interpreter Enable interpreter only mode (off by default)." echo " --jit Enable jit (off by default)." echo " --optimizing Enable optimizing compiler (default)." @@ -608,12 +633,13 @@ if [ "$usage" = "yes" ]; then echo " --dex2oat-swap Use a dex2oat swap file." echo " --instruction-set-features [string]" echo " Set instruction-set-features for compilation." - echo " --pic-image Use an image compiled with position independent code for the" - echo " boot class path." + echo " --npic-image Use an image compiled with non-position independent code " + echo " for the boot class path." echo " --multi-image Use a set of images compiled with dex2oat multi-image for" echo " the boot class path." echo " --pic-test Compile the test code position independent." echo " --quiet Don't print anything except failure messages" + echo " --bisection-search Perform bisection bug search." ) 1>&2 # Direct to stderr so usage is not printed if --quiet is set. exit 1 fi @@ -678,7 +704,7 @@ function arch_supports_read_barrier() { # Tests named '<number>-checker-*' will also have their CFGs verified with # Checker when compiled with Optimizing on host. if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then - if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$USE_JACK" = "true" ]; then + if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$USE_JACK" = "true" ]; then # Optimizing has read barrier support for certain architectures # only. 
On other architectures, compiling is disabled when read # barriers are enabled, meaning that we do not produce a CFG file @@ -882,6 +908,41 @@ fi ) 2>&${real_stderr} 1>&2 +# Attempt bisection only if the test failed. +if [ "$bisection_search" = "yes" -a "$good" != "yes" ]; then + # Bisecting works by skipping different optimization passes which breaks checker assertions. + if [ "$run_checker" == "yes" ]; then + echo "${test_dir}: not bisecting, checker test." 1>&2 + else + # Increase file size limit, bisection search can generate large logfiles. + if ! ulimit -S unlimited; then + err_echo "ulimit file size setting failed" + fi + echo "${test_dir}: bisecting..." 1>&2 + cwd=`pwd` + maybe_device_mode="" + raw_cmd="" + if [ "$target_mode" = "yes" ]; then + # Produce cmdline.sh in $DEX_LOCATION. "$@" is passed as a runtime option + # so that cmdline.sh forwards its arguments to dalvikvm. invoke-with is set + # to exec in order to preserve pid when calling dalvikvm. This is required + # for bisection search to correctly retrieve logs from device. + "./${run}" $run_args --runtime-option '"$@"' --invoke-with exec --dry-run "$@" &> /dev/null + adb shell chmod u+x "$DEX_LOCATION/cmdline.sh" + maybe_device_mode="--device" + raw_cmd="$DEX_LOCATION/cmdline.sh" + else + raw_cmd="$cwd/${run} --external-log-tags $run_args $@" + fi + $ANDROID_BUILD_TOP/art/tools/bisection_search/bisection_search.py \ + $maybe_device_mode \ + --raw-cmd="$raw_cmd" \ + --check-script="$cwd/check" \ + --expected-output="$cwd/expected.txt" \ + --timeout=300 + fi +fi + # Clean up test files. 
if [ "$always_clean" = "yes" -o "$good" = "yes" ] && [ "$never_clean" = "no" ]; then cd "$oldwd" diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk index 60e0cd8ec8..4003ee08c5 100644 --- a/tools/ahat/Android.mk +++ b/tools/ahat/Android.mk @@ -16,7 +16,7 @@ LOCAL_PATH := $(call my-dir) -include art/build/Android.common_test.mk +include art/build/Android.common_path.mk # --- ahat.jar ---------------- include $(CLEAR_VARS) @@ -100,7 +100,7 @@ ANDROID_DATA=$ANDROID_DATA \ $invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \ -XXlib:$LIBART \ -Xnorelocate \ - -Ximage:$ANDROID_ROOT/framework/core-optimizing-pic.art \ + -Ximage:$ANDROID_ROOT/framework/core.art \ $DEBUG_OPTION \ "$@" diff --git a/tools/bisection_search/README.md b/tools/bisection_search/README.md index a7485c2bb5..d641102d1f 100644 --- a/tools/bisection_search/README.md +++ b/tools/bisection_search/README.md @@ -15,29 +15,56 @@ incorrect output. Prints Mi and Pj. How to run Bisection Bug Search =============================== +There are two supported invocation modes: + +1. Regular invocation, dalvikvm command is constructed internally: + + ./bisection_search.py -cp classes.dex --expected-output out_int --class Test + +2. Raw-cmd invocation, dalvikvm command is accepted as an argument. + + Extra dalvikvm arguments will be placed on second position in the command + by default. {ARGS} tag can be used to specify a custom position. + + If used in device mode, the command has to exec a dalvikvm instance. Bisection + will fail if pid of the process started by raw-cmd is different than pid of runtime. 
+ + ./bisection_search.py --raw-cmd='run.sh -cp classes.dex Test' --expected-retcode SUCCESS + ./bisection_search.py --raw-cmd='/bin/sh art {ARGS} -cp classes.dex Test' --expected-retcode SUCCESS + +Help: + bisection_search.py [-h] [-cp CLASSPATH] [--class CLASSNAME] [--lib LIB] - [--dalvikvm-option [OPT [OPT ...]]] [--arg [ARG [ARG ...]]] - [--image IMAGE] [--raw-cmd RAW_CMD] - [--64] [--device] [--expected-output EXPECTED_OUTPUT] - [--check-script CHECK_SCRIPT] [--verbose] + [--dalvikvm-option [OPT [OPT ...]]] [--arg [ARG [ARG ...]]] + [--image IMAGE] [--raw-cmd RAW_CMD] + [--64] [--device] [--device-serial DEVICE_SERIAL] + [--expected-output EXPECTED_OUTPUT] + [--expected-retcode {SUCCESS,TIMEOUT,ERROR}] + [--check-script CHECK_SCRIPT] [--logfile LOGFILE] [--cleanup] + [--timeout TIMEOUT] [--verbose] Tool for finding compiler bugs. Either --raw-cmd or both -cp and --class are required. optional arguments: - -h, --help show this help message and exit + -h, --help show this help message and exit dalvikvm command options: - -cp CLASSPATH, --classpath CLASSPATH classpath - --class CLASSNAME name of main class - --lib LIB lib to use, default: libart.so - --dalvikvm-option [OPT [OPT ...]] additional dalvikvm option - --arg [ARG [ARG ...]] argument passed to test - --image IMAGE path to image - --raw-cmd RAW_CMD bisect with this command, ignore other command options + -cp CLASSPATH, --classpath CLASSPATH classpath + --class CLASSNAME name of main class + --lib LIB lib to use, default: libart.so + --dalvikvm-option [OPT [OPT ...]] additional dalvikvm option + --arg [ARG [ARG ...]] argument passed to test + --image IMAGE path to image + --raw-cmd RAW_CMD bisect with this command, ignore other command options bisection options: - --64 x64 mode - --device run on device - --expected-output EXPECTED_OUTPUT file containing expected output - --check-script CHECK_SCRIPT script comparing output and expected output - --verbose enable verbose output + --64 x64 mode + --device run 
on device + --device-serial DEVICE_SERIAL device serial number, implies --device + --expected-output EXPECTED_OUTPUT file containing expected output + --expected-retcode {SUCCESS,TIMEOUT,ERROR} expected normalized return code + --check-script CHECK_SCRIPT script comparing output and expected output + --logfile LOGFILE custom logfile location + --cleanup clean up after bisecting + --timeout TIMEOUT if timeout seconds pass assume test failed + --verbose enable verbose output diff --git a/tools/bisection_search/bisection_search.py b/tools/bisection_search/bisection_search.py index 110ef82433..b7f190714e 100755 --- a/tools/bisection_search/bisection_search.py +++ b/tools/bisection_search/bisection_search.py @@ -34,6 +34,8 @@ from common import DeviceTestEnv from common import FatalError from common import GetEnvVariableOrError from common import HostTestEnv +from common import LogSeverity +from common import RetCode # Passes that are never disabled during search process because disabling them @@ -51,6 +53,13 @@ MANDATORY_PASSES = ['dex_cache_array_fixups_arm', NON_PASSES = ['builder', 'prepare_for_register_allocation', 'liveness', 'register'] +# If present in raw cmd, this tag will be replaced with runtime arguments +# controlling the bisection search. Otherwise arguments will be placed on second +# position in the command. +RAW_CMD_RUNTIME_ARGS_TAG = '{ARGS}' + +# Default core image path relative to ANDROID_HOST_OUT. +DEFAULT_IMAGE_RELATIVE_PATH = '/framework/core.art' class Dex2OatWrapperTestable(object): """Class representing a testable compilation. @@ -58,21 +67,29 @@ class Dex2OatWrapperTestable(object): Accepts filters on compiled methods and optimization passes. """ - def __init__(self, base_cmd, test_env, output_checker=None, verbose=False): + def __init__(self, base_cmd, test_env, expected_retcode=None, + output_checker=None, verbose=False): """Constructor. Args: base_cmd: list of strings, base command to run. test_env: ITestEnv. 
+ expected_retcode: RetCode, expected normalized return code. output_checker: IOutputCheck, output checker. verbose: bool, enable verbose output. """ self._base_cmd = base_cmd self._test_env = test_env + self._expected_retcode = expected_retcode self._output_checker = output_checker self._compiled_methods_path = self._test_env.CreateFile('compiled_methods') self._passes_to_run_path = self._test_env.CreateFile('run_passes') self._verbose = verbose + if RAW_CMD_RUNTIME_ARGS_TAG in self._base_cmd: + self._arguments_position = self._base_cmd.index(RAW_CMD_RUNTIME_ARGS_TAG) + self._base_cmd.pop(self._arguments_position) + else: + self._arguments_position = 1 def Test(self, compiled_methods, passes_to_run=None): """Tests compilation with compiled_methods and run_passes switches active. @@ -91,12 +108,14 @@ class Dex2OatWrapperTestable(object): print('Testing methods: {0} passes: {1}.'.format( compiled_methods, passes_to_run)) cmd = self._PrepareCmd(compiled_methods=compiled_methods, - passes_to_run=passes_to_run, - verbose_compiler=False) + passes_to_run=passes_to_run) (output, ret_code) = self._test_env.RunCommand( - cmd, {'ANDROID_LOG_TAGS': '*:e'}) - res = ((self._output_checker is None and ret_code == 0) - or self._output_checker.Check(output)) + cmd, LogSeverity.ERROR) + res = True + if self._expected_retcode: + res = self._expected_retcode == ret_code + if self._output_checker: + res = res and self._output_checker.Check(output) if self._verbose: print('Test passed: {0}.'.format(res)) return res @@ -110,8 +129,8 @@ class Dex2OatWrapperTestable(object): Raises: FatalError: An error occurred when retrieving methods list. """ - cmd = self._PrepareCmd(verbose_compiler=True) - (output, _) = self._test_env.RunCommand(cmd, {'ANDROID_LOG_TAGS': '*:i'}) + cmd = self._PrepareCmd() + (output, _) = self._test_env.RunCommand(cmd, LogSeverity.INFO) match_methods = re.findall(r'Building ([^\n]+)\n', output) if not match_methods: raise FatalError('Failed to retrieve methods list. 
' @@ -130,9 +149,8 @@ class Dex2OatWrapperTestable(object): Raises: FatalError: An error occurred when retrieving passes list. """ - cmd = self._PrepareCmd(compiled_methods=[compiled_method], - verbose_compiler=True) - (output, _) = self._test_env.RunCommand(cmd, {'ANDROID_LOG_TAGS': '*:i'}) + cmd = self._PrepareCmd(compiled_methods=[compiled_method]) + (output, _) = self._test_env.RunCommand(cmd, LogSeverity.INFO) match_passes = re.findall(r'Starting pass: ([^\n]+)\n', output) if not match_passes: raise FatalError('Failed to retrieve passes list. ' @@ -142,8 +160,8 @@ class Dex2OatWrapperTestable(object): def _PrepareCmd(self, compiled_methods=None, passes_to_run=None, verbose_compiler=False): """Prepare command to run.""" - cmd = [self._base_cmd[0]] - # insert additional arguments + cmd = self._base_cmd[0:self._arguments_position] + # insert additional arguments before the first argument if compiled_methods is not None: self._test_env.WriteLines(self._compiled_methods_path, compiled_methods) cmd += ['-Xcompiler-option', '--compiled-methods={0}'.format( @@ -152,10 +170,9 @@ class Dex2OatWrapperTestable(object): self._test_env.WriteLines(self._passes_to_run_path, passes_to_run) cmd += ['-Xcompiler-option', '--run-passes={0}'.format( self._passes_to_run_path)] - if verbose_compiler: - cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option', - '-verbose:compiler', '-Xcompiler-option', '-j1'] - cmd += self._base_cmd[1:] + cmd += ['-Xcompiler-option', '--runtime-arg', '-Xcompiler-option', + '-verbose:compiler', '-Xcompiler-option', '-j1'] + cmd += self._base_cmd[self._arguments_position:] return cmd @@ -299,7 +316,7 @@ def PrepareParser(): command_opts.add_argument('-cp', '--classpath', type=str, help='classpath') command_opts.add_argument('--class', dest='classname', type=str, help='name of main class') - command_opts.add_argument('--lib', dest='lib', type=str, default='libart.so', + command_opts.add_argument('--lib', type=str, default='libart.so', help='lib 
to use, default: libart.so') command_opts.add_argument('--dalvikvm-option', dest='dalvikvm_opts', metavar='OPT', nargs='*', default=[], @@ -307,7 +324,7 @@ def PrepareParser(): command_opts.add_argument('--arg', dest='test_args', nargs='*', default=[], metavar='ARG', help='argument passed to test') command_opts.add_argument('--image', type=str, help='path to image') - command_opts.add_argument('--raw-cmd', dest='raw_cmd', type=str, + command_opts.add_argument('--raw-cmd', type=str, help='bisect with this command, ignore other ' 'command options') bisection_opts = parser.add_argument_group('bisection options') @@ -315,11 +332,22 @@ def PrepareParser(): default=False, help='x64 mode') bisection_opts.add_argument( '--device', action='store_true', default=False, help='run on device') + bisection_opts.add_argument( + '--device-serial', help='device serial number, implies --device') bisection_opts.add_argument('--expected-output', type=str, help='file containing expected output') bisection_opts.add_argument( - '--check-script', dest='check_script', type=str, + '--expected-retcode', type=str, help='expected normalized return code', + choices=[RetCode.SUCCESS.name, RetCode.TIMEOUT.name, RetCode.ERROR.name]) + bisection_opts.add_argument( + '--check-script', type=str, help='script comparing output and expected output') + bisection_opts.add_argument( + '--logfile', type=str, help='custom logfile location') + bisection_opts.add_argument('--cleanup', action='store_true', + default=False, help='clean up after bisecting') + bisection_opts.add_argument('--timeout', type=int, default=60, + help='if timeout seconds pass assume test failed') bisection_opts.add_argument('--verbose', action='store_true', default=False, help='enable verbose output') return parser @@ -334,8 +362,8 @@ def PrepareBaseCommand(args, classpath): if not args.device: base_cmd += ['-XXlib:{0}'.format(args.lib)] if not args.image: - image_path = '{0}/framework/core-optimizing-pic.art'.format( - 
GetEnvVariableOrError('ANDROID_HOST_OUT')) + image_path = (GetEnvVariableOrError('ANDROID_HOST_OUT') + + DEFAULT_IMAGE_RELATIVE_PATH) else: image_path = args.image base_cmd += ['-Ximage:{0}'.format(image_path)] @@ -351,15 +379,24 @@ def main(): args = parser.parse_args() if not args.raw_cmd and (not args.classpath or not args.classname): parser.error('Either --raw-cmd or both -cp and --class are required') + if args.device_serial: + args.device = True + if args.expected_retcode: + args.expected_retcode = RetCode[args.expected_retcode] + if not args.expected_retcode and not args.check_script: + args.expected_retcode = RetCode.SUCCESS # Prepare environment classpath = args.classpath if args.device: - test_env = DeviceTestEnv() + test_env = DeviceTestEnv( + 'bisection_search_', args.cleanup, args.logfile, args.timeout, + args.device_serial) if classpath: classpath = test_env.PushClasspath(classpath) else: - test_env = HostTestEnv(args.x64) + test_env = HostTestEnv( + 'bisection_search_', args.cleanup, args.logfile, args.timeout, args.x64) base_cmd = PrepareBaseCommand(args, classpath) output_checker = None if args.expected_output: @@ -372,11 +409,11 @@ def main(): # Perform the search try: - testable = Dex2OatWrapperTestable(base_cmd, test_env, output_checker, - args.verbose) + testable = Dex2OatWrapperTestable(base_cmd, test_env, args.expected_retcode, + output_checker, args.verbose) (method, opt_pass) = BugSearch(testable) except Exception as e: - print('Error. 
Refer to logfile: {0}'.format(test_env.logfile.name)) + print('Error occurred.\nLogfile: {0}'.format(test_env.logfile.name)) test_env.logfile.write('Exception: {0}\n'.format(e)) raise diff --git a/tools/bisection_search/common.py b/tools/bisection_search/common.py index d5029bb970..3d92ee5a60 100755 --- a/tools/bisection_search/common.py +++ b/tools/bisection_search/common.py @@ -18,7 +18,10 @@ import abc import os +import signal import shlex +import shutil +import time from subprocess import check_call from subprocess import PIPE @@ -29,6 +32,9 @@ from subprocess import TimeoutExpired from tempfile import mkdtemp from tempfile import NamedTemporaryFile +from enum import Enum +from enum import unique + # Temporary directory path on device. DEVICE_TMP_PATH = '/data/local/tmp' @@ -36,6 +42,58 @@ DEVICE_TMP_PATH = '/data/local/tmp' DALVIK_CACHE_ARCHS = ['arm', 'arm64', 'x86', 'x86_64'] +@unique +class RetCode(Enum): + """Enum representing normalized return codes.""" + SUCCESS = 0 + TIMEOUT = 1 + ERROR = 2 + NOTCOMPILED = 3 + NOTRUN = 4 + + +@unique +class LogSeverity(Enum): + VERBOSE = 0 + DEBUG = 1 + INFO = 2 + WARNING = 3 + ERROR = 4 + FATAL = 5 + SILENT = 6 + + @property + def symbol(self): + return self.name[0] + + @classmethod + def FromSymbol(cls, s): + for log_severity in LogSeverity: + if log_severity.symbol == s: + return log_severity + raise ValueError("{0} is not a valid log severity symbol".format(s)) + + def __ge__(self, other): + if self.__class__ is other.__class__: + return self.value >= other.value + return NotImplemented + + def __gt__(self, other): + if self.__class__ is other.__class__: + return self.value > other.value + return NotImplemented + + def __le__(self, other): + if self.__class__ is other.__class__: + return self.value <= other.value + return NotImplemented + + def __lt__(self, other): + if self.__class__ is other.__class__: + return self.value < other.value + return NotImplemented + + def GetEnvVariableOrError(variable_name): """Gets 
value of an environmental variable. @@ -70,35 +128,51 @@ def _DexArchCachePaths(android_data_path): for arch in DALVIK_CACHE_ARCHS) -def _RunCommandForOutputAndLog(cmd, env, logfile, timeout=60): - """Runs command and logs its output. Returns the output. +def RunCommandForOutput(cmd, env, stdout, stderr, timeout=60): + """Runs command piping output to files, stderr or stdout. Args: cmd: list of strings, command to run. env: shell environment to run the command with. - logfile: file handle to logfile. - timeout: int, timeout in seconds + stdout: file handle or one of Subprocess.PIPE, Subprocess.STDOUT, + Subprocess.DEVNULL, see Popen. + stderr: file handle or one of Subprocess.PIPE, Subprocess.STDOUT, + Subprocess.DEVNULL, see Popen. + timeout: int, timeout in seconds. Returns: - tuple (string, string, int) stdout output, stderr output, return code. + tuple (string, string, RetCode) stdout output, stderr output, normalized + return code. """ - proc = Popen(cmd, stderr=STDOUT, stdout=PIPE, env=env, - universal_newlines=True) - timeouted = False + proc = Popen(cmd, stdout=stdout, stderr=stderr, env=env, + universal_newlines=True, start_new_session=True) try: - (output, _) = proc.communicate(timeout=timeout) + (output, stderr_output) = proc.communicate(timeout=timeout) + if proc.returncode == 0: + retcode = RetCode.SUCCESS + else: + retcode = RetCode.ERROR except TimeoutExpired: - timeouted = True - proc.kill() - (output, _) = proc.communicate() + os.killpg(os.getpgid(proc.pid), signal.SIGTERM) + (output, stderr_output) = proc.communicate() + retcode = RetCode.TIMEOUT + return (output, stderr_output, retcode) + + +def _LogCmdOutput(logfile, cmd, output, retcode): + """Logs output of a command. + + Args: + logfile: file handle to logfile. + cmd: list of strings, command. + output: command output. + retcode: RetCode, normalized retcode. 
+ """ logfile.write('Command:\n{0}\n{1}\nReturn code: {2}\n'.format( - _CommandListToCommandString(cmd), output, - 'TIMEOUT' if timeouted else proc.returncode)) - ret_code = 1 if timeouted else proc.returncode - return (output, ret_code) + CommandListToCommandString(cmd), output, retcode)) -def _CommandListToCommandString(cmd): +def CommandListToCommandString(cmd): """Converts shell command represented as list of strings to a single string. Each element of the list is wrapped in double quotes. @@ -109,7 +183,7 @@ def _CommandListToCommandString(cmd): Returns: string, shell command. """ - return ' '.join(['"{0}"'.format(segment) for segment in cmd]) + return ' '.join([shlex.quote(segment) for segment in cmd]) class FatalError(Exception): @@ -150,15 +224,14 @@ class ITestEnv(object): """ @abc.abstractmethod - def RunCommand(self, cmd, env_updates=None): - """Runs command in environment with updated environmental variables. + def RunCommand(self, cmd, log_severity=LogSeverity.ERROR): + """Runs command in environment. Args: cmd: list of strings, command to run. - env_updates: dict, string to string, maps names of variables to their - updated values. + log_severity: LogSeverity, minimum severity of logs included in output. Returns: - tuple (string, string, int) stdout output, stderr output, return code. + tuple (string, int) output, return code. """ @abc.abstractproperty @@ -175,14 +248,24 @@ class HostTestEnv(ITestEnv): For methods documentation see base class. """ - def __init__(self, x64): + def __init__(self, directory_prefix, cleanup=True, logfile_path=None, + timeout=60, x64=False): """Constructor. Args: + directory_prefix: string, prefix for environment directory name. + cleanup: boolean, if True remove test directory in destructor. + logfile_path: string, can be used to specify custom logfile location. + timeout: int, seconds, time to wait for single test run to finish. x64: boolean, whether to setup in x64 mode. 
""" - self._env_path = mkdtemp(dir='/tmp/', prefix='bisection_search_') - self._logfile = open('{0}/log'.format(self._env_path), 'w+') + self._cleanup = cleanup + self._timeout = timeout + self._env_path = mkdtemp(dir='/tmp/', prefix=directory_prefix) + if logfile_path is None: + self._logfile = open('{0}/log'.format(self._env_path), 'w+') + else: + self._logfile = open(logfile_path, 'w+') os.mkdir('{0}/dalvik-cache'.format(self._env_path)) for arch_cache_path in _DexArchCachePaths(self._env_path): os.mkdir(arch_cache_path) @@ -199,6 +282,10 @@ class HostTestEnv(ITestEnv): # Using dlopen requires load bias on the host. self._shell_env['LD_USE_LOAD_BIAS'] = '1' + def __del__(self): + if self._cleanup: + shutil.rmtree(self._env_path) + def CreateFile(self, name=None): if name is None: f = NamedTemporaryFile(dir=self._env_path, delete=False) @@ -211,13 +298,17 @@ class HostTestEnv(ITestEnv): f.writelines('{0}\n'.format(line) for line in lines) return - def RunCommand(self, cmd, env_updates=None): - if not env_updates: - env_updates = {} + def RunCommand(self, cmd, log_severity=LogSeverity.ERROR): self._EmptyDexCache() env = self._shell_env.copy() - env.update(env_updates) - return _RunCommandForOutputAndLog(cmd, env, self._logfile) + env.update({'ANDROID_LOG_TAGS':'*:' + log_severity.symbol.lower()}) + (output, err_output, retcode) = RunCommandForOutput( + cmd, env, PIPE, PIPE, self._timeout) + # We append err_output to output to stay consistent with DeviceTestEnv + # implementation. + output += err_output + _LogCmdOutput(self._logfile, cmd, output, retcode) + return (output, retcode) @property def logfile(self): @@ -239,16 +330,28 @@ class HostTestEnv(ITestEnv): class DeviceTestEnv(ITestEnv): """Device test environment. Concrete implementation of ITestEnv. - Makes use of HostTestEnv to maintain a test directory on host. Creates an - on device test directory which is kept in sync with the host one. - For methods documentation see base class. 
""" - def __init__(self): - """Constructor.""" - self._host_env_path = mkdtemp(dir='/tmp/', prefix='bisection_search_') - self._logfile = open('{0}/log'.format(self._host_env_path), 'w+') + def __init__(self, directory_prefix, cleanup=True, logfile_path=None, + timeout=60, specific_device=None): + """Constructor. + + Args: + directory_prefix: string, prefix for environment directory name. + cleanup: boolean, if True remove test directory in destructor. + logfile_path: string, can be used to specify custom logfile location. + timeout: int, seconds, time to wait for single test run to finish. + specific_device: string, serial number of device to use. + """ + self._cleanup = cleanup + self._timeout = timeout + self._specific_device = specific_device + self._host_env_path = mkdtemp(dir='/tmp/', prefix=directory_prefix) + if logfile_path is None: + self._logfile = open('{0}/log'.format(self._host_env_path), 'w+') + else: + self._logfile = open(logfile_path, 'w+') self._device_env_path = '{0}/{1}'.format( DEVICE_TMP_PATH, os.path.basename(self._host_env_path)) self._shell_env = os.environ.copy() @@ -257,6 +360,13 @@ class DeviceTestEnv(ITestEnv): for arch_cache_path in _DexArchCachePaths(self._device_env_path): self._AdbMkdir(arch_cache_path) + def __del__(self): + if self._cleanup: + shutil.rmtree(self._host_env_path) + check_call(shlex.split( + 'adb shell if [ -d "{0}" ]; then rm -rf "{0}"; fi' + .format(self._device_env_path))) + def CreateFile(self, name=None): with NamedTemporaryFile(mode='w') as temp_file: self._AdbPush(temp_file.name, self._device_env_path) @@ -271,19 +381,63 @@ class DeviceTestEnv(ITestEnv): self._AdbPush(temp_file.name, file_path) return - def RunCommand(self, cmd, env_updates=None): - if not env_updates: - env_updates = {} + def _ExtractPid(self, brief_log_line): + """Extracts PID from a single logcat line in brief format.""" + pid_start_idx = brief_log_line.find('(') + 2 + if pid_start_idx == -1: + return None + pid_end_idx = 
brief_log_line.find(')', pid_start_idx) + if pid_end_idx == -1: + return None + return brief_log_line[pid_start_idx:pid_end_idx] + + def _ExtractSeverity(self, brief_log_line): + """Extracts LogSeverity from a single logcat line in brief format.""" + if not brief_log_line: + return None + return LogSeverity.FromSymbol(brief_log_line[0]) + + def RunCommand(self, cmd, log_severity=LogSeverity.ERROR): self._EmptyDexCache() - if 'ANDROID_DATA' not in env_updates: - env_updates['ANDROID_DATA'] = self._device_env_path - env_updates_cmd = ' '.join(['{0}={1}'.format(var, val) for var, val - in env_updates.items()]) - cmd = _CommandListToCommandString(cmd) - cmd = ('adb shell "logcat -c && {0} {1} ; logcat -d -s dex2oat:* dex2oatd:*' - '| grep -v "^---------" 1>&2"').format(env_updates_cmd, cmd) - return _RunCommandForOutputAndLog( - shlex.split(cmd), self._shell_env, self._logfile) + env_vars_cmd = 'ANDROID_DATA={0} ANDROID_LOG_TAGS=*:i'.format( + self._device_env_path) + adb_cmd = ['adb'] + if self._specific_device: + adb_cmd += ['-s', self._specific_device] + logcat_cmd = adb_cmd + ['logcat', '-v', 'brief', '-s', '-b', 'main', + '-T', '1', 'dex2oat:*', 'dex2oatd:*'] + logcat_proc = Popen(logcat_cmd, stdout=PIPE, stderr=STDOUT, + universal_newlines=True) + cmd_str = CommandListToCommandString(cmd) + # Print PID of the shell and exec command. We later retrieve this PID and + # use it to filter dex2oat logs, keeping those with matching parent PID. + device_cmd = ('echo $$ && ' + env_vars_cmd + ' exec ' + cmd_str) + cmd = adb_cmd + ['shell', device_cmd] + (output, _, retcode) = RunCommandForOutput(cmd, self._shell_env, PIPE, + STDOUT, self._timeout) + # We need to make sure to only kill logcat once all relevant logs arrive. + # Sleep is used for simplicity. 
+ time.sleep(0.5) + logcat_proc.kill() + end_of_first_line = output.find('\n') + if end_of_first_line != -1: + parent_pid = output[:end_of_first_line] + output = output[end_of_first_line + 1:] + logcat_output, _ = logcat_proc.communicate() + logcat_lines = logcat_output.splitlines(keepends=True) + dex2oat_pids = [] + for line in logcat_lines: + # Dex2oat was started by our runtime instance. + if 'Running dex2oat (parent PID = ' + parent_pid in line: + dex2oat_pids.append(self._ExtractPid(line)) + break + if dex2oat_pids: + for line in logcat_lines: + if (self._ExtractPid(line) in dex2oat_pids and + self._ExtractSeverity(line) >= log_severity): + output += line + _LogCmdOutput(self._logfile, cmd, output, retcode) + return (output, retcode) @property def logfile(self): diff --git a/tools/javafuzz/README.md b/tools/javafuzz/README.md index 68fc171aa9..b08075a9d8 100644 --- a/tools/javafuzz/README.md +++ b/tools/javafuzz/README.md @@ -39,9 +39,10 @@ a fixed testing class named Test. So a typical test run looks as follows. How to start the JavaFuzz tests =============================== - run_java_fuzz_test.py [--num_tests] - [--device] - [--mode1=mode] [--mode2=mode] + run_java_fuzz_test.py + [--num_tests=#TESTS] + [--device=DEVICE] + [--mode1=MODE] [--mode2=MODE] where diff --git a/tools/javafuzz/run_java_fuzz_test.py b/tools/javafuzz/run_java_fuzz_test.py index 5f527b804b..51d00be373 100755 --- a/tools/javafuzz/run_java_fuzz_test.py +++ b/tools/javafuzz/run_java_fuzz_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3.4 # # Copyright (C) 2016 The Android Open Source Project # @@ -16,68 +16,71 @@ import abc import argparse +import filecmp + +from glob import glob + +import os +import shlex +import shutil import subprocess import sys -import os from tempfile import mkdtemp -from threading import Timer -# Normalized return codes. 
-EXIT_SUCCESS = 0 -EXIT_TIMEOUT = 1 -EXIT_NOTCOMPILED = 2 -EXIT_NOTRUN = 3 +sys.path.append(os.path.dirname(os.path.dirname( + os.path.realpath(__file__)))) + +from bisection_search.common import RetCode +from bisection_search.common import CommandListToCommandString +from bisection_search.common import FatalError +from bisection_search.common import GetEnvVariableOrError +from bisection_search.common import RunCommandForOutput +from bisection_search.common import DeviceTestEnv + +# Return codes supported by bisection bug search. +BISECTABLE_RET_CODES = (RetCode.SUCCESS, RetCode.ERROR, RetCode.TIMEOUT) # # Utility methods. # -def RunCommand(cmd, args, out, err, timeout = 5): + +def RunCommand(cmd, out, err, timeout=5): """Executes a command, and returns its return code. Args: - cmd: string, a command to execute - args: string, arguments to pass to command (or None) + cmd: list of strings, a command to execute out: string, file name to open for stdout (or None) err: string, file name to open for stderr (or None) timeout: int, time out in seconds Returns: - return code of running command (forced EXIT_TIMEOUT on timeout) + RetCode, return code of running command (forced RetCode.TIMEOUT + on timeout) """ - cmd = 'exec ' + cmd # preserve pid - if args != None: - cmd = cmd + ' ' + args - outf = None - if out != None: + devnull = subprocess.DEVNULL + outf = devnull + if out is not None: outf = open(out, mode='w') - errf = None - if err != None: + errf = devnull + if err is not None: errf = open(err, mode='w') - proc = subprocess.Popen(cmd, stdout=outf, stderr=errf, shell=True) - timer = Timer(timeout, proc.kill) # enforces timeout - timer.start() - proc.communicate() - if timer.is_alive(): - timer.cancel() - returncode = proc.returncode - else: - returncode = EXIT_TIMEOUT - if outf != None: + (_, _, retcode) = RunCommandForOutput(cmd, None, outf, errf, timeout) + if outf != devnull: outf.close() - if errf != None: + if errf != devnull: errf.close() - return returncode + 
return retcode + def GetJackClassPath(): """Returns Jack's classpath.""" - top = os.environ.get('ANDROID_BUILD_TOP') - if top == None: - raise FatalError('Cannot find AOSP build top') + top = GetEnvVariableOrError('ANDROID_BUILD_TOP') libdir = top + '/out/host/common/obj/JAVA_LIBRARIES' return libdir + '/core-libart-hostdex_intermediates/classes.jack:' \ + libdir + '/core-oj-hostdex_intermediates/classes.jack' + def GetExecutionModeRunner(device, mode): """Returns a runner for the given execution mode. @@ -92,49 +95,44 @@ def GetExecutionModeRunner(device, mode): if mode == 'ri': return TestRunnerRIOnHost() if mode == 'hint': - return TestRunnerArtOnHost(True) + return TestRunnerArtIntOnHost() if mode == 'hopt': - return TestRunnerArtOnHost(False) + return TestRunnerArtOptOnHost() if mode == 'tint': - return TestRunnerArtOnTarget(device, True) + return TestRunnerArtIntOnTarget(device) if mode == 'topt': - return TestRunnerArtOnTarget(device, False) + return TestRunnerArtOptOnTarget(device) raise FatalError('Unknown execution mode') -def GetReturnCode(retc): - """Returns a string representation of the given normalized return code. - Args: - retc: int, normalized return code - Returns: - string representation of normalized return code - Raises: - FatalError: error for unknown normalized return code - """ - if retc == EXIT_SUCCESS: - return 'SUCCESS' - if retc == EXIT_TIMEOUT: - return 'TIMED-OUT' - if retc == EXIT_NOTCOMPILED: - return 'NOT-COMPILED' - if retc == EXIT_NOTRUN: - return 'NOT-RUN' - raise FatalError('Unknown normalized return code') - # # Execution mode classes. 
# + class TestRunner(object): """Abstraction for running a test in a particular execution mode.""" __meta_class__ = abc.ABCMeta - def GetDescription(self): + @abc.abstractproperty + def description(self): """Returns a description string of the execution mode.""" - return self._description - def GetId(self): + @abc.abstractproperty + def id(self): """Returns a short string that uniquely identifies the execution mode.""" - return self._id + + @property + def output_file(self): + return self.id + '_out.txt' + + @abc.abstractmethod + def GetBisectionSearchArgs(self): + """Get arguments to pass to bisection search tool. + + Returns: + list of strings - arguments for bisection search tool, or None if + runner is not bisectable + """ @abc.abstractmethod def CompileAndRunTest(self): @@ -142,8 +140,7 @@ class TestRunner(object): Ensures that the current Test.java in the temporary directory is compiled and executed under the current execution mode. On success, transfers the - generated output to the file GetId()_out.txt in the temporary directory. - Cleans up after itself. + generated output to the file self.output_file in the temporary directory. Most nonzero return codes are assumed non-divergent, since systems may exit in different ways. This is enforced by normalizing return codes. 
@@ -151,112 +148,196 @@ class TestRunner(object): Returns: normalized return code """ - pass + class TestRunnerRIOnHost(TestRunner): """Concrete test runner of the reference implementation on host.""" - def __init__(self): - """Constructor for the RI tester.""" - self._description = 'RI on host' - self._id = 'RI' + @property + def description(self): + return 'RI on host' + + @property + def id(self): + return 'RI' def CompileAndRunTest(self): - if RunCommand('javac', 'Test.java', - out=None, err=None, timeout=30) == EXIT_SUCCESS: - retc = RunCommand('java', 'Test', 'RI_run_out.txt', err=None) - if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT: - retc = EXIT_NOTRUN + if RunCommand(['javac', 'Test.java'], + out=None, err=None, timeout=30) == RetCode.SUCCESS: + retc = RunCommand(['java', 'Test'], self.output_file, err=None) else: - retc = EXIT_NOTCOMPILED - # Cleanup and return. - RunCommand('rm', '-f Test.class', out=None, err=None) + retc = RetCode.NOTCOMPILED return retc + def GetBisectionSearchArgs(self): + return None + + class TestRunnerArtOnHost(TestRunner): - """Concrete test runner of Art on host (interpreter or optimizing).""" + """Abstract test runner of Art on host.""" - def __init__(self, interpreter): + def __init__(self, extra_args=None): """Constructor for the Art on host tester. Args: - interpreter: boolean, selects between interpreter or optimizing + extra_args: list of strings, extra arguments for dalvikvm """ - self._art_args = '-cp classes.dex Test' - if interpreter: - self._description = 'Art interpreter on host' - self._id = 'HInt' - self._art_args = '-Xint ' + self._art_args - else: - self._description = 'Art optimizing on host' - self._id = 'HOpt' - self._jack_args = '-cp ' + GetJackClassPath() + ' --output-dex . 
Test.java' + self._art_cmd = ['/bin/bash', 'art', '-cp', 'classes.dex'] + if extra_args is not None: + self._art_cmd += extra_args + self._art_cmd.append('Test') + self._jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.', + 'Test.java'] def CompileAndRunTest(self): - if RunCommand('jack', self._jack_args, - out=None, err='jackerr.txt', timeout=30) == EXIT_SUCCESS: - out = self.GetId() + '_run_out.txt' - retc = RunCommand('art', self._art_args, out, 'arterr.txt') - if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT: - retc = EXIT_NOTRUN + if RunCommand(['jack'] + self._jack_args, out=None, err='jackerr.txt', + timeout=30) == RetCode.SUCCESS: + retc = RunCommand(self._art_cmd, self.output_file, 'arterr.txt') else: - retc = EXIT_NOTCOMPILED - # Cleanup and return. - RunCommand('rm', '-rf classes.dex jackerr.txt arterr.txt android-data*', - out=None, err=None) + retc = RetCode.NOTCOMPILED return retc -# TODO: very rough first version without proper cache, -# reuse staszkiewicz' module for properly setting up dalvikvm on target. 
+ +class TestRunnerArtIntOnHost(TestRunnerArtOnHost): + """Concrete test runner of interpreter mode Art on host.""" + + def __init__(self): + """Constructor.""" + super().__init__(['-Xint']) + + @property + def description(self): + return 'Art interpreter on host' + + @property + def id(self): + return 'HInt' + + def GetBisectionSearchArgs(self): + return None + + +class TestRunnerArtOptOnHost(TestRunnerArtOnHost): + """Concrete test runner of optimizing compiler mode Art on host.""" + + def __init__(self): + """Constructor.""" + super().__init__(None) + + @property + def description(self): + return 'Art optimizing on host' + + @property + def id(self): + return 'HOpt' + + def GetBisectionSearchArgs(self): + cmd_str = CommandListToCommandString( + self._art_cmd[0:2] + ['{ARGS}'] + self._art_cmd[2:]) + return ['--raw-cmd={0}'.format(cmd_str), '--timeout', str(30)] + + class TestRunnerArtOnTarget(TestRunner): - """Concrete test runner of Art on target (interpreter or optimizing).""" + """Abstract test runner of Art on target.""" - def __init__(self, device, interpreter): + def __init__(self, device, extra_args=None): """Constructor for the Art on target tester. Args: device: string, target device serial number (or None) - interpreter: boolean, selects between interpreter or optimizing + extra_args: list of strings, extra arguments for dalvikvm """ - self._dalvik_args = 'shell dalvikvm -cp /data/local/tmp/classes.dex Test' - if interpreter: - self._description = 'Art interpreter on target' - self._id = 'TInt' - self._dalvik_args = '-Xint ' + self._dalvik_args - else: - self._description = 'Art optimizing on target' - self._id = 'TOpt' - self._adb = 'adb' - if device != None: - self._adb = self._adb + ' -s ' + device - self._jack_args = '-cp ' + GetJackClassPath() + ' --output-dex . 
Test.java' + self._test_env = DeviceTestEnv('javafuzz_', specific_device=device) + self._dalvik_cmd = ['dalvikvm'] + if extra_args is not None: + self._dalvik_cmd += extra_args + self._device = device + self._jack_args = ['-cp', GetJackClassPath(), '--output-dex', '.', + 'Test.java'] + self._device_classpath = None def CompileAndRunTest(self): - if RunCommand('jack', self._jack_args, - out=None, err='jackerr.txt', timeout=30) == EXIT_SUCCESS: - if RunCommand(self._adb, 'push classes.dex /data/local/tmp/', - 'adb.txt', err=None) != EXIT_SUCCESS: - raise FatalError('Cannot push to target device') - out = self.GetId() + '_run_out.txt' - retc = RunCommand(self._adb, self._dalvik_args, out, err=None) - if retc != EXIT_SUCCESS and retc != EXIT_TIMEOUT: - retc = EXIT_NOTRUN + if RunCommand(['jack'] + self._jack_args, out=None, err='jackerr.txt', + timeout=30) == RetCode.SUCCESS: + self._device_classpath = self._test_env.PushClasspath('classes.dex') + cmd = self._dalvik_cmd + ['-cp', self._device_classpath, 'Test'] + (output, retc) = self._test_env.RunCommand( + cmd, {'ANDROID_LOG_TAGS': '*:s'}) + with open(self.output_file, 'w') as run_out: + run_out.write(output) else: - retc = EXIT_NOTCOMPILED - # Cleanup and return. - RunCommand('rm', '-f classes.dex jackerr.txt adb.txt', - out=None, err=None) - RunCommand(self._adb, 'shell rm -f /data/local/tmp/classes.dex', - out=None, err=None) + retc = RetCode.NOTCOMPILED return retc + def GetBisectionSearchArgs(self): + cmd_str = CommandListToCommandString( + self._dalvik_cmd + ['-cp',self._device_classpath, 'Test']) + cmd = ['--raw-cmd={0}'.format(cmd_str), '--timeout', str(30)] + if self._device: + cmd += ['--device-serial', self._device] + else: + cmd.append('--device') + return cmd + + +class TestRunnerArtIntOnTarget(TestRunnerArtOnTarget): + """Concrete test runner of interpreter mode Art on target.""" + + def __init__(self, device): + """Constructor. 
+ + Args: + device: string, target device serial number (or None) + """ + super().__init__(device, ['-Xint']) + + @property + def description(self): + return 'Art interpreter on target' + + @property + def id(self): + return 'TInt' + + def GetBisectionSearchArgs(self): + return None + + +class TestRunnerArtOptOnTarget(TestRunnerArtOnTarget): + """Concrete test runner of optimizing compiler mode Art on target.""" + + def __init__(self, device): + """Constructor. + + Args: + device: string, target device serial number (or None) + """ + super().__init__(device, None) + + @property + def description(self): + return 'Art optimizing on target' + + @property + def id(self): + return 'TOpt' + + def GetBisectionSearchArgs(self): + cmd_str = CommandListToCommandString( + self._dalvik_cmd + ['-cp', self._device_classpath, 'Test']) + cmd = ['--raw-cmd={0}'.format(cmd_str), '--timeout', str(30)] + if self._device: + cmd += ['--device-serial', self._device] + else: + cmd.append('--device') + return cmd + + # # Tester classes. # -class FatalError(Exception): - """Fatal error in the tester.""" - pass class JavaFuzzTester(object): """Tester that runs JavaFuzz many times and report divergences.""" @@ -265,10 +346,10 @@ class JavaFuzzTester(object): """Constructor for the tester. 
Args: - num_tests: int, number of tests to run - device: string, target device serial number (or None) - mode1: string, execution mode for first runner - mode2: string, execution mode for second runner + num_tests: int, number of tests to run + device: string, target device serial number (or None) + mode1: string, execution mode for first runner + mode2: string, execution mode for second runner """ self._num_tests = num_tests self._device = device @@ -291,8 +372,9 @@ class JavaFuzzTester(object): FatalError: error when temp directory cannot be constructed """ self._save_dir = os.getcwd() - self._tmp_dir = mkdtemp(dir="/tmp/") - if self._tmp_dir == None: + self._results_dir = mkdtemp(dir='/tmp/') + self._tmp_dir = mkdtemp(dir=self._results_dir) + if self._tmp_dir is None or self._results_dir is None: raise FatalError('Cannot obtain temp directory') os.chdir(self._tmp_dir) return self @@ -300,37 +382,38 @@ class JavaFuzzTester(object): def __exit__(self, etype, evalue, etraceback): """On exit, re-enters previously saved current directory and cleans up.""" os.chdir(self._save_dir) + shutil.rmtree(self._tmp_dir) if self._num_divergences == 0: - RunCommand('rm', '-rf ' + self._tmp_dir, out=None, err=None) + shutil.rmtree(self._results_dir) def Run(self): """Runs JavaFuzz many times and report divergences.""" - print - print '**\n**** JavaFuzz Testing\n**' - print - print '#Tests :', self._num_tests - print 'Device :', self._device - print 'Directory :', self._tmp_dir - print 'Exec-mode1:', self._runner1.GetDescription() - print 'Exec-mode2:', self._runner2.GetDescription() - print + print() + print('**\n**** JavaFuzz Testing\n**') + print() + print('#Tests :', self._num_tests) + print('Device :', self._device) + print('Directory :', self._results_dir) + print('Exec-mode1:', self._runner1.description) + print('Exec-mode2:', self._runner2.description) + print() self.ShowStats() for self._test in range(1, self._num_tests + 1): self.RunJavaFuzzTest() self.ShowStats() if 
self._num_divergences == 0: - print '\n\nsuccess (no divergences)\n' + print('\n\nsuccess (no divergences)\n') else: - print '\n\nfailure (divergences)\n' + print('\n\nfailure (divergences)\n') def ShowStats(self): """Shows current statistics (on same line) while tester is running.""" - print '\rTests:', self._test, \ - 'Success:', self._num_success, \ - 'Not-compiled:', self._num_not_compiled, \ - 'Not-run:', self._num_not_run, \ - 'Timed-out:', self._num_timed_out, \ - 'Divergences:', self._num_divergences, + print('\rTests:', self._test, \ + 'Success:', self._num_success, \ + 'Not-compiled:', self._num_not_compiled, \ + 'Not-run:', self._num_not_run, \ + 'Timed-out:', self._num_timed_out, \ + 'Divergences:', self._num_divergences, end='') sys.stdout.flush() def RunJavaFuzzTest(self): @@ -347,8 +430,7 @@ class JavaFuzzTester(object): Raises: FatalError: error when javafuzz fails """ - if RunCommand('javafuzz', args=None, - out='Test.java', err=None) != EXIT_SUCCESS: + if RunCommand(['javafuzz'], out='Test.java', err=None) != RetCode.SUCCESS: raise FatalError('Unexpected error while running JavaFuzz') def CheckForDivergence(self, retc1, retc2): @@ -360,38 +442,85 @@ class JavaFuzzTester(object): """ if retc1 == retc2: # Non-divergent in return code. - if retc1 == EXIT_SUCCESS: + if retc1 == RetCode.SUCCESS: # Both compilations and runs were successful, inspect generated output. 
- args = self._runner1.GetId() + '_run_out.txt ' \ - + self._runner2.GetId() + '_run_out.txt' - if RunCommand('diff', args, out=None, err=None) != EXIT_SUCCESS: - self.ReportDivergence('divergence in output') + runner1_out = self._runner1.output_file + runner2_out = self._runner2.output_file + if not filecmp.cmp(runner1_out, runner2_out, shallow=False): + self.ReportDivergence(retc1, retc2, is_output_divergence=True) else: self._num_success += 1 - elif retc1 == EXIT_TIMEOUT: + elif retc1 == RetCode.TIMEOUT: self._num_timed_out += 1 - elif retc1 == EXIT_NOTCOMPILED: + elif retc1 == RetCode.NOTCOMPILED: self._num_not_compiled += 1 else: self._num_not_run += 1 else: # Divergent in return code. - self.ReportDivergence('divergence in return code: ' + - GetReturnCode(retc1) + ' vs. ' + - GetReturnCode(retc2)) + self.ReportDivergence(retc1, retc2, is_output_divergence=False) + + def GetCurrentDivergenceDir(self): + return self._results_dir + '/divergence' + str(self._num_divergences) - def ReportDivergence(self, reason): + def ReportDivergence(self, retc1, retc2, is_output_divergence): """Reports and saves a divergence.""" self._num_divergences += 1 - print '\n', self._test, reason + print('\n' + str(self._num_divergences), end='') + if is_output_divergence: + print(' divergence in output') + else: + print(' divergence in return code: ' + retc1.name + ' vs. ' + + retc2.name) # Save. - ddir = 'divergence' + str(self._test) - RunCommand('mkdir', ddir, out=None, err=None) - RunCommand('mv', 'Test.java *.txt ' + ddir, out=None, err=None) + ddir = self.GetCurrentDivergenceDir() + os.mkdir(ddir) + for f in glob('*.txt') + ['Test.java']: + shutil.copy(f, ddir) + # Maybe run bisection bug search. 
+ if retc1 in BISECTABLE_RET_CODES and retc2 in BISECTABLE_RET_CODES: + self.MaybeBisectDivergence(retc1, retc2, is_output_divergence) + + def RunBisectionSearch(self, args, expected_retcode, expected_output, + runner_id): + ddir = self.GetCurrentDivergenceDir() + outfile_path = ddir + '/' + runner_id + '_bisection_out.txt' + logfile_path = ddir + '/' + runner_id + '_bisection_log.txt' + errfile_path = ddir + '/' + runner_id + '_bisection_err.txt' + args = list(args) + ['--logfile', logfile_path, '--cleanup'] + args += ['--expected-retcode', expected_retcode.name] + if expected_output: + args += ['--expected-output', expected_output] + bisection_search_path = os.path.join( + GetEnvVariableOrError('ANDROID_BUILD_TOP'), + 'art/tools/bisection_search/bisection_search.py') + if RunCommand([bisection_search_path] + args, out=outfile_path, + err=errfile_path, timeout=300) == RetCode.TIMEOUT: + print('Bisection search TIMEOUT') + + def MaybeBisectDivergence(self, retc1, retc2, is_output_divergence): + bisection_args1 = self._runner1.GetBisectionSearchArgs() + bisection_args2 = self._runner2.GetBisectionSearchArgs() + if is_output_divergence: + maybe_output1 = self._runner1.output_file + maybe_output2 = self._runner2.output_file + else: + maybe_output1 = maybe_output2 = None + if bisection_args1 is not None: + self.RunBisectionSearch(bisection_args1, retc2, maybe_output2, + self._runner1.id) + if bisection_args2 is not None: + self.RunBisectionSearch(bisection_args2, retc1, maybe_output1, + self._runner2.id) def CleanupTest(self): """Cleans up after a single test run.""" - RunCommand('rm', '-f Test.java *.txt', out=None, err=None) + for file_name in os.listdir(self._tmp_dir): + file_path = os.path.join(self._tmp_dir, file_name) + if os.path.isfile(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) def main(): @@ -406,11 +535,11 @@ def main(): help='execution mode 2 (default: hopt)') args = parser.parse_args() if args.mode1 == 
args.mode2: - raise FatalError("Identical execution modes given") + raise FatalError('Identical execution modes given') # Run the JavaFuzz tester. with JavaFuzzTester(args.num_tests, args.device, args.mode1, args.mode2) as fuzzer: fuzzer.Run() -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt index 95f0c2dcf2..0e289a66d7 100644 --- a/tools/libcore_failures_concurrent_collector.txt +++ b/tools/libcore_failures_concurrent_collector.txt @@ -10,11 +10,4 @@ */ [ -{ - description: "Assertion failing on the concurrent collector configuration.", - result: EXEC_FAILED, - names: ["jsr166.LinkedTransferQueueTest#testTransfer2", - "jsr166.LinkedTransferQueueTest#testWaitingConsumer"], - bug: 25883050 -} ] diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh index 01dae432e6..74b0f16466 100755 --- a/tools/run-jdwp-tests.sh +++ b/tools/run-jdwp-tests.sh @@ -43,7 +43,7 @@ vm_command="--vm-command=$art" image_compiler_option="" debug="no" verbose="no" -image="-Ximage:/data/art-test/core-optimizing-pic.art" +image="-Ximage:/data/art-test/core.art" vm_args="" # By default, we run the whole JDWP test suite. test="org.apache.harmony.jpda.tests.share.AllTests" diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh index 2a6e172c69..01c7f203f9 100755 --- a/tools/run-libcore-tests.sh +++ b/tools/run-libcore-tests.sh @@ -95,7 +95,7 @@ while true; do if [[ "$1" == "--mode=device" ]]; then vogar_args="$vogar_args --device-dir=/data/local/tmp" vogar_args="$vogar_args --vm-command=/data/local/tmp/system/bin/art" - vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core-optimizing.art" + vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art" shift elif [[ "$1" == "--mode=host" ]]; then # We explicitly give a wrong path for the image, to ensure vogar |