-rw-r--r--  Android.mk | 83
-rw-r--r--  build/Android.common.mk | 75
-rw-r--r--  build/Android.gtest.mk | 16
-rw-r--r--  build/Android.oat.mk | 3
-rw-r--r--  compiler/Android.mk | 1
-rw-r--r--  compiler/common_compiler_test.h | 60
-rw-r--r--  compiler/dex/compiler_enums.h | 8
-rw-r--r--  compiler/dex/frontend.cc | 11
-rw-r--r--  compiler/dex/mir_optimization.cc | 5
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 28
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h | 4
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 103
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc | 6
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 61
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 17
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 12
-rw-r--r--  compiler/dex/quick/gen_common.cc | 279
-rw-r--r--  compiler/dex/quick/gen_invoke.cc | 136
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc | 23
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc | 15
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h | 4
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc | 83
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc | 8
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc | 36
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 47
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 54
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc | 5
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 8
-rw-r--r--  compiler/dex/quick/x86/fp_x86.cc | 2
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc | 252
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc | 19
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 29
-rw-r--r--  compiler/elf_writer_test.cc | 4
-rw-r--r--  compiler/image_test.cc | 2
-rw-r--r--  compiler/image_writer.cc | 24
-rw-r--r--  compiler/oat_test.cc | 13
-rw-r--r--  compiler/oat_writer.cc | 1271
-rw-r--r--  compiler/oat_writer.h | 120
-rw-r--r--  compiler/optimizing/code_generator.cc | 102
-rw-r--r--  compiler/optimizing/code_generator.h | 114
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 163
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 11
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 226
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 11
-rw-r--r--  compiler/optimizing/nodes.cc | 167
-rw-r--r--  compiler/optimizing/nodes.h | 223
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 4
-rw-r--r--  compiler/optimizing/pretty_printer.h | 14
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 134
-rw-r--r--  compiler/optimizing/ssa_builder.h | 71
-rw-r--r--  compiler/optimizing/ssa_test.cc | 444
-rw-r--r--  compiler/utils/growable_array.h | 1
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 5
-rw-r--r--  dex2oat/dex2oat.cc | 5
-rw-r--r--  disassembler/disassembler_x86.cc | 34
-rw-r--r--  oatdump/oatdump.cc | 25
-rw-r--r--  runtime/Android.mk | 3
-rw-r--r--  runtime/arch/arch_test.cc | 498
-rw-r--r--  runtime/arch/arm/asm_support_arm.S | 7
-rw-r--r--  runtime/arch/arm/asm_support_arm.h | 8
-rw-r--r--  runtime/arch/arm/jni_entrypoints_arm.S | 24
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 31
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.S | 13
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h | 13
-rw-r--r--  runtime/arch/arm64/jni_entrypoints_arm64.S | 38
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 130
-rw-r--r--  runtime/arch/mips/asm_support_mips.S | 8
-rw-r--r--  runtime/arch/mips/asm_support_mips.h | 8
-rw-r--r--  runtime/arch/mips/jni_entrypoints_mips.S | 33
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 18
-rw-r--r--  runtime/arch/stub_test.cc | 357
-rw-r--r--  runtime/arch/x86/asm_support_x86.S | 4
-rw-r--r--  runtime/arch/x86/asm_support_x86.h | 4
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 19
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 14
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.h | 4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 156
-rw-r--r--  runtime/atomic.h | 67
-rw-r--r--  runtime/base/bit_vector.cc | 127
-rw-r--r--  runtime/base/bit_vector.h | 17
-rw-r--r--  runtime/base/bit_vector_test.cc | 28
-rw-r--r--  runtime/base/logging.cc | 2
-rw-r--r--  runtime/base/logging.h | 3
-rw-r--r--  runtime/base/mutex.cc | 22
-rw-r--r--  runtime/catch_block_stack_visitor.cc | 66
-rw-r--r--  runtime/catch_block_stack_visitor.h | 36
-rw-r--r--  runtime/check_jni.cc | 11
-rw-r--r--  runtime/class_linker.cc | 75
-rw-r--r--  runtime/class_linker.h | 13
-rw-r--r--  runtime/class_linker_test.cc | 2
-rw-r--r--  runtime/debugger.cc | 65
-rw-r--r--  runtime/debugger.h | 15
-rw-r--r--  runtime/deoptimize_stack_visitor.cc | 87
-rw-r--r--  runtime/deoptimize_stack_visitor.h | 55
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 4
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.cc | 75
-rw-r--r--  runtime/entrypoints/portable/portable_thread_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc | 1
-rw-r--r--  runtime/exception_test.cc | 32
-rw-r--r--  runtime/gc/accounting/heap_bitmap-inl.h | 70
-rw-r--r--  runtime/gc/accounting/heap_bitmap.cc | 59
-rw-r--r--  runtime/gc/accounting/heap_bitmap.h | 16
-rw-r--r--  runtime/gc/accounting/remembered_set.cc | 26
-rw-r--r--  runtime/gc/accounting/remembered_set.h | 1
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 49
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 72
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 19
-rw-r--r--  runtime/gc/allocator/rosalloc-inl.h | 2
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 677
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 73
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 13
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 144
-rw-r--r--  runtime/gc/collector/mark_sweep.h | 11
-rw-r--r--  runtime/gc/collector/semi_space-inl.h | 53
-rw-r--r--  runtime/gc/collector/semi_space.cc | 67
-rw-r--r--  runtime/gc/collector/semi_space.h | 12
-rw-r--r--  runtime/gc/heap.cc | 139
-rw-r--r--  runtime/gc/heap.h | 14
-rw-r--r--  runtime/gc/space/large_object_space.cc | 79
-rw-r--r--  runtime/gc/space/large_object_space.h | 35
-rw-r--r--  runtime/gc/space/malloc_space.cc | 1
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 30
-rw-r--r--  runtime/gc/space/space.cc | 25
-rw-r--r--  runtime/gc/space/space.h | 23
-rw-r--r--  runtime/gc/space/space_test.h | 17
-rw-r--r--  runtime/gc/space/zygote_space.cc | 2
-rw-r--r--  runtime/indirect_reference_table.cc | 13
-rw-r--r--  runtime/instrumentation.cc | 32
-rw-r--r--  runtime/interpreter/interpreter.cc | 8
-rw-r--r--  runtime/interpreter/interpreter_common.h | 77
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 24
-rw-r--r--  runtime/jdwp/jdwp_handler.cc | 7
-rw-r--r--  runtime/jdwp/jdwp_main.cc | 2
-rw-r--r--  runtime/jni_internal.cc | 479
-rw-r--r--  runtime/jni_internal.h | 10
-rw-r--r--  runtime/mirror/array-inl.h | 5
-rw-r--r--  runtime/mirror/array.h | 2
-rw-r--r--  runtime/mirror/art_field.cc | 21
-rw-r--r--  runtime/mirror/art_field.h | 4
-rw-r--r--  runtime/mirror/art_method-inl.h | 34
-rw-r--r--  runtime/mirror/art_method.cc | 86
-rw-r--r--  runtime/mirror/art_method.h | 62
-rw-r--r--  runtime/mirror/class-inl.h | 13
-rw-r--r--  runtime/mirror/class.cc | 10
-rw-r--r--  runtime/mirror/class.h | 16
-rw-r--r--  runtime/mirror/object-inl.h | 92
-rw-r--r--  runtime/mirror/object.cc | 2
-rw-r--r--  runtime/mirror/object.h | 26
-rw-r--r--  runtime/mirror/object_array.h | 2
-rw-r--r--  runtime/monitor.cc | 64
-rw-r--r--  runtime/monitor.h | 4
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 27
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 19
-rw-r--r--  runtime/native/java_lang_Thread.cc | 1
-rw-r--r--  runtime/oat.cc | 23
-rw-r--r--  runtime/oat.h | 24
-rw-r--r--  runtime/oat_file-inl.h | 60
-rw-r--r--  runtime/oat_file.cc | 59
-rw-r--r--  runtime/oat_file.h | 32
-rw-r--r--  runtime/object_callbacks.h | 3
-rw-r--r--  runtime/parsed_options.cc | 46
-rw-r--r--  runtime/quick_exception_handler.cc (renamed from runtime/catch_finder.cc) | 70
-rw-r--r--  runtime/quick_exception_handler.h (renamed from runtime/catch_finder.h) | 43
-rw-r--r--  runtime/runtime.cc | 4
-rw-r--r--  runtime/runtime.h | 4
-rw-r--r--  runtime/runtime_linux.cc | 2
-rw-r--r--  runtime/scoped_thread_state_change.h | 17
-rw-r--r--  runtime/thread.cc | 191
-rw-r--r--  runtime/thread.h | 9
-rw-r--r--  runtime/thread_list.cc | 4
-rw-r--r--  runtime/transaction_test.cc | 6
-rw-r--r--  runtime/utils.cc | 48
-rw-r--r--  runtime/utils.h | 2
-rw-r--r--  runtime/verifier/method_verifier.cc | 108
-rw-r--r--  test/046-reflect/expected.txt | 2
-rw-r--r--  test/046-reflect/src/Main.java | 27
-rw-r--r--  test/083-compiler-regressions/expected.txt | 1
-rw-r--r--  test/083-compiler-regressions/src/Main.java | 27
-rw-r--r--  test/Android.mk | 30
-rw-r--r--  test/ThreadStress/ThreadStress.java | 6
-rwxr-xr-x  test/run-test | 4
182 files changed, 7032 insertions, 3569 deletions
diff --git a/Android.mk b/Android.mk
index 6139cb9ae2..4351be9875 100644
--- a/Android.mk
+++ b/Android.mk
@@ -99,6 +99,8 @@ include $(art_path)/dalvikvm/Android.mk
include $(art_path)/tools/Android.mk
include $(art_build_path)/Android.oat.mk
+
+
# ART_HOST_DEPENDENCIES depends on Android.executable.mk above for ART_HOST_EXECUTABLES
ART_HOST_DEPENDENCIES := $(ART_HOST_EXECUTABLES) $(HOST_OUT_JAVA_LIBRARIES)/core-libart-hostdex.jar
ART_HOST_DEPENDENCIES += $(HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
@@ -110,11 +112,18 @@ ART_TARGET_DEPENDENCIES := $(ART_TARGET_EXECUTABLES) $(TARGET_OUT_JAVA_LIBRARIES
include $(art_path)/test/Android.mk
include $(art_build_path)/Android.gtest.mk
+$(eval $(call combine-art-multi-target-var,ART_TARGET_GTEST_TARGETS))
+$(eval $(call combine-art-multi-target-var,ART_TARGET_GTEST_EXECUTABLES))
+
# The ART_*_TEST_DEPENDENCIES definitions:
# - depend on Android.oattest.mk above for ART_TEST_*_DEX_FILES
# - depend on Android.gtest.mk above for ART_*_GTEST_EXECUTABLES
ART_HOST_TEST_DEPENDENCIES := $(ART_HOST_DEPENDENCIES) $(ART_HOST_GTEST_EXECUTABLES) $(ART_TEST_HOST_DEX_FILES) $(HOST_CORE_IMG_OUT)
-ART_TARGET_TEST_DEPENDENCIES := $(ART_TARGET_DEPENDENCIES) $(ART_TARGET_GTEST_EXECUTABLES) $(ART_TEST_TARGET_DEX_FILES) $(TARGET_CORE_IMG_OUT)
+
+define declare-art-target-test-dependencies-var
+ART_TARGET_TEST_DEPENDENCIES$(1) := $(ART_TARGET_DEPENDENCIES) $(ART_TARGET_GTEST_EXECUTABLES$(1)) $(ART_TEST_TARGET_DEX_FILES$(1)) $(TARGET_CORE_IMG_OUT$(1))
+endef
+$(eval $(call call-art-multi-target-var,declare-art-target-test-dependencies-var,ART_TARGET_TEST_DEPENDENCIES))
include $(art_build_path)/Android.libarttest.mk
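(Illustrative aside, not part of the patch: with the suffix value 64 assumed for ART_PHONY_TEST_TARGET_SUFFIX, the call above expands declare-art-target-test-dependencies-var once per suffix, defining per-architecture variables such as:)

ART_TARGET_TEST_DEPENDENCIES64 := $(ART_TARGET_DEPENDENCIES) $(ART_TARGET_GTEST_EXECUTABLES64) $(ART_TEST_TARGET_DEX_FILES64) $(TARGET_CORE_IMG_OUT64)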
@@ -209,46 +218,70 @@ test-art-host-run-test: test-art-host-run-test-default test-art-host-run-test-in
# target test targets
# "mm test-art-target" to build and run all target tests
-.PHONY: test-art-target
-test-art-target: test-art-target-gtest test-art-target-oat test-art-target-run-test
- @echo test-art-target PASSED
+define declare-test-art-target
+.PHONY: test-art-target$(1)
+test-art-target$(1): test-art-target-gtest$(1) test-art-target-oat$(1) test-art-target-run-test$(1)
+ @echo test-art-target$(1) PASSED
+endef
+$(eval $(call call-art-multi-target-rule,declare-test-art-target,test-art-target))
+
+
+define declare-test-art-target-dependencies
+.PHONY: test-art-target-dependencies$(1)
+test-art-target-dependencies$(1): $(ART_TARGET_TEST_DEPENDENCIES$(1)) $(ART_TEST_OUT)/libarttest.so
+endef
+$(eval $(call call-art-multi-target-rule,declare-test-art-target-dependencies,test-art-target-dependencies))
-.PHONY: test-art-target-dependencies
-test-art-target-dependencies: $(ART_TARGET_TEST_DEPENDENCIES) $(ART_TEST_OUT)/libarttest.so
.PHONY: test-art-target-sync
-test-art-target-sync: test-art-target-dependencies
+test-art-target-sync: test-art-target-dependencies$(ART_PHONY_TEST_TARGET_SUFFIX) test-art-target-dependencies$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
adb remount
adb sync
adb shell mkdir -p $(ART_TEST_DIR)
-.PHONY: test-art-target-gtest
-test-art-target-gtest: $(ART_TARGET_GTEST_TARGETS)
-.PHONY: test-art-target-oat
-test-art-target-oat: $(ART_TEST_TARGET_OAT_TARGETS)
- @echo test-art-target-oat PASSED
+define declare-test-art-target-gtest
+.PHONY: test-art-target-gtest$(1)
+test-art-target-gtest$(1): $(ART_TARGET_GTEST_TARGETS$(1))
+ @echo test-art-target-gtest$(1) PASSED
+endef
+$(eval $(call call-art-multi-target-rule,declare-test-art-target-gtest,test-art-target-gtest))
+
+
+define declare-test-art-target-oat
+.PHONY: test-art-target-oat$(1)
+test-art-target-oat$(1): $(ART_TEST_TARGET_OAT_TARGETS$(1))
+ @echo test-art-target-oat$(1) PASSED
+endef
+$(eval $(call call-art-multi-target-rule,declare-test-art-target-oat,test-art-target-oat))
+
define declare-test-art-target-run-test-impl
+$(2)run_test_$(1) :=
+ifeq ($($(2)ART_PHONY_TEST_TARGET_SUFFIX),64)
+ $(2)run_test_$(1) := --64
+endif
.PHONY: test-art-target-run-test-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-run-test-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-sync $(DX) $(HOST_OUT_EXECUTABLES)/jasmin
- DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) art/test/run-test $(DALVIKVM_FLAGS) $(1) $(3)
+ DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) art/test/run-test $(DALVIKVM_FLAGS) $$($(2)run_test_$(1)) $(1)
@echo test-art-target-run-test-$(1)$($(2)ART_PHONY_TEST_TARGET_SUFFIX) PASSED
endef
define declare-test-art-target-run-test
ifdef TARGET_2ND_ARCH
- $(call declare-test-art-target-run-test-impl,$(1),2ND_,)
+ $(call declare-test-art-target-run-test-impl,$(1),2ND_)
+
+ TEST_ART_TARGET_RUN_TEST_TARGETS$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += test-art-target-run-test-$(1)$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
ifneq ($(ART_PHONY_TEST_TARGET_SUFFIX),)
# Link primary to non-suffix
test-art-target-run-test-$(1): test-art-target-run-test-$(1)$(ART_PHONY_TEST_TARGET_SUFFIX)
endif
endif
- $(call declare-test-art-target-run-test-impl,$(1),,--$(ART_TARGET_BINARY_SUFFIX))
+ $(call declare-test-art-target-run-test-impl,$(1),)
- TEST_ART_TARGET_RUN_TEST_TARGETS += test-art-target-run-test-$(1)
+ TEST_ART_TARGET_RUN_TEST_TARGETS$(ART_PHONY_TEST_TARGET_SUFFIX) += test-art-target-run-test-$(1)$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-run-test-$(1): test-art-host-run-test-$(1) test-art-target-run-test-$(1)
@@ -256,9 +289,14 @@ endef
$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call declare-test-art-target-run-test,$(test))))
-.PHONY: test-art-target-run-test
-test-art-target-run-test: $(TEST_ART_TARGET_RUN_TEST_TARGETS)
- @echo test-art-target-run-test PASSED
+
+define declare-test-art-target-run-test
+.PHONY: test-art-target-run-test$(1)
+test-art-target-run-test$(1): $(TEST_ART_TARGET_RUN_TEST_TARGETS$(1))
+ @echo test-art-target-run-test$(1) PASSED
+endef
+$(eval $(call call-art-multi-target-rule,declare-test-art-target-run-test,test-art-target-run-test))
+
########################################################################
# oat-target and oat-target-sync targets
@@ -286,7 +324,12 @@ oat-target-$(1): $$(OUT_OAT_FILE)
$$(OUT_OAT_FILE): $(PRODUCT_OUT)/$(1) $(DEFAULT_DEX_PREOPT_BUILT_IMAGE) $(DEX2OATD_DEPENDENCY)
@mkdir -p $$(dir $$@)
- $(DEX2OATD) --runtime-arg -Xms64m --runtime-arg -Xmx64m --boot-image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE) --dex-file=$(PRODUCT_OUT)/$(1) --dex-location=/$(1) --oat-file=$$@ --instruction-set=$(TARGET_ARCH) --instruction-set-features=$(TARGET_INSTRUCTION_SET_FEATURES) --android-root=$(PRODUCT_OUT)/system
+ $(DEX2OATD) --runtime-arg -Xms64m --runtime-arg -Xmx64m \
+ --boot-image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE) --dex-file=$(PRODUCT_OUT)/$(1) \
+ --dex-location=/$(1) --oat-file=$$@ \
+ --instruction-set=$(DEX2OAT_TARGET_ARCH) \
+ --instruction-set-features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
+ --android-root=$(PRODUCT_OUT)/system
endif
diff --git a/build/Android.common.mk b/build/Android.common.mk
index b9a297b3c7..aaa1490ab3 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -254,6 +254,14 @@ ART_HOST_CFLAGS += -Wthread-safety
# To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16"
# ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs
+# Additional CPU-specific CFLAGS.
+ifeq ($(TARGET_ARCH),arm)
+ ifneq ($(filter cortex-a15, $(TARGET_CPU_VARIANT)),)
+ # Fake an ARM feature for LPAE support.
+ ART_TARGET_CFLAGS += -D__ARM_FEATURE_LPAE=1
+ endif
+endif
+
ART_HOST_NON_DEBUG_CFLAGS := $(art_non_debug_cflags)
ART_TARGET_NON_DEBUG_CFLAGS := $(art_non_debug_cflags)
@@ -291,4 +299,71 @@ ifeq ($(ART_BUILD_HOST_DEBUG),true)
ART_BUILD_DEBUG := true
endif
+# Helper function to call a function twice with a target suffix
+# $(1): The generator function for the rules
+# Has one argument, the suffix
+define call-art-multi-target
+ $(call $(1),$(ART_PHONY_TEST_TARGET_SUFFIX))
+
+ ifdef TARGET_2ND_ARCH
+ $(call $(1),$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))
+ endif
+endef
+
+# Helper function to combine two variables with suffixes together.
+# $(1): The base name.
+define combine-art-multi-target-var
+ ifdef TARGET_2ND_ARCH
+ ifneq ($(ART_PHONY_TEST_TARGET_SUFFIX),)
+ ifneq ($(2ND_ART_PHONY_TEST_TARGET_SUFFIX),)
+$(1) := $($(1)$(ART_PHONY_TEST_TARGET_SUFFIX)) $($(1)$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))
+ endif
+ endif
+ endif
+endef
+
+
+# Helper function to define a variable twice with a target suffix. Assume the name generated is
+# derived from $(2) so we can create a combined var.
+# $(1): The generator function for the rules
+# Has one argument, the suffix
+define call-art-multi-target-var
+ $(call $(1),$(ART_PHONY_TEST_TARGET_SUFFIX))
+
+ ifdef TARGET_2ND_ARCH
+ $(call $(1),$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))
+
+ # Link both together, if it makes sense
+ ifneq ($(ART_PHONY_TEST_TARGET_SUFFIX),)
+ ifneq ($(2ND_ART_PHONY_TEST_TARGET_SUFFIX),)
+$(2) := $(2)$(ART_PHONY_TEST_TARGET_SUFFIX) $(2)$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ endif
+ endif
+
+ endif
+endef
+
+# Helper function to call a function twice with a target suffix. Assume it generates make rules
+# with the given name, and link them.
+# $(1): The generator function for the rules
+# Has one argument, the suffix
+# $(2): The base rule name, necessary for the link
+# We assume we can link the names together easily...
+define call-art-multi-target-rule
+ $(call $(1),$(ART_PHONY_TEST_TARGET_SUFFIX))
+
+ ifdef TARGET_2ND_ARCH
+ $(call $(1),$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))
+
+ # Link both together, if it makes sense
+ ifneq ($(ART_PHONY_TEST_TARGET_SUFFIX),)
+ ifneq ($(2ND_ART_PHONY_TEST_TARGET_SUFFIX),)
+.PHONY: $(2)
+$(2): $(2)$(ART_PHONY_TEST_TARGET_SUFFIX) $(2)$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ endif
+ endif
+ endif
+endef
+
+
endif # ANDROID_COMMON_MK
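(Illustrative aside, not part of the patch: the following minimal sketch shows how the helpers above expand. The generator demo-rule, the demo targets, the DEMO_VAR variables, and the suffix values 64/32 are all assumptions made up for this example.)

# Assumed setup -- hypothetical values.
ART_PHONY_TEST_TARGET_SUFFIX := 64
2ND_ART_PHONY_TEST_TARGET_SUFFIX := 32
TARGET_2ND_ARCH := arm

# A generator taking the suffix as $(1), in the style used by art/Android.mk.
define demo-rule
.PHONY: demo$(1)
demo$(1):
	@echo demo$(1) PASSED
endef

# Expands demo-rule for both suffixes, then emits ".PHONY: demo" and
# "demo: demo64 demo32", so a plain "make demo" runs both variants.
$(eval $(call call-art-multi-target-rule,demo-rule,demo))

# With DEMO_VAR64 and DEMO_VAR32 already set, this defines
# DEMO_VAR := $(DEMO_VAR64) $(DEMO_VAR32).
$(eval $(call combine-art-multi-target-var,DEMO_VAR))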
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index b6e6fac9be..23583915ff 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -17,6 +17,8 @@
LOCAL_PATH := art
RUNTIME_GTEST_COMMON_SRC_FILES := \
+ runtime/arch/arch_test.cc \
+ runtime/arch/stub_test.cc \
runtime/barrier_test.cc \
runtime/base/bit_field_test.cc \
runtime/base/bit_vector_test.cc \
@@ -76,6 +78,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
compiler/optimizing/codegen_test.cc \
compiler/optimizing/dominator_test.cc \
compiler/optimizing/pretty_printer_test.cc \
+ compiler/optimizing/ssa_test.cc \
compiler/output_stream_test.cc \
compiler/utils/arena_allocator_test.cc \
compiler/utils/dedupe_set_test.cc \
@@ -105,10 +108,12 @@ COMPILER_GTEST_HOST_SRC_FILES := \
compiler/utils/x86/assembler_x86_test.cc
ART_HOST_GTEST_EXECUTABLES :=
-ART_TARGET_GTEST_EXECUTABLES :=
+ART_TARGET_GTEST_EXECUTABLES$(ART_PHONY_TEST_TARGET_SUFFIX) :=
+ART_TARGET_GTEST_EXECUTABLES$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) :=
ART_HOST_GTEST_TARGETS :=
ART_HOST_VALGRIND_GTEST_TARGETS :=
-ART_TARGET_GTEST_TARGETS :=
+ART_TARGET_GTEST_TARGETS$(ART_PHONY_TEST_TARGET_SUFFIX) :=
+ART_TARGET_GTEST_TARGETS$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) :=
ART_TEST_CFLAGS :=
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
@@ -127,7 +132,7 @@ $$(art_gtest_target)$($(1)ART_PHONY_TEST_TARGET_SUFFIX): $($(1)ART_NATIVETEST_OU
$(hide) (adb pull $($(1)ART_TEST_DIR)/$$@ /tmp/ && echo $$@ PASSED) || (echo $$@ FAILED && exit 1)
$(hide) rm /tmp/$$@
- ART_TARGET_GTEST_TARGETS += $$(art_gtest_target)$($(1)ART_PHONY_TEST_TARGET_SUFFIX)
+ ART_TARGET_GTEST_TARGETS$($(1)ART_PHONY_TEST_TARGET_SUFFIX) += $$(art_gtest_target)$($(1)ART_PHONY_TEST_TARGET_SUFFIX)
endef
@@ -182,12 +187,15 @@ define build-art-test
LOCAL_MULTILIB := both
include art/build/Android.libcxx.mk
include $(BUILD_EXECUTABLE)
- ART_TARGET_GTEST_EXECUTABLES += $$(art_gtest_exe)
+
+ ART_TARGET_GTEST_EXECUTABLES$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_NATIVETEST_OUT)/$$(LOCAL_MODULE)
art_gtest_target := test-art-$$(art_target_or_host)-gtest-$$(art_gtest_name)
ifdef TARGET_2ND_ARCH
$(call build-art-test-make-target,2ND_)
+ ART_TARGET_GTEST_EXECUTABLES$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(2ND_ART_NATIVETEST_OUT)/$$(LOCAL_MODULE)
+
# Bind the primary to the non-suffix rule
ifneq ($(ART_PHONY_TEST_TARGET_SUFFIX),)
$$(art_gtest_target): $$(art_gtest_target)$(ART_PHONY_TEST_TARGET_SUFFIX)
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index cb45a8529e..9d7579de93 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -63,6 +63,9 @@ $$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENC
--oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) \
--instruction-set=$$($(1)TARGET_ARCH) --instruction-set-features=$$(TARGET_INSTRUCTION_SET_FEATURES) --android-root=$$(PRODUCT_OUT)/system
+# This "renaming" eases declaration in art/Android.mk
+TARGET_CORE_IMG_OUT$($(1)ART_PHONY_TEST_TARGET_SUFFIX) := $($(1)TARGET_CORE_IMG_OUT)
+
$$($(1)TARGET_CORE_OAT_OUT): $$($(1)TARGET_CORE_IMG_OUT)
endef
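(For concreteness -- suffix values 64/32 assumed, not part of the patch -- the renaming above expands to one assignment per instantiation:)

TARGET_CORE_IMG_OUT64 := $(TARGET_CORE_IMG_OUT)      # primary arch, $(1) empty
TARGET_CORE_IMG_OUT32 := $(2ND_TARGET_CORE_IMG_OUT)  # secondary arch, $(1) = 2ND_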
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 6d656e63f1..e3201e7f8b 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -78,6 +78,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/code_generator_x86.cc \
optimizing/nodes.cc \
optimizing/optimizing_compiler.cc \
+ optimizing/ssa_builder.cc \
trampolines/trampoline_compiler.cc \
utils/arena_allocator.cc \
utils/arena_bit_vector.cc \
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index fdf09a50a6..8bba84a9c1 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -132,34 +132,24 @@ static inline InstructionSetFeatures ParseFeatureList(std::string str) {
class CommonCompilerTest : public CommonRuntimeTest {
public:
- static void MakeExecutable(const std::vector<uint8_t>& code) {
- CHECK_NE(code.size(), 0U);
- MakeExecutable(&code[0], code.size());
- }
-
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const uint8_t* mapping_table,
- const uint8_t* vmap_table,
const uint8_t* gc_map) {
+ CHECK(code != nullptr);
const byte* base;
- uint32_t code_offset, mapping_table_offset, vmap_table_offset, gc_map_offset;
- if (mapping_table == nullptr && vmap_table == nullptr && gc_map == nullptr) {
+ uint32_t code_offset, gc_map_offset;
+ if (gc_map == nullptr) {
base = reinterpret_cast<const byte*>(code); // Base of data points at code.
base -= kPointerSize; // Move backward so that code_offset != 0.
code_offset = kPointerSize;
- mapping_table_offset = 0;
- vmap_table_offset = 0;
gc_map_offset = 0;
} else {
// TODO: 64bit support.
base = nullptr; // Base of data in oat file, i.e. 0.
code_offset = PointerToLowMemUInt32(code);
- mapping_table_offset = PointerToLowMemUInt32(mapping_table);
- vmap_table_offset = PointerToLowMemUInt32(vmap_table);
gc_map_offset = PointerToLowMemUInt32(gc_map);
}
return OatFile::OatMethod(base,
@@ -167,8 +157,6 @@ class CommonCompilerTest : public CommonRuntimeTest {
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
- mapping_table_offset,
- vmap_table_offset,
gc_map_offset);
}
@@ -185,19 +173,44 @@ class CommonCompilerTest : public CommonRuntimeTest {
}
if (compiled_method != nullptr) {
const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
- if (code == nullptr) {
+ const void* code_ptr;
+ if (code != nullptr) {
+ uint32_t code_size = code->size();
+ CHECK_NE(0u, code_size);
+ const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
+ uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+ : sizeof(OatMethodHeader) + vmap_table.size();
+ const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
+ uint32_t mapping_table_offset = mapping_table.empty() ? 0u
+ : sizeof(OatMethodHeader) + vmap_table.size() + mapping_table.size();
+ OatMethodHeader method_header(vmap_table_offset, mapping_table_offset, code_size);
+
+ header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
+ std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
+ size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table.size();
+ size_t code_offset = compiled_method->AlignCode(size - code_size);
+ size_t padding = code_offset - (size - code_size);
+ chunk->reserve(padding + size);
+ chunk->resize(sizeof(method_header));
+ memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
+ chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+ chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
+ chunk->insert(chunk->begin(), padding, 0);
+ chunk->insert(chunk->end(), code->begin(), code->end());
+ CHECK_EQ(padding + size, chunk->size());
+ code_ptr = &(*chunk)[code_offset];
+ } else {
code = compiled_method->GetPortableCode();
+ code_ptr = &(*code)[0];
}
- MakeExecutable(*code);
- const void* method_code = CompiledMethod::CodePointer(&(*code)[0],
+ MakeExecutable(code_ptr, code->size());
+ const void* method_code = CompiledMethod::CodePointer(code_ptr,
compiled_method->GetInstructionSet());
LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
OatFile::OatMethod oat_method = CreateOatMethod(method_code,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
compiled_method->GetFpSpillMask(),
- &compiled_method->GetMappingTable()[0],
- &compiled_method->GetVmapTable()[0],
nullptr);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
@@ -211,8 +224,6 @@ class CommonCompilerTest : public CommonRuntimeTest {
kStackAlignment,
0,
0,
- nullptr,
- nullptr,
nullptr);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
@@ -230,8 +241,6 @@ class CommonCompilerTest : public CommonRuntimeTest {
sirt_size,
callee_save_method->GetCoreSpillMask(),
callee_save_method->GetFpSpillMask(),
- nullptr,
- nullptr,
nullptr);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
@@ -436,6 +445,9 @@ class CommonCompilerTest : public CommonRuntimeTest {
private:
UniquePtr<MemMap> image_reservation_;
+
+ // Chunks must not move their storage after being created - use the node-based std::list.
+ std::list<std::vector<uint8_t> > header_code_and_maps_chunks_;
};
} // namespace art
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 8a88d618cc..ba4b5c356a 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -159,8 +159,10 @@ enum AssemblerStatus {
};
enum OpSize {
- kWord,
- kLong,
+ kWord, // Natural word size of target (32/64).
+ k32,
+ k64,
+ kReference, // Object reference; compressed on 64-bit targets.
kSingle,
kDouble,
kUnsignedHalf,
@@ -323,8 +325,6 @@ enum X86ConditionCode {
std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
enum ThrowKind {
- kThrowArrayBounds,
- kThrowConstantArrayBounds,
kThrowNoSuchMethod,
};
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 1bf5fce989..ed2ecace36 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -181,6 +181,17 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
(cu.enable_debug & (1 << kDebugVerbose));
}
+ if (gVerboseMethods.size() != 0) {
+ cu.verbose = false;
+ for (size_t i = 0; i < gVerboseMethods.size(); ++i) {
+ if (PrettyMethod(method_idx, dex_file).find(gVerboseMethods[i])
+ != std::string::npos) {
+ cu.verbose = true;
+ break;
+ }
+ }
+ }
+
/*
* TODO: rework handling of optimization and debug flags. Should we split out
* MIR and backend flags? Need command-line setting as well.
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 51419f4586..937e2585ef 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -42,6 +42,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
MIR* mir;
for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ // Skip pass if BB has MIR without SSA representation.
+ if (mir->ssa_rep == NULL) {
+ return;
+ }
+
uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
DecodedInstruction *d_insn = &mir->dalvikInsn;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index d0d0e6b3a7..b374ed861e 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -128,7 +128,7 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
- LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);
+ LoadBaseIndexed(table_base, keyReg, disp_reg, 2, k32);
// ..and go! NOTE: No instruction set switch here - must stay Thumb2
LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
@@ -180,6 +180,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
*/
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
+ // FIXME: need separate LoadValues for object references.
LoadValueDirectFixed(rl_src, rs_r0); // Get obj
LockCallTemps(); // Prepare for explicit register usage
constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15.
@@ -193,7 +194,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
- LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
+ Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
MarkPossibleNullPointerException(opt_flags);
LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
@@ -219,7 +220,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
} else {
// Explicit null-check as slow-path is entered using an IT.
GenNullCheck(rs_r0, opt_flags);
- LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
+ Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
MarkPossibleNullPointerException(opt_flags);
OpRegImm(kOpCmp, rs_r1, 0);
@@ -248,7 +249,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
LoadValueDirectFixed(rl_src, rs_r0); // Get obj
LockCallTemps(); // Prepare for explicit register usage
LIR* null_check_branch = nullptr;
- LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
+ Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15.
if (kArchVariantHasGoodBranchPredictor) {
if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
@@ -259,11 +260,11 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
- LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+ Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
MarkPossibleNullPointerException(opt_flags);
LoadConstantNoClobber(rs_r3, 0);
LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
- StoreWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
+ Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -284,14 +285,14 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// Explicit null-check as slow-path is entered using an IT.
GenNullCheck(rs_r0, opt_flags);
- LoadWordDisp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock
+ Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock
MarkPossibleNullPointerException(opt_flags);
- LoadWordDisp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
+ Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
LoadConstantNoClobber(rs_r3, 0);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
OpRegReg(kOpCmp, rs_r1, rs_r2);
LIR* it = OpIT(kCondEq, "EE");
- StoreWordDisp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
+ Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
// Go expensive route - UnlockObjectFromCode(obj);
LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
rs_rARM_LR);
@@ -307,9 +308,9 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
RegStorage reset_reg = AllocTemp();
- LoadWordDisp(rs_rARM_SELF, ex_offset, rl_result.reg);
+ Load32Disp(rs_rARM_SELF, ex_offset, rl_result.reg);
LoadConstant(reset_reg, 0);
- StoreWordDisp(rs_rARM_SELF, ex_offset, reset_reg);
+ Store32Disp(rs_rARM_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
@@ -354,7 +355,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
if (!skip_overflow_check) {
if (Runtime::Current()->ExplicitStackOverflowChecks()) {
/* Load stack limit */
- LoadWordDisp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
+ Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
}
}
/* Spill core callee saves */
@@ -391,6 +392,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
// Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
// codegen and target are in thumb2 mode.
+ // NOTE: native pointer.
m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
}
@@ -421,7 +423,7 @@ void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// a sub instruction. Otherwise we will get a temp allocation and the
// code size will increase.
OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, Thread::kStackOverflowReservedBytes);
- LoadWordDisp(rs_r12, 0, rs_r12);
+ Load32Disp(rs_r12, 0, rs_r12);
MarkPossibleStackOverflowException();
OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
}
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index b0bc11d458..646859c03b 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -126,8 +126,6 @@ class ArmMir2Lir FINAL : public Mir2Lir {
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
- LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
- ThrowKind kind);
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -162,7 +160,7 @@ class ArmMir2Lir FINAL : public Mir2Lir {
LIR* OpMem(OpKind op, RegStorage r_base, int disp);
LIR* OpPcRelLoad(RegStorage reg, LIR* target);
LIR* OpReg(OpKind op, RegStorage r_dest_src);
- LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index c876b3ac69..a2d6373622 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -314,11 +314,11 @@ LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_va
/*
* A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
* compare-and-branch if zero is ideal if it will reach. However, because null checks
- * branch forward to a launch pad, they will frequently not reach - and thus have to
+ * branch forward to a slow path, they will frequently not reach - and thus have to
* be converted to a long form during assembly (which will trigger another assembly
* pass). Here we estimate the branch distance for checks, and if large directly
* generate the long form in an attempt to avoid an extra assembly pass.
- * TODO: consider interspersing launchpads in code following unconditional branches.
+ * TODO: consider interspersing slowpaths in code following unconditional branches.
*/
bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
@@ -361,37 +361,40 @@ LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
return res;
}
-LIR* ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
- LIR* res = OpRegCopyNoInsert(r_dest, r_src);
- AppendLIR(res);
- return res;
+void ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ if (r_dest != r_src) {
+ LIR* res = OpRegCopyNoInsert(r_dest, r_src);
+ AppendLIR(res);
+ }
}
void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
- bool dest_fp = ARM_FPREG(r_dest.GetLowReg());
- bool src_fp = ARM_FPREG(r_src.GetLowReg());
- if (dest_fp) {
- if (src_fp) {
- // FIXME: handle 64-bit solo's here.
- OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
- RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
- } else {
- NewLIR3(kThumb2Fmdrr, S2d(r_dest.GetLowReg(), r_dest.GetHighReg()),
- r_src.GetLowReg(), r_src.GetHighReg());
- }
- } else {
- if (src_fp) {
- NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), S2d(r_src.GetLowReg(),
- r_src.GetHighReg()));
+ if (r_dest != r_src) {
+ bool dest_fp = ARM_FPREG(r_dest.GetLowReg());
+ bool src_fp = ARM_FPREG(r_src.GetLowReg());
+ if (dest_fp) {
+ if (src_fp) {
+ // FIXME: handle 64-bit solo's here.
+ OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+ RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
+ } else {
+ NewLIR3(kThumb2Fmdrr, S2d(r_dest.GetLowReg(), r_dest.GetHighReg()),
+ r_src.GetLowReg(), r_src.GetHighReg());
+ }
} else {
- // Handle overlap
- if (r_src.GetHighReg() == r_dest.GetLowReg()) {
- DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
- OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ if (src_fp) {
+ NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), S2d(r_src.GetLowReg(),
+ r_src.GetHighReg()));
} else {
- OpRegCopy(r_dest.GetLow(), r_src.GetLow());
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ // Handle overlap
+ if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ } else {
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ }
}
}
}
@@ -608,12 +611,6 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
return true;
}
-LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
- int offset, ThrowKind kind) {
- LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
- return NULL;
-}
-
RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, bool check_zero) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
@@ -684,18 +681,18 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (size == kLong) {
+ if (size == k64) {
// Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
- LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
- LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
+ Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
+ Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
} else {
- LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
- LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
+ Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
+ Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
}
StoreValueWide(rl_dest, rl_result);
} else {
- DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
+ DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
@@ -708,13 +705,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_src_value = info->args[2]; // [size] value
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
- if (size == kLong) {
+ if (size == k64) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), kWord);
- StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), kWord);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
+ StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
} else {
- DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
+ DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned store with STR and STRSH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
@@ -1148,7 +1145,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.reg, len_offset, reg_len);
+ Load32Disp(rl_array.reg, len_offset, reg_len);
MarkPossibleNullPointerException(opt_flags);
} else {
ForceImplicitNullCheck(rl_array.reg, opt_flags);
@@ -1167,9 +1164,9 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (needs_range_check) {
if (constant_index) {
- GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
+ GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
}
FreeTemp(reg_len);
}
@@ -1196,7 +1193,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
@@ -1217,7 +1214,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
bool constant_index = rl_index.is_const;
int data_offset;
- if (size == kLong || size == kDouble) {
+ if (size == k64 || size == kDouble) {
data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -1254,7 +1251,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
/* Get len */
- LoadWordDisp(rl_array.reg, len_offset, reg_len);
+ Load32Disp(rl_array.reg, len_offset, reg_len);
MarkPossibleNullPointerException(opt_flags);
} else {
ForceImplicitNullCheck(rl_array.reg, opt_flags);
@@ -1271,9 +1268,9 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
if (needs_range_check) {
if (constant_index) {
- GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
+ GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.reg, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
}
FreeTemp(reg_len);
}
@@ -1289,7 +1286,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 1053a8fc41..305e89ba92 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -656,7 +656,7 @@ void ArmMir2Lir::FlushReg(RegStorage reg) {
if (info->live && info->dirty) {
info->dirty = false;
int v_reg = mir_graph_->SRegToVReg(info->s_reg);
- StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, kWord);
+ StoreBaseDisp(rs_rARM_SP, VRegOffset(v_reg), reg, k32);
}
}
@@ -738,8 +738,8 @@ RegStorage ArmMir2Lir::LoadHelper(ThreadOffset<4> offset) {
LIR* ArmMir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = rs_r0;
- LoadWordDisp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
- LIR* load2 = LoadWordDisp(tmp, 0, tmp);
+ Load32Disp(rs_rARM_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+ LIR* load2 = Load32Disp(tmp, 0, tmp);
return load2;
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 70cbdd2e31..2e64f74235 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -699,23 +699,24 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
if (ARM_FPREG(r_dest.GetReg())) {
if (ARM_SINGLEREG(r_dest.GetReg())) {
- DCHECK((size == kWord) || (size == kSingle));
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
opcode = kThumb2Vldrs;
size = kSingle;
} else {
DCHECK(ARM_DOUBLEREG(r_dest.GetReg()));
- DCHECK((size == kLong) || (size == kDouble));
+ DCHECK((size == k64) || (size == kDouble));
DCHECK_EQ((r_dest.GetReg() & 0x1), 0);
opcode = kThumb2Vldrd;
size = kDouble;
}
} else {
if (size == kSingle)
- size = kWord;
+ size = k32;
}
switch (size) {
case kDouble: // fall-through
+ // Intentional fall-through.
case kSingle:
reg_ptr = AllocTemp();
if (scale) {
@@ -727,7 +728,9 @@ LIR* ArmMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
FreeTemp(reg_ptr);
return load;
- case kWord:
+ case k32:
+ // Intentional fall-through.
+ case kReference:
opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
break;
case kUnsignedHalf:
@@ -764,23 +767,24 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
if (ARM_FPREG(r_src.GetReg())) {
if (ARM_SINGLEREG(r_src.GetReg())) {
- DCHECK((size == kWord) || (size == kSingle));
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
opcode = kThumb2Vstrs;
size = kSingle;
} else {
DCHECK(ARM_DOUBLEREG(r_src.GetReg()));
- DCHECK((size == kLong) || (size == kDouble));
+ DCHECK((size == k64) || (size == kDouble));
DCHECK_EQ((r_src.GetReg() & 0x1), 0);
opcode = kThumb2Vstrd;
size = kDouble;
}
} else {
if (size == kSingle)
- size = kWord;
+ size = k32;
}
switch (size) {
case kDouble: // fall-through
+ // Intentional fall-through.
case kSingle:
reg_ptr = AllocTemp();
if (scale) {
@@ -792,14 +796,18 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
FreeTemp(reg_ptr);
return store;
- case kWord:
+ case k32:
+ // Intentional fall-through.
+ case kReference:
opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
break;
case kUnsignedHalf:
+ // Intentional fall-through.
case kSignedHalf:
opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
break;
case kUnsignedByte:
+ // Intentional fall-through.
case kSignedByte:
opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
break;
@@ -832,7 +840,8 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
bool null_pointer_safepoint = false;
switch (size) {
case kDouble:
- case kLong:
+ // Intentional fall-through.
+ case k64:
if (ARM_FPREG(dest_low_reg)) {
// Note: following change to avoid using pairs for doubles, replace conversion w/ DCHECK.
if (r_dest.IsPair()) {
@@ -849,15 +858,18 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(),
displacement >> 2);
} else {
- load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), kWord, s_reg);
+ load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), k32, s_reg);
null_pointer_safepoint = true;
- LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), kWord, INVALID_SREG);
+ LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), k32, INVALID_SREG);
}
already_generated = true;
}
break;
case kSingle:
- case kWord:
+ // Intentional fall-through.
+ case k32:
+ // Intentional fall-through.
+ case kReference:
if (ARM_FPREG(r_dest.GetReg())) {
opcode = kThumb2Vldrs;
if (displacement <= 1020) {
@@ -953,13 +965,17 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg) {
- DCHECK(!((size == kLong) || (size == kDouble)));
+ DCHECK(!((size == k64) || (size == kDouble)));
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg);
}
LIR* ArmMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
int s_reg) {
- return LoadBaseDispBody(r_base, displacement, r_dest, kLong, s_reg);
+ return LoadBaseDispBody(r_base, displacement, r_dest, k64, s_reg);
}
@@ -975,16 +991,16 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
int src_low_reg = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();
bool null_pointer_safepoint = false;
switch (size) {
- case kLong:
+ case k64:
case kDouble:
if (!ARM_FPREG(src_low_reg)) {
if (displacement <= 1020) {
store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(),
displacement >> 2);
} else {
- store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), kWord);
+ store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), k32);
null_pointer_safepoint = true;
- StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), kWord);
+ StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), k32);
}
already_generated = true;
} else {
@@ -1001,7 +1017,8 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
}
break;
case kSingle:
- case kWord:
+ case k32:
+ case kReference:
if (ARM_FPREG(r_src.GetReg())) {
DCHECK(ARM_SINGLEREG(r_src.GetReg()));
opcode = kThumb2Vstrs;
@@ -1082,12 +1099,16 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
LIR* ArmMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
- DCHECK(!((size == kLong) || (size == kDouble)));
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
+ DCHECK(!((size == k64) || (size == kDouble)));
return StoreBaseDispBody(r_base, displacement, r_src, size);
}
LIR* ArmMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDispBody(r_base, displacement, r_src, kLong);
+ return StoreBaseDispBody(r_base, displacement, r_src, k64);
}
LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 6e6b8f0a30..501e4e204b 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -274,6 +274,19 @@ void Mir2Lir::DumpPromotionMap() {
}
}
+void Mir2Lir::UpdateLIROffsets() {
+ // Only used for code listings.
+ size_t offset = 0;
+ for (LIR* lir = first_lir_insn_; lir != nullptr; lir = lir->next) {
+ lir->offset = offset;
+ if (!lir->flags.is_nop && !IsPseudoLirOp(lir->opcode)) {
+ offset += GetInsnSize(lir);
+ } else if (lir->opcode == kPseudoPseudoAlign4) {
+ offset += (offset & 0x2);
+ }
+ }
+}
+
/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
LOG(INFO) << "Dumping LIR insns for "
@@ -293,6 +306,7 @@ void Mir2Lir::CodegenDump() {
LOG(INFO) << "expansion factor: "
<< static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
DumpPromotionMap();
+ UpdateLIROffsets();
for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
DumpLIRInsn(lir_insn, 0);
}
@@ -926,7 +940,6 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
mir_graph_(mir_graph),
switch_tables_(arena, 4, kGrowableArraySwitchTables),
fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
- throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
tempreg_info_(arena, 20, kGrowableArrayMisc),
reginfo_map_(arena, 64, kGrowableArrayMisc),
@@ -1118,7 +1131,7 @@ bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
LIR *Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target) {
// Handle this for architectures that can't compare to memory.
- LoadWordDisp(base_reg, offset, temp_reg);
+ Load32Disp(base_reg, offset, temp_reg);
LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
return branch;
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 8806e68b93..3ec31ba7d9 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -212,8 +212,8 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0),
INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, 0),
- INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, kWord),
- INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, kLong),
+ INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, k32),
+ INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, k64),
INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf),
INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0),
@@ -241,12 +241,12 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0),
INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte),
- INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, kWord),
- INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, kLong),
+ INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, k32),
+ INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, k64),
INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf),
INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte),
- INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, kWord),
- INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, kLong),
+ INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, k32),
+ INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, k64),
INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, kSignedHalf),
INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 055f60c1c8..313174d218 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -19,12 +19,17 @@
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
+#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "verifier/method_verifier.h"
#include <functional>
namespace art {
+// Shortcuts to repeatedly used long types.
+typedef mirror::ObjectArray<mirror::Object> ObjArray;
+typedef mirror::ObjectArray<mirror::Class> ClassArray;
+
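
These typedefs back the OffsetOfElement() calls introduced below, replacing hand-rolled offset arithmetic. The two forms are assumed to agree while heap references are 4 bytes; the helper additionally stays correct if reference size and host pointer size diverge:

    // Byte offset of element i in an ObjectArray (i is illustrative).
    int32_t manual = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                     sizeof(mirror::Object*) * i;                // old, pointer-size-bound
    int32_t helper = ObjArray::OffsetOfElement(i).Int32Value();  // new, reference-size-aware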
/*
 * This source file contains "gen" codegen routines that should
* be applicable to most targets. Only mid-level support utilities
@@ -42,22 +47,6 @@ void Mir2Lir::GenBarrier() {
barrier->u.m.def_mask = ENCODE_ALL;
}
-LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind) {
- LIR* tgt;
- LIR* branch;
- if (c_code == kCondAl) {
- tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, RegStorage::kInvalidRegVal,
- imm_val);
- branch = OpUnconditionalBranch(tgt);
- } else {
- tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg.GetReg(), imm_val);
- branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
- }
- // Remember branch target - will process later
- throw_launchpads_.Insert(tgt);
- return branch;
-}
-
void Mir2Lir::GenDivZeroException() {
LIR* branch = OpUnconditionalBranch(nullptr);
AddDivZeroCheckSlowPath(branch);
@@ -91,6 +80,59 @@ void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
AddSlowPath(new (arena_) DivZeroCheckSlowPath(this, branch));
}
+void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
+ class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
+ public:
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
+ index_(index), length_(length) {
+ }
+
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel();
+ m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
+ index_, length_, true);
+ }
+
+ private:
+ const RegStorage index_;
+ const RegStorage length_;
+ };
+
+ LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
+ AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
+}
+
+void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
+ class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
+ public:
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
+ index_(index), length_(length) {
+ }
+
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel();
+
+ m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
+ m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
+ m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
+ m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
+ }
+
+ private:
+ const int32_t index_;
+ const RegStorage length_;
+ };
+
+ LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr);
+ AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch, index, length));
+}
+
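
These two overloads replace the throw-launchpad scheme (GenImmedCheck above, GenRegRegCheck and HandleThrowLaunchPads further down) with the LIRSlowPath pattern: the fast path emits a compare-and-branch to a null target, and the branch is patched when the slow path is emitted out of line. A usage sketch mirroring the MIPS GenArrayGet hunk later in this change (register names illustrative):

    RegStorage reg_len = AllocTemp();
    Load32Disp(rl_array.reg, mirror::Array::LengthOffset().Int32Value(), reg_len);
    GenArrayBoundsCheck(rl_index.reg, reg_len);  // cmp + deferred pThrowArrayBounds call
    FreeTemp(reg_len);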
LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
class NullCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
@@ -151,23 +193,12 @@ void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
// register with offset 0. This will cause a signal if the register contains 0 (null).
RegStorage tmp = AllocTemp();
// TODO: for Mips, would be best to use rZERO as the bogus register target.
- LIR* load = LoadWordDisp(reg, 0, tmp);
+ LIR* load = Load32Disp(reg, 0, tmp);
FreeTemp(tmp);
MarkSafepointPC(load);
}
}
-/* Perform check on two registers */
-LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2,
- ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1.GetReg(),
- reg2.GetReg());
- LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
- // Remember branch target - will process later
- throw_launchpads_.Insert(tgt);
- return branch;
-}
-
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
RegLocation rl_src2, LIR* taken,
LIR* fall_through) {
@@ -373,7 +404,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
for (int i = 0; i < elems; i++) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
+ Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
}
}
/*
@@ -410,8 +441,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
// Generate the copy loop. Going backwards for convenience
LIR* target = NewLIR0(kPseudoTargetLabel);
// Copy next element
- LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
- StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
+ LoadBaseIndexed(r_src, r_idx, r_val, 2, k32);
+ StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
FreeTemp(r_val);
OpDecAndBranch(kCondGe, r_idx, target);
if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
@@ -423,9 +454,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
// TUNING: interleave
for (int i = 0; i < elems; i++) {
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
- StoreBaseDisp(TargetReg(kRet0),
- mirror::Array::DataOffset(component_size).Int32Value() + i * 4,
- rl_arg.reg, kWord);
+ Store32Disp(TargetReg(kRet0),
+ mirror::Array::DataOffset(component_size).Int32Value() + i * 4, rl_arg.reg);
// If the LoadValue caused a temp to be allocated, free it
if (IsTemp(rl_arg.reg)) {
FreeTemp(rl_arg.reg);
@@ -476,7 +506,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
if (IsTemp(rl_method.reg)) {
FreeTemp(rl_method.reg);
}
@@ -493,9 +523,9 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
- LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * field_info.StorageIndex(), r_base);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
+ int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
+ LoadRefDisp(r_base, offset_of_field, r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -535,8 +565,10 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
}
if (is_long_or_double) {
StoreBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
+ } else if (rl_src.ref) {
+ StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
} else {
- StoreWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
+ Store32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg);
}
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
@@ -567,7 +599,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
@@ -580,9 +612,9 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
LoadCurrMethodDirect(r_method);
r_base = TargetReg(kArg0);
LockTemp(r_base);
- LoadWordDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
- LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * field_info.StorageIndex(), r_base);
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base);
+ int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
+ LoadRefDisp(r_base, offset_of_field, r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
if (!field_info.IsInitialized() &&
(mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
@@ -615,8 +647,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
if (is_long_or_double) {
LoadBaseDispWide(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, INVALID_SREG);
+ } else if (rl_result.ref) {
+ LoadRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
} else {
- LoadWordDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
+ Load32Disp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg);
}
FreeTemp(r_base);
@@ -675,82 +709,6 @@ void Mir2Lir::HandleSuspendLaunchPads() {
}
}
-void Mir2Lir::HandleThrowLaunchPads() {
- int num_elems = throw_launchpads_.Size();
- for (int i = 0; i < num_elems; i++) {
- ResetRegPool();
- ResetDefTracking();
- LIR* lab = throw_launchpads_.Get(i);
- current_dalvik_offset_ = lab->operands[1];
- AppendLIR(lab);
- ThreadOffset<4> func_offset(-1);
- int v1 = lab->operands[2];
- int v2 = lab->operands[3];
- const bool target_x86 = cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64;
- switch (lab->operands[0]) {
- case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
- // v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads.
- if (target_x86) {
- OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v1),
- mirror::Array::LengthOffset().Int32Value());
- } else {
- OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v1));
- }
- // Make sure the following LoadConstant doesn't mess with kArg1.
- LockTemp(TargetReg(kArg1));
- LoadConstant(TargetReg(kArg0), v2);
- func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
- break;
- case kThrowArrayBounds:
- // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
- if (v2 != TargetReg(kArg0).GetReg()) {
- OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
- if (target_x86) {
- // x86 leaves the array pointer in v2, so load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
- mirror::Array::LengthOffset().Int32Value());
- } else {
- OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
- }
- } else {
- if (v1 == TargetReg(kArg1).GetReg()) {
- // Swap v1 and v2, using kArg2 as a temp
- OpRegCopy(TargetReg(kArg2), RegStorage::Solo32(v1));
- if (target_x86) {
- // x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
- mirror::Array::LengthOffset().Int32Value());
- } else {
- OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
- }
- OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- } else {
- if (target_x86) {
- // x86 leaves the array pointer in v2; load the array length that the handler expects
- OpRegMem(kOpMov, TargetReg(kArg1), RegStorage::Solo32(v2),
- mirror::Array::LengthOffset().Int32Value());
- } else {
- OpRegCopy(TargetReg(kArg1), RegStorage::Solo32(v2));
- }
- OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
- }
- }
- func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds);
- break;
- case kThrowNoSuchMethod:
- OpRegCopy(TargetReg(kArg0), RegStorage::Solo32(v1));
- func_offset =
- QUICK_ENTRYPOINT_OFFSET(4, pThrowNoSuchMethod);
- break;
- default:
- LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
- }
- ClobberCallerSave();
- RegStorage r_tgt = CallHelperSetup(func_offset);
- CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */, true /* UseLink */);
- }
-}
-
void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
bool is_object) {
@@ -798,7 +756,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
GenNullCheck(rl_obj.reg, opt_flags);
- LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, kWord,
+ LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, k32,
rl_obj.s_reg_low);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
@@ -862,7 +820,7 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
// There might have been a store before this volatile one so insert StoreStore barrier.
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg, kWord);
+ Store32Disp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_src.reg);
MarkPossibleNullPointerException(opt_flags);
if (field_info.IsVolatile()) {
// A load might follow the volatile store so insert a StoreLoad barrier.
@@ -911,11 +869,9 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
 // We don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(rl_method.reg, dex_cache_offset, res_reg);
- int32_t offset_of_type =
- mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
- * type_idx);
- LoadWordDisp(res_reg, offset_of_type, rl_result.reg);
+ Load32Disp(rl_method.reg, dex_cache_offset, res_reg);
+ int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
+ Load32Disp(res_reg, offset_of_type, rl_result.reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
@@ -961,8 +917,8 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
/* NOTE: Most strings should be available at compile time */
- int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
- (sizeof(mirror::String*) * string_idx);
+ int32_t offset_of_string = mirror::ObjectArray<mirror::String>::OffsetOfElement(string_idx).
+ Int32Value();
if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
*cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
// slow path, resolve string if not in dex cache
@@ -980,11 +936,11 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
r_method = TargetReg(kArg2);
LoadCurrMethodDirect(r_method);
}
- LoadWordDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
- TargetReg(kArg0));
+ LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
+ TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
+ Load32Disp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
if (cu_->instruction_set == kThumb2 ||
cu_->instruction_set == kMips) {
// OpRegImm(kOpCmp, TargetReg(kRet0), 0); // Is resolved?
@@ -1034,8 +990,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
RegStorage res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadWordDisp(res_reg, offset_of_string, rl_result.reg);
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
+ Load32Disp(res_reg, offset_of_string, rl_result.reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -1120,19 +1076,18 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
- LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
- LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- check_class);
- LoadWordDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
- int32_t offset_of_type =
- mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
- (sizeof(mirror::Class*) * type_idx);
- LoadWordDisp(check_class, offset_of_type, check_class);
+ LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ check_class);
+ LoadRefDisp(object.reg, mirror::Object::ClassOffset().Int32Value(), object_class);
+ int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
LIR* ne_branchover = NULL;
+ // FIXME: what should we be comparing here? compressed or decompressed references?
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, check_class, object_class); // Same?
LIR* it = OpIT(kCondEq, ""); // if-convert the test
@@ -1178,17 +1133,15 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
- LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg);
- int32_t offset_of_type =
- mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
- * type_idx);
- LoadWordDisp(class_reg, offset_of_type, class_reg);
+ LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
+ int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -1212,7 +1165,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
LIR* branchover = NULL;
if (type_known_final) {
@@ -1315,16 +1268,14 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
- LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- class_reg);
+ LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ class_reg);
} else {
// Load dex cache entry into class_reg (kArg2)
- LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
- class_reg);
- int32_t offset_of_type =
- mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
- (sizeof(mirror::Class*) * type_idx);
- LoadWordDisp(class_reg, offset_of_type, class_reg);
+ LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ class_reg);
+ int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
// Need to test presence of type in dex cache at runtime
LIR* hop_branch = OpCmpImmBranch(kCondEq, class_reg, 0, NULL);
@@ -1372,8 +1323,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
GenerateTargetLabel();
if (load_) {
- m2l_->LoadWordDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
- m2l_->TargetReg(kArg1));
+ m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
+ m2l_->TargetReg(kArg1));
}
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetReg(kArg2),
m2l_->TargetReg(kArg1), true);
@@ -1399,7 +1350,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
/* load object->klass_ */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1), class_reg, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
@@ -1586,7 +1537,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenDivZeroCheck(rl_src2.reg);
+ GenDivZeroCheck(rl_src2.reg);
}
rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
@@ -1597,7 +1548,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenDivZeroCheck(rl_src2.reg);
+ GenDivZeroCheck(rl_src2.reg);
}
rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, op == kOpDiv);
done = true;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 4aae16d103..53b6ed420e 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -34,10 +34,10 @@ namespace art {
* and "op" calls may be used here.
*/
-void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
- class IntrinsicLaunchpadPath : public Mir2Lir::LIRSlowPath {
+void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
+ class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
public:
- IntrinsicLaunchpadPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
+ IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
: LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
}
@@ -57,7 +57,7 @@ void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
CallInfo* const info_;
};
- AddSlowPath(new (arena_) IntrinsicLaunchpadPath(this, info, branch, resume));
+ AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
}
/*
@@ -255,12 +255,27 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<4> helper_off
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
+void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
+ if (arg1.GetReg() == TargetReg(kArg0).GetReg()) {
+ if (arg0.GetReg() == TargetReg(kArg1).GetReg()) {
+ // Swap kArg0 and kArg1 with kArg2 as temp.
+ OpRegCopy(TargetReg(kArg2), arg1);
+ OpRegCopy(TargetReg(kArg0), arg0);
+ OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));
+ } else {
+ OpRegCopy(TargetReg(kArg1), arg1);
+ OpRegCopy(TargetReg(kArg0), arg0);
+ }
+ } else {
+ OpRegCopy(TargetReg(kArg0), arg0);
+ OpRegCopy(TargetReg(kArg1), arg1);
+ }
+}
+
void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<4> helper_offset, RegStorage arg0,
RegStorage arg1, bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg()); // check copy into arg0 won't clobber arg1
- OpRegCopy(TargetReg(kArg0), arg0);
- OpRegCopy(TargetReg(kArg1), arg1);
+ CopyToArgumentRegs(arg0, arg1);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
}
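
CopyToArgumentRegs turns the old DCHECK into handling for the ordering it used to forbid: arg0 already sitting in kArg1 while arg1 sits in kArg0, where the naive kArg0 <- arg0, kArg1 <- arg1 sequence would clobber arg1 before reading it. Trace of the swap branch for that worst case:

    // On entry: arg0 == kArg1, arg1 == kArg0.
    OpRegCopy(TargetReg(kArg2), arg1);              // save arg1 (old kArg0) in kArg2
    OpRegCopy(TargetReg(kArg0), arg0);              // kArg0 <- kArg1, arg0 in place
    OpRegCopy(TargetReg(kArg1), TargetReg(kArg2));  // kArg1 <- saved arg1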
@@ -268,9 +283,7 @@ void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset<4> helper_offset, RegStorage
void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<4> helper_offset, RegStorage arg0,
RegStorage arg1, int arg2, bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(helper_offset);
- DCHECK_NE(TargetReg(kArg0).GetReg(), arg1.GetReg()); // check copy into arg0 won't clobber arg1
- OpRegCopy(TargetReg(kArg0), arg0);
- OpRegCopy(TargetReg(kArg1), arg1);
+ CopyToArgumentRegs(arg0, arg1);
LoadConstant(TargetReg(kArg2), arg2);
ClobberCallerSave();
CallHelper(r_tgt, helper_offset, safepoint_pc);
@@ -347,7 +360,11 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
rl_src.reg = TargetReg(kArg0);
rl_src.home = false;
MarkLive(rl_src.reg, rl_src.s_reg_low);
- StoreValue(rl_method, rl_src);
+ if (rl_method.wide) {
+ StoreValueWide(rl_method, rl_src);
+ } else {
+ StoreValue(rl_method, rl_src);
+ }
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
@@ -412,16 +429,15 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
}
}
if (need_flush) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kWord);
+ Store32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), reg);
}
} else {
// If arriving in frame & promoted
if (v_map->core_location == kLocPhysReg) {
- LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
- RegStorage::Solo32(v_map->core_reg));
+ Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
}
if (v_map->fp_location == kLocPhysReg) {
- LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
+ Load32Disp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->FpReg));
}
}
}
@@ -463,9 +479,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
break;
case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadWordDisp(cg->TargetReg(kArg0),
- mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- cg->TargetReg(kArg0));
+ cg->LoadRefDisp(cg->TargetReg(kArg0),
+ mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ cg->TargetReg(kArg0));
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<unsigned int>(-1)) {
@@ -478,9 +494,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
break;
case 2: // Grab target method*
CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadWordDisp(cg->TargetReg(kArg0),
- mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- (target_method.dex_method_index * 4), cg->TargetReg(kArg0));
+ cg->LoadRefDisp(cg->TargetReg(kArg0),
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ (target_method.dex_method_index * 4), cg->TargetReg(kArg0));
break;
case 3: // Grab the code from the method*
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -524,18 +540,18 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
case 1: // Is "this" null? [use kArg1]
cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// get this->klass_ [use kArg1, set kInvokeTgt]
- cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
cg->MarkPossibleNullPointerException(info->opt_flags);
break;
 case 2: // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
- cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
break;
case 3: // Get target method [use kInvokeTgt, set kArg0]
- cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
- mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
- cg->TargetReg(kArg0));
+ cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
+ mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
+ cg->TargetReg(kArg0));
break;
case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
@@ -579,15 +595,17 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
case 2: // Is "this" null? [use kArg1]
cg->GenNullCheck(cg->TargetReg(kArg1), info->opt_flags);
// Get this->klass_ [use kArg1, set kInvokeTgt]
- cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ cg->LoadRefDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
cg->MarkPossibleNullPointerException(info->opt_flags);
break;
case 3: // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
- cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
- cg->TargetReg(kInvokeTgt));
+ // NOTE: native pointer.
+ cg->LoadRefDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
+ cg->TargetReg(kInvokeTgt));
break;
case 4: // Get target method [use kInvokeTgt, set kArg0]
+ // NOTE: native pointer.
cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
cg->TargetReg(kArg0));
@@ -740,11 +758,11 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
} else {
// kArg2 & rArg3 can safely be used here
reg = TargetReg(kArg3);
- LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
+ Load32Disp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
call_state = next_call_insn(cu_, info, call_state, target_method,
vtable_idx, direct_code, direct_method, type);
}
- StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
+ Store32Disp(TargetReg(kSp), (next_use + 1) * 4, reg);
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
direct_code, direct_method, type);
next_use++;
@@ -778,7 +796,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
StoreBaseDispWide(TargetReg(kSp), outs_offset, RegStorage::MakeRegPair(low_reg, high_reg));
next_use += 2;
} else {
- StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
+ Store32Disp(TargetReg(kSp), outs_offset, low_reg);
next_use++;
}
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
@@ -798,7 +816,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
RegStorage tmp = AllocTemp();
- LoadWordDisp(TargetReg(kArg1), 0, tmp);
+ Load32Disp(TargetReg(kArg1), 0, tmp);
MarkPossibleNullPointerException(info->opt_flags);
FreeTemp(tmp);
}
@@ -849,7 +867,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
} else {
loc = UpdateLoc(loc);
if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kWord);
+ Store32Disp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg);
}
next_arg++;
}
@@ -984,8 +1002,8 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
RegStorage temp = TargetReg(kArg3);
// Now load the argument VR and store to the outs.
- LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
- StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
+ Load32Disp(TargetReg(kSp), current_src_offset, temp);
+ Store32Disp(TargetReg(kSp), current_dest_offset, temp);
}
current_src_offset += bytes_to_move;
@@ -1014,7 +1032,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
RegStorage tmp = AllocTemp();
- LoadWordDisp(TargetReg(kArg1), 0, tmp);
+ Load32Disp(TargetReg(kArg1), 0, tmp);
MarkPossibleNullPointerException(info->opt_flags);
FreeTemp(tmp);
}
@@ -1074,14 +1092,14 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
reg_ptr = AllocTemp();
if (range_check) {
reg_max = AllocTemp();
- LoadWordDisp(rl_obj.reg, count_offset, reg_max);
+ Load32Disp(rl_obj.reg, count_offset, reg_max);
MarkPossibleNullPointerException(info->opt_flags);
}
- LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
+ Load32Disp(rl_obj.reg, offset_offset, reg_off);
MarkPossibleNullPointerException(info->opt_flags);
- LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
+ Load32Disp(rl_obj.reg, value_offset, reg_ptr);
if (range_check) {
- // Set up a launch pad to allow retry in case of bounds violation */
+ // Set up a slow path to allow retry in case of bounds violation.
OpRegReg(kOpCmp, rl_idx.reg, reg_max);
FreeTemp(reg_max);
range_check_branch = OpCondBranch(kCondUge, nullptr);
@@ -1102,8 +1120,8 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
}
reg_off = AllocTemp();
reg_ptr = AllocTemp();
- LoadWordDisp(rl_obj.reg, offset_offset, reg_off);
- LoadWordDisp(rl_obj.reg, value_offset, reg_ptr);
+ Load32Disp(rl_obj.reg, offset_offset, reg_off);
+ Load32Disp(rl_obj.reg, value_offset, reg_ptr);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
@@ -1128,7 +1146,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
if (range_check) {
DCHECK(range_check_branch != nullptr);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've already null checked.
- AddIntrinsicLaunchpad(info, range_check_branch);
+ AddIntrinsicSlowPath(info, range_check_branch);
}
return true;
}
@@ -1145,7 +1163,7 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
GenNullCheck(rl_obj.reg, info->opt_flags);
- LoadWordDisp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
+ Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
MarkPossibleNullPointerException(info->opt_flags);
if (is_empty) {
// dst = (dst == 0);
@@ -1169,9 +1187,9 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
return false;
}
RegLocation rl_src_i = info->args[0];
- RegLocation rl_dest = (size == kLong) ? InlineTargetWide(info) : InlineTarget(info); // result reg
+ RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info); // result reg
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (size == kLong) {
+ if (size == k64) {
RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
RegStorage r_i_low = rl_i.reg.GetLow();
if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
@@ -1186,8 +1204,8 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
}
StoreValueWide(rl_dest, rl_result);
} else {
- DCHECK(size == kWord || size == kSignedHalf);
- OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
+ DCHECK(size == k32 || size == kSignedHalf);
+ OpKind op = (size == k32) ? kOpRev : kOpRevsh;
RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
OpRegReg(op, rl_result.reg, rl_i.reg);
StoreValue(rl_dest, rl_result);
@@ -1339,7 +1357,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
DCHECK(high_code_point_branch != nullptr);
LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
- AddIntrinsicLaunchpad(info, high_code_point_branch, resume_tgt);
+ AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
} else {
DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
DCHECK(high_code_point_branch == nullptr);
@@ -1371,7 +1389,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// TUNING: check if rl_cmp.s_reg_low is already null checked
LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
- AddIntrinsicLaunchpad(info, cmp_null_check_branch);
+ AddIntrinsicSlowPath(info, cmp_null_check_branch);
// NOTE: not a safepoint
if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
OpReg(kOpBlx, r_tgt);
@@ -1389,7 +1407,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
ThreadOffset<4> offset = Thread::PeerOffset<4>();
if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
- LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
+ Load32Disp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
} else {
CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
@@ -1416,7 +1434,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
if (is_long) {
if (cu_->instruction_set == kX86) {
LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg.GetLow(),
- rl_result.reg.GetHigh(), kLong, INVALID_SREG);
+ rl_result.reg.GetHigh(), k64, INVALID_SREG);
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
@@ -1424,7 +1442,7 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
FreeTemp(rl_temp_offset.GetReg());
}
} else {
- LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, kWord);
+ LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
}
if (is_volatile) {
@@ -1464,7 +1482,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
rl_value = LoadValueWide(rl_src_value, kCoreReg);
if (cu_->instruction_set == kX86) {
StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg.GetLow(),
- rl_value.reg.GetHigh(), kLong, INVALID_SREG);
+ rl_value.reg.GetHigh(), k64, INVALID_SREG);
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
@@ -1473,7 +1491,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
}
} else {
rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, kWord);
+ StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 208eadde12..9808f7f36f 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -65,7 +65,7 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
} else {
// Lives in the frame, need to store.
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, kWord);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32);
}
if (!zero_reg.Valid()) {
FreeTemp(temp_reg);
@@ -74,15 +74,6 @@ void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
}
}
-/* Load a word at base + displacement. Displacement must be word multiple */
-LIR* Mir2Lir::LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, kWord, INVALID_SREG);
-}
-
-LIR* Mir2Lir::StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDisp(r_base, displacement, r_src, kWord);
-}
-
/*
* Load a Dalvik register into a physical register. Take care when
* using this routine, as it doesn't perform any bookkeeping regarding
@@ -93,11 +84,17 @@ void Mir2Lir::LoadValueDirect(RegLocation rl_src, RegStorage r_dest) {
if (rl_src.location == kLocPhysReg) {
OpRegCopy(r_dest, rl_src.reg);
} else if (IsInexpensiveConstant(rl_src)) {
+ // On 64-bit targets, will sign extend. Make sure constant reference is always NULL.
+ DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
+ if (rl_src.ref) {
+ LoadRefDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
+ } else {
+ Load32Disp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest);
+ }
}
}
@@ -194,7 +191,7 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) {
ResetDefLoc(rl_dest);
if (IsDirty(rl_dest.reg) && oat_live_out(rl_dest.s_reg_low)) {
def_start = last_lir_insn_;
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kWord);
+ Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
MarkClean(rl_dest);
def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -306,7 +303,7 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) {
if (IsDirty(rl_dest.reg) &&
oat_live_out(rl_dest.s_reg_low)) {
LIR *def_start = last_lir_insn_;
- StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg, kWord);
+ Store32Disp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), rl_dest.reg);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
if (!rl_dest.ref) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index a938478b3d..a237ac76b0 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,11 +112,11 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
- LoadWordDisp(r_base, 0, r_key);
+ Load32Disp(r_base, 0, r_key);
OpRegImm(kOpAdd, r_base, 8);
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
RegStorage r_disp = AllocTemp();
- LoadWordDisp(r_base, -4, r_disp);
+ Load32Disp(r_base, -4, r_disp);
OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
OpReg(kOpBx, rs_rRA);
@@ -200,7 +200,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
// Load the displacement from the switch table
RegStorage r_disp = AllocTemp();
- LoadBaseIndexed(r_base, r_key, r_disp, 2, kWord);
+ LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
// Add to rAP and go
OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
@@ -263,9 +263,9 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
RegStorage reset_reg = AllocTemp();
- LoadWordDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
+ Load32Disp(rs_rMIPS_SELF, ex_offset, rl_result.reg);
LoadConstant(reset_reg, 0);
- StoreWordDisp(rs_rMIPS_SELF, ex_offset, reset_reg);
+ Store32Disp(rs_rMIPS_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
@@ -277,6 +277,7 @@ void MipsMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+ // NOTE: native pointer.
LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
@@ -310,7 +311,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
RegStorage new_sp = AllocTemp();
if (!skip_overflow_check) {
/* Load stack limit */
- LoadWordDisp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ Load32Disp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
}
/* Spill core callee saves */
SpillCoreRegs();
@@ -328,7 +329,7 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
m2l_->ResetDefTracking();
GenerateTargetLabel();
// LR is offset 0 since we push in reverse order.
- m2l_->LoadWordDisp(rs_rMIPS_SP, 0, rs_rRA);
+ m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
m2l_->ClobberCallerSave();
ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 40641d670d..81d6782288 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -126,8 +126,6 @@ class MipsMir2Lir FINAL : public Mir2Lir {
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
- LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
- ThrowKind kind);
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
@@ -161,7 +159,7 @@ class MipsMir2Lir FINAL : public Mir2Lir {
LIR* OpMem(OpKind op, RegStorage r_base, int disp);
LIR* OpPcRelLoad(RegStorage reg, LIR* target);
LIR* OpReg(OpKind op, RegStorage r_dest_src);
- LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index ac0847f22d..7c0becd41a 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -177,37 +177,40 @@ LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
return res;
}
-LIR* MipsMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
- LIR *res = OpRegCopyNoInsert(r_dest, r_src);
- AppendLIR(res);
- return res;
+void MipsMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ if (r_dest != r_src) {
+ LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+ AppendLIR(res);
+ }
}
void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
- bool dest_fp = MIPS_FPREG(r_dest.GetLowReg());
- bool src_fp = MIPS_FPREG(r_src.GetLowReg());
- if (dest_fp) {
- if (src_fp) {
- // FIXME: handle this here - reserve OpRegCopy for 32-bit copies.
- OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
- RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
- } else {
- /* note the operands are swapped for the mtc1 instr */
- NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
- NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
- }
- } else {
- if (src_fp) {
- NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
- NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
+ if (r_dest != r_src) {
+ bool dest_fp = MIPS_FPREG(r_dest.GetLowReg());
+ bool src_fp = MIPS_FPREG(r_src.GetLowReg());
+ if (dest_fp) {
+ if (src_fp) {
+ // FIXME: handle this here - reserve OpRegCopy for 32-bit copies.
+ OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+ RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
+ } else {
+ /* note the operands are swapped for the mtc1 instr */
+ NewLIR2(kMipsMtc1, r_src.GetLowReg(), r_dest.GetLowReg());
+ NewLIR2(kMipsMtc1, r_src.GetHighReg(), r_dest.GetHighReg());
+ }
} else {
- // Handle overlap
- if (r_src.GetHighReg() == r_dest.GetLowReg()) {
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
- OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ if (src_fp) {
+ NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
+ NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
} else {
- OpRegCopy(r_dest.GetLow(), r_src.GetLow());
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ // Handle overlap
+ if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ } else {
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ }
}
}
}
@@ -221,12 +224,6 @@ void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
-LIR* MipsMir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
- int offset, ThrowKind kind) {
- LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
- return NULL;
-}
-
RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
bool is_div) {
NewLIR2(kMipsDiv, reg1.GetReg(), reg2.GetReg());
@@ -346,7 +343,7 @@ void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
RegStorage t_reg = AllocTemp();
OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
- GenDivZeroCheck(kCondEq);
+ GenDivZeroCheck(t_reg);
FreeTemp(t_reg);
}
@@ -480,7 +477,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_array = LoadValue(rl_array, kCoreReg);
rl_index = LoadValue(rl_index, kCoreReg);
- if (size == kLong || size == kDouble) {
+ if (size == k64 || size == kDouble) {
data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -495,12 +492,12 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.reg, len_offset, reg_len);
+ Load32Disp(rl_array.reg, len_offset, reg_len);
}
/* reg_ptr -> array data */
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg.GetReg());
- if ((size == kLong) || (size == kDouble)) {
+ if ((size == k64) || (size == kDouble)) {
if (scale) {
RegStorage r_new_index = AllocTemp();
OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
@@ -513,7 +510,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
LoadBaseDispWide(reg_ptr, 0, rl_result.reg, INVALID_SREG);
@@ -524,7 +521,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
@@ -544,7 +541,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
- if (size == kLong || size == kDouble) {
+ if (size == k64 || size == kDouble) {
data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -572,12 +569,12 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
/* Get len */
- LoadWordDisp(rl_array.reg, len_offset, reg_len);
+ Load32Disp(rl_array.reg, len_offset, reg_len);
}
/* reg_ptr -> array data */
OpRegImm(kOpAdd, reg_ptr, data_offset);
/* at this point, reg_ptr points to array, 2 live temps */
- if ((size == kLong) || (size == kDouble)) {
+ if ((size == k64) || (size == kDouble)) {
// TUNING: specific wide routine that can handle fp regs
if (scale) {
RegStorage r_new_index = AllocTemp();
@@ -590,7 +587,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_src = LoadValueWide(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
@@ -598,7 +595,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg, reg_len, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 3e02faed55..7f4cd5e242 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -369,7 +369,7 @@ void MipsMir2Lir::FlushReg(RegStorage reg) {
if (info->live && info->dirty) {
info->dirty = false;
int v_reg = mir_graph_->SRegToVReg(info->s_reg);
- StoreBaseDisp(rs_rMIPS_SP, VRegOffset(v_reg), reg, kWord);
+ Store32Disp(rs_rMIPS_SP, VRegOffset(v_reg), reg);
}
}
@@ -531,12 +531,14 @@ void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
* there is a trap in the shadow. Allocate a temp register.
*/
RegStorage MipsMir2Lir::LoadHelper(ThreadOffset<4> offset) {
+ // NOTE: native pointer.
LoadWordDisp(rs_rMIPS_SELF, offset.Int32Value(), rs_rT9);
return rs_rT9;
}
LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = AllocTemp();
+ // NOTE: native pointer.
LoadWordDisp(rs_rMIPS_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
LIR *inst = LoadWordDisp(tmp, 0, tmp);
FreeTemp(tmp);
@@ -553,7 +555,7 @@ void MipsMir2Lir::SpillCoreRegs() {
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
offset -= 4;
- StoreWordDisp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+ Store32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
}
}
}
@@ -567,7 +569,7 @@ void MipsMir2Lir::UnSpillCoreRegs() {
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
offset -= 4;
- LoadWordDisp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+ Load32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
}
}
OpRegImm(kOpAdd, rs_rSP, frame_size_);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index c959510025..a865430d6c 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -357,11 +357,11 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
if (MIPS_FPREG(r_dest.GetReg())) {
DCHECK(MIPS_SINGLEREG(r_dest.GetReg()));
- DCHECK((size == kWord) || (size == kSingle));
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
size = kSingle;
} else {
if (size == kSingle)
- size = kWord;
+ size = k32;
}
if (!scale) {
@@ -375,7 +375,8 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
case kSingle:
opcode = kMipsFlwc1;
break;
- case kWord:
+ case k32:
+ case kReference:
opcode = kMipsLw;
break;
case kUnsignedHalf:
@@ -408,11 +409,11 @@ LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
if (MIPS_FPREG(r_src.GetReg())) {
DCHECK(MIPS_SINGLEREG(r_src.GetReg()));
- DCHECK((size == kWord) || (size == kSingle));
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
size = kSingle;
} else {
if (size == kSingle)
- size = kWord;
+ size = k32;
}
if (!scale) {
@@ -426,7 +427,8 @@ LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
case kSingle:
opcode = kMipsFswc1;
break;
- case kWord:
+ case k32:
+ case kReference:
opcode = kMipsSw;
break;
case kUnsignedHalf:
@@ -463,7 +465,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
bool pair = false;
switch (size) {
- case kLong:
+ case k64:
case kDouble:
pair = true;
opcode = kMipsLw;
@@ -481,8 +483,9 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
short_form = IS_SIMM16_2WORD(displacement);
DCHECK_EQ((displacement & 0x3), 0);
break;
- case kWord:
+ case k32:
case kSingle:
+ case kReference:
opcode = kMipsLw;
if (MIPS_FPREG(r_dest.GetReg())) {
opcode = kMipsFlwc1;
@@ -544,13 +547,17 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, int s_reg) {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size,
s_reg);
}
LIR* MipsMir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
int s_reg) {
- return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
+ return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
}
LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
@@ -563,7 +570,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
bool pair = false;
switch (size) {
- case kLong:
+ case k64:
case kDouble:
pair = true;
opcode = kMipsSw;
@@ -580,8 +587,9 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
short_form = IS_SIMM16_2WORD(displacement);
DCHECK_EQ((displacement & 0x3), 0);
break;
- case kWord:
+ case k32:
case kSingle:
+ case kReference:
opcode = kMipsSw;
if (MIPS_FPREG(r_src.GetReg())) {
opcode = kMipsFswc1;
@@ -635,11 +643,15 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
return StoreBaseDispBody(r_base, displacement, r_src, RegStorage::InvalidReg(), size);
}
LIR* MipsMir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
- return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), kLong);
+ return StoreBaseDispBody(r_base, displacement, r_src.GetLow(), r_src.GetHigh(), k64);
}
LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
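These MIPS hunks are part of a cross-backend migration from the target-relative sizes kWord/kLong to explicit k32, k64, and kReference, so each memory operation states the width or reference semantics it actually needs; the TODOs keep kWord only for genuinely pointer-width data until the mapping can be based on the target. A minimal sketch of that normalization, assuming an OpSize enum along the lines used here (the 64-bit branch is an assumption; the code above maps kWord to k32 unconditionally):

    enum OpSize { kWord, k32, k64, kSingle, kDouble, kUnsignedHalf, kSignedHalf,
                  kUnsignedByte, kSignedByte, kReference };

    // Sketch only: resolve the target-relative kWord before opcode selection.
    OpSize NormalizeSize(OpSize size, bool target_is_64bit) {
      if (size == kWord) {
        return target_is_64bit ? k64 : k32;  // 64-bit behaviour is an assumption
      }
      return size;
    }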
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 6fcdf70b12..b8ab609f31 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -63,14 +63,14 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
} else {
reg_arg_high = AllocTemp();
int offset_high = offset + sizeof(uint32_t);
- LoadWordDisp(TargetReg(kSp), offset_high, reg_arg_high);
+ Load32Disp(TargetReg(kSp), offset_high, reg_arg_high);
}
}
// If the low part is not in a register yet, we need to load it.
if (!reg_arg_low.Valid()) {
reg_arg_low = AllocTemp();
- LoadWordDisp(TargetReg(kSp), offset, reg_arg_low);
+ Load32Disp(TargetReg(kSp), offset, reg_arg_low);
}
if (wide) {
@@ -96,7 +96,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
if (reg.Valid()) {
OpRegCopy(rl_dest.reg, reg);
} else {
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg);
+ Load32Disp(TargetReg(kSp), offset, rl_dest.reg);
}
} else {
RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
@@ -107,10 +107,10 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
} else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
OpRegCopy(rl_dest.reg, reg_arg_low);
int offset_high = offset + sizeof(uint32_t);
- LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
+ Load32Disp(TargetReg(kSp), offset_high, rl_dest.reg.GetHigh());
} else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
+ Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow());
} else {
LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg, INVALID_SREG);
}
@@ -137,7 +137,7 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
if (wide) {
LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg, INVALID_SREG);
} else {
- LoadWordDisp(reg_obj, data.field_offset, rl_dest.reg);
+ Load32Disp(reg_obj, data.field_offset, rl_dest.reg);
}
if (data.is_volatile) {
// Without context sensitive analysis, we must issue the most conservative barriers.
@@ -175,7 +175,7 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
if (wide) {
StoreBaseDispWide(reg_obj, data.field_offset, reg_src);
} else {
- StoreBaseDisp(reg_obj, data.field_offset, reg_src, kWord);
+ Store32Disp(reg_obj, data.field_offset, reg_src);
}
if (data.is_volatile) {
// A load might follow the volatile store so insert a StoreLoad barrier.
@@ -449,7 +449,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
GenNullCheck(rl_src[0].reg, opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_src[0].reg, len_offset, rl_result.reg);
+ Load32Disp(rl_src[0].reg, len_offset, rl_result.reg);
MarkPossibleNullPointerException(opt_flags);
StoreValue(rl_dest, rl_result);
break;
@@ -562,11 +562,13 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
}
case Instruction::AGET_WIDE:
- GenArrayGet(opt_flags, kLong, rl_src[0], rl_src[1], rl_dest, 3);
+ GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
break;
- case Instruction::AGET:
case Instruction::AGET_OBJECT:
- GenArrayGet(opt_flags, kWord, rl_src[0], rl_src[1], rl_dest, 2);
+ GenArrayGet(opt_flags, kReference, rl_src[0], rl_src[1], rl_dest, 2);
+ break;
+ case Instruction::AGET:
+ GenArrayGet(opt_flags, k32, rl_src[0], rl_src[1], rl_dest, 2);
break;
case Instruction::AGET_BOOLEAN:
GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
@@ -581,10 +583,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
break;
case Instruction::APUT_WIDE:
- GenArrayPut(opt_flags, kLong, rl_src[1], rl_src[2], rl_src[0], 3, false);
+ GenArrayPut(opt_flags, k64, rl_src[1], rl_src[2], rl_src[0], 3, false);
break;
case Instruction::APUT:
- GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2, false);
+ GenArrayPut(opt_flags, k32, rl_src[1], rl_src[2], rl_src[0], 2, false);
break;
case Instruction::APUT_OBJECT: {
bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
@@ -597,7 +599,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
if (is_null || is_safe) {
// Store of constant null doesn't require an assignability test and can be generated inline
// without fixed register usage or a card mark.
- GenArrayPut(opt_flags, kWord, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
+ GenArrayPut(opt_flags, kReference, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
} else {
GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
}
@@ -613,15 +615,15 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::IGET_OBJECT:
- GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, true);
+ GenIGet(mir, opt_flags, kReference, rl_dest, rl_src[0], false, true);
break;
case Instruction::IGET_WIDE:
- GenIGet(mir, opt_flags, kLong, rl_dest, rl_src[0], true, false);
+ GenIGet(mir, opt_flags, k64, rl_dest, rl_src[0], true, false);
break;
case Instruction::IGET:
- GenIGet(mir, opt_flags, kWord, rl_dest, rl_src[0], false, false);
+ GenIGet(mir, opt_flags, k32, rl_dest, rl_src[0], false, false);
break;
case Instruction::IGET_CHAR:
@@ -638,15 +640,15 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
break;
case Instruction::IPUT_WIDE:
- GenIPut(mir, opt_flags, kLong, rl_src[0], rl_src[1], true, false);
+ GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1], true, false);
break;
case Instruction::IPUT_OBJECT:
- GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, true);
+ GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1], false, true);
break;
case Instruction::IPUT:
- GenIPut(mir, opt_flags, kWord, rl_src[0], rl_src[1], false, false);
+ GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1], false, false);
break;
case Instruction::IPUT_BOOLEAN:
@@ -988,6 +990,9 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
ResetRegPool();
if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
ClobberAllRegs();
+ // Reset temp allocation to minimize differences when A/B testing.
+ reg_pool_->next_core_reg = 0;
+ reg_pool_->next_fp_reg = 0;
}
if (cu_->disable_opt & (1 << kSuppressLoads)) {
@@ -1097,8 +1102,6 @@ void Mir2Lir::MethodMIR2LIR() {
cu_->NewTimingSplit("Launchpads");
HandleSuspendLaunchPads();
-
- HandleThrowLaunchPads();
}
//
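The reset of next_core_reg and next_fp_reg above pins the round-robin temp allocator to a fixed starting point per block when liveness tracking is disabled, so A/B comparisons of generated code differ only where the compiler change actually matters. A sketch of the allocator behaviour being made reproducible (all names except next_core_reg are illustrative):

    // Sketch: round-robin temp allocation; resetting next_core_reg at each
    // basic block makes the sequence of grants reproducible across runs.
    struct PoolSketch {
      int next_core_reg;
      bool in_use[16];
    };

    int AllocNextTemp(PoolSketch* pool) {
      const int kNumRegs = 16;
      for (int i = 0; i < kNumRegs; ++i) {
        int candidate = (pool->next_core_reg + i) % kNumRegs;
        if (!pool->in_use[candidate]) {
          pool->in_use[candidate] = true;
          pool->next_core_reg = candidate + 1;  // resume after the last grant
          return candidate;
        }
      }
      return -1;  // no free temp; caller must spill
    }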
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 65910e9eb8..2b6d78b35a 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -109,6 +109,11 @@ typedef uint32_t CodeOffset; // Native code offset in bytes.
#define REG_USE23 (REG_USE2 | REG_USE3)
#define REG_USE123 (REG_USE1 | REG_USE2 | REG_USE3)
+// TODO: #includes need a cleanup
+#ifndef INVALID_SREG
+#define INVALID_SREG (-1)
+#endif
+
struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
@@ -554,12 +559,11 @@ class Mir2Lir : public Backend {
RegisterInfo* GetRegInfo(int reg);
// Shared by all targets - implemented in gen_common.cc.
- void AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume = nullptr);
+ void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit);
bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
void HandleSuspendLaunchPads();
- void HandleThrowLaunchPads();
void HandleSlowPaths();
void GenBarrier();
void GenDivZeroException();
@@ -567,6 +571,8 @@ class Mir2Lir : public Backend {
void GenDivZeroCheck(ConditionCode c_code);
// reg holds divisor.
void GenDivZeroCheck(RegStorage reg);
+ void GenArrayBoundsCheck(RegStorage index, RegStorage length);
+ void GenArrayBoundsCheck(int32_t index, RegStorage length);
LIR* GenNullCheck(RegStorage reg);
void MarkPossibleNullPointerException(int opt_flags);
void MarkPossibleStackOverflowException();
@@ -574,7 +580,6 @@ class Mir2Lir : public Backend {
LIR* GenImmedCheck(ConditionCode c_code, RegStorage reg, int imm_val, ThrowKind kind);
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
- LIR* GenRegRegCheck(ConditionCode c_code, RegStorage reg1, RegStorage reg2, ThrowKind kind);
void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
RegLocation rl_src2, LIR* taken, LIR* fall_through);
void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
@@ -723,14 +728,42 @@ class Mir2Lir : public Backend {
RegLocation LoadCurrMethod();
void LoadCurrMethodDirect(RegStorage r_tgt);
LIR* LoadConstant(RegStorage r_dest, int value);
- LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest);
+ // Natural word size.
+ LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ return LoadBaseDisp(r_base, displacement, r_dest, kWord, INVALID_SREG);
+ }
+ // Load 32 bits, regardless of target.
+ LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ return LoadBaseDisp(r_base, displacement, r_dest, k32, INVALID_SREG);
+ }
+ // Load a reference at base + displacement and decompress into register.
+ LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ return LoadBaseDisp(r_base, displacement, r_dest, kReference, INVALID_SREG);
+ }
+ // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
+ // Load Dalvik value with 64-bit memory storage.
RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
+ // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
+ // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
+ // Load Dalvik value with 64-bit memory storage.
void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
+ // Load Dalvik value with 64-bit memory storage.
void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
- LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src);
+ // Store an item of natural word size.
+ LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDisp(r_base, displacement, r_src, kWord);
+ }
+ // Store an uncompressed reference into a compressed 32-bit container.
+ LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDisp(r_base, displacement, r_src, kReference);
+ }
+ // Store 32 bits, regardless of target.
+ LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
+ return StoreBaseDisp(r_base, displacement, r_src, k32);
+ }
/**
* @brief Used to do the final store in the destination as per bytecode semantics.
@@ -778,6 +811,8 @@ class Mir2Lir : public Backend {
bool MethodBlockCodeGen(BasicBlock* bb);
bool SpecialMIR2LIR(const InlineMethod& special);
void MethodMIR2LIR();
+ // Update LIR for verbose listings.
+ void UpdateLIROffsets();
/*
* @brief Load the address of the dex method into the register.
@@ -933,8 +968,6 @@ class Mir2Lir : public Backend {
RegLocation rl_src2) = 0;
virtual void GenXorLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) = 0;
- virtual LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
- int offset, ThrowKind kind) = 0;
virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) = 0;
virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
@@ -1019,7 +1052,7 @@ class Mir2Lir : public Backend {
virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
- virtual LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
+ virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
virtual LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) = 0;
@@ -1227,6 +1260,10 @@ class Mir2Lir : public Backend {
void AddDivZeroCheckSlowPath(LIR* branch);
+ // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
+ // kArg2 as temp.
+ void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);
+
public:
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
@@ -1240,7 +1277,6 @@ class Mir2Lir : public Backend {
MIRGraph* const mir_graph_;
GrowableArray<SwitchTable*> switch_tables_;
GrowableArray<FillArrayData*> fill_array_data_;
- GrowableArray<LIR*> throw_launchpads_;
GrowableArray<LIR*> suspend_launchpads_;
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
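The new wrappers let call sites state intent instead of raw width: Load32Disp/Store32Disp for fixed 32-bit values, LoadRefDisp/StoreRefDisp for heap references that may need decompression, and LoadWordDisp/StoreWordDisp only for native pointer-width data. A usage sketch leaning on the declarations above (EmitFieldCopy itself is hypothetical and not standalone):

    // Sketch: choose the wrapper by intent rather than by raw width.
    void EmitFieldCopy(Mir2Lir* m2l, RegStorage obj, int src_off, int dst_off) {
      RegStorage tmp = m2l->AllocTemp();
      m2l->Load32Disp(obj, src_off, tmp);   // 32-bit primitive: fixed width
      m2l->Store32Disp(obj, dst_off, tmp);  // never widens on a 64-bit target
      m2l->FreeTemp(tmp);
    }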
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 729b30d621..00831099fc 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -239,7 +239,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// mov esp, ebp
// in case a signal comes in that's not using an alternate signal stack and the large frame may
// have moved us outside of the reserved area at the end of the stack.
- // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
+ // cmp rX86_SP, fs:[stack_end_]; jcc throw_slowpath
OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset<4>());
LIR* branch = OpCondBranch(kCondUlt, nullptr);
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_ - 4));
@@ -251,7 +251,8 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// We have been asked to save the address of the method start for later use.
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rX86_ARG0);
int displacement = SRegOffset(base_of_code_->s_reg_low);
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0, kWord);
+ // Native pointer - must be natural word size.
+ setup_method_address_[1] = StoreWordDisp(rs_rX86_SP, displacement, rs_rX86_ARG0);
}
FreeTemp(rX86_ARG0);
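Switching the saved method-start address to StoreWordDisp matters for the coming 64-bit port: the value is a native pointer, and a fixed 32-bit store would truncate it. A standalone illustration of that failure mode (the values are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Illustrative only: a pointer-width value forced through a 32-bit
      // slot loses its high bits on a 64-bit target.
      uint64_t method_start = 0x00007f12deadbeefULL;  // plausible 64-bit address
      uint32_t stored = static_cast<uint32_t>(method_start);
      assert(stored == 0xdeadbeefu);                  // high 32 bits are gone
      return 0;
    }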
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 0b9823d667..760290cabe 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -127,15 +127,13 @@ class X86Mir2Lir FINAL : public Mir2Lir {
RegLocation rl_src2);
void GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2);
- LIR* GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base, int offset,
- ThrowKind kind);
- LIR* GenMemImmedCheck(ConditionCode c_code, RegStorage base, int offset, int check_value,
- ThrowKind kind);
// TODO: collapse reg_lo, reg_hi
RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
void GenDivZeroCheckWide(RegStorage reg);
+ void GenArrayBoundsCheck(RegStorage index, RegStorage array_base, int32_t len_offset);
+ void GenArrayBoundsCheck(int32_t index, RegStorage array_base, int32_t len_offset);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
void GenSpecialExitSequence();
@@ -234,7 +232,7 @@ class X86Mir2Lir FINAL : public Mir2Lir {
LIR* OpMem(OpKind op, RegStorage r_base, int disp);
LIR* OpPcRelLoad(RegStorage reg, LIR* target);
LIR* OpReg(OpKind op, RegStorage r_dest_src);
- LIR* OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
LIR* OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index ee5387f050..f7b0c9d892 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -193,7 +193,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
+ Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 4ffb9a4c38..3bff4976bd 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -24,34 +24,6 @@
namespace art {
/*
- * Perform register memory operation.
- */
-LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code, RegStorage reg1, RegStorage base,
- int offset, ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
- current_dalvik_offset_, reg1.GetReg(), base.GetReg(), offset);
- OpRegMem(kOpCmp, reg1, base, offset);
- LIR* branch = OpCondBranch(c_code, tgt);
- // Remember branch target - will process later
- throw_launchpads_.Insert(tgt);
- return branch;
-}
-
-/*
- * Perform a compare of memory to immediate value
- */
-LIR* X86Mir2Lir::GenMemImmedCheck(ConditionCode c_code, RegStorage base, int offset,
- int check_value, ThrowKind kind) {
- LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
- current_dalvik_offset_, base.GetReg(), check_value, 0);
- NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base.GetReg(), offset, check_value);
- LIR* branch = OpCondBranch(c_code, tgt);
- // Remember branch target - will process later
- throw_launchpads_.Insert(tgt);
- return branch;
-}
-
-/*
* Compare two 64-bit values
* x = y return 0
* x < y return -1
@@ -144,52 +116,55 @@ LIR* X86Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
return res;
}
-LIR* X86Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
- LIR *res = OpRegCopyNoInsert(r_dest, r_src);
- AppendLIR(res);
- return res;
+void X86Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ if (r_dest != r_src) {
+ LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+ AppendLIR(res);
+ }
}
void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
- // FIXME: handle k64BitSolo when we start using them.
- DCHECK(r_dest.IsPair());
- DCHECK(r_src.IsPair());
- bool dest_fp = X86_FPREG(r_dest.GetLowReg());
- bool src_fp = X86_FPREG(r_src.GetLowReg());
- if (dest_fp) {
- if (src_fp) {
- // TODO: we ought to handle this case here - reserve OpRegCopy for 32-bit copies.
- OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
- RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
- } else {
- // TODO: Prevent this from happening in the code. The result is often
- // unused or could have been loaded more easily from memory.
- NewLIR2(kX86MovdxrRR, r_dest.GetLowReg(), r_src.GetLowReg());
- RegStorage r_tmp = AllocTempDouble();
- NewLIR2(kX86MovdxrRR, r_tmp.GetLowReg(), r_src.GetHighReg());
- NewLIR2(kX86PunpckldqRR, r_dest.GetLowReg(), r_tmp.GetLowReg());
- FreeTemp(r_tmp);
- }
- } else {
- if (src_fp) {
- NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetLowReg());
- NewLIR2(kX86PsrlqRI, r_src.GetLowReg(), 32);
- NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), r_src.GetLowReg());
+ if (r_dest != r_src) {
+ // FIXME: handle k64BitSolo when we start using them.
+ DCHECK(r_dest.IsPair());
+ DCHECK(r_src.IsPair());
+ bool dest_fp = X86_FPREG(r_dest.GetLowReg());
+ bool src_fp = X86_FPREG(r_src.GetLowReg());
+ if (dest_fp) {
+ if (src_fp) {
+ // TODO: we ought to handle this case here - reserve OpRegCopy for 32-bit copies.
+ OpRegCopy(RegStorage::Solo64(S2d(r_dest.GetLowReg(), r_dest.GetHighReg())),
+ RegStorage::Solo64(S2d(r_src.GetLowReg(), r_src.GetHighReg())));
+ } else {
+ // TODO: Prevent this from happening in the code. The result is often
+ // unused or could have been loaded more easily from memory.
+ NewLIR2(kX86MovdxrRR, r_dest.GetLowReg(), r_src.GetLowReg());
+ RegStorage r_tmp = AllocTempDouble();
+ NewLIR2(kX86MovdxrRR, r_tmp.GetLowReg(), r_src.GetHighReg());
+ NewLIR2(kX86PunpckldqRR, r_dest.GetLowReg(), r_tmp.GetLowReg());
+ FreeTemp(r_tmp);
+ }
} else {
- // Handle overlap
- if (r_src.GetHighReg() == r_dest.GetLowReg() && r_src.GetLowReg() == r_dest.GetHighReg()) {
- // Deal with cycles.
- RegStorage temp_reg = AllocTemp();
- OpRegCopy(temp_reg, r_dest.GetHigh());
- OpRegCopy(r_dest.GetHigh(), r_dest.GetLow());
- OpRegCopy(r_dest.GetLow(), temp_reg);
- FreeTemp(temp_reg);
- } else if (r_src.GetHighReg() == r_dest.GetLowReg()) {
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
- OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ if (src_fp) {
+ NewLIR2(kX86MovdrxRR, r_dest.GetLowReg(), r_src.GetLowReg());
+ NewLIR2(kX86PsrlqRI, r_src.GetLowReg(), 32);
+ NewLIR2(kX86MovdrxRR, r_dest.GetHighReg(), r_src.GetLowReg());
} else {
- OpRegCopy(r_dest.GetLow(), r_src.GetLow());
- OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ // Handle overlap
+ if (r_src.GetHighReg() == r_dest.GetLowReg() && r_src.GetLowReg() == r_dest.GetHighReg()) {
+ // Deal with cycles.
+ RegStorage temp_reg = AllocTemp();
+ OpRegCopy(temp_reg, r_dest.GetHigh());
+ OpRegCopy(r_dest.GetHigh(), r_dest.GetLow());
+ OpRegCopy(r_dest.GetLow(), temp_reg);
+ FreeTemp(temp_reg);
+ } else if (r_src.GetHighReg() == r_dest.GetLowReg()) {
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ } else {
+ OpRegCopy(r_dest.GetLow(), r_src.GetLow());
+ OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
+ }
}
}
}
@@ -704,15 +679,15 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
- RegLocation rl_dest = size == kLong ? InlineTargetWide(info) : InlineTarget(info);
+ RegLocation rl_dest = size == k64 ? InlineTargetWide(info) : InlineTarget(info);
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (size == kLong) {
+ if (size == k64) {
// Unaligned access is allowed on x86.
LoadBaseDispWide(rl_address.reg, 0, rl_result.reg, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
+ DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned access is allowed on x86.
LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
@@ -725,12 +700,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_src_value = info->args[2]; // [size] value
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
- if (size == kLong) {
+ if (size == k64) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
StoreBaseDispWide(rl_address.reg, 0, rl_value.reg);
} else {
- DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
+ DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
@@ -780,6 +755,7 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
int srcObjSp = IsInReg(this, rl_src_obj, rs_rSI) ? 0
: (IsInReg(this, rl_src_obj, rs_rDI) ? 4
: (SRegOffset(rl_src_obj.s_reg_low) + push_offset));
+ // FIXME: needs 64-bit update.
LoadWordDisp(TargetReg(kSp), srcObjSp, rs_rDI);
int srcOffsetSp = IsInReg(this, rl_src_offset, rs_rSI) ? 0
: (IsInReg(this, rl_src_offset, rs_rDI) ? 4
@@ -891,6 +867,86 @@ void X86Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
FreeTemp(t_reg);
}
+void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
+ RegStorage array_base,
+ int len_offset) {
+ class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
+ public:
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
+ RegStorage index, RegStorage array_base, int32_t len_offset)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
+ index_(index), array_base_(array_base), len_offset_(len_offset) {
+ }
+
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel();
+
+ RegStorage new_index = index_;
+ // Move index out of kArg1, either directly to kArg0, or to kArg2.
+ if (index_.GetReg() == m2l_->TargetReg(kArg1).GetReg()) {
+ if (array_base_.GetReg() == m2l_->TargetReg(kArg0).GetReg()) {
+ m2l_->OpRegCopy(m2l_->TargetReg(kArg2), index_);
+ new_index = m2l_->TargetReg(kArg2);
+ } else {
+ m2l_->OpRegCopy(m2l_->TargetReg(kArg0), index_);
+ new_index = m2l_->TargetReg(kArg0);
+ }
+ }
+ // Load array length to kArg1.
+ m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
+ m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
+ new_index, m2l_->TargetReg(kArg1), true);
+ }
+
+ private:
+ const RegStorage index_;
+ const RegStorage array_base_;
+ const int32_t len_offset_;
+ };
+
+ OpRegMem(kOpCmp, index, array_base, len_offset);
+ LIR* branch = OpCondBranch(kCondUge, nullptr);
+ AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
+ index, array_base, len_offset));
+}
+
+void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
+ RegStorage array_base,
+ int32_t len_offset) {
+ class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
+ public:
+ ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
+ int32_t index, RegStorage array_base, int32_t len_offset)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
+ index_(index), array_base_(array_base), len_offset_(len_offset) {
+ }
+
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel();
+
+ // Load array length to kArg1.
+ m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
+ m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
+ m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
+ m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
+ }
+
+ private:
+ const int32_t index_;
+ const RegStorage array_base_;
+ const int32_t len_offset_;
+ };
+
+ NewLIR3(IS_SIMM8(index) ? kX86Cmp32MI8 : kX86Cmp32MI, array_base.GetReg(), len_offset, index);
+ LIR* branch = OpCondBranch(kCondLs, nullptr);
+ AddSlowPath(new (arena_) ArrayBoundsCheckSlowPath(this, branch,
+ index, array_base, len_offset));
+}
+
// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
@@ -944,7 +1000,7 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, kWord, sreg);
+ LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, sreg);
break;
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(), rX86_SP,
@@ -1050,7 +1106,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
} else {
LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1,
- kWord, GetSRegHi(rl_src1.s_reg_low));
+ k32, GetSRegHi(rl_src1.s_reg_low));
}
if (is_square) {
@@ -1073,7 +1129,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
} else {
LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0,
- kWord, GetSRegHi(rl_src2.s_reg_low));
+ k32, GetSRegHi(rl_src2.s_reg_low));
}
// EAX <- EAX * 1L (2H * 1L)
@@ -1105,7 +1161,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetLowReg());
} else {
LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0,
- kWord, rl_src2.s_reg_low);
+ k32, rl_src2.s_reg_low);
}
// EDX:EAX <- 2L * 1L (double precision)
@@ -1325,7 +1381,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
rl_array = LoadValue(rl_array, kCoreReg);
int data_offset;
- if (size == kLong || size == kDouble) {
+ if (size == k64 || size == kDouble) {
data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -1348,14 +1404,13 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.reg, len_offset,
- constant_index_value, kThrowConstantArrayBounds);
+ GenArrayBoundsCheck(constant_index_value, rl_array.reg, len_offset);
} else {
- GenRegMemCheck(kCondUge, rl_index.reg, rl_array.reg, len_offset, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
}
}
rl_result = EvalLoc(rl_dest, reg_class, true);
- if ((size == kLong) || (size == kDouble)) {
+ if ((size == k64) || (size == kDouble)) {
LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg.GetLow(),
rl_result.reg.GetHigh(), size, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
@@ -1376,7 +1431,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
- if (size == kLong || size == kDouble) {
+ if (size == k64 || size == kDouble) {
data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
} else {
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
@@ -1400,13 +1455,12 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.reg, len_offset,
- constant_index_value, kThrowConstantArrayBounds);
+ GenArrayBoundsCheck(constant_index_value, rl_array.reg, len_offset);
} else {
- GenRegMemCheck(kCondUge, rl_index.reg, rl_array.reg, len_offset, kThrowArrayBounds);
+ GenArrayBoundsCheck(rl_index.reg, rl_array.reg, len_offset);
}
}
- if ((size == kLong) || (size == kDouble)) {
+ if ((size == k64) || (size == kDouble)) {
rl_src = LoadValueWide(rl_src, reg_class);
} else {
rl_src = LoadValue(rl_src, reg_class);
@@ -1793,22 +1847,22 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
- LoadWordDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(rl_method.reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(check_class, offset_of_type, check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
} else {
LoadCurrMethodDirect(check_class);
if (use_declaring_class) {
- LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(check_class, offset_of_type, check_class);
+ LoadRefDisp(check_class, offset_of_type, check_class);
}
}
@@ -1849,17 +1903,17 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
- LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg);
} else {
// Load dex cache entry into class_reg (kArg2).
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
- LoadWordDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
- LoadWordDisp(class_reg, offset_of_type, class_reg);
+ LoadRefDisp(class_reg, offset_of_type, class_reg);
if (!can_assume_type_is_in_dex_cache) {
// Need to test presence of type in dex cache at runtime.
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
@@ -1883,7 +1937,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
/* Load object->klass_. */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
+ LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
LIR* branchover = nullptr;
if (type_known_final) {
@@ -2056,6 +2110,8 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Can we do this directly into memory?
rl_result = UpdateLoc(rl_dest);
if (rl_result.location == kLocPhysReg) {
+ // Ensure res is in a core reg
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Can we do this from memory directly?
rl_rhs = UpdateLoc(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
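The replacement bounds checks emit one inline unsigned compare (kCondUge against the length in memory) and branch to an out-of-line ArrayBoundsCheckSlowPath that loads the length into kArg1, puts the index in kArg0, and calls pThrowArrayBounds. The unsigned compare works because a negative index reinterpreted as unsigned exceeds any valid array length; a semantics-only sketch, not the emitted LIR:

    #include <cstdint>

    // One unsigned compare covers both index < 0 and index >= length.
    bool IndexInBounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }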
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 5a8ad7a2b4..3e3fa72150 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -369,12 +369,13 @@ void X86Mir2Lir::FlushRegWide(RegStorage reg) {
}
void X86Mir2Lir::FlushReg(RegStorage reg) {
+ // FIXME: need to handle 32 bits in 64-bit register as well as wide values held in single reg.
DCHECK(!reg.IsPair());
RegisterInfo* info = GetRegInfo(reg.GetReg());
if (info->live && info->dirty) {
info->dirty = false;
int v_reg = mir_graph_->SRegToVReg(info->s_reg);
- StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, kWord);
+ StoreBaseDisp(rs_rX86_SP, VRegOffset(v_reg), reg, k32);
}
}
@@ -1033,14 +1034,14 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
info->opt_flags |= MIR_IGNORE_NULL_CHECK; // Record that we've null checked.
// Does the character fit in 16 bits?
- LIR* launchpad_branch = nullptr;
+ LIR* slowpath_branch = nullptr;
if (rl_char.is_const) {
// We need the value in EAX.
LoadConstantNoClobber(rs_rAX, char_value);
} else {
// Character is not a constant; compare at runtime.
LoadValueDirectFixed(rl_char, rs_rAX);
- launchpad_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
+ slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
}
// From here down, we know that we are looking for a char that fits in 16 bits.
@@ -1061,7 +1062,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
NewLIR1(kX86Push32R, rDI);
// Compute the number of words to search in to rCX.
- LoadWordDisp(rs_rDX, count_offset, rs_rCX);
+ Load32Disp(rs_rDX, count_offset, rs_rCX);
LIR *length_compare = nullptr;
int start_value = 0;
bool is_index_on_stack = false;
@@ -1101,7 +1102,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
} else {
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- LoadWordDisp(rs_rX86_SP, displacement, rs_rBX);
+ Load32Disp(rs_rX86_SP, displacement, rs_rBX);
OpRegReg(kOpXor, rs_rDI, rs_rDI);
OpRegReg(kOpCmp, rs_rBX, rs_rDI);
OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);
@@ -1120,8 +1121,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Load the address of the string into EBX.
// The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
- LoadWordDisp(rs_rDX, value_offset, rs_rDI);
- LoadWordDisp(rs_rDX, offset_offset, rs_rBX);
+ Load32Disp(rs_rDX, value_offset, rs_rDI);
+ Load32Disp(rs_rDX, offset_offset, rs_rBX);
OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);
// Now compute into EDI where the search will start.
@@ -1167,9 +1168,9 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
NewLIR1(kX86Pop32R, rDI);
// Out of line code returns here.
- if (launchpad_branch != nullptr) {
+ if (slowpath_branch != nullptr) {
LIR *return_point = NewLIR0(kPseudoTargetLabel);
- AddIntrinsicLaunchpad(info, launchpad_branch, return_point);
+ AddIntrinsicSlowPath(info, slowpath_branch, return_point);
}
StoreValue(rl_dest, rl_return);
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index e9faa7ff53..4d45055927 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -426,7 +426,8 @@ LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
RegStorage t_reg = AllocTemp();
OpRegCopy(t_reg, r_src1);
OpRegReg(op, t_reg, r_src2);
- LIR* res = OpRegCopy(r_dest, t_reg);
+ LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
+ AppendLIR(res);
FreeTemp(t_reg);
return res;
}
@@ -554,7 +555,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
bool is64bit = false;
X86OpCode opcode = kX86Nop;
switch (size) {
- case kLong:
+ case k64:
case kDouble:
// TODO: use regstorage attributes here.
is64bit = true;
@@ -567,8 +568,9 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
// TODO: double store is to unaligned address
DCHECK_EQ((displacement & 0x3), 0);
break;
- case kWord:
+ case k32:
case kSingle:
+ case kReference: // TODO: update for reference decompression on 64-bit targets.
opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
if (X86_FPREG(r_dest.GetReg())) {
opcode = is_array ? kX86MovssRA : kX86MovssRM;
@@ -669,6 +671,10 @@ LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStora
LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
RegStorage r_dest, OpSize size, int s_reg) {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
r_dest, RegStorage::InvalidReg(), size, s_reg);
}
@@ -676,7 +682,7 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement,
LIR* X86Mir2Lir::LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest,
int s_reg) {
return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
- r_dest.GetLow(), r_dest.GetHigh(), kLong, s_reg);
+ r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg);
}
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
@@ -690,7 +696,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
bool is64bit = false;
X86OpCode opcode = kX86Nop;
switch (size) {
- case kLong:
+ case k64:
case kDouble:
is64bit = true;
if (X86_FPREG(r_src.GetReg())) {
@@ -702,8 +708,9 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
// TODO: double store is to unaligned address
DCHECK_EQ((displacement & 0x3), 0);
break;
- case kWord:
+ case k32:
case kSingle:
+ case kReference:
opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
if (X86_FPREG(r_src.GetReg())) {
opcode = is_array ? kX86MovssAR : kX86MovssMR;
@@ -763,13 +770,17 @@ LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement,
RegStorage r_src, OpSize size) {
- return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
- RegStorage::InvalidReg(), size, INVALID_SREG);
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k32;
+ }
+ return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src,
+ RegStorage::InvalidReg(), size, INVALID_SREG);
}
LIR* X86Mir2Lir::StoreBaseDispWide(RegStorage r_base, int displacement, RegStorage r_src) {
return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement,
- r_src.GetLow(), r_src.GetHigh(), kLong, INVALID_SREG);
+ r_src.GetLow(), r_src.GetHigh(), k64, INVALID_SREG);
}
/*

diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 8175c35077..864dadc963 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -50,7 +50,11 @@ TEST_F(ElfWriterTest, dlsym) {
CHECK(host_dir != NULL);
elf_filename = StringPrintf("%s/framework/core.oat", host_dir);
} else {
+#ifdef __LP64__
+ elf_filename = "/data/art-test64/core.oat";
+#else
elf_filename = "/data/art-test/core.oat";
+#endif
}
LOG(INFO) << "elf_filename=" << elf_filename;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 05d6693f70..7c5741bb23 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -164,7 +164,7 @@ TEST_F(ImageTest, WriteRead) {
EXPECT_TRUE(reinterpret_cast<byte*>(klass) >= image_end ||
reinterpret_cast<byte*>(klass) < image_begin) << descriptor;
}
- EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord()));
+ EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
}
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index f76587a26e..c35d4007b5 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -179,7 +179,7 @@ void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
image_bitmap_->Set(obj);
// Before we stomp over the lock word, save the hash code for later.
Monitor::Deflate(Thread::Current(), object);
- LockWord lw(object->GetLockWord());
+ LockWord lw(object->GetLockWord(false));
switch (lw.GetState()) {
case LockWord::kFatLocked: {
LOG(FATAL) << "Fat locked object " << obj << " found during object copy";
@@ -199,7 +199,7 @@ void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
LOG(FATAL) << "Unreachable.";
break;
}
- object->SetLockWord(LockWord::FromForwardingAddress(offset));
+ object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
DCHECK(IsImageOffsetAssigned(object));
}
@@ -212,13 +212,13 @@ void ImageWriter::AssignImageOffset(mirror::Object* object) {
bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
DCHECK(object != nullptr);
- return object->GetLockWord().GetState() == LockWord::kForwardingAddress;
+ return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
}
size_t ImageWriter::GetImageOffset(mirror::Object* object) const {
DCHECK(object != nullptr);
DCHECK(IsImageOffsetAssigned(object));
- LockWord lock_word = object->GetLockWord();
+ LockWord lock_word = object->GetLockWord(false);
size_t offset = lock_word.ForwardingAddress();
DCHECK_LT(offset, image_end_);
return offset;
@@ -555,15 +555,15 @@ void ImageWriter::CopyAndFixupObjects()
heap->VisitObjects(CopyAndFixupObjectsCallback, this);
// Fix up the object previously had hash codes.
for (const std::pair<mirror::Object*, uint32_t>& hash_pair : saved_hashes_) {
- hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second));
+ hash_pair.first->SetLockWord(LockWord::FromHashCode(hash_pair.second), false);
}
saved_hashes_.clear();
self->EndAssertNoThreadSuspension(old_cause);
}
void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
- DCHECK(obj != NULL);
- DCHECK(arg != NULL);
+ DCHECK(obj != nullptr);
+ DCHECK(arg != nullptr);
ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
// see GetLocalAddress for similar computation
size_t offset = image_writer->GetImageOffset(obj);
@@ -575,7 +575,7 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
Object* copy = reinterpret_cast<Object*>(dst);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
// word.
- copy->SetLockWord(LockWord());
+ copy->SetLockWord(LockWord(), false);
image_writer->FixupObject(obj, copy);
}
@@ -680,14 +680,6 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
} else {
// Normal (non-abstract non-native) methods have various tables to relocate.
- uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
- const byte* mapping_table = GetOatAddress(mapping_table_off);
- copy->SetMappingTable<kVerifyNone>(mapping_table);
-
- uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
- const byte* vmap_table = GetOatAddress(vmap_table_offset);
- copy->SetVmapTable<kVerifyNone>(vmap_table);
-
uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
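Every GetLockWord/SetLockWord call now passes an explicit volatility flag, false throughout the image writer since it runs over a suspended heap where no mutator can race on the lock word. A hypothetical sketch of what such a flag selects (the real mirror::Object implementation may differ):

    #include <atomic>
    #include <cstdint>

    // Hypothetical shape of an accessor with an explicit volatility flag.
    struct ObjectSketch {
      std::atomic<uint32_t> monitor_;
      uint32_t GetLockWordValue(bool as_volatile) const {
        return as_volatile
            ? monitor_.load(std::memory_order_seq_cst)    // racing mutators exist
            : monitor_.load(std::memory_order_relaxed);   // suspended-world path
      }
    };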
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 9cfef12b26..b5d39232ca 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -155,19 +155,19 @@ TEST_F(OatTest, WriteRead) {
SirtRef<mirror::ClassLoader> loader(soa.Self(), nullptr);
mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor, loader);
- UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(i));
- CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class->GetStatus()) << descriptor;
+ const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
+ CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class.GetStatus()) << descriptor;
CHECK_EQ(kCompile ? OatClassType::kOatClassAllCompiled : OatClassType::kOatClassNoneCompiled,
- oat_class->GetType()) << descriptor;
+ oat_class.GetType()) << descriptor;
size_t method_index = 0;
for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) {
CheckMethod(klass->GetDirectMethod(i),
- oat_class->GetOatMethod(method_index), dex_file);
+ oat_class.GetOatMethod(method_index), dex_file);
}
for (size_t i = 0; i < num_virtual_methods; i++, method_index++) {
CheckMethod(klass->GetVirtualMethod(i),
- oat_class->GetOatMethod(method_index), dex_file);
+ oat_class.GetOatMethod(method_index), dex_file);
}
}
}
@@ -176,7 +176,8 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
EXPECT_EQ(80U, sizeof(OatHeader));
- EXPECT_EQ(28U, sizeof(OatMethodOffsets));
+ EXPECT_EQ(20U, sizeof(OatMethodOffsets));
+ EXPECT_EQ(12U, sizeof(OatMethodHeader));
}
TEST_F(OatTest, OatHeaderIsValid) {
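The updated size checks reflect the mapping-table and vmap-table offsets moving out of OatMethodOffsets (28 down to 20 bytes) into a new per-method header that the writer prepends to each method's code. Judging from the fields this patch manipulates, the header plausibly has this shape (the authoritative declaration lives elsewhere in the tree):

    #include <cstdint>

    // Sketch inferred from the fields used in oat_writer.cc below: three
    // uint32_t fields match the 12 bytes the test expects.
    struct OatMethodHeader {
      uint32_t mapping_table_offset_;  // code address minus mapping table address
      uint32_t vmap_table_offset_;     // code address minus vmap table address
      uint32_t code_size_;             // size of the machine code that follows
    };
    static_assert(sizeof(OatMethodHeader) == 12, "matches EXPECT_EQ(12U, ...)");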
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index dc66e9c108..bbc9c3e325 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -38,6 +38,14 @@
namespace art {
+#define DCHECK_OFFSET() \
+ DCHECK_EQ(static_cast<off_t>(file_offset + relative_offset), out->Seek(0, kSeekCurrent)) \
+ << "file_offset=" << file_offset << " relative_offset=" << relative_offset
+
+#define DCHECK_OFFSET_() \
+ DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
+ << "file_offset=" << file_offset << " offset_=" << offset_
+
OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
uint32_t image_file_location_oat_checksum,
uintptr_t image_file_location_oat_begin,
@@ -66,7 +74,7 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
size_quick_resolution_trampoline_(0),
size_quick_to_interpreter_bridge_(0),
size_trampoline_alignment_(0),
- size_code_size_(0),
+ size_method_header_(0),
size_code_(0),
size_code_alignment_(0),
size_mapping_table_(0),
@@ -99,6 +107,10 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
offset = InitOatClasses(offset);
}
{
+ TimingLogger::ScopedSplit split("InitOatMaps", timings);
+ offset = InitOatMaps(offset);
+ }
+ {
TimingLogger::ScopedSplit split("InitOatCode", timings);
offset = InitOatCode(offset);
}
@@ -118,6 +130,605 @@ OatWriter::~OatWriter() {
STLDeleteElements(&oat_classes_);
}
+struct OatWriter::GcMapDataAccess {
+ static const std::vector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+ return &compiled_method->GetGcMap();
+ }
+
+ static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
+ return oat_class->method_offsets_[method_offsets_index].gc_map_offset_;
+ }
+
+ static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset)
+ ALWAYS_INLINE {
+ oat_class->method_offsets_[method_offsets_index].gc_map_offset_ = offset;
+ }
+
+ static const char* Name() ALWAYS_INLINE {
+ return "GC map";
+ }
+};
+
+struct OatWriter::MappingTableDataAccess {
+ static const std::vector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+ return &compiled_method->GetMappingTable();
+ }
+
+ static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
+ uint32_t offset = oat_class->method_headers_[method_offsets_index].mapping_table_offset_;
+ return offset == 0u ? 0u :
+ (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
+ }
+
+ static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset)
+ ALWAYS_INLINE {
+ oat_class->method_headers_[method_offsets_index].mapping_table_offset_ =
+ (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
+ }
+
+ static const char* Name() ALWAYS_INLINE {
+ return "mapping table";
+ }
+};
+
+struct OatWriter::VmapTableDataAccess {
+ static const std::vector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+ return &compiled_method->GetVmapTable();
+ }
+
+ static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
+ uint32_t offset = oat_class->method_headers_[method_offsets_index].vmap_table_offset_;
+ return offset == 0u ? 0u :
+ (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
+ }
+
+ static void SetOffset(OatClass* oat_class, size_t method_offsets_index, uint32_t offset)
+ ALWAYS_INLINE {
+ oat_class->method_headers_[method_offsets_index].vmap_table_offset_ =
+ (oat_class->method_offsets_[method_offsets_index].code_offset_ & ~1) - offset;
+ }
+
+ static const char* Name() ALWAYS_INLINE {
+ return "vmap table";
+ }
+};
+
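+// Note: the three *DataAccess structs above act as a compile-time policy so
+// that a single templated visitor can handle GC maps, mapping tables, and
+// vmap tables uniformly, without runtime dispatch. Illustrative shape only:
+//   template <typename DataAccess>
+//   bool VisitMethodMap(OatClass* oat_class, size_t idx, uint32_t offset) {
+//     DataAccess::SetOffset(oat_class, idx, offset);
+//     return true;
+//   }
+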
+class OatWriter::DexMethodVisitor {
+ public:
+ DexMethodVisitor(OatWriter* writer, size_t offset)
+ : writer_(writer),
+ offset_(offset),
+ dex_file_(nullptr),
+ class_def_index_(DexFile::kDexNoIndex) {
+ }
+
+ virtual bool StartClass(const DexFile* dex_file, size_t class_def_index) {
+ DCHECK(dex_file_ == nullptr);
+ DCHECK_EQ(class_def_index_, DexFile::kDexNoIndex);
+ dex_file_ = dex_file;
+ class_def_index_ = class_def_index;
+ return true;
+ }
+
+ virtual bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) = 0;
+
+ virtual bool EndClass() {
+ if (kIsDebugBuild) {
+ dex_file_ = nullptr;
+ class_def_index_ = DexFile::kDexNoIndex;
+ }
+ return true;
+ }
+
+ size_t GetOffset() const {
+ return offset_;
+ }
+
+ protected:
+ virtual ~DexMethodVisitor() { }
+
+ OatWriter* const writer_;
+
+ // The offset is usually advanced for each visited method by the derived class.
+ size_t offset_;
+
+ // The dex file and class def index are set in StartClass().
+ const DexFile* dex_file_;
+ size_t class_def_index_;
+};
+
+class OatWriter::OatDexMethodVisitor : public DexMethodVisitor {
+ public:
+ OatDexMethodVisitor(OatWriter* writer, size_t offset)
+ : DexMethodVisitor(writer, offset),
+ oat_class_index_(0u),
+ method_offsets_index_(0u) {
+ }
+
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) {
+ DexMethodVisitor::StartClass(dex_file, class_def_index);
+ DCHECK_LT(oat_class_index_, writer_->oat_classes_.size());
+ method_offsets_index_ = 0u;
+ return true;
+ }
+
+ bool EndClass() {
+ ++oat_class_index_;
+ return DexMethodVisitor::EndClass();
+ }
+
+ protected:
+ size_t oat_class_index_;
+ size_t method_offsets_index_;
+};
+
+class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
+ public:
+ InitOatClassesMethodVisitor(OatWriter* writer, size_t offset)
+ : DexMethodVisitor(writer, offset),
+ compiled_methods_(),
+ num_non_null_compiled_methods_(0u) {
+ compiled_methods_.reserve(256u);
+ }
+
+ bool StartClass(const DexFile* dex_file, size_t class_def_index) {
+ DexMethodVisitor::StartClass(dex_file, class_def_index);
+ compiled_methods_.clear();
+ num_non_null_compiled_methods_ = 0u;
+ return true;
+ }
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ // Fill in the compiled_methods_ array for methods that have a
+ // CompiledMethod. We track the number of non-null entries in
+ // num_non_null_compiled_methods_ since we only want to allocate
+ // OatMethodOffsets for the compiled methods.
+ uint32_t method_idx = it.GetMemberIndex();
+ CompiledMethod* compiled_method =
+ writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
+ compiled_methods_.push_back(compiled_method);
+ if (compiled_method != nullptr) {
+ ++num_non_null_compiled_methods_;
+ }
+ return true;
+ }
+
+ bool EndClass() {
+ ClassReference class_ref(dex_file_, class_def_index_);
+ CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
+ mirror::Class::Status status;
+ if (compiled_class != NULL) {
+ status = compiled_class->GetStatus();
+ } else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
+ status = mirror::Class::kStatusError;
+ } else {
+ status = mirror::Class::kStatusNotReady;
+ }
+
+ OatClass* oat_class = new OatClass(offset_, compiled_methods_,
+ num_non_null_compiled_methods_, status);
+ writer_->oat_classes_.push_back(oat_class);
+ offset_ += oat_class->SizeOf();
+ return DexMethodVisitor::EndClass();
+ }
+
+ private:
+ std::vector<CompiledMethod*> compiled_methods_;
+ size_t num_non_null_compiled_methods_;
+};
+
+class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
+ public:
+ InitCodeMethodVisitor(OatWriter* writer, size_t offset)
+ : OatDexMethodVisitor(writer, offset) {
+ }
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
+ CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ if (compiled_method != nullptr) {
+ // Derived from CompiledMethod.
+ uint32_t quick_code_offset = 0;
+ uint32_t frame_size_in_bytes = kStackAlignment;
+ uint32_t core_spill_mask = 0;
+ uint32_t fp_spill_mask = 0;
+
+ const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
+ const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
+ if (portable_code != nullptr) {
+ CHECK(quick_code == nullptr);
+ size_t oat_method_offsets_offset =
+ oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
+ compiled_method->AddOatdataOffsetToCompliledCodeOffset(
+ oat_method_offsets_offset + OFFSETOF_MEMBER(OatMethodOffsets, code_offset_));
+ } else {
+ CHECK(quick_code != nullptr);
+ offset_ = compiled_method->AlignCode(offset_);
+ DCHECK_ALIGNED_PARAM(offset_,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
+ uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+ CHECK_NE(code_size, 0U);
+ uint32_t thumb_offset = compiled_method->CodeDelta();
+ quick_code_offset = offset_ + sizeof(OatMethodHeader) + thumb_offset;
+
+ std::vector<uint8_t>* cfi_info = writer_->compiler_driver_->GetCallFrameInformation();
+ if (cfi_info != nullptr) {
+ // Copy in the FDE, if present.
+ const std::vector<uint8_t>* fde = compiled_method->GetCFIInfo();
+ if (fde != nullptr) {
+ // Copy the information into cfi_info and then fix the address in the new copy.
+ int cur_offset = cfi_info->size();
+ cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
+
+ // Set the 'initial_location' field to address the start of the method.
+ uint32_t new_value = quick_code_offset - writer_->oat_header_->GetExecutableOffset();
+ uint32_t offset_to_update = cur_offset + 2*sizeof(uint32_t);
+ (*cfi_info)[offset_to_update+0] = new_value;
+ (*cfi_info)[offset_to_update+1] = new_value >> 8;
+ (*cfi_info)[offset_to_update+2] = new_value >> 16;
+ (*cfi_info)[offset_to_update+3] = new_value >> 24;
+ std::string name = PrettyMethod(it.GetMemberIndex(), *dex_file_, false);
+ writer_->method_info_.push_back(DebugInfo(name, new_value, new_value + code_size));
+ }
+ }
+
+ DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
+ OatMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
+ method_header->code_size_ = code_size;
+
+ // Deduplicate code arrays.
+ auto code_iter = dedupe_map_.find(compiled_method);
+ if (code_iter != dedupe_map_.end()) {
+ quick_code_offset = code_iter->second;
+ FixupMethodHeader(method_header, quick_code_offset - thumb_offset);
+ } else {
+ dedupe_map_.Put(compiled_method, quick_code_offset);
+ FixupMethodHeader(method_header, quick_code_offset - thumb_offset);
+ writer_->oat_header_->UpdateChecksum(method_header, sizeof(*method_header));
+ offset_ += sizeof(*method_header); // Method header is prepended before code.
+ writer_->oat_header_->UpdateChecksum(&(*quick_code)[0], code_size);
+ offset_ += code_size;
+ }
+ }
+ frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+ core_spill_mask = compiled_method->GetCoreSpillMask();
+ fp_spill_mask = compiled_method->GetFpSpillMask();
+
+ if (kIsDebugBuild) {
+ // We expect GC maps except when the class hasn't been verified or the method is native.
+ const CompilerDriver* compiler_driver = writer_->compiler_driver_;
+ ClassReference class_ref(dex_file_, class_def_index_);
+ CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
+ mirror::Class::Status status;
+ if (compiled_class != nullptr) {
+ status = compiled_class->GetStatus();
+ } else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
+ status = mirror::Class::kStatusError;
+ } else {
+ status = mirror::Class::kStatusNotReady;
+ }
+ const std::vector<uint8_t>& gc_map = compiled_method->GetGcMap();
+ size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
+ bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
+ CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
+ << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
+ << (status < mirror::Class::kStatusVerified) << " " << status << " "
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+ }
+
+ DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
+ OatMethodOffsets* offsets = &oat_class->method_offsets_[method_offsets_index_];
+ offsets->code_offset_ = quick_code_offset;
+ offsets->frame_size_in_bytes_ = frame_size_in_bytes;
+ offsets->core_spill_mask_ = core_spill_mask;
+ offsets->fp_spill_mask_ = fp_spill_mask;
+ ++method_offsets_index_;
+ }
+
+ return true;
+ }
+
+ private:
+ static void FixupMethodHeader(OatMethodHeader* method_header, uint32_t code_offset) {
+ // The mapping/vmap table offsets were recorded while the final code offset was
+ // still unknown (taken to be 0), so they are relative to a code start of 0 and
+ // must be adjusted by code_offset here.
+ if (method_header->mapping_table_offset_ != 0u) {
+ method_header->mapping_table_offset_ += code_offset;
+ DCHECK_LT(method_header->mapping_table_offset_, code_offset);
+ }
+ if (method_header->vmap_table_offset_ != 0u) {
+ method_header->vmap_table_offset_ += code_offset;
+ DCHECK_LT(method_header->vmap_table_offset_, code_offset);
+ }
+ }
+
+ // The compiler driver already deduplicates the underlying code and table arrays
+ // on a pointer basis, so comparing those pointers (see CodeOffsetsKeyComparator)
+ // is enough to detect duplicated methods.
+ SafeMap<const CompiledMethod*, uint32_t, CodeOffsetsKeyComparator> dedupe_map_;
+};
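A worked example of the fixup arithmetic above, with hypothetical numbers; it assumes the header fields hold the backward distance from the code start to the table, stored negated while the code start was still taken to be 0 (the reading under which the DCHECK_LT after the increment holds):

    // Hypothetical values: mapping table at relative offset 0x80, method code
    // placed at relative offset 0x2000.
    uint32_t stored = 0u - 0x80u;     // recorded "0-offset"; wraps to 0xFFFFFF80
    uint32_t code_offset = 0x2000u;
    stored += code_offset;            // 0x1F80, i.e. code_offset - 0x80
    // stored < code_offset now holds (tables precede code), matching the DCHECK_LT.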
+
+template <typename DataAccess>
+class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
+ public:
+ InitMapMethodVisitor(OatWriter* writer, size_t offset)
+ : OatDexMethodVisitor(writer, offset) {
+ }
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
+ CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ if (compiled_method != nullptr) {
+ DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
+ DCHECK_EQ(DataAccess::GetOffset(oat_class, method_offsets_index_), 0u);
+
+ const std::vector<uint8_t>* map = DataAccess::GetData(compiled_method);
+ uint32_t map_size = map->size() * sizeof((*map)[0]);
+ if (map_size != 0u) {
+ auto it = dedupe_map_.find(map);
+ if (it != dedupe_map_.end()) {
+ DataAccess::SetOffset(oat_class, method_offsets_index_, it->second);
+ } else {
+ DataAccess::SetOffset(oat_class, method_offsets_index_, offset_);
+ dedupe_map_.Put(map, offset_);
+ offset_ += map_size;
+ writer_->oat_header_->UpdateChecksum(&(*map)[0], map_size);
+ }
+ }
+ ++method_offsets_index_;
+ }
+
+ return true;
+ }
+
+ private:
+ // Deduplication is already done on a pointer basis by the compiler driver,
+ // so we can simply compare the pointers to find out if things are duplicated.
+ SafeMap<const std::vector<uint8_t>*, uint32_t> dedupe_map_;
+};
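The dedupe_map_ above relies on the compiler driver having already canonicalized identical byte arrays to a single vector, so lookups never compare contents. A minimal sketch of the same idea, using std::map in place of ART's SafeMap (illustration only):

    #include <cstdint>
    #include <map>
    #include <vector>

    std::map<const std::vector<uint8_t>*, uint32_t> offsets;  // keyed by pointer

    // Returns the offset at which this map is (or will be) written; space is
    // reserved only for the first occurrence of each deduplicated array.
    uint32_t Place(const std::vector<uint8_t>* map, uint32_t* offset) {
      auto it = offsets.find(map);
      if (it != offsets.end()) {
        return it->second;
      }
      uint32_t placed = *offset;
      offsets.emplace(map, placed);
      *offset += map->size();
      return placed;
    }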
+
+class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
+ public:
+ InitImageMethodVisitor(OatWriter* writer, size_t offset)
+ : OatDexMethodVisitor(writer, offset) {
+ }
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
+ CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ OatMethodOffsets offsets(0u, kStackAlignment, 0u, 0u, 0u);
+ if (compiled_method != nullptr) {
+ DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
+ offsets = oat_class->method_offsets_[method_offsets_index_];
+ ++method_offsets_index_;
+ }
+
+ // Derive frame size and spill masks for native methods without code:
+ // These are generic JNI methods...
+ uint32_t method_idx = it.GetMemberIndex();
+ bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
+ if (is_native && compiled_method == nullptr) {
+ // Compute the SIRT size as if _every_ reference, even null ones, were put into it.
+ uint32_t s_len;
+ const char* shorty = dex_file_->GetMethodShorty(dex_file_->GetMethodId(method_idx),
+ &s_len);
+ DCHECK(shorty != nullptr);
+ uint32_t refs = 1; // Native method always has "this" or class.
+ for (uint32_t i = 1; i < s_len; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ size_t pointer_size = GetInstructionSetPointerSize(
+ writer_->compiler_driver_->GetInstructionSet());
+ size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(pointer_size, refs);
+
+ // Get the generic spill masks and base frame size.
+ mirror::ArtMethod* callee_save_method =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ offsets.frame_size_in_bytes_ = callee_save_method->GetFrameSizeInBytes() + sirt_size;
+ offsets.core_spill_mask_ = callee_save_method->GetCoreSpillMask();
+ offsets.fp_spill_mask_ = callee_save_method->GetFpSpillMask();
+ DCHECK_EQ(offsets.gc_map_offset_, 0u);
+ }
+
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ InvokeType invoke_type = it.GetMethodInvokeType(dex_file_->GetClassDef(class_def_index_));
+ // Unchecked as we hold mutator_lock_ on entry.
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(*dex_file_));
+ SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
+ mirror::ArtMethod* method = linker->ResolveMethod(*dex_file_, method_idx, dex_cache,
+ class_loader, nullptr, invoke_type);
+ CHECK(method != nullptr);
+ method->SetFrameSizeInBytes(offsets.frame_size_in_bytes_);
+ method->SetCoreSpillMask(offsets.core_spill_mask_);
+ method->SetFpSpillMask(offsets.fp_spill_mask_);
+ // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
+ method->SetQuickOatCodeOffset(offsets.code_offset_);
+ method->SetOatNativeGcMapOffset(offsets.gc_map_offset_);
+
+ return true;
+ }
+};
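The SIRT sizing above counts one implicit reference plus every 'L' in the parameter part of the shorty. A stand-alone sketch of that counting rule (hypothetical helper, not part of the patch):

    #include <cstdint>

    uint32_t CountSirtRefs(const char* shorty) {
      uint32_t refs = 1u;  // the implicit "this" or, for static natives, the class
      for (uint32_t i = 1u; shorty[i] != '\0'; ++i) {  // index 0 is the return type
        if (shorty[i] == 'L') {
          ++refs;
        }
      }
      return refs;
    }
    // CountSirtRefs("LLIL") == 3: two reference parameters plus this/class.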
+
+class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
+ public:
+ WriteCodeMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
+ size_t relative_offset)
+ : OatDexMethodVisitor(writer, relative_offset),
+ out_(out),
+ file_offset_(file_offset) {
+ }
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
+ const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ if (compiled_method != nullptr) { // i.e. not an abstract method
+ size_t file_offset = file_offset_;
+ OutputStream* out = out_;
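+ // Local aliases, presumably consumed by the DCHECK_OFFSET_ macro used below.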
+
+ const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
+ if (quick_code != nullptr) {
+ CHECK(compiled_method->GetPortableCode() == nullptr);
+ uint32_t aligned_offset = compiled_method->AlignCode(offset_);
+ uint32_t aligned_code_delta = aligned_offset - offset_;
+ if (aligned_code_delta != 0) {
+ static const uint8_t kPadding[] = {
+ 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u
+ };
+ DCHECK_LE(aligned_code_delta, sizeof(kPadding));
+ if (UNLIKELY(!out->WriteFully(kPadding, aligned_code_delta))) {
+ ReportWriteFailure("code alignment padding", it);
+ return false;
+ }
+ writer_->size_code_alignment_ += aligned_code_delta;
+ offset_ += aligned_code_delta;
+ DCHECK_OFFSET_();
+ }
+ DCHECK_ALIGNED_PARAM(offset_,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
+ uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+ CHECK_NE(code_size, 0U);
+
+ // Deduplicate code arrays.
+ const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
+ DCHECK(method_offsets.code_offset_ < offset_ || method_offsets.code_offset_ ==
+ offset_ + sizeof(OatMethodHeader) + compiled_method->CodeDelta())
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+ if (method_offsets.code_offset_ >= offset_) {
+ const OatMethodHeader& method_header = oat_class->method_headers_[method_offsets_index_];
+ if (!out->WriteFully(&method_header, sizeof(method_header))) {
+ ReportWriteFailure("method header", it);
+ return false;
+ }
+ writer_->size_method_header_ += sizeof(method_header);
+ offset_ += sizeof(method_header);
+ DCHECK_OFFSET_();
+ if (!out->WriteFully(&(*quick_code)[0], code_size)) {
+ ReportWriteFailure("method code", it);
+ return false;
+ }
+ writer_->size_code_ += code_size;
+ offset_ += code_size;
+ }
+ DCHECK_OFFSET_();
+ }
+ ++method_offsets_index_;
+ }
+
+ return true;
+ }
+
+ private:
+ OutputStream* const out_;
+ size_t const file_offset_;
+
+ void ReportWriteFailure(const char* what, const ClassDataItemIterator& it) {
+ PLOG(ERROR) << "Failed to write " << what << " for "
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " to " << out_->GetLocation();
+ }
+};
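The 16-byte zero buffer above silently caps the supported instruction-set code alignment at 16 (hence the DCHECK_LE). A sketch of the delta it covers, assuming AlignCode() simply rounds the offset up to the instruction-set alignment:

    // Hypothetical values: 8-byte code alignment, write cursor at 0x1F3C.
    uint32_t alignment = 8u;  // stands in for GetInstructionSetAlignment(isa)
    uint32_t offset = 0x1F3Cu;
    uint32_t aligned = (offset + alignment - 1u) & ~(alignment - 1u);  // 0x1F40
    uint32_t delta = aligned - offset;  // 4 zero bytes to emit, always <= 16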
+
+template <typename DataAccess>
+class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
+ public:
+ WriteMapMethodVisitor(OatWriter* writer, OutputStream* out, const size_t file_offset,
+ size_t relative_offset)
+ : OatDexMethodVisitor(writer, relative_offset),
+ out_(out),
+ file_offset_(file_offset) {
+ }
+
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
+ const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
+
+ if (compiled_method != nullptr) { // i.e. not an abstract method
+ size_t file_offset = file_offset_;
+ OutputStream* out = out_;
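+ // Local aliases, presumably consumed by the DCHECK_OFFSET_ macro used below.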
+
+ uint32_t map_offset = DataAccess::GetOffset(oat_class, method_offsets_index_);
+ ++method_offsets_index_;
+
+ // Write deduplicated map.
+ const std::vector<uint8_t>* map = DataAccess::GetData(compiled_method);
+ size_t map_size = map->size() * sizeof((*map)[0]);
+ DCHECK((map_size == 0u && map_offset == 0u) ||
+ (map_size != 0u && map_offset != 0u && map_offset <= offset_))
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_);
+ if (map_size != 0u && map_offset == offset_) {
+ if (UNLIKELY(!out->WriteFully(&(*map)[0], map_size))) {
+ ReportWriteFailure(it);
+ return false;
+ }
+ offset_ += map_size;
+ }
+ DCHECK_OFFSET_();
+ }
+
+ return true;
+ }
+
+ private:
+ OutputStream* const out_;
+ size_t const file_offset_;
+
+ void ReportWriteFailure(const ClassDataItemIterator& it) {
+ PLOG(ERROR) << "Failed to write " << DataAccess::Name() << " for "
+ << PrettyMethod(it.GetMemberIndex(), *dex_file_) << " to " << out_->GetLocation();
+ }
+};
+
+// Visit all methods from all classes in all dex files with the specified visitor.
+bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
+ for (const DexFile* dex_file : *dex_files_) {
+ const size_t class_def_count = dex_file->NumClassDefs();
+ for (size_t class_def_index = 0; class_def_index != class_def_count; ++class_def_index) {
+ if (UNLIKELY(!visitor->StartClass(dex_file, class_def_index))) {
+ return false;
+ }
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
+ const byte* class_data = dex_file->GetClassData(class_def);
+ if (class_data != nullptr) { // i.e. not an empty class, such as a marker interface
+ ClassDataItemIterator it(*dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ size_t class_def_method_index = 0u;
+ while (it.HasNextDirectMethod()) {
+ if (!visitor->VisitMethod(class_def_method_index, it)) {
+ return false;
+ }
+ ++class_def_method_index;
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ if (UNLIKELY(!visitor->VisitMethod(class_def_method_index, it))) {
+ return false;
+ }
+ ++class_def_method_index;
+ it.Next();
+ }
+ }
+ if (UNLIKELY(!visitor->EndClass())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
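For illustration, a hypothetical visitor that VisitDexMethods() could drive; it assumes only the DexMethodVisitor interface this patch introduces (StartClass/VisitMethod/EndClass) and ignores its access restrictions:

    // Counts every direct and virtual method the writer would visit.
    class CountingMethodVisitor : public OatWriter::DexMethodVisitor {
     public:
      explicit CountingMethodVisitor(OatWriter* writer)
          : DexMethodVisitor(writer, 0u), count_(0u) {}

      bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
        ++count_;
        return true;  // returning false aborts the whole walk
      }

      size_t count_;
    };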
+
size_t OatWriter::InitOatHeader() {
// create the OatHeader
oat_header_ = new OatHeader(compiler_driver_->GetInstructionSet(),
@@ -161,78 +772,42 @@ size_t OatWriter::InitDexFiles(size_t offset) {
}
size_t OatWriter::InitOatClasses(size_t offset) {
- // create the OatClasses
// calculate the offsets within OatDexFiles to OatClasses
- for (size_t i = 0; i != dex_files_->size(); ++i) {
- const DexFile* dex_file = (*dex_files_)[i];
- for (size_t class_def_index = 0;
- class_def_index < dex_file->NumClassDefs();
- class_def_index++) {
- oat_dex_files_[i]->methods_offsets_[class_def_index] = offset;
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const byte* class_data = dex_file->GetClassData(class_def);
- uint32_t num_non_null_compiled_methods = 0;
- UniquePtr<std::vector<CompiledMethod*> > compiled_methods(new std::vector<CompiledMethod*>());
- if (class_data != NULL) { // ie not an empty class, such as a marker interface
- ClassDataItemIterator it(*dex_file, class_data);
- size_t num_direct_methods = it.NumDirectMethods();
- size_t num_virtual_methods = it.NumVirtualMethods();
- size_t num_methods = num_direct_methods + num_virtual_methods;
-
- // Fill in the compiled_methods_ array for methods that have a
- // CompiledMethod. We track the number of non-null entries in
- // num_non_null_compiled_methods since we only want to allocate
- // OatMethodOffsets for the compiled methods.
- compiled_methods->reserve(num_methods);
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
- size_t class_def_method_index = 0;
- while (it.HasNextDirectMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- CompiledMethod* compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
- compiled_methods->push_back(compiled_method);
- if (compiled_method != NULL) {
- num_non_null_compiled_methods++;
- }
- class_def_method_index++;
- it.Next();
- }
- while (it.HasNextVirtualMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- CompiledMethod* compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(dex_file, method_idx));
- compiled_methods->push_back(compiled_method);
- if (compiled_method != NULL) {
- num_non_null_compiled_methods++;
- }
- class_def_method_index++;
- it.Next();
- }
- }
-
- ClassReference class_ref(dex_file, class_def_index);
- CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
- mirror::Class::Status status;
- if (compiled_class != NULL) {
- status = compiled_class->GetStatus();
- } else if (compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
- status = mirror::Class::kStatusError;
- } else {
- status = mirror::Class::kStatusNotReady;
- }
-
- OatClass* oat_class = new OatClass(offset, compiled_methods.release(),
- num_non_null_compiled_methods, status);
- oat_classes_.push_back(oat_class);
- offset += oat_class->SizeOf();
+ InitOatClassesMethodVisitor visitor(this, offset);
+ bool success = VisitDexMethods(&visitor);
+ CHECK(success);
+ offset = visitor.GetOffset();
+
+ // Update oat_dex_files_.
+ auto oat_class_it = oat_classes_.begin();
+ for (OatDexFile* oat_dex_file : oat_dex_files_) {
+ for (uint32_t& offset : oat_dex_file->methods_offsets_) {
+ DCHECK(oat_class_it != oat_classes_.end());
+ offset = (*oat_class_it)->offset_;
+ ++oat_class_it;
}
- oat_dex_files_[i]->UpdateChecksum(oat_header_);
+ oat_dex_file->UpdateChecksum(oat_header_);
}
+ CHECK(oat_class_it == oat_classes_.end());
+
+ return offset;
+}
+
+size_t OatWriter::InitOatMaps(size_t offset) {
+ #define VISIT(VisitorType) \
+ do { \
+ VisitorType visitor(this, offset); \
+ bool success = VisitDexMethods(&visitor); \
+ DCHECK(success); \
+ offset = visitor.GetOffset(); \
+ } while (false)
+
+ VISIT(InitMapMethodVisitor<GcMapDataAccess>);
+ VISIT(InitMapMethodVisitor<MappingTableDataAccess>);
+ VISIT(InitMapMethodVisitor<VmapTableDataAccess>);
+
+ #undef VISIT
+
return offset;
}
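Spelled out, the first VISIT(...) line above expands to the following block; this is a pure expansion of the macro, not new behavior:

    do {
      InitMapMethodVisitor<GcMapDataAccess> visitor(this, offset);
      bool success = VisitDexMethods(&visitor);
      DCHECK(success);
      offset = visitor.GetOffset();
    } while (false);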
@@ -280,280 +855,24 @@ size_t OatWriter::InitOatCode(size_t offset) {
}
size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
- size_t oat_class_index = 0;
- for (size_t i = 0; i != dex_files_->size(); ++i) {
- const DexFile* dex_file = (*dex_files_)[i];
- CHECK(dex_file != NULL);
- offset = InitOatCodeDexFile(offset, &oat_class_index, *dex_file);
- }
- return offset;
-}
-
-size_t OatWriter::InitOatCodeDexFile(size_t offset,
- size_t* oat_class_index,
- const DexFile& dex_file) {
- for (size_t class_def_index = 0;
- class_def_index < dex_file.NumClassDefs();
- class_def_index++, (*oat_class_index)++) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- offset = InitOatCodeClassDef(offset, *oat_class_index, class_def_index, dex_file, class_def);
- oat_classes_[*oat_class_index]->UpdateChecksum(oat_header_);
- }
- return offset;
-}
-
-size_t OatWriter::InitOatCodeClassDef(size_t offset,
- size_t oat_class_index, size_t class_def_index,
- const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
- const byte* class_data = dex_file.GetClassData(class_def);
- if (class_data == NULL) {
- // empty class, such as a marker interface
- return offset;
- }
- ClassDataItemIterator it(dex_file, class_data);
- CHECK_LE(oat_classes_[oat_class_index]->method_offsets_.size(),
- it.NumDirectMethods() + it.NumVirtualMethods());
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
- // Process methods
- size_t class_def_method_index = 0;
- size_t method_offsets_index = 0;
- while (it.HasNextDirectMethod()) {
- bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
- offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
- &method_offsets_index, is_native,
- it.GetMethodInvokeType(class_def), it.GetMemberIndex(), dex_file);
- class_def_method_index++;
- it.Next();
- }
- while (it.HasNextVirtualMethod()) {
- bool is_native = (it.GetMemberAccessFlags() & kAccNative) != 0;
- offset = InitOatCodeMethod(offset, oat_class_index, class_def_index, class_def_method_index,
- &method_offsets_index, is_native,
- it.GetMethodInvokeType(class_def), it.GetMemberIndex(), dex_file);
- class_def_method_index++;
- it.Next();
- }
- DCHECK(!it.HasNext());
- CHECK_LE(method_offsets_index, class_def_method_index);
- return offset;
-}
-
-size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index,
- size_t __attribute__((unused)) class_def_index,
- size_t class_def_method_index,
- size_t* method_offsets_index,
- bool __attribute__((unused)) is_native,
- InvokeType invoke_type,
- uint32_t method_idx, const DexFile& dex_file) {
- // Derived from CompiledMethod if available.
- uint32_t quick_code_offset = 0;
- uint32_t frame_size_in_bytes = kStackAlignment;
- uint32_t core_spill_mask = 0;
- uint32_t fp_spill_mask = 0;
- uint32_t mapping_table_offset = 0;
- uint32_t vmap_table_offset = 0;
- uint32_t gc_map_offset = 0;
-
- OatClass* oat_class = oat_classes_[oat_class_index];
- CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
-
- if (compiled_method != nullptr) {
- const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
- const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- if (portable_code != nullptr) {
- CHECK(quick_code == nullptr);
- size_t oat_method_offsets_offset =
- oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
- compiled_method->AddOatdataOffsetToCompliledCodeOffset(
- oat_method_offsets_offset + OFFSETOF_MEMBER(OatMethodOffsets, code_offset_));
- } else {
- CHECK(quick_code != nullptr);
- offset = compiled_method->AlignCode(offset);
- DCHECK_ALIGNED_PARAM(offset,
- GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
-
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
- CHECK_NE(code_size, 0U);
- uint32_t thumb_offset = compiled_method->CodeDelta();
- quick_code_offset = offset + sizeof(code_size) + thumb_offset;
-
- std::vector<uint8_t>* cfi_info = compiler_driver_->GetCallFrameInformation();
- if (cfi_info != nullptr) {
- // Copy in the FDE, if present
- const std::vector<uint8_t>* fde = compiled_method->GetCFIInfo();
- if (fde != nullptr) {
- // Copy the information into cfi_info and then fix the address in the new copy.
- int cur_offset = cfi_info->size();
- cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
-
- // Set the 'initial_location' field to address the start of the method.
- uint32_t new_value = quick_code_offset - oat_header_->GetExecutableOffset();
- uint32_t offset_to_update = cur_offset + 2*sizeof(uint32_t);
- (*cfi_info)[offset_to_update+0] = new_value;
- (*cfi_info)[offset_to_update+1] = new_value >> 8;
- (*cfi_info)[offset_to_update+2] = new_value >> 16;
- (*cfi_info)[offset_to_update+3] = new_value >> 24;
- method_info_.push_back(DebugInfo(PrettyMethod(method_idx, dex_file, false),
- new_value, new_value + code_size));
- }
- }
-
- // Deduplicate code arrays
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter =
- code_offsets_.find(quick_code);
- if (code_iter != code_offsets_.end()) {
- quick_code_offset = code_iter->second;
- } else {
- code_offsets_.Put(quick_code, quick_code_offset);
- offset += sizeof(code_size); // code size is prepended before code
- offset += code_size;
- oat_header_->UpdateChecksum(&(*quick_code)[0], code_size);
- }
- }
- frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- core_spill_mask = compiled_method->GetCoreSpillMask();
- fp_spill_mask = compiled_method->GetFpSpillMask();
-
- const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
- size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
- mapping_table_offset = (mapping_table_size == 0) ? 0 : offset;
-
- // Deduplicate mapping tables
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator mapping_iter =
- mapping_table_offsets_.find(&mapping_table);
- if (mapping_iter != mapping_table_offsets_.end()) {
- mapping_table_offset = mapping_iter->second;
- } else {
- mapping_table_offsets_.Put(&mapping_table, mapping_table_offset);
- offset += mapping_table_size;
- oat_header_->UpdateChecksum(&mapping_table[0], mapping_table_size);
- }
-
- const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
- size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
- vmap_table_offset = (vmap_table_size == 0) ? 0 : offset;
-
- // Deduplicate vmap tables
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator vmap_iter =
- vmap_table_offsets_.find(&vmap_table);
- if (vmap_iter != vmap_table_offsets_.end()) {
- vmap_table_offset = vmap_iter->second;
- } else {
- vmap_table_offsets_.Put(&vmap_table, vmap_table_offset);
- offset += vmap_table_size;
- oat_header_->UpdateChecksum(&vmap_table[0], vmap_table_size);
- }
-
- const std::vector<uint8_t>& gc_map = compiled_method->GetGcMap();
- size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
- gc_map_offset = (gc_map_size == 0) ? 0 : offset;
-
- if (kIsDebugBuild) {
- // We expect GC maps except when the class hasn't been verified or the method is native
- ClassReference class_ref(&dex_file, class_def_index);
- CompiledClass* compiled_class = compiler_driver_->GetCompiledClass(class_ref);
- mirror::Class::Status status;
- if (compiled_class != NULL) {
- status = compiled_class->GetStatus();
- } else if (compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
- status = mirror::Class::kStatusError;
- } else {
- status = mirror::Class::kStatusNotReady;
- }
- CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
- << &gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
- << (status < mirror::Class::kStatusVerified) << " " << status << " "
- << PrettyMethod(method_idx, dex_file);
- }
-
- // Deduplicate GC maps
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator gc_map_iter =
- gc_map_offsets_.find(&gc_map);
- if (gc_map_iter != gc_map_offsets_.end()) {
- gc_map_offset = gc_map_iter->second;
- } else {
- gc_map_offsets_.Put(&gc_map, gc_map_offset);
- offset += gc_map_size;
- oat_header_->UpdateChecksum(&gc_map[0], gc_map_size);
- }
-
- oat_class->method_offsets_[*method_offsets_index] =
- OatMethodOffsets(quick_code_offset,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
- mapping_table_offset,
- vmap_table_offset,
- gc_map_offset);
- (*method_offsets_index)++;
- }
-
-
+ #define VISIT(VisitorType) \
+ do { \
+ VisitorType visitor(this, offset); \
+ bool success = VisitDexMethods(&visitor); \
+ DCHECK(success); \
+ offset = visitor.GetOffset(); \
+ } while (false)
+
+ VISIT(InitCodeMethodVisitor);
if (compiler_driver_->IsImage()) {
- // Derive frame size and spill masks for native methods without code:
- // These are generic JNI methods...
- if (is_native && compiled_method == nullptr) {
- // Compute Sirt size as putting _every_ reference into it, even null ones.
- uint32_t s_len;
- const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx), &s_len);
- DCHECK(shorty != nullptr);
- uint32_t refs = 1; // Native method always has "this" or class.
- for (uint32_t i = 1; i < s_len; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- size_t pointer_size = GetInstructionSetPointerSize(compiler_driver_->GetInstructionSet());
- size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(pointer_size, refs);
-
- // Get the generic spill masks and base frame size.
- mirror::ArtMethod* callee_save_method =
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
-
- frame_size_in_bytes = callee_save_method->GetFrameSizeInBytes() + sirt_size;
- core_spill_mask = callee_save_method->GetCoreSpillMask();
- fp_spill_mask = callee_save_method->GetFpSpillMask();
- mapping_table_offset = 0;
- vmap_table_offset = 0;
- gc_map_offset = 0;
- }
-
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- // Unchecked as we hold mutator_lock_ on entry.
- ScopedObjectAccessUnchecked soa(Thread::Current());
- SirtRef<mirror::DexCache> dex_cache(soa.Self(), linker->FindDexCache(dex_file));
- SirtRef<mirror::ClassLoader> class_loader(soa.Self(), nullptr);
- mirror::ArtMethod* method = linker->ResolveMethod(dex_file, method_idx, dex_cache,
- class_loader, nullptr, invoke_type);
- CHECK(method != NULL);
- method->SetFrameSizeInBytes(frame_size_in_bytes);
- method->SetCoreSpillMask(core_spill_mask);
- method->SetFpSpillMask(fp_spill_mask);
- method->SetOatMappingTableOffset(mapping_table_offset);
- // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
- method->SetQuickOatCodeOffset(quick_code_offset);
- method->SetOatVmapTableOffset(vmap_table_offset);
- method->SetOatNativeGcMapOffset(gc_map_offset);
+ VISIT(InitImageMethodVisitor);
}
+ #undef VISIT
+
return offset;
}
-#define DCHECK_OFFSET() \
- DCHECK_EQ(static_cast<off_t>(file_offset + relative_offset), out->Seek(0, kSeekCurrent)) \
- << "file_offset=" << file_offset << " relative_offset=" << relative_offset
-
-#define DCHECK_OFFSET_() \
- DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
- << "file_offset=" << file_offset << " offset_=" << offset_
-
bool OatWriter::Write(OutputStream* out) {
const size_t file_offset = out->Seek(0, kSeekCurrent);
@@ -574,7 +893,14 @@ bool OatWriter::Write(OutputStream* out) {
return false;
}
- size_t relative_offset = WriteCode(out, file_offset);
+ size_t relative_offset = out->Seek(0, kSeekCurrent) - file_offset;
+ relative_offset = WriteMaps(out, file_offset, relative_offset);
+ if (relative_offset == 0) {
+ LOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
+ return false;
+ }
+
+ relative_offset = WriteCode(out, file_offset, relative_offset);
if (relative_offset == 0) {
LOG(ERROR) << "Failed to write oat code to " << out->GetLocation();
return false;
@@ -608,7 +934,7 @@ bool OatWriter::Write(OutputStream* out) {
DO_STAT(size_quick_resolution_trampoline_);
DO_STAT(size_quick_to_interpreter_bridge_);
DO_STAT(size_trampoline_alignment_);
- DO_STAT(size_code_size_);
+ DO_STAT(size_method_header_);
DO_STAT(size_code_);
DO_STAT(size_code_alignment_);
DO_STAT(size_mapping_table_);
@@ -669,9 +995,37 @@ bool OatWriter::WriteTables(OutputStream* out, const size_t file_offset) {
return true;
}
-size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset) {
- size_t relative_offset = oat_header_->GetExecutableOffset();
+size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) {
+ #define VISIT(VisitorType) \
+ do { \
+ VisitorType visitor(this, out, file_offset, relative_offset); \
+ if (UNLIKELY(!VisitDexMethods(&visitor))) { \
+ return 0; \
+ } \
+ relative_offset = visitor.GetOffset(); \
+ } while (false)
+
+ size_t gc_maps_offset = relative_offset;
+ VISIT(WriteMapMethodVisitor<GcMapDataAccess>);
+ size_gc_map_ = relative_offset - gc_maps_offset;
+
+ size_t mapping_tables_offset = relative_offset;
+ VISIT(WriteMapMethodVisitor<MappingTableDataAccess>);
+ size_mapping_table_ = relative_offset - mapping_tables_offset;
+
+ size_t vmap_tables_offset = relative_offset;
+ VISIT(WriteMapMethodVisitor<VmapTableDataAccess>);
+ size_vmap_table_ = relative_offset - vmap_tables_offset;
+
+ #undef VISIT
+
+ return relative_offset;
+}
+
+size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset) {
off_t new_offset = out->Seek(size_executable_offset_alignment_, kSeekCurrent);
+ relative_offset += size_executable_offset_alignment_;
+ DCHECK_EQ(relative_offset, oat_header_->GetExecutableOffset());
size_t expected_file_offset = file_offset + relative_offset;
if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
@@ -715,218 +1069,18 @@ size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset) {
size_t OatWriter::WriteCodeDexFiles(OutputStream* out,
const size_t file_offset,
size_t relative_offset) {
- size_t oat_class_index = 0;
- for (size_t i = 0; i != oat_dex_files_.size(); ++i) {
- const DexFile* dex_file = (*dex_files_)[i];
- CHECK(dex_file != NULL);
- relative_offset = WriteCodeDexFile(out, file_offset, relative_offset, &oat_class_index,
- *dex_file);
- if (relative_offset == 0) {
- return 0;
- }
- }
- return relative_offset;
-}
-
-size_t OatWriter::WriteCodeDexFile(OutputStream* out, const size_t file_offset,
- size_t relative_offset, size_t* oat_class_index,
- const DexFile& dex_file) {
- for (size_t class_def_index = 0; class_def_index < dex_file.NumClassDefs();
- class_def_index++, (*oat_class_index)++) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- relative_offset = WriteCodeClassDef(out, file_offset, relative_offset, *oat_class_index,
- dex_file, class_def);
- if (relative_offset == 0) {
- return 0;
- }
- }
- return relative_offset;
-}
-
-void OatWriter::ReportWriteFailure(const char* what, uint32_t method_idx,
- const DexFile& dex_file, const OutputStream& out) const {
- PLOG(ERROR) << "Failed to write " << what << " for " << PrettyMethod(method_idx, dex_file)
- << " to " << out.GetLocation();
-}
+ #define VISIT(VisitorType) \
+ do { \
+ VisitorType visitor(this, out, file_offset, relative_offset); \
+ if (UNLIKELY(!VisitDexMethods(&visitor))) { \
+ return 0; \
+ } \
+ relative_offset = visitor.GetOffset(); \
+ } while (false)
-size_t OatWriter::WriteCodeClassDef(OutputStream* out,
- const size_t file_offset,
- size_t relative_offset,
- size_t oat_class_index,
- const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
- const byte* class_data = dex_file.GetClassData(class_def);
- if (class_data == NULL) {
- // ie. an empty class such as a marker interface
- return relative_offset;
- }
- ClassDataItemIterator it(dex_file, class_data);
- // Skip fields
- while (it.HasNextStaticField()) {
- it.Next();
- }
- while (it.HasNextInstanceField()) {
- it.Next();
- }
- // Process methods
- size_t class_def_method_index = 0;
- size_t method_offsets_index = 0;
- while (it.HasNextDirectMethod()) {
- bool is_static = (it.GetMemberAccessFlags() & kAccStatic) != 0;
- relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index,
- class_def_method_index, &method_offsets_index, is_static,
- it.GetMemberIndex(), dex_file);
- if (relative_offset == 0) {
- return 0;
- }
- class_def_method_index++;
- it.Next();
- }
- while (it.HasNextVirtualMethod()) {
- relative_offset = WriteCodeMethod(out, file_offset, relative_offset, oat_class_index,
- class_def_method_index, &method_offsets_index, false,
- it.GetMemberIndex(), dex_file);
- if (relative_offset == 0) {
- return 0;
- }
- class_def_method_index++;
- it.Next();
- }
- DCHECK(!it.HasNext());
- CHECK_LE(method_offsets_index, class_def_method_index);
- return relative_offset;
-}
+ VISIT(WriteCodeMethodVisitor);
-size_t OatWriter::WriteCodeMethod(OutputStream* out, const size_t file_offset,
- size_t relative_offset, size_t oat_class_index,
- size_t class_def_method_index, size_t* method_offsets_index,
- bool is_static, uint32_t method_idx, const DexFile& dex_file) {
- OatClass* oat_class = oat_classes_[oat_class_index];
- const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
-
- if (compiled_method != NULL) { // ie. not an abstract method
- const OatMethodOffsets method_offsets = oat_class->method_offsets_[*method_offsets_index];
- (*method_offsets_index)++;
- const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- if (quick_code != nullptr) {
- CHECK(compiled_method->GetPortableCode() == nullptr);
- uint32_t aligned_offset = compiled_method->AlignCode(relative_offset);
- uint32_t aligned_code_delta = aligned_offset - relative_offset;
- if (aligned_code_delta != 0) {
- off_t new_offset = out->Seek(aligned_code_delta, kSeekCurrent);
- size_code_alignment_ += aligned_code_delta;
- uint32_t expected_offset = file_offset + aligned_offset;
- if (static_cast<uint32_t>(new_offset) != expected_offset) {
- PLOG(ERROR) << "Failed to seek to align oat code. Actual: " << new_offset
- << " Expected: " << expected_offset << " File: " << out->GetLocation();
- return 0;
- }
- relative_offset += aligned_code_delta;
- DCHECK_OFFSET();
- }
- DCHECK_ALIGNED_PARAM(relative_offset,
- GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
-
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
- CHECK_NE(code_size, 0U);
-
- // Deduplicate code arrays
- size_t code_offset = relative_offset + sizeof(code_size) + compiled_method->CodeDelta();
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator code_iter =
- code_offsets_.find(quick_code);
- if (code_iter != code_offsets_.end() && code_offset != method_offsets.code_offset_) {
- DCHECK(code_iter->second == method_offsets.code_offset_)
- << PrettyMethod(method_idx, dex_file);
- } else {
- DCHECK(code_offset == method_offsets.code_offset_) << PrettyMethod(method_idx, dex_file);
- if (!out->WriteFully(&code_size, sizeof(code_size))) {
- ReportWriteFailure("method code size", method_idx, dex_file, *out);
- return 0;
- }
- size_code_size_ += sizeof(code_size);
- relative_offset += sizeof(code_size);
- DCHECK_OFFSET();
- if (!out->WriteFully(&(*quick_code)[0], code_size)) {
- ReportWriteFailure("method code", method_idx, dex_file, *out);
- return 0;
- }
- size_code_ += code_size;
- relative_offset += code_size;
- }
- DCHECK_OFFSET();
- }
- const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
- size_t mapping_table_size = mapping_table.size() * sizeof(mapping_table[0]);
-
- // Deduplicate mapping tables
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator mapping_iter =
- mapping_table_offsets_.find(&mapping_table);
- if (mapping_iter != mapping_table_offsets_.end() &&
- relative_offset != method_offsets.mapping_table_offset_) {
- DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0)
- || mapping_iter->second == method_offsets.mapping_table_offset_)
- << PrettyMethod(method_idx, dex_file);
- } else {
- DCHECK((mapping_table_size == 0 && method_offsets.mapping_table_offset_ == 0)
- || relative_offset == method_offsets.mapping_table_offset_)
- << PrettyMethod(method_idx, dex_file);
- if (!out->WriteFully(&mapping_table[0], mapping_table_size)) {
- ReportWriteFailure("mapping table", method_idx, dex_file, *out);
- return 0;
- }
- size_mapping_table_ += mapping_table_size;
- relative_offset += mapping_table_size;
- }
- DCHECK_OFFSET();
-
- const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
- size_t vmap_table_size = vmap_table.size() * sizeof(vmap_table[0]);
-
- // Deduplicate vmap tables
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator vmap_iter =
- vmap_table_offsets_.find(&vmap_table);
- if (vmap_iter != vmap_table_offsets_.end() &&
- relative_offset != method_offsets.vmap_table_offset_) {
- DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0)
- || vmap_iter->second == method_offsets.vmap_table_offset_)
- << PrettyMethod(method_idx, dex_file);
- } else {
- DCHECK((vmap_table_size == 0 && method_offsets.vmap_table_offset_ == 0)
- || relative_offset == method_offsets.vmap_table_offset_)
- << PrettyMethod(method_idx, dex_file);
- if (!out->WriteFully(&vmap_table[0], vmap_table_size)) {
- ReportWriteFailure("vmap table", method_idx, dex_file, *out);
- return 0;
- }
- size_vmap_table_ += vmap_table_size;
- relative_offset += vmap_table_size;
- }
- DCHECK_OFFSET();
-
- const std::vector<uint8_t>& gc_map = compiled_method->GetGcMap();
- size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
-
- // Deduplicate GC maps
- SafeMap<const std::vector<uint8_t>*, uint32_t>::iterator gc_map_iter =
- gc_map_offsets_.find(&gc_map);
- if (gc_map_iter != gc_map_offsets_.end() &&
- relative_offset != method_offsets.gc_map_offset_) {
- DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0)
- || gc_map_iter->second == method_offsets.gc_map_offset_)
- << PrettyMethod(method_idx, dex_file);
- } else {
- DCHECK((gc_map_size == 0 && method_offsets.gc_map_offset_ == 0)
- || relative_offset == method_offsets.gc_map_offset_)
- << PrettyMethod(method_idx, dex_file);
- if (!out->WriteFully(&gc_map[0], gc_map_size)) {
- ReportWriteFailure("GC map", method_idx, dex_file, *out);
- return 0;
- }
- size_gc_map_ += gc_map_size;
- relative_offset += gc_map_size;
- }
- DCHECK_OFFSET();
- }
+ #undef VISIT
return relative_offset;
}
@@ -993,15 +1147,14 @@ bool OatWriter::OatDexFile::Write(OatWriter* oat_writer,
}
OatWriter::OatClass::OatClass(size_t offset,
- std::vector<CompiledMethod*>* compiled_methods,
+ const std::vector<CompiledMethod*>& compiled_methods,
uint32_t num_non_null_compiled_methods,
- mirror::Class::Status status) {
- CHECK(compiled_methods != NULL);
- uint32_t num_methods = compiled_methods->size();
+ mirror::Class::Status status)
+ : compiled_methods_(compiled_methods) {
+ uint32_t num_methods = compiled_methods.size();
CHECK_LE(num_non_null_compiled_methods, num_methods);
offset_ = offset;
- compiled_methods_ = compiled_methods;
oat_method_offsets_offsets_from_oat_class_.resize(num_methods);
// Since both kOatClassNoneCompiled and kOatClassAllCompiled could
@@ -1020,6 +1173,7 @@ OatWriter::OatClass::OatClass(size_t offset,
status_ = status;
method_offsets_.resize(num_non_null_compiled_methods);
+ method_headers_.resize(num_non_null_compiled_methods);
uint32_t oat_method_offsets_offset_from_oat_class = sizeof(type_) + sizeof(status_);
if (type_ == kOatClassSomeCompiled) {
@@ -1033,7 +1187,7 @@ OatWriter::OatClass::OatClass(size_t offset,
}
for (size_t i = 0; i < num_methods; i++) {
- CompiledMethod* compiled_method = (*compiled_methods_)[i];
+ CompiledMethod* compiled_method = compiled_methods_[i];
if (compiled_method == NULL) {
oat_method_offsets_offsets_from_oat_class_[i] = 0;
} else {
@@ -1048,7 +1202,6 @@ OatWriter::OatClass::OatClass(size_t offset,
OatWriter::OatClass::~OatClass() {
delete method_bitmap_;
- delete compiled_methods_;
}
size_t OatWriter::OatClass::GetOatMethodOffsetsOffsetFromOatHeader(
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index bab1a26d44..7cdd5329bd 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -50,16 +50,30 @@ class OutputStream;
// ...
// OatClass[C]
//
+// GcMap one variable-sized blob with a GC map.
+// GcMap GC maps are deduplicated.
+// ...
+// GcMap
+//
+// VmapTable one variable-sized VmapTable blob (quick compiler only).
+// VmapTable VmapTables are deduplicated.
+// ...
+// VmapTable
+//
+// MappingTable one variable-sized blob with a MappingTable (quick compiler only).
+// MappingTable MappingTables are deduplicated.
+// ...
+// MappingTable
+//
// padding if necessary so that the following code will be page aligned
//
-// CompiledMethod one variable sized blob with the contents of each CompiledMethod
-// CompiledMethod
-// CompiledMethod
-// CompiledMethod
-// CompiledMethod
-// CompiledMethod
+// OatMethodHeader fixed-size header for a CompiledMethod, including the size of the MethodCode.
+// MethodCode one variable-sized blob with the code of a CompiledMethod.
+// OatMethodHeader (OatMethodHeader, MethodCode) pairs are deduplicated.
+// MethodCode
// ...
-// CompiledMethod
+// OatMethodHeader
+// MethodCode
//
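For orientation, a sketch of the OatMethodHeader this layout refers to, reconstructed from the three fields the writer touches in this patch; the real definition lives in the runtime, and its exact field order and contents are assumptions here:

    class OatMethodHeader {
     public:
      uint32_t mapping_table_offset_;  // fixed up to be relative to the code start
      uint32_t vmap_table_offset_;     // fixed up to be relative to the code start
      uint32_t code_size_;             // size of the MethodCode that follows
    };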
class OatWriter {
public:
@@ -96,43 +110,47 @@ class OatWriter {
}
private:
+ // The DataAccess classes are helper classes that provide access to members related to
+ // a given map, i.e. the GC map, mapping table or vmap table. By abstracting these
+ // away, we can share a lot of the map-processing code via the template classes below.
+ struct GcMapDataAccess;
+ struct MappingTableDataAccess;
+ struct VmapTableDataAccess;
+
+ // The function VisitDexMethods() below iterates through all the methods in all
+ // the compiled dex files in the order of their definitions. The method visitor
+ // classes provide the per-method processing for each pass: first we collect the
+ // data we want to write to the oat file and then, in later passes, we actually
+ // write it.
+ class DexMethodVisitor;
+ class OatDexMethodVisitor;
+ class InitOatClassesMethodVisitor;
+ class InitCodeMethodVisitor;
+ template <typename DataAccess>
+ class InitMapMethodVisitor;
+ class InitImageMethodVisitor;
+ class WriteCodeMethodVisitor;
+ template <typename DataAccess>
+ class WriteMapMethodVisitor;
+
+ // Visit all the methods in all the compiled dex files in their definition order
+ // with a given DexMethodVisitor.
+ bool VisitDexMethods(DexMethodVisitor* visitor);
+
size_t InitOatHeader();
size_t InitOatDexFiles(size_t offset);
size_t InitDexFiles(size_t offset);
size_t InitOatClasses(size_t offset);
+ size_t InitOatMaps(size_t offset);
size_t InitOatCode(size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t InitOatCodeDexFiles(size_t offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t InitOatCodeDexFile(size_t offset,
- size_t* oat_class_index,
- const DexFile& dex_file)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t InitOatCodeClassDef(size_t offset,
- size_t oat_class_index, size_t class_def_index,
- const DexFile& dex_file,
- const DexFile::ClassDef& class_def)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- size_t InitOatCodeMethod(size_t offset, size_t oat_class_index, size_t class_def_index,
- size_t class_def_method_index, size_t* method_offsets_index,
- bool is_native, InvokeType type, uint32_t method_idx, const DexFile&)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool WriteTables(OutputStream* out, const size_t file_offset);
- size_t WriteCode(OutputStream* out, const size_t file_offset);
+ size_t WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset);
+ size_t WriteCode(OutputStream* out, const size_t file_offset, size_t relative_offset);
size_t WriteCodeDexFiles(OutputStream* out, const size_t file_offset, size_t relative_offset);
- size_t WriteCodeDexFile(OutputStream* out, const size_t file_offset, size_t relative_offset,
- size_t* oat_class_index, const DexFile& dex_file);
- size_t WriteCodeClassDef(OutputStream* out, const size_t file_offset, size_t relative_offset,
- size_t oat_class_index, const DexFile& dex_file,
- const DexFile::ClassDef& class_def);
- size_t WriteCodeMethod(OutputStream* out, const size_t file_offset, size_t relative_offset,
- size_t oat_class_index, size_t class_def_method_index,
- size_t* method_offsets_index, bool is_static, uint32_t method_idx,
- const DexFile& dex_file);
-
- void ReportWriteFailure(const char* what, uint32_t method_idx, const DexFile& dex_file,
- const OutputStream& out) const;
class OatDexFile {
public:
@@ -159,7 +177,7 @@ class OatWriter {
class OatClass {
public:
explicit OatClass(size_t offset,
- std::vector<CompiledMethod*>* compiled_methods,
+ const std::vector<CompiledMethod*>& compiled_methods,
uint32_t num_non_null_compiled_methods,
mirror::Class::Status status);
~OatClass();
@@ -170,8 +188,8 @@ class OatWriter {
bool Write(OatWriter* oat_writer, OutputStream* out, const size_t file_offset) const;
CompiledMethod* GetCompiledMethod(size_t class_def_method_index) const {
- DCHECK(compiled_methods_ != NULL);
- return (*compiled_methods_)[class_def_method_index];
+ DCHECK_LT(class_def_method_index, compiled_methods_.size());
+ return compiled_methods_[class_def_method_index];
}
// Offset of start of OatClass from beginning of OatHeader. It is
@@ -182,7 +200,7 @@ class OatWriter {
size_t offset_;
// CompiledMethods for each class_def_method_index, or NULL if no method is available.
- std::vector<CompiledMethod*>* compiled_methods_;
+ std::vector<CompiledMethod*> compiled_methods_;
// Offset from OatClass::offset_ to the OatMethodOffsets for the
// class_def_method_index. If 0, it means the corresponding
@@ -207,12 +225,13 @@ class OatWriter {
// not is kOatClassBitmap, the bitmap will be NULL.
BitVector* method_bitmap_;
- // OatMethodOffsets for each CompiledMethod present in the
- // OatClass. Note that some may be missing if
+ // OatMethodOffsets and OatMethodHeaders for each CompiledMethod
+ // present in the OatClass. Note that some may be missing if
// OatClass::compiled_methods_ contains NULL values (and
// oat_method_offsets_offsets_from_oat_class_ should contain 0
// values in this case).
std::vector<OatMethodOffsets> method_offsets_;
+ std::vector<OatMethodHeader> method_headers_;
private:
DISALLOW_COPY_AND_ASSIGN(OatClass);
@@ -265,7 +284,7 @@ class OatWriter {
uint32_t size_quick_resolution_trampoline_;
uint32_t size_quick_to_interpreter_bridge_;
uint32_t size_trampoline_alignment_;
- uint32_t size_code_size_;
+ uint32_t size_method_header_;
uint32_t size_code_;
uint32_t size_code_alignment_;
uint32_t size_mapping_table_;
@@ -281,12 +300,21 @@ class OatWriter {
uint32_t size_oat_class_method_bitmaps_;
uint32_t size_oat_class_method_offsets_;
- // Code mappings for deduplication. Deduplication is already done on a pointer basis by the
- // compiler driver, so we can simply compare the pointers to find out if things are duplicated.
- SafeMap<const std::vector<uint8_t>*, uint32_t> code_offsets_;
- SafeMap<const std::vector<uint8_t>*, uint32_t> vmap_table_offsets_;
- SafeMap<const std::vector<uint8_t>*, uint32_t> mapping_table_offsets_;
- SafeMap<const std::vector<uint8_t>*, uint32_t> gc_map_offsets_;
+ struct CodeOffsetsKeyComparator {
+ bool operator()(const CompiledMethod* lhs, const CompiledMethod* rhs) const {
+ if (lhs->GetQuickCode() != rhs->GetQuickCode()) {
+ return lhs->GetQuickCode() < rhs->GetQuickCode();
+ }
+ // If the code is the same, all other fields are likely to be the same as well.
+ if (UNLIKELY(&lhs->GetMappingTable() != &rhs->GetMappingTable())) {
+ return &lhs->GetMappingTable() < &rhs->GetMappingTable();
+ }
+ if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
+ return &lhs->GetVmapTable() < &rhs->GetVmapTable();
+ }
+ return false;
+ }
+ };
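Since SafeMap considers two keys equal when the comparator orders neither before the other, any two CompiledMethods whose quick code, mapping table and vmap table pointers all match collapse into a single entry. A hypothetical use:

    SafeMap<const CompiledMethod*, uint32_t, CodeOffsetsKeyComparator> dedupe;
    dedupe.Put(method_a, 0x1000u);  // first occurrence pays for header and code
    // If method_b shares method_a's deduplicated code and table arrays, this
    // lookup finds the existing offset instead of reserving space again.
    auto it = dedupe.find(method_b);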
DISALLOW_COPY_AND_ASSIGN(OatWriter);
};
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 7e63c69f5c..ff316e5b04 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -47,7 +47,7 @@ void CodeGenerator::CompileBlock(HBasicBlock* block) {
Bind(GetLabelOf(block));
HGraphVisitor* location_builder = GetLocationBuilder();
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
current->Accept(location_builder);
InitLocations(current);
@@ -55,9 +55,105 @@ void CodeGenerator::CompileBlock(HBasicBlock* block) {
}
}
+size_t CodeGenerator::AllocateFreeRegisterInternal(
+ bool* blocked_registers, size_t number_of_registers) const {
+ for (size_t regno = 0; regno < number_of_registers; regno++) {
+ if (!blocked_registers[regno]) {
+ blocked_registers[regno] = true;
+ return regno;
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ return -1;
+}
+
+
+void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations == nullptr) return;
+
+ for (size_t i = 0, e = GetNumberOfRegisters(); i < e; ++i) {
+ blocked_registers_[i] = false;
+ }
+
+ // Mark all fixed input, temp and output registers as used.
+ for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
+ Location loc = locations->InAt(i);
+ if (loc.IsRegister()) {
+ // Check that a register is not specified twice in the summary.
+ DCHECK(!blocked_registers_[loc.GetEncoding()]);
+ blocked_registers_[loc.GetEncoding()] = true;
+ }
+ }
+
+ for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
+ Location loc = locations->GetTemp(i);
+ if (loc.IsRegister()) {
+ // Check that a register is not specified twice in the summary.
+ DCHECK(!blocked_registers_[loc.GetEncoding()]);
+ blocked_registers_[loc.GetEncoding()] = true;
+ }
+ }
+
+ SetupBlockedRegisters(blocked_registers_);
+
+ // Allocate all unallocated input locations.
+ for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
+ Location loc = locations->InAt(i);
+ HInstruction* input = instruction->InputAt(i);
+ if (loc.IsUnallocated()) {
+ if (loc.GetPolicy() == Location::kRequiresRegister) {
+ loc = Location::RegisterLocation(
+ AllocateFreeRegister(input->GetType(), blocked_registers_));
+ } else {
+ DCHECK_EQ(loc.GetPolicy(), Location::kAny);
+ HLoadLocal* load = input->AsLoadLocal();
+ if (load != nullptr) {
+ loc = GetStackLocation(load);
+ } else {
+ loc = Location::RegisterLocation(
+ AllocateFreeRegister(input->GetType(), blocked_registers_));
+ }
+ }
+ locations->SetInAt(i, loc);
+ }
+ }
+
+ // Allocate all unallocated temp locations.
+ for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
+ Location loc = locations->GetTemp(i);
+ if (loc.IsUnallocated()) {
+ DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
+ // TODO: Adjust handling of temps. We currently consider temps to use
+ // core registers. They may also use floating point registers at some point.
+ loc = Location::RegisterLocation(static_cast<ManagedRegister>(
+ AllocateFreeRegister(Primitive::kPrimInt, blocked_registers_)));
+ locations->SetTempAt(i, loc);
+ }
+ }
+
+ Location result_location = locations->Out();
+ if (result_location.IsUnallocated()) {
+ switch (result_location.GetPolicy()) {
+ case Location::kAny:
+ case Location::kRequiresRegister:
+ result_location = Location::RegisterLocation(
+ AllocateFreeRegister(instruction->GetType(), blocked_registers_));
+ break;
+ case Location::kSameAsFirstInput:
+ result_location = locations->InAt(0);
+ break;
+ }
+ locations->SetOut(result_location);
+ }
+}
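As a concrete trace, suppose a binary operation's location builder (not shown in this hunk) requested in0: kRequiresRegister, in1: kAny and out: kSameAsFirstInput; with no fixed registers blocked, one possible x86 resolution would be (register names hypothetical):

    //   in0 -> Location::RegisterLocation(EAX)   // first unblocked register
    //   in1 -> stack slot of its HLoadLocal, or Location::RegisterLocation(ECX)
    //   out -> same location as in0 (EAX), per kSameAsFirstInput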
+
void CodeGenerator::InitLocations(HInstruction* instruction) {
- if (instruction->GetLocations() == nullptr) return;
- for (int i = 0; i < instruction->InputCount(); i++) {
+ if (instruction->GetLocations() == nullptr) {
+ return;
+ }
+ AllocateRegistersLocally(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
Location location = instruction->GetLocations()->InAt(i);
if (location.IsValid()) {
// Move the input to the desired location.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5c7cac1e5c..74cbccc4b8 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -62,6 +62,12 @@ class Location : public ValueObject {
// bits are in a stack slot. The kQuickParameter kind is for
// handling this special case.
kQuickParameter = 4,
+
+ // An unallocated location represents a location that is not fixed and can be
+ // allocated by a register allocator. Each unallocated location has a policy
+ // that specifies what kind of location is suitable. The payload contains the
+ // register allocation policy.
+ kUnallocated = 5,
};
Location() : value_(kInvalid) {
@@ -166,10 +172,50 @@ class Location : public ValueObject {
case kStackSlot: return "S";
case kDoubleStackSlot: return "DS";
case kQuickParameter: return "Q";
+ case kUnallocated: return "U";
}
return "?";
}
+ // Unallocated locations.
+ enum Policy {
+ kAny,
+ kRequiresRegister,
+ kSameAsFirstInput,
+ };
+
+ bool IsUnallocated() const {
+ return GetKind() == kUnallocated;
+ }
+
+ static Location UnallocatedLocation(Policy policy) {
+ return Location(kUnallocated, PolicyField::Encode(policy));
+ }
+
+ // Any free register is suitable to replace this unallocated location.
+ static Location Any() {
+ return UnallocatedLocation(kAny);
+ }
+
+ static Location RequiresRegister() {
+ return UnallocatedLocation(kRequiresRegister);
+ }
+
+ // The location of the first input to the instruction will be
+ // used to replace this unallocated location.
+ static Location SameAsFirstInput() {
+ return UnallocatedLocation(kSameAsFirstInput);
+ }
+
+ Policy GetPolicy() const {
+ DCHECK(IsUnallocated());
+ return PolicyField::Decode(GetPayload());
+ }
+
+ uword GetEncoding() const {
+ return GetPayload();
+ }
+
private:
// Number of bits required to encode Kind value.
static constexpr uint32_t kBitsForKind = 4;
@@ -187,6 +233,9 @@ class Location : public ValueObject {
typedef BitField<Kind, 0, kBitsForKind> KindField;
typedef BitField<uword, kBitsForKind, kBitsForPayload> PayloadField;
+ // Layout for kUnallocated locations payload.
+ typedef BitField<Policy, 0, 3> PolicyField;
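+ // For example, Location::RequiresRegister() stores kUnallocated in the kind
+ // bits and kRequiresRegister in the low three bits of the payload.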
+
// Layout for stack slots.
static const intptr_t kStackIndexBias =
static_cast<intptr_t>(1) << (kBitsForPayload - 1);
@@ -208,40 +257,52 @@ class Location : public ValueObject {
class LocationSummary : public ArenaObject {
public:
explicit LocationSummary(HInstruction* instruction)
- : inputs(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
- temps(instruction->GetBlock()->GetGraph()->GetArena(), 0) {
- inputs.SetSize(instruction->InputCount());
- for (int i = 0; i < instruction->InputCount(); i++) {
- inputs.Put(i, Location());
+ : inputs_(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
+ temps_(instruction->GetBlock()->GetGraph()->GetArena(), 0) {
+ inputs_.SetSize(instruction->InputCount());
+ for (size_t i = 0; i < instruction->InputCount(); i++) {
+ inputs_.Put(i, Location());
}
}
void SetInAt(uint32_t at, Location location) {
- inputs.Put(at, location);
+ inputs_.Put(at, location);
}
Location InAt(uint32_t at) const {
- return inputs.Get(at);
+ return inputs_.Get(at);
+ }
+
+ size_t GetInputCount() const {
+ return inputs_.Size();
}
void SetOut(Location location) {
- output = Location(location);
+ output_ = Location(location);
}
void AddTemp(Location location) {
- temps.Add(location);
+ temps_.Add(location);
}
Location GetTemp(uint32_t at) const {
- return temps.Get(at);
+ return temps_.Get(at);
+ }
+
+ void SetTempAt(uint32_t at, Location location) {
+ temps_.Put(at, location);
}
- Location Out() const { return output; }
+ size_t GetTempCount() const {
+ return temps_.Size();
+ }
+
+ Location Out() const { return output_; }
private:
- GrowableArray<Location> inputs;
- GrowableArray<Location> temps;
- Location output;
+ GrowableArray<Location> inputs_;
+ GrowableArray<Location> temps_;
+ Location output_;
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
@@ -286,15 +347,33 @@ class CodeGenerator : public ArenaObject {
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
protected:
- explicit CodeGenerator(HGraph* graph)
+ CodeGenerator(HGraph* graph, size_t number_of_registers)
: frame_size_(0),
graph_(graph),
block_labels_(graph->GetArena(), 0),
- pc_infos_(graph->GetArena(), 32) {
+ pc_infos_(graph->GetArena(), 32),
+ blocked_registers_(static_cast<bool*>(
+ graph->GetArena()->Alloc(number_of_registers * sizeof(bool), kArenaAllocData))) {
block_labels_.SetSize(graph->GetBlocks()->Size());
}
~CodeGenerator() { }
+ // Register allocation logic.
+ void AllocateRegistersLocally(HInstruction* instruction) const;
+
+ // Backend specific implementation for allocating a register.
+ virtual ManagedRegister AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const = 0;
+
+ // Raw implementation of allocating a register: loops over blocked_registers to find
+ // the first available register.
+ size_t AllocateFreeRegisterInternal(bool* blocked_registers, size_t number_of_registers) const;
+
+ virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
+ virtual size_t GetNumberOfRegisters() const = 0;
+
+ virtual Location GetStackLocation(HLoadLocal* load) const = 0;
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
@@ -309,6 +388,9 @@ class CodeGenerator : public ArenaObject {
GrowableArray<Label> block_labels_;
GrowableArray<PcInfo> pc_infos_;
+ // Temporary data structure used when doing register allocation.
+ bool* const blocked_registers_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 27691ac080..a446701b39 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -35,6 +35,81 @@ namespace arm {
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
+CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
+ : CodeGenerator(graph, kNumberOfRegIds),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this) {}
+
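+// In the blocked register array, entries for single registers come first;
+// entries for register pairs start at offset kNumberOfAllocIds.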
+static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
+ return blocked_registers + kNumberOfAllocIds;
+}
+
+ManagedRegister CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const {
+ switch (type) {
+ case Primitive::kPrimLong: {
+ size_t reg = AllocateFreeRegisterInternal(
+ GetBlockedRegisterPairs(blocked_registers), kNumberOfRegisterPairs);
+ ArmManagedRegister pair =
+ ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ blocked_registers[pair.AsRegisterPairLow()] = true;
+ blocked_registers[pair.AsRegisterPairHigh()] = true;
+ return pair;
+ }
+
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ size_t reg = AllocateFreeRegisterInternal(blocked_registers, kNumberOfCoreRegisters);
+ return ArmManagedRegister::FromCoreRegister(static_cast<Register>(reg));
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << type;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ return ManagedRegister::NoRegister();
+}
+
+void CodeGeneratorARM::SetupBlockedRegisters(bool* blocked_registers) const {
+ bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
+
+ // Don't allocate the register pair used for Dalvik-style argument passing.
+ blocked_register_pairs[R1_R2] = true;
+
+ // Stack register, LR and PC are always reserved.
+ blocked_registers[SP] = true;
+ blocked_registers[LR] = true;
+ blocked_registers[PC] = true;
+
+ // Reserve R4 for suspend check.
+ blocked_registers[R4] = true;
+ blocked_register_pairs[R4_R5] = true;
+
+ // Reserve thread register.
+ blocked_registers[TR] = true;
+
+ // TODO: We currently don't use Quick's callee-saved registers.
+ blocked_registers[R5] = true;
+ blocked_registers[R6] = true;
+ blocked_registers[R7] = true;
+ blocked_registers[R8] = true;
+ blocked_registers[R10] = true;
+ blocked_registers[R11] = true;
+ blocked_register_pairs[R6_R7] = true;
+}
+
+size_t CodeGeneratorARM::GetNumberOfRegisters() const {
+ return kNumberOfRegIds;
+}
+
static Location ArmCoreLocation(Register reg) {
return Location::RegisterLocation(ArmManagedRegister::FromCoreRegister(reg));
}
@@ -85,6 +160,32 @@ int32_t CodeGeneratorARM::GetStackSlot(HLocal* local) const {
}
}
+Location CodeGeneratorARM::GetStackLocation(HLoadLocal* load) const {
+ switch (load->GetType()) {
+ case Primitive::kPrimLong:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented type " << load->GetType();
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << load->GetType();
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location();
+}
+
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
@@ -302,7 +403,7 @@ void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
- locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetInAt(0, Location::RequiresRegister());
if_instr->SetLocations(locations);
}
@@ -317,9 +418,9 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
void LocationsBuilderARM::VisitEqual(HEqual* equal) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(equal);
- locations->SetInAt(0, ArmCoreLocation(R0));
- locations->SetInAt(1, ArmCoreLocation(R1));
- locations->SetOut(ArmCoreLocation(R0));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
equal->SetLocations(locations);
}
@@ -409,7 +510,8 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
break;
case Primitive::kPrimLong:
- locations->SetInAt(0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(
+ 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
break;
default:
@@ -444,10 +546,10 @@ void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
- locations->AddTemp(ArmCoreLocation(R0));
+ locations->AddTemp(Location::RequiresRegister());
InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (int i = 0; i < invoke->InputCount(); i++) {
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -512,19 +614,11 @@ void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
void LocationsBuilderARM::VisitAdd(HAdd* add) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add);
switch (add->GetResultType()) {
- case Primitive::kPrimInt: {
- locations->SetInAt(0, ArmCoreLocation(R0));
- locations->SetInAt(1, ArmCoreLocation(R1));
- locations->SetOut(ArmCoreLocation(R0));
- break;
- }
-
+ case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- locations->SetInAt(
- 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
- locations->SetInAt(
- 1, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R2_R3)));
- locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
break;
}
@@ -574,19 +668,11 @@ void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
void LocationsBuilderARM::VisitSub(HSub* sub) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub);
switch (sub->GetResultType()) {
- case Primitive::kPrimInt: {
- locations->SetInAt(0, ArmCoreLocation(R0));
- locations->SetInAt(1, ArmCoreLocation(R1));
- locations->SetOut(ArmCoreLocation(R0));
- break;
- }
-
+ case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- locations->SetInAt(
- 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
- locations->SetInAt(
- 1, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R2_R3)));
- locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
break;
}
@@ -649,6 +735,9 @@ class InvokeRuntimeCallingConvention : public CallingConvention<Register> {
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(1)));
locations->SetOut(ArmCoreLocation(R0));
instruction->SetLocations(locations);
}
@@ -683,8 +772,8 @@ void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instructi
void LocationsBuilderARM::VisitNot(HNot* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetInAt(0, ArmCoreLocation(R0));
- locations->SetOut(ArmCoreLocation(R0));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
instruction->SetLocations(locations);
}
@@ -694,5 +783,13 @@ void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(1));
}
+void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unimplemented";
+}
+
+void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unimplemented";
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index ed35f94e2b..2405d4b5a6 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -101,10 +101,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
class CodeGeneratorARM : public CodeGenerator {
public:
- explicit CodeGeneratorARM(HGraph* graph)
- : CodeGenerator(graph),
- location_builder_(graph, this),
- instruction_visitor_(graph, this) { }
+ explicit CodeGeneratorARM(HGraph* graph);
virtual ~CodeGeneratorARM() { }
virtual void GenerateFrameEntry() OVERRIDE;
@@ -128,7 +125,13 @@ class CodeGeneratorARM : public CodeGenerator {
return &assembler_;
}
+ virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
+ virtual ManagedRegister AllocateFreeRegister(
+ Primitive::Type type, bool* blocked_registers) const OVERRIDE;
+ virtual size_t GetNumberOfRegisters() const OVERRIDE;
+
int32_t GetStackSlot(HLocal* local) const;
+ virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
private:
// Helper method to move a 32-bit value between two locations.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 114263161d..fbb054ae88 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -35,6 +35,72 @@ namespace x86 {
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
+CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
+ : CodeGenerator(graph, kNumberOfRegIds),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this) {}
+
+static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
+ return blocked_registers + kNumberOfAllocIds;
+}
+
+ManagedRegister CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const {
+ switch (type) {
+ case Primitive::kPrimLong: {
+ size_t reg = AllocateFreeRegisterInternal(
+ GetBlockedRegisterPairs(blocked_registers), kNumberOfRegisterPairs);
+ X86ManagedRegister pair =
+ X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ blocked_registers[pair.AsRegisterPairLow()] = true;
+ blocked_registers[pair.AsRegisterPairHigh()] = true;
+ return pair;
+ }
+
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ size_t reg = AllocateFreeRegisterInternal(blocked_registers, kNumberOfCpuRegisters);
+ return X86ManagedRegister::FromCpuRegister(static_cast<Register>(reg));
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << type;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ return ManagedRegister::NoRegister();
+}
+
+void CodeGeneratorX86::SetupBlockedRegisters(bool* blocked_registers) const {
+ bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
+
+ // Don't allocate the register pair used for Dalvik-style argument passing.
+ blocked_register_pairs[ECX_EDX] = true;
+
+ // Stack register is always reserved.
+ blocked_registers[ESP] = true;
+
+ // TODO: We currently don't use Quick's callee-saved registers.
+ blocked_registers[EBP] = true;
+ blocked_registers[ESI] = true;
+ blocked_registers[EDI] = true;
+ blocked_register_pairs[EAX_EDI] = true;
+ blocked_register_pairs[EDX_EDI] = true;
+ blocked_register_pairs[ECX_EDI] = true;
+ blocked_register_pairs[EBX_EDI] = true;
+}
+
+size_t CodeGeneratorX86::GetNumberOfRegisters() const {
+ return kNumberOfRegIds;
+}
+
static Location X86CpuLocation(Register reg) {
return Location::RegisterLocation(X86ManagedRegister::FromCpuRegister(reg));
}
@@ -90,6 +156,33 @@ int32_t CodeGeneratorX86::GetStackSlot(HLocal* local) const {
}
}
+
+Location CodeGeneratorX86::GetStackLocation(HLoadLocal* load) const {
+ switch (load->GetType()) {
+ case Primitive::kPrimLong:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented type " << load->GetType();
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << load->GetType();
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location();
+}
+
static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -311,13 +404,18 @@ void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
- locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetInAt(0, Location::Any());
if_instr->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
// TODO: Generate the input as a condition, instead of materializing it in a register.
- __ cmpl(if_instr->GetLocations()->InAt(0).AsX86().AsCpuRegister(), Immediate(0));
+ Location location = if_instr->GetLocations()->InAt(0);
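+ // The input was allocated with a kAny policy, so it may be either in a
+ // register or in its stack slot.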
+ if (location.IsRegister()) {
+ __ cmpl(location.AsX86().AsCpuRegister(), Immediate(0));
+ } else {
+ __ cmpl(Address(ESP, location.GetStackIndex()), Immediate(0));
+ }
__ j(kEqual, codegen_->GetLabelOf(if_instr->IfFalseSuccessor()));
if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfTrueSuccessor())) {
__ jmp(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -367,16 +465,22 @@ void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
void LocationsBuilderX86::VisitEqual(HEqual* equal) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(equal);
- locations->SetInAt(0, X86CpuLocation(EAX));
- locations->SetInAt(1, X86CpuLocation(ECX));
- locations->SetOut(X86CpuLocation(EAX));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
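+ // Note: setb requires a byte register. With EBP, ESI and EDI blocked for
+ // allocation, every allocatable core register has a byte form.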
equal->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitEqual(HEqual* equal) {
- __ cmpl(equal->GetLocations()->InAt(0).AsX86().AsCpuRegister(),
- equal->GetLocations()->InAt(1).AsX86().AsCpuRegister());
- __ setb(kEqual, equal->GetLocations()->Out().AsX86().AsCpuRegister());
+ LocationSummary* locations = equal->GetLocations();
+ if (locations->InAt(1).IsRegister()) {
+ __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->InAt(1).AsX86().AsCpuRegister());
+ } else {
+ __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
+ Address(ESP, locations->InAt(1).GetStackIndex()));
+ }
+ __ setb(kEqual, locations->Out().AsX86().AsCpuRegister());
}
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
@@ -453,10 +557,10 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
- locations->AddTemp(X86CpuLocation(EAX));
+ locations->AddTemp(Location::RequiresRegister());
InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (int i = 0; i < invoke->InputCount(); i++) {
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -514,18 +618,11 @@ void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
void LocationsBuilderX86::VisitAdd(HAdd* add) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(add);
switch (add->GetResultType()) {
- case Primitive::kPrimInt: {
- locations->SetInAt(0, X86CpuLocation(EAX));
- locations->SetInAt(1, X86CpuLocation(ECX));
- locations->SetOut(X86CpuLocation(EAX));
- break;
- }
+ case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- locations->SetInAt(
- 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
- locations->SetInAt(
- 1, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(ECX_EBX)));
- locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -548,18 +645,30 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
case Primitive::kPrimInt: {
DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(),
locations->Out().AsX86().AsCpuRegister());
- __ addl(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->InAt(1).AsX86().AsCpuRegister());
+ if (locations->InAt(1).IsRegister()) {
+ __ addl(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->InAt(1).AsX86().AsCpuRegister());
+ } else {
+ __ addl(locations->InAt(0).AsX86().AsCpuRegister(),
+ Address(ESP, locations->InAt(1).GetStackIndex()));
+ }
break;
}
case Primitive::kPrimLong: {
DCHECK_EQ(locations->InAt(0).AsX86().AsRegisterPair(),
locations->Out().AsX86().AsRegisterPair());
- __ addl(locations->InAt(0).AsX86().AsRegisterPairLow(),
- locations->InAt(1).AsX86().AsRegisterPairLow());
- __ adcl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
- locations->InAt(1).AsX86().AsRegisterPairHigh());
+ if (locations->InAt(1).IsRegister()) {
+ __ addl(locations->InAt(0).AsX86().AsRegisterPairLow(),
+ locations->InAt(1).AsX86().AsRegisterPairLow());
+ __ adcl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
+ locations->InAt(1).AsX86().AsRegisterPairHigh());
+ } else {
+ __ addl(locations->InAt(0).AsX86().AsRegisterPairLow(),
+ Address(ESP, locations->InAt(1).GetStackIndex()));
+ __ adcl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
+ Address(ESP, locations->InAt(1).GetHighStackIndex(kX86WordSize)));
+ }
break;
}
@@ -578,19 +687,11 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
void LocationsBuilderX86::VisitSub(HSub* sub) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(sub);
switch (sub->GetResultType()) {
- case Primitive::kPrimInt: {
- locations->SetInAt(0, X86CpuLocation(EAX));
- locations->SetInAt(1, X86CpuLocation(ECX));
- locations->SetOut(X86CpuLocation(EAX));
- break;
- }
-
+ case Primitive::kPrimInt:
case Primitive::kPrimLong: {
- locations->SetInAt(
- 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
- locations->SetInAt(
- 1, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(ECX_EBX)));
- locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
break;
}
@@ -613,18 +714,30 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
case Primitive::kPrimInt: {
DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(),
locations->Out().AsX86().AsCpuRegister());
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->InAt(1).AsX86().AsCpuRegister());
+ if (locations->InAt(1).IsRegister()) {
+ __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
+ locations->InAt(1).AsX86().AsCpuRegister());
+ } else {
+ __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
+ Address(ESP, locations->InAt(1).GetStackIndex()));
+ }
break;
}
case Primitive::kPrimLong: {
DCHECK_EQ(locations->InAt(0).AsX86().AsRegisterPair(),
locations->Out().AsX86().AsRegisterPair());
- __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
- locations->InAt(1).AsX86().AsRegisterPairLow());
- __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
- locations->InAt(1).AsX86().AsRegisterPairHigh());
+ if (locations->InAt(1).IsRegister()) {
+ __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
+ locations->InAt(1).AsX86().AsRegisterPairLow());
+ __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
+ locations->InAt(1).AsX86().AsRegisterPairHigh());
+ } else {
+ __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
+ Address(ESP, locations->InAt(1).GetStackIndex()));
+ __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
+ Address(ESP, locations->InAt(1).GetHighStackIndex(kX86WordSize)));
+ }
break;
}
@@ -643,14 +756,16 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetOut(X86CpuLocation(EAX));
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(X86CpuLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(X86CpuLocation(calling_convention.GetRegisterAt(1)));
instruction->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
InvokeRuntimeCallingConvention calling_convention;
LoadCurrentMethod(calling_convention.GetRegisterAt(1));
- __ movl(calling_convention.GetRegisterAt(0),
- Immediate(instruction->GetTypeIndex()));
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
__ fs()->call(
Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocObjectWithAccessCheck)));
@@ -676,15 +791,24 @@ void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instructi
void LocationsBuilderX86::VisitNot(HNot* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetInAt(0, X86CpuLocation(EAX));
- locations->SetOut(X86CpuLocation(EAX));
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
instruction->SetLocations(locations);
}
void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(), locations->Out().AsX86().AsCpuRegister());
- __ xorl(locations->Out().AsX86().AsCpuRegister(), Immediate(1));
+ Location out = locations->Out();
+ DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(), out.AsX86().AsCpuRegister());
+ __ xorl(out.AsX86().AsCpuRegister(), Immediate(1));
+}
+
+void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unimplemented";
+}
+
+void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unimplemented";
}
} // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index f22890e708..1ee11bf0e8 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -102,10 +102,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
class CodeGeneratorX86 : public CodeGenerator {
public:
- explicit CodeGeneratorX86(HGraph* graph)
- : CodeGenerator(graph),
- location_builder_(graph, this),
- instruction_visitor_(graph, this) { }
+ explicit CodeGeneratorX86(HGraph* graph);
virtual ~CodeGeneratorX86() { }
virtual void GenerateFrameEntry() OVERRIDE;
@@ -129,7 +126,13 @@ class CodeGeneratorX86 : public CodeGenerator {
return &assembler_;
}
+ virtual size_t GetNumberOfRegisters() const OVERRIDE;
+ virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
+ virtual ManagedRegister AllocateFreeRegister(
+ Primitive::Type type, bool* blocked_registers) const OVERRIDE;
+
int32_t GetStackSlot(HLocal* local) const;
+ virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
private:
// Helper method to move a 32-bit value between two locations.
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 498deba2b4..3d6aeb7300 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -15,6 +15,7 @@
*/
#include "nodes.h"
+#include "ssa_builder.h"
#include "utils/growable_array.h"
namespace art {
@@ -34,7 +35,13 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) const {
if (!visited.IsBitSet(i)) {
HBasicBlock* block = blocks_.Get(i);
for (size_t j = 0; j < block->GetSuccessors()->Size(); j++) {
- block->GetSuccessors()->Get(j)->RemovePredecessor(block);
+ block->GetSuccessors()->Get(j)->RemovePredecessor(block, false);
+ }
+ for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ block->RemovePhi(it.Current()->AsPhi());
+ }
+ for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ block->RemoveInstruction(it.Current());
}
}
}
@@ -120,11 +127,112 @@ void HGraph::VisitBlockForDominatorTree(HBasicBlock* block,
}
}
-void HBasicBlock::AddInstruction(HInstruction* instruction) {
+void HGraph::TransformToSSA() {
+ DCHECK(!dominator_order_.IsEmpty());
+ SimplifyCFG();
+ SsaBuilder ssa_builder(this);
+ ssa_builder.BuildSsa();
+}
+
+void HGraph::SimplifyCFG() {
+ for (size_t i = 0; i < dominator_order_.Size(); i++) {
+ HBasicBlock* current = dominator_order_.Get(i);
+ if (current->IsLoopHeader()) {
+ // Make sure the loop has only one pre header. This simplifies SSA building:
+ // we then only need to look at the pre header to know which locals are
+ // initialized at entry of the loop.
+ HLoopInformation* info = current->GetLoopInformation();
+ size_t number_of_incomings = current->GetPredecessors()->Size() - info->NumberOfBackEdges();
+ if (number_of_incomings != 1) {
+ HBasicBlock* pre_header = new (arena_) HBasicBlock(this);
+ AddBlock(pre_header);
+ pre_header->AddInstruction(new (arena_) HGoto());
+ pre_header->SetDominator(current->GetDominator());
+ current->SetDominator(pre_header);
+ dominator_order_.InsertAt(i, pre_header);
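+ // Account for the insertion: the current block moved to i + 1, so bump i
+ // to avoid visiting it twice.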
+ i++;
+
+ ArenaBitVector back_edges(arena_, GetBlocks()->Size(), false);
+ for (size_t pred = 0; pred < info->GetBackEdges()->Size(); pred++) {
+ back_edges.SetBit(info->GetBackEdges()->Get(pred)->GetBlockId());
+ }
+ for (size_t pred = 0; pred < current->GetPredecessors()->Size(); pred++) {
+ HBasicBlock* predecessor = current->GetPredecessors()->Get(pred);
+ if (!back_edges.IsBitSet(predecessor->GetBlockId())) {
+ current->RemovePredecessor(predecessor);
+ pred--;
+ predecessor->AddSuccessor(pre_header);
+ }
+ }
+ pre_header->AddSuccessor(current);
+ }
+ info->SetPreHeader(current->GetDominator());
+ }
+ }
+}
+
+void HLoopInformation::SetPreHeader(HBasicBlock* block) {
+ DCHECK_EQ(header_->GetDominator(), block);
+ pre_header_ = block;
+}
+
+static void Add(HInstructionList* instruction_list,
+ HBasicBlock* block,
+ HInstruction* instruction) {
DCHECK(instruction->GetBlock() == nullptr);
DCHECK_EQ(instruction->GetId(), -1);
- instruction->SetBlock(this);
- instruction->SetId(GetGraph()->GetNextInstructionId());
+ instruction->SetBlock(block);
+ instruction->SetId(block->GetGraph()->GetNextInstructionId());
+ instruction_list->AddInstruction(instruction);
+}
+
+void HBasicBlock::AddInstruction(HInstruction* instruction) {
+ Add(&instructions_, this, instruction);
+}
+
+void HBasicBlock::AddPhi(HPhi* phi) {
+ Add(&phis_, this, phi);
+}
+
+static void Remove(HInstructionList* instruction_list,
+ HBasicBlock* block,
+ HInstruction* instruction) {
+ DCHECK_EQ(block, instruction->GetBlock());
+ DCHECK(instruction->GetUses() == nullptr);
+ DCHECK(instruction->GetEnvUses() == nullptr);
+ instruction->SetBlock(nullptr);
+ instruction_list->RemoveInstruction(instruction);
+
+ for (size_t i = 0; i < instruction->InputCount(); i++) {
+ instruction->InputAt(i)->RemoveUser(instruction, i);
+ }
+}
+
+void HBasicBlock::RemoveInstruction(HInstruction* instruction) {
+ Remove(&instructions_, this, instruction);
+}
+
+void HBasicBlock::RemovePhi(HPhi* phi) {
+ Remove(&phis_, this, phi);
+}
+
+void HInstruction::RemoveUser(HInstruction* user, size_t input_index) {
+ HUseListNode<HInstruction>* previous = nullptr;
+ HUseListNode<HInstruction>* current = uses_;
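+ // A (user, input_index) pair is expected to appear at most once in the
+ // list, so at most one node gets unlinked.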
+ while (current != nullptr) {
+ if (current->GetUser() == user && current->GetIndex() == input_index) {
+ if (previous == nullptr) {
+ uses_ = current->GetTail();
+ } else {
+ previous->SetTail(current->GetTail());
+ }
+ }
+ previous = current;
+ current = current->GetTail();
+ }
+}
+
+void HInstructionList::AddInstruction(HInstruction* instruction) {
if (first_instruction_ == nullptr) {
DCHECK(last_instruction_ == nullptr);
first_instruction_ = last_instruction_ = instruction;
@@ -133,9 +241,51 @@ void HBasicBlock::AddInstruction(HInstruction* instruction) {
instruction->previous_ = last_instruction_;
last_instruction_ = instruction;
}
- for (int i = 0; i < instruction->InputCount(); i++) {
- instruction->InputAt(i)->AddUse(instruction);
+ for (size_t i = 0; i < instruction->InputCount(); i++) {
+ instruction->InputAt(i)->AddUseAt(instruction, i);
+ }
+}
+
+void HInstructionList::RemoveInstruction(HInstruction* instruction) {
+ if (instruction->previous_ != nullptr) {
+ instruction->previous_->next_ = instruction->next_;
+ }
+ if (instruction->next_ != nullptr) {
+ instruction->next_->previous_ = instruction->previous_;
}
+ if (instruction == first_instruction_) {
+ first_instruction_ = instruction->next_;
+ }
+ if (instruction == last_instruction_) {
+ last_instruction_ = instruction->previous_;
+ }
+}
+
+void HInstruction::ReplaceWith(HInstruction* other) {
+ for (HUseIterator<HInstruction> it(GetUses()); !it.Done(); it.Advance()) {
+ HUseListNode<HInstruction>* current = it.Current();
+ HInstruction* user = current->GetUser();
+ size_t input_index = current->GetIndex();
+ user->SetRawInputAt(input_index, other);
+ other->AddUseAt(user, input_index);
+ }
+
+ for (HUseIterator<HEnvironment> it(GetEnvUses()); !it.Done(); it.Advance()) {
+ HUseListNode<HEnvironment>* current = it.Current();
+ HEnvironment* user = current->GetUser();
+ size_t input_index = current->GetIndex();
+ user->SetRawEnvAt(input_index, other);
+ other->AddEnvUseAt(user, input_index);
+ }
+
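+ // All uses have been transferred to `other`; clear the lists so that the
+ // DCHECKs in Remove() pass if this instruction is later removed.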
+ uses_ = nullptr;
+ env_uses_ = nullptr;
+}
+
+void HPhi::AddInput(HInstruction* input) {
+ DCHECK(input->GetBlock() != nullptr);
+ inputs_.Add(input);
+ input->AddUseAt(this, inputs_.Size() - 1);
}
#define DEFINE_ACCEPT(name) \
@@ -155,7 +305,10 @@ void HGraphVisitor::VisitInsertionOrder() {
}
void HGraphVisitor::VisitBasicBlock(HBasicBlock* block) {
- for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+ for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ it.Current()->Accept(this);
+ }
+ for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
it.Current()->Accept(this);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3da9ed9461..581c1d56f2 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -24,9 +24,11 @@
namespace art {
class HBasicBlock;
+class HEnvironment;
class HInstruction;
class HIntConstant;
class HGraphVisitor;
+class HPhi;
class LocationSummary;
static const int kDefaultNumberOfBlocks = 8;
@@ -34,6 +36,23 @@ static const int kDefaultNumberOfSuccessors = 2;
static const int kDefaultNumberOfPredecessors = 2;
static const int kDefaultNumberOfBackEdges = 1;
+class HInstructionList {
+ public:
+ HInstructionList() : first_instruction_(nullptr), last_instruction_(nullptr) {}
+
+ void AddInstruction(HInstruction* instruction);
+ void RemoveInstruction(HInstruction* instruction);
+
+ private:
+ HInstruction* first_instruction_;
+ HInstruction* last_instruction_;
+
+ friend class HBasicBlock;
+ friend class HInstructionIterator;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstructionList);
+};
+
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject {
public:
@@ -56,7 +75,10 @@ class HGraph : public ArenaObject {
void SetExitBlock(HBasicBlock* block) { exit_block_ = block; }
void AddBlock(HBasicBlock* block);
+
void BuildDominatorTree();
+ void TransformToSSA();
+ void SimplifyCFG();
int GetNextInstructionId() {
return current_instruction_id_++;
@@ -86,6 +108,9 @@ class HGraph : public ArenaObject {
return number_of_in_vregs_;
}
+ GrowableArray<HBasicBlock*>* GetDominatorOrder() {
+ return &dominator_order_;
+ }
private:
HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
@@ -138,7 +163,18 @@ class HLoopInformation : public ArenaObject {
return back_edges_.Size();
}
+ void SetPreHeader(HBasicBlock* block);
+
+ HBasicBlock* GetPreHeader() const {
+ return pre_header_;
+ }
+
+ const GrowableArray<HBasicBlock*>* GetBackEdges() const {
+ return &back_edges_;
+ }
+
private:
+ HBasicBlock* pre_header_;
HBasicBlock* header_;
GrowableArray<HBasicBlock*> back_edges_;
@@ -154,8 +190,6 @@ class HBasicBlock : public ArenaObject {
: graph_(graph),
predecessors_(graph->GetArena(), kDefaultNumberOfPredecessors),
successors_(graph->GetArena(), kDefaultNumberOfSuccessors),
- first_instruction_(nullptr),
- last_instruction_(nullptr),
loop_information_(nullptr),
dominator_(nullptr),
block_id_(-1) { }
@@ -189,26 +223,42 @@ class HBasicBlock : public ArenaObject {
: loop_information_->NumberOfBackEdges();
}
- HInstruction* GetFirstInstruction() const { return first_instruction_; }
- HInstruction* GetLastInstruction() const { return last_instruction_; }
+ HInstruction* GetFirstInstruction() const { return instructions_.first_instruction_; }
+ HInstruction* GetLastInstruction() const { return instructions_.last_instruction_; }
+ const HInstructionList* GetInstructions() const { return &instructions_; }
+ const HInstructionList* GetPhis() const { return &phis_; }
void AddSuccessor(HBasicBlock* block) {
successors_.Add(block);
block->predecessors_.Add(this);
}
- void RemovePredecessor(HBasicBlock* block) {
+ void RemovePredecessor(HBasicBlock* block, bool remove_in_successor = true) {
predecessors_.Delete(block);
+ if (remove_in_successor) {
+ block->successors_.Delete(this);
+ }
}
void AddInstruction(HInstruction* instruction);
+ void RemoveInstruction(HInstruction* instruction);
+ void AddPhi(HPhi* phi);
+ void RemovePhi(HPhi* phi);
+
+ bool IsLoopHeader() const {
+ return loop_information_ != nullptr;
+ }
+
+ HLoopInformation* GetLoopInformation() const {
+ return loop_information_;
+ }
private:
HGraph* const graph_;
GrowableArray<HBasicBlock*> predecessors_;
GrowableArray<HBasicBlock*> successors_;
- HInstruction* first_instruction_;
- HInstruction* last_instruction_;
+ HInstructionList instructions_;
+ HInstructionList phis_;
HLoopInformation* loop_information_;
HBasicBlock* dominator_;
int block_id_;
@@ -230,6 +280,7 @@ class HBasicBlock : public ArenaObject {
M(NewInstance) \
M(Not) \
M(ParameterValue) \
+ M(Phi) \
M(Return) \
M(ReturnVoid) \
M(StoreLocal) \
@@ -244,17 +295,22 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
virtual const char* DebugName() const { return #type; } \
virtual H##type* As##type() { return this; } \
+template <typename T>
class HUseListNode : public ArenaObject {
public:
- HUseListNode(HInstruction* instruction, HUseListNode* tail)
- : instruction_(instruction), tail_(tail) { }
+ HUseListNode(T* user, size_t index, HUseListNode* tail)
+ : user_(user), index_(index), tail_(tail) { }
HUseListNode* GetTail() const { return tail_; }
- HInstruction* GetInstruction() const { return instruction_; }
+ T* GetUser() const { return user_; }
+ size_t GetIndex() const { return index_; }
+
+ void SetTail(HUseListNode<T>* node) { tail_ = node; }
private:
- HInstruction* const instruction_;
- HUseListNode* const tail_;
+ T* const user_;
+ const size_t index_;
+ HUseListNode<T>* tail_;
DISALLOW_COPY_AND_ASSIGN(HUseListNode);
};
@@ -267,6 +323,8 @@ class HInstruction : public ArenaObject {
block_(nullptr),
id_(-1),
uses_(nullptr),
+ env_uses_(nullptr),
+ environment_(nullptr),
locations_(nullptr) { }
virtual ~HInstruction() { }
@@ -277,28 +335,43 @@ class HInstruction : public ArenaObject {
HBasicBlock* GetBlock() const { return block_; }
void SetBlock(HBasicBlock* block) { block_ = block; }
- virtual intptr_t InputCount() const = 0;
- virtual HInstruction* InputAt(intptr_t i) const = 0;
+ virtual size_t InputCount() const = 0;
+ virtual HInstruction* InputAt(size_t i) const = 0;
virtual void Accept(HGraphVisitor* visitor) = 0;
virtual const char* DebugName() const = 0;
virtual Primitive::Type GetType() const { return Primitive::kPrimVoid; }
+ virtual void SetRawInputAt(size_t index, HInstruction* input) = 0;
+
+ virtual bool NeedsEnvironment() const { return false; }
- void AddUse(HInstruction* user) {
- uses_ = new (block_->GetGraph()->GetArena()) HUseListNode(user, uses_);
+ void AddUseAt(HInstruction* user, size_t index) {
+ uses_ = new (block_->GetGraph()->GetArena()) HUseListNode<HInstruction>(user, index, uses_);
}
- HUseListNode* GetUses() const { return uses_; }
+ void AddEnvUseAt(HEnvironment* user, size_t index) {
+ env_uses_ = new (block_->GetGraph()->GetArena()) HUseListNode<HEnvironment>(
+ user, index, env_uses_);
+ }
+
+ void RemoveUser(HInstruction* user, size_t index);
+
+ HUseListNode<HInstruction>* GetUses() const { return uses_; }
+ HUseListNode<HEnvironment>* GetEnvUses() const { return env_uses_; }
bool HasUses() const { return uses_ != nullptr; }
int GetId() const { return id_; }
void SetId(int id) { id_ = id; }
+ void SetEnvironment(HEnvironment* environment) { environment_ = environment; }
+
LocationSummary* GetLocations() const { return locations_; }
void SetLocations(LocationSummary* locations) { locations_ = locations; }
+ void ReplaceWith(HInstruction* instruction);
+
#define INSTRUCTION_TYPE_CHECK(type) \
virtual H##type* As##type() { return nullptr; }
@@ -315,19 +388,27 @@ class HInstruction : public ArenaObject {
// has not been added to the graph.
int id_;
- HUseListNode* uses_;
+ // List of instructions that have this instruction as an input.
+ HUseListNode<HInstruction>* uses_;
+
+ // List of environments that contain this instruction.
+ HUseListNode<HEnvironment>* env_uses_;
+
+ HEnvironment* environment_;
// Set by the code generator.
LocationSummary* locations_;
friend class HBasicBlock;
+ friend class HInstructionList;
DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
+template<typename T>
class HUseIterator : public ValueObject {
public:
- explicit HUseIterator(HInstruction* instruction) : current_(instruction->GetUses()) { }
+ explicit HUseIterator(HUseListNode<T>* uses) : current_(uses) {}
bool Done() const { return current_ == nullptr; }
@@ -336,17 +417,51 @@ class HUseIterator : public ValueObject {
current_ = current_->GetTail();
}
- HInstruction* Current() const {
+ HUseListNode<T>* Current() const {
DCHECK(!Done());
- return current_->GetInstruction();
+ return current_;
}
private:
- HUseListNode* current_;
+ HUseListNode<T>* current_;
friend class HValue;
};
+// An HEnvironment object contains the values of the virtual registers at a given location.
+class HEnvironment : public ArenaObject {
+ public:
+ HEnvironment(ArenaAllocator* arena, size_t number_of_vregs) : vregs_(arena, number_of_vregs) {
+ vregs_.SetSize(number_of_vregs);
+ for (size_t i = 0; i < number_of_vregs; i++) {
+ vregs_.Put(i, nullptr);
+ }
+ }
+
+ void Populate(const GrowableArray<HInstruction*>& env) {
+ for (size_t i = 0; i < env.Size(); i++) {
+ HInstruction* instruction = env.Get(i);
+ vregs_.Put(i, instruction);
+ if (instruction != nullptr) {
+ instruction->AddEnvUseAt(this, i);
+ }
+ }
+ }
+
+ void SetRawEnvAt(size_t index, HInstruction* instruction) {
+ vregs_.Put(index, instruction);
+ }
+
+ GrowableArray<HInstruction*>* GetVRegs() {
+ return &vregs_;
+ }
+
+ private:
+ GrowableArray<HInstruction*> vregs_;
+
+ DISALLOW_COPY_AND_ASSIGN(HEnvironment);
+};
+
class HInputIterator : public ValueObject {
public:
explicit HInputIterator(HInstruction* instruction) : instruction_(instruction), index_(0) { }
@@ -357,15 +472,15 @@ class HInputIterator : public ValueObject {
private:
HInstruction* instruction_;
- int index_;
+ size_t index_;
DISALLOW_COPY_AND_ASSIGN(HInputIterator);
};
class HInstructionIterator : public ValueObject {
public:
- explicit HInstructionIterator(HBasicBlock* block)
- : instruction_(block->GetFirstInstruction()) {
+ explicit HInstructionIterator(const HInstructionList& instructions)
+ : instruction_(instructions.first_instruction_) {
next_ = Done() ? nullptr : instruction_->GetNext();
}
@@ -434,16 +549,18 @@ class HTemplateInstruction: public HInstruction {
HTemplateInstruction<N>() : inputs_() { }
virtual ~HTemplateInstruction() { }
- virtual intptr_t InputCount() const { return N; }
- virtual HInstruction* InputAt(intptr_t i) const { return inputs_[i]; }
+ virtual size_t InputCount() const { return N; }
+ virtual HInstruction* InputAt(size_t i) const { return inputs_[i]; }
protected:
- void SetRawInputAt(intptr_t i, HInstruction* instruction) {
+ virtual void SetRawInputAt(size_t i, HInstruction* instruction) {
inputs_[i] = instruction;
}
private:
EmbeddedArray<HInstruction*, N> inputs_;
+
+ friend class SsaBuilder;
};
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
@@ -658,11 +775,19 @@ class HInvoke : public HInstruction {
inputs_.SetSize(number_of_arguments);
}
- virtual intptr_t InputCount() const { return inputs_.Size(); }
- virtual HInstruction* InputAt(intptr_t i) const { return inputs_.Get(i); }
+ virtual size_t InputCount() const { return inputs_.Size(); }
+ virtual HInstruction* InputAt(size_t i) const { return inputs_.Get(i); }
+
+ // The runtime needs to walk the stack, so Dex -> Dex calls need to
+ // know their environment.
+ virtual bool NeedsEnvironment() const { return true; }
void SetArgumentAt(size_t index, HInstruction* argument) {
- inputs_.Put(index, argument);
+ SetRawInputAt(index, argument);
+ }
+
+ virtual void SetRawInputAt(size_t index, HInstruction* input) {
+ inputs_.Put(index, input);
}
virtual Primitive::Type GetType() const { return return_type_; }
@@ -707,6 +832,9 @@ class HNewInstance : public HTemplateInstruction<0> {
virtual Primitive::Type GetType() const { return Primitive::kPrimNot; }
+ // Calls the runtime, so it needs an environment.
+ virtual bool NeedsEnvironment() const { return true; }
+
DECLARE_INSTRUCTION(NewInstance)
private:
@@ -779,6 +907,39 @@ class HNot : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HNot);
};
+class HPhi : public HInstruction {
+ public:
+ HPhi(ArenaAllocator* arena, uint32_t reg_number, size_t number_of_inputs, Primitive::Type type)
+ : inputs_(arena, number_of_inputs),
+ reg_number_(reg_number),
+ type_(type) {
+ inputs_.SetSize(number_of_inputs);
+ }
+
+ virtual size_t InputCount() const { return inputs_.Size(); }
+ virtual HInstruction* InputAt(size_t i) const { return inputs_.Get(i); }
+
+ virtual void SetRawInputAt(size_t index, HInstruction* input) {
+ inputs_.Put(index, input);
+ }
+
+ void AddInput(HInstruction* input);
+
+ virtual Primitive::Type GetType() const { return type_; }
+
+ uint32_t GetRegNumber() const { return reg_number_; }
+
+ DECLARE_INSTRUCTION(Phi)
+
+ protected:
+ GrowableArray<HInstruction*> inputs_;
+ const uint32_t reg_number_;
+ const Primitive::Type type_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HPhi);
+};
+
class HGraphVisitor : public ValueObject {
public:
explicit HGraphVisitor(HGraph* graph) : graph_(graph) { }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d19c40c291..9438890941 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -100,6 +100,10 @@ CompiledMethod* OptimizingCompiler::TryCompile(CompilerDriver& driver,
std::vector<uint8_t> gc_map;
codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
+ // Run these phases to get some test coverage.
+ graph->BuildDominatorTree();
+ graph->TransformToSSA();
+
return new CompiledMethod(driver,
instruction_set,
allocator.GetMemory(),
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index 606c91519e..c82d0cc6a4 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -25,11 +25,19 @@ class HPrettyPrinter : public HGraphVisitor {
public:
explicit HPrettyPrinter(HGraph* graph) : HGraphVisitor(graph) { }
- virtual void VisitInstruction(HInstruction* instruction) {
+ void PrintPreInstruction(HInstruction* instruction) {
PrintString(" ");
PrintInt(instruction->GetId());
PrintString(": ");
+ }
+
+ virtual void VisitInstruction(HInstruction* instruction) {
+ PrintPreInstruction(instruction);
PrintString(instruction->DebugName());
+ PrintPostInstruction(instruction);
+ }
+
+ void PrintPostInstruction(HInstruction* instruction) {
if (instruction->InputCount() != 0) {
PrintString("(");
bool first = true;
@@ -46,13 +54,13 @@ class HPrettyPrinter : public HGraphVisitor {
if (instruction->HasUses()) {
PrintString(" [");
bool first = true;
- for (HUseIterator it(instruction); !it.Done(); it.Advance()) {
+ for (HUseIterator<HInstruction> it(instruction->GetUses()); !it.Done(); it.Advance()) {
if (first) {
first = false;
} else {
PrintString(", ");
}
- PrintInt(it.Current()->GetId());
+ PrintInt(it.Current()->GetUser()->GetId());
}
PrintString("]");
}
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
new file mode 100644
index 0000000000..bfb4f38f50
--- /dev/null
+++ b/compiler/optimizing/ssa_builder.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ssa_builder.h"
+#include "nodes.h"
+
+namespace art {
+
+void SsaBuilder::BuildSsa() {
+ // 1) Visit in dominator order. All predecessors of a block (except the back
+ // edges of loops) must have been visited in order to create the right
+ // environment for that block. For loops, we create phis whose inputs will be
+ // set in 2).
+ for (size_t i = 0; i < GetGraph()->GetDominatorOrder()->Size(); i++) {
+ VisitBasicBlock(GetGraph()->GetDominatorOrder()->Get(i));
+ }
+
+ // 2) Set inputs of loop phis.
+ for (size_t i = 0; i < loop_headers_.Size(); i++) {
+ HBasicBlock* block = loop_headers_.Get(i);
+ for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* phi = it.Current()->AsPhi();
+ for (size_t pred = 0; pred < block->GetPredecessors()->Size(); pred++) {
+ phi->AddInput(ValueOfLocal(block->GetPredecessors()->Get(pred), phi->GetRegNumber()));
+ }
+ }
+ }
+
+ // 3) Clear locals.
+ // TODO: Move this to a dead code eliminator phase.
+ for (HInstructionIterator it(*GetGraph()->GetEntryBlock()->GetInstructions());
+ !it.Done();
+ it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->AsLocal() != nullptr) {
+ current->GetBlock()->RemoveInstruction(current);
+ }
+ }
+}
+
+HInstruction* SsaBuilder::ValueOfLocal(HBasicBlock* block, size_t local) {
+ return GetLocalsFor(block)->Get(local);
+}
+
+void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
+ current_locals_ = GetLocalsFor(block);
+
+ if (block->IsLoopHeader()) {
+ // If the block is a loop header, we know we have only visited the pre header
+ // because we are visiting in dominator order. We create phis for all initialized
+ // locals from the pre header. Their inputs will be populated at the end of
+ // the analysis.
+ for (size_t local = 0; local < current_locals_->Size(); local++) {
+ HInstruction* incoming = ValueOfLocal(block->GetLoopInformation()->GetPreHeader(), local);
+ if (incoming != nullptr) {
+ // TODO: Compute union type.
+ HPhi* phi = new (GetGraph()->GetArena()) HPhi(
+ GetGraph()->GetArena(), local, 0, Primitive::kPrimVoid);
+ block->AddPhi(phi);
+ current_locals_->Put(local, phi);
+ }
+ }
+ // Save the loop header so that the last phase of the analysis knows which
+ // blocks need to be updated.
+ loop_headers_.Add(block);
+ } else if (block->GetPredecessors()->Size() > 0) {
+ // All predecessors have already been visited because we are visiting in dominator order.
+ // We merge the values of all locals, creating phis if those values differ.
+ for (size_t local = 0; local < current_locals_->Size(); local++) {
+ bool is_different = false;
+ HInstruction* value = ValueOfLocal(block->GetPredecessors()->Get(0), local);
+ for (size_t i = 1; i < block->GetPredecessors()->Size(); i++) {
+ if (ValueOfLocal(block->GetPredecessors()->Get(i), local) != value) {
+ is_different = true;
+ break;
+ }
+ }
+ if (is_different) {
+ // TODO: Compute union type.
+ HPhi* phi = new (GetGraph()->GetArena()) HPhi(
+ GetGraph()->GetArena(), local, block->GetPredecessors()->Size(), Primitive::kPrimVoid);
+ for (size_t i = 0; i < block->GetPredecessors()->Size(); i++) {
+ phi->SetRawInputAt(i, ValueOfLocal(block->GetPredecessors()->Get(i), local));
+ }
+ block->AddPhi(phi);
+ value = phi;
+ }
+ current_locals_->Put(local, value);
+ }
+ }
+
+ // Visit all instructions. The instructions of interest are:
+ // - HLoadLocal: replace them with the current value of the local.
+ // - HStoreLocal: update current value of the local and remove the instruction.
+ // - Instructions that require an environment: populate their environment
+ // with the current values of the locals.
+ for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ it.Current()->Accept(this);
+ }
+}
+
+void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
+ load->ReplaceWith(current_locals_->Get(load->GetLocal()->GetRegNumber()));
+ load->GetBlock()->RemoveInstruction(load);
+}
+
+void SsaBuilder::VisitStoreLocal(HStoreLocal* store) {
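+ // Input 1 of a store is the value being stored (input 0 is the local).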
+ current_locals_->Put(store->GetLocal()->GetRegNumber(), store->InputAt(1));
+ store->GetBlock()->RemoveInstruction(store);
+}
+
+void SsaBuilder::VisitInstruction(HInstruction* instruction) {
+ if (!instruction->NeedsEnvironment()) {
+ return;
+ }
+ HEnvironment* environment = new (GetGraph()->GetArena()) HEnvironment(
+ GetGraph()->GetArena(), current_locals_->Size());
+ environment->Populate(*current_locals_);
+ instruction->SetEnvironment(environment);
+}
+
+} // namespace art
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
new file mode 100644
index 0000000000..b6c6c0b658
--- /dev/null
+++ b/compiler/optimizing/ssa_builder.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
+#define ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
+
+#include "nodes.h"
+
+namespace art {
+
+static constexpr int kDefaultNumberOfLoops = 2;
+
+class SsaBuilder : public HGraphVisitor {
+ public:
+ explicit SsaBuilder(HGraph* graph)
+ : HGraphVisitor(graph),
+ current_locals_(nullptr),
+ loop_headers_(graph->GetArena(), kDefaultNumberOfLoops),
+ locals_for_(graph->GetArena(), graph->GetBlocks()->Size()) {
+ locals_for_.SetSize(graph->GetBlocks()->Size());
+ }
+
+ void BuildSsa();
+
+ GrowableArray<HInstruction*>* GetLocalsFor(HBasicBlock* block) {
+ HEnvironment* env = locals_for_.Get(block->GetBlockId());
+ if (env == nullptr) {
+ env = new (GetGraph()->GetArena()) HEnvironment(
+ GetGraph()->GetArena(), GetGraph()->GetNumberOfVRegs());
+ locals_for_.Put(block->GetBlockId(), env);
+ }
+ return env->GetVRegs();
+ }
+
+ HInstruction* ValueOfLocal(HBasicBlock* block, size_t local);
+
+ void VisitBasicBlock(HBasicBlock* block);
+ void VisitLoadLocal(HLoadLocal* load);
+ void VisitStoreLocal(HStoreLocal* store);
+ void VisitInstruction(HInstruction* instruction);
+
+ private:
+ // Locals for the current block being visited.
+ GrowableArray<HInstruction*>* current_locals_;
+
+ // Keep track of loop headers found. The last phase of the analysis iterates
+ // over these blocks to set the inputs of their phis.
+ GrowableArray<HBasicBlock*> loop_headers_;
+
+ // HEnvironment for each block.
+ GrowableArray<HEnvironment*> locals_for_;
+
+ DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_SSA_BUILDER_H_
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
new file mode 100644
index 0000000000..7c3633b5e9
--- /dev/null
+++ b/compiler/optimizing/ssa_test.cc
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/stringprintf.h"
+#include "builder.h"
+#include "dex_file.h"
+#include "dex_instruction.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "pretty_printer.h"
+#include "ssa_builder.h"
+#include "utils/arena_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+class StringPrettyPrinter : public HPrettyPrinter {
+ public:
+ explicit StringPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
+
+ virtual void PrintInt(int value) {
+ str_ += StringPrintf("%d", value);
+ }
+
+ virtual void PrintString(const char* value) {
+ str_ += value;
+ }
+
+ virtual void PrintNewLine() {
+ str_ += '\n';
+ }
+
+ void Clear() { str_.clear(); }
+
+ std::string str() const { return str_; }
+
+ virtual void VisitIntConstant(HIntConstant* constant) {
+ PrintPreInstruction(constant);
+ str_ += constant->DebugName();
+ str_ += " ";
+ PrintInt(constant->GetValue());
+ PrintPostInstruction(constant);
+ }
+
+ private:
+ std::string str_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringPrettyPrinter);
+};
+
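+// Re-number instructions sequentially so that the ids in the expected strings
+// stay dense after the SSA pass has removed the HLoadLocal/HStoreLocal
+// instructions.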
+static void ReNumberInstructions(HGraph* graph) {
+ int id = 0;
+ for (size_t i = 0; i < graph->GetBlocks()->Size(); i++) {
+ HBasicBlock* block = graph->GetBlocks()->Get(i);
+ for (HInstructionIterator it(*block->GetPhis()); !it.Done(); it.Advance()) {
+ it.Current()->SetId(id++);
+ }
+ for (HInstructionIterator it(*block->GetInstructions()); !it.Done(); it.Advance()) {
+ it.Current()->SetId(id++);
+ }
+ }
+}
+
+static void TestCode(const uint16_t* data, const char* expected) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraphBuilder builder(&allocator);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = builder.BuildGraph(*item);
+ ASSERT_NE(graph, nullptr);
+ graph->BuildDominatorTree();
+ graph->TransformToSSA();
+ ReNumberInstructions(graph);
+
+ StringPrettyPrinter printer(graph);
+ printer.VisitInsertionOrder();
+
+ ASSERT_STREQ(expected, printer.str().c_str());
+}
+
+TEST(SsaTest, CFG1) {
+ // Test that we get rid of loads and stores.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [2, 2]\n"
+ " 1: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 3, 2\n"
+ " 2: Equal(0, 0) [3]\n"
+ " 3: If(2)\n"
+ "BasicBlock 2, pred: 1, succ: 3\n"
+ " 4: Goto\n"
+ "BasicBlock 3, pred: 1, 2, succ: 4\n"
+ " 5: ReturnVoid\n"
+ "BasicBlock 4, pred: 3\n"
+ " 6: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::GOTO | 0x100,
+ Instruction::RETURN_VOID);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, CFG2) {
+ // Test that we create a phi for the join block of an if control flow instruction
+ // when there is only code in the else branch.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [6, 3, 3]\n"
+ " 1: IntConstant 4 [6]\n"
+ " 2: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 3, 2\n"
+ " 3: Equal(0, 0) [4]\n"
+ " 4: If(3)\n"
+ "BasicBlock 2, pred: 1, succ: 3\n"
+ " 5: Goto\n"
+ "BasicBlock 3, pred: 1, 2, succ: 4\n"
+ " 6: Phi(0, 1) [7]\n"
+ " 7: Return(6)\n"
+ "BasicBlock 4, pred: 3\n"
+ " 8: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::RETURN | 0 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, CFG3) {
+ // Test that we create a phi for the join block of an if control flow instruction
+  // when both branches update a local.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [4, 4]\n"
+ " 1: IntConstant 4 [8]\n"
+ " 2: IntConstant 5 [8]\n"
+ " 3: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 3, 2\n"
+ " 4: Equal(0, 0) [5]\n"
+ " 5: If(4)\n"
+ "BasicBlock 2, pred: 1, succ: 4\n"
+ " 6: Goto\n"
+ "BasicBlock 3, pred: 1, succ: 4\n"
+ " 7: Goto\n"
+ "BasicBlock 4, pred: 2, 3, succ: 5\n"
+ " 8: Phi(1, 2) [9]\n"
+ " 9: Return(8)\n"
+ "BasicBlock 5, pred: 4\n"
+ " 10: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::GOTO | 0x200,
+ Instruction::CONST_4 | 5 << 12 | 0,
+ Instruction::RETURN | 0 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop1) {
+ // Test that we create a phi for an initialized local at entry of a loop.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [6, 4, 2, 2]\n"
+ " 1: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 3, 2\n"
+ " 2: Equal(0, 0) [3]\n"
+ " 3: If(2)\n"
+ "BasicBlock 2, pred: 1, 3, succ: 3\n"
+ " 4: Phi(0, 6) [6]\n"
+ " 5: Goto\n"
+ "BasicBlock 3, pred: 1, 2, succ: 2\n"
+ " 6: Phi(0, 4) [4]\n"
+ " 7: Goto\n"
+ "BasicBlock 4\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::GOTO | 0x100,
+ Instruction::GOTO | 0xFF00);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop2) {
+ // Simple loop with one preheader and one back edge.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [4]\n"
+ " 1: IntConstant 4 [4]\n"
+ " 2: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 3: Goto\n"
+ "BasicBlock 2, pred: 1, 3, succ: 4, 3\n"
+ " 4: Phi(0, 1) [5, 5]\n"
+ " 5: Equal(4, 4) [6]\n"
+ " 6: If(5)\n"
+ "BasicBlock 3, pred: 2, succ: 2\n"
+ " 7: Goto\n"
+ "BasicBlock 4, pred: 2, succ: 5\n"
+ " 8: ReturnVoid\n"
+ "BasicBlock 5, pred: 4\n"
+ " 9: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::GOTO | 0xFD00,
+ Instruction::RETURN_VOID);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop3) {
+ // Test that a local not yet defined at the entry of a loop is handled properly.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [5]\n"
+ " 1: IntConstant 4 [5]\n"
+ " 2: IntConstant 5 [9]\n"
+ " 3: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 4: Goto\n"
+ "BasicBlock 2, pred: 1, 3, succ: 4, 3\n"
+ " 5: Phi(0, 1) [6, 6]\n"
+ " 6: Equal(5, 5) [7]\n"
+ " 7: If(6)\n"
+ "BasicBlock 3, pred: 2, succ: 2\n"
+ " 8: Goto\n"
+ "BasicBlock 4, pred: 2, succ: 5\n"
+ " 9: Return(2)\n"
+ "BasicBlock 5, pred: 4\n"
+ " 10: Exit\n";
+
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::GOTO | 0xFD00,
+ Instruction::CONST_4 | 5 << 12 | 1 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop4) {
+ // Make sure we support a preheader of a loop not being the first predecessor
+ // in the predecessor list of the header.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [4]\n"
+ " 1: IntConstant 4 [4]\n"
+ " 2: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 4\n"
+ " 3: Goto\n"
+ "BasicBlock 2, pred: 3, 4, succ: 5, 3\n"
+ " 4: Phi(1, 0) [9, 5, 5]\n"
+ " 5: Equal(4, 4) [6]\n"
+ " 6: If(5)\n"
+ "BasicBlock 3, pred: 2, succ: 2\n"
+ " 7: Goto\n"
+ "BasicBlock 4, pred: 1, succ: 2\n"
+ " 8: Goto\n"
+ "BasicBlock 5, pred: 2, succ: 6\n"
+ " 9: Return(4)\n"
+ "BasicBlock 6, pred: 5\n"
+ " 10: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::GOTO | 0x500,
+ Instruction::IF_EQ, 5,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::GOTO | 0xFD00,
+ Instruction::GOTO | 0xFC00,
+ Instruction::RETURN | 0 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop5) {
+ // Make sure we create a preheader of a loop when a header originally has two
+ // incoming blocks and one back edge.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [4, 4]\n"
+ " 1: IntConstant 4 [14]\n"
+ " 2: IntConstant 5 [14]\n"
+ " 3: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 3, 2\n"
+ " 4: Equal(0, 0) [5]\n"
+ " 5: If(4)\n"
+ "BasicBlock 2, pred: 1, succ: 8\n"
+ " 6: Goto\n"
+ "BasicBlock 3, pred: 1, succ: 8\n"
+ " 7: Goto\n"
+ "BasicBlock 4, pred: 5, 8, succ: 6, 5\n"
+ " 8: Phi(8, 14) [8, 12, 9, 9]\n"
+ " 9: Equal(8, 8) [10]\n"
+ " 10: If(9)\n"
+ "BasicBlock 5, pred: 4, succ: 4\n"
+ " 11: Goto\n"
+ "BasicBlock 6, pred: 4, succ: 7\n"
+ " 12: Return(8)\n"
+ "BasicBlock 7, pred: 6\n"
+ " 13: Exit\n"
+ "BasicBlock 8, pred: 2, 3, succ: 4\n"
+ " 14: Phi(1, 2) [8]\n"
+ " 15: Goto\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::GOTO | 0x200,
+ Instruction::CONST_4 | 5 << 12 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::GOTO | 0xFE00,
+ Instruction::RETURN | 0 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop6) {
+ // Test a loop with one preheader and two back edges (e.g. continue).
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [5]\n"
+ " 1: IntConstant 4 [5, 8, 8]\n"
+ " 2: IntConstant 5 [5]\n"
+ " 3: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 4: Goto\n"
+ "BasicBlock 2, pred: 1, 4, 5, succ: 6, 3\n"
+ " 5: Phi(0, 2, 1) [12, 6, 6]\n"
+ " 6: Equal(5, 5) [7]\n"
+ " 7: If(6)\n"
+ "BasicBlock 3, pred: 2, succ: 5, 4\n"
+ " 8: Equal(1, 1) [9]\n"
+ " 9: If(8)\n"
+ "BasicBlock 4, pred: 3, succ: 2\n"
+ " 10: Goto\n"
+ "BasicBlock 5, pred: 3, succ: 2\n"
+ " 11: Goto\n"
+ "BasicBlock 6, pred: 2, succ: 7\n"
+ " 12: Return(5)\n"
+ "BasicBlock 7, pred: 6\n"
+ " 13: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 8,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::CONST_4 | 5 << 12 | 0,
+ Instruction::GOTO | 0xFA00,
+ Instruction::GOTO | 0xF900,
+ Instruction::RETURN | 0 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, Loop7) {
+ // Test a loop with one preheader, one back edge, and two exit edges (e.g. break).
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0 [5]\n"
+ " 1: IntConstant 4 [5, 8, 8]\n"
+ " 2: IntConstant 5 [12]\n"
+ " 3: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 4: Goto\n"
+ "BasicBlock 2, pred: 1, 5, succ: 6, 3\n"
+ " 5: Phi(0, 1) [12, 6, 6]\n"
+ " 6: Equal(5, 5) [7]\n"
+ " 7: If(6)\n"
+ "BasicBlock 3, pred: 2, succ: 5, 4\n"
+ " 8: Equal(1, 1) [9]\n"
+ " 9: If(8)\n"
+ "BasicBlock 4, pred: 3, succ: 6\n"
+ " 10: Goto\n"
+ "BasicBlock 5, pred: 3, succ: 2\n"
+ " 11: Goto\n"
+ "BasicBlock 6, pred: 2, 4, succ: 7\n"
+ " 12: Phi(5, 2) [13]\n"
+ " 13: Return(12)\n"
+ "BasicBlock 7, pred: 6\n"
+ " 14: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 8,
+ Instruction::CONST_4 | 4 << 12 | 0,
+ Instruction::IF_EQ, 4,
+ Instruction::CONST_4 | 5 << 12 | 0,
+ Instruction::GOTO | 0x0200,
+ Instruction::GOTO | 0xF900,
+ Instruction::RETURN | 0 << 8);
+
+ TestCode(data, expected);
+}
+
+TEST(SsaTest, DeadLocal) {
+ // Test that we correctly handle a local not being used.
+ const char* expected =
+ "BasicBlock 0, succ: 1\n"
+ " 0: IntConstant 0\n"
+ " 1: Goto\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 2: ReturnVoid\n"
+ "BasicBlock 2, pred: 1\n"
+ " 3: Exit\n";
+
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::RETURN_VOID);
+
+ TestCode(data, expected);
+}
+
+} // namespace art
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index e6c53dab24..993492da6d 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -31,7 +31,6 @@ enum OatListKind {
kGrowableArrayDfsOrder,
kGrowableArrayDfsPostOrder,
kGrowableArrayDomPostOrderTraversal,
- kGrowableArrayThrowLaunchPads,
kGrowableArraySuspendLaunchPads,
kGrowableArraySwitchTables,
kGrowableArrayFillArrayData,
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 7d02c7c8a8..9507e1207a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1493,7 +1493,7 @@ void X86_64Assembler::EmitOptionalRex(bool force, bool w, bool r, bool x, bool b
}
void X86_64Assembler::EmitOptionalRex32(CpuRegister reg) {
- EmitOptionalRex(false, false, reg.NeedsRex(), false, false);
+ EmitOptionalRex(false, false, false, false, reg.NeedsRex());
}
void X86_64Assembler::EmitOptionalRex32(CpuRegister dst, CpuRegister src) {
@@ -1540,8 +1540,9 @@ void X86_64Assembler::EmitOptionalRex32(XmmRegister dst, const Operand& operand)
}
void X86_64Assembler::EmitRex64(CpuRegister reg) {
- EmitOptionalRex(false, true, reg.NeedsRex(), false, false);
+ EmitOptionalRex(false, true, false, false, reg.NeedsRex());
}
+
void X86_64Assembler::EmitRex64(CpuRegister dst, CpuRegister src) {
EmitOptionalRex(false, true, dst.NeedsRex(), false, src.NeedsRex());
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index ac76c35f39..d3e56da168 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -110,7 +110,7 @@ static void Usage(const char* fmt, ...) {
UsageError(" Example: --oat-file=/system/framework/boot.oat");
UsageError("");
UsageError(" --oat-fd=<number>: specifies the oat output destination via a file descriptor.");
- UsageError(" Example: --oat-file=/system/framework/boot.oat");
+ UsageError(" Example: --oat-fd=6");
UsageError("");
UsageError(" --oat-location=<oat-name>: specifies a symbolic name for the file corresponding");
UsageError(" to the file descriptor specified by --oat-fd.");
@@ -909,7 +909,6 @@ static int dex2oat(int argc, char** argv) {
profile_file = option.substr(strlen("--profile-file=")).data();
VLOG(compiler) << "dex2oat: profile file is " << profile_file;
} else if (option == "--no-profile-file") {
- LOG(INFO) << "dex2oat: no profile file supplied (explictly)";
// No profile
} else if (option == "--print-pass-names") {
PassDriver::PrintPassNames();
@@ -1079,7 +1078,7 @@ static int dex2oat(int argc, char** argv) {
}
timings.StartSplit("dex2oat Setup");
- LOG(INFO) << "dex2oat: " << CommandLine();
+ LOG(INFO) << CommandLine();
Runtime::Options runtime_options;
std::vector<const DexFile*> boot_class_path;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 68e77d44e4..5cc6acf0bf 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -145,6 +145,9 @@ size_t DisassemblerX86::DumpInstruction(std::ostream& os, const uint8_t* instr)
}
} while (have_prefixes);
uint8_t rex = (supports_rex_ && (*instr >= 0x40) && (*instr <= 0x4F)) ? *instr : 0;
+ if (rex != 0) {
+ instr++;
+ }
bool has_modrm = false;
bool reg_is_opcode = false;
size_t immediate_bytes = 0;
@@ -735,7 +738,7 @@ DISASSEMBLER_ENTRY(cmp,
std::ostringstream args;
if (reg_in_opcode) {
DCHECK(!has_modrm);
- DumpReg(args, rex, *instr & 0x7, false, prefix[2], GPR);
+ DumpBaseReg(args, rex, *instr & 0x7);
}
instr++;
uint32_t address_bits = 0;
@@ -746,14 +749,18 @@ DISASSEMBLER_ENTRY(cmp,
uint8_t reg_or_opcode = (modrm >> 3) & 7;
uint8_t rm = modrm & 7;
std::ostringstream address;
- if (mod == 0 && rm == 5) { // fixed address
- address_bits = *reinterpret_cast<const uint32_t*>(instr);
- address << StringPrintf("[0x%x]", address_bits);
+ if (mod == 0 && rm == 5) {
+ if (!supports_rex_) { // Absolute address.
+ address_bits = *reinterpret_cast<const uint32_t*>(instr);
+ address << StringPrintf("[0x%x]", address_bits);
+ } else { // 64-bit RIP relative addressing.
+ address << StringPrintf("[RIP + 0x%x]", *reinterpret_cast<const uint32_t*>(instr));
+ }
instr += 4;
} else if (rm == 4 && mod != 3) { // SIB
uint8_t sib = *instr;
instr++;
- uint8_t ss = (sib >> 6) & 3;
+ uint8_t scale = (sib >> 6) & 3;
uint8_t index = (sib >> 3) & 7;
uint8_t base = sib & 7;
address << "[";
@@ -765,11 +772,22 @@ DISASSEMBLER_ENTRY(cmp,
}
if (index != 4) {
DumpIndexReg(address, rex, index);
- if (ss != 0) {
- address << StringPrintf(" * %d", 1 << ss);
+ if (scale != 0) {
+ address << StringPrintf(" * %d", 1 << scale);
}
}
- if (mod == 1) {
+ if (mod == 0) {
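+        // SIB with mod == 0 and base == 5 means "no base register": a 32-bit
+        // displacement follows the SIB byte.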
+ if (base == 5) {
+ if (index != 4) {
+ address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(instr));
+ } else {
+            // On 64-bit, a low 32-bit absolute address; on 32-bit, a
+            // redundant encoding of an absolute address.
+ address_bits = *reinterpret_cast<const uint32_t*>(instr);
+ address << StringPrintf("%d", address_bits);
+ }
+ instr += 4;
+ }
+ } else if (mod == 1) {
address << StringPrintf(" + %d", *reinterpret_cast<const int8_t*>(instr));
instr++;
} else if (mod == 2) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 915c415c6a..1a67952e47 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -44,6 +44,7 @@
#include "mirror/object_array-inl.h"
#include "noop_compiler_callbacks.h"
#include "oat.h"
+#include "oat_file-inl.h"
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
@@ -215,10 +216,9 @@ class OatDumper {
dex_file->FindClassDef(mh.GetDeclaringClassDescriptor());
if (class_def != NULL) {
uint16_t class_def_index = dex_file->GetIndexForClassDef(*class_def);
- const OatFile::OatClass* oat_class = oat_dex_file->GetOatClass(class_def_index);
- CHECK(oat_class != NULL);
+ const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
size_t method_index = m->GetMethodIndex();
- return oat_class->GetOatMethod(method_index).GetQuickCode();
+ return oat_class.GetOatMethod(method_index).GetQuickCode();
}
}
}
@@ -246,18 +246,18 @@ class OatDumper {
class_def_index < dex_file->NumClassDefs();
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(class_def_index));
+ const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
const byte* class_data = dex_file->GetClassData(class_def);
if (class_data != NULL) {
ClassDataItemIterator it(*dex_file, class_data);
SkipAllFields(it);
uint32_t class_method_index = 0;
while (it.HasNextDirectMethod()) {
- AddOffsets(oat_class->GetOatMethod(class_method_index++));
+ AddOffsets(oat_class.GetOatMethod(class_method_index++));
it.Next();
}
while (it.HasNextVirtualMethod()) {
- AddOffsets(oat_class->GetOatMethod(class_method_index++));
+ AddOffsets(oat_class.GetOatMethod(class_method_index++));
it.Next();
}
}
@@ -299,15 +299,14 @@ class OatDumper {
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
- UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file.GetOatClass(class_def_index));
- CHECK(oat_class.get() != NULL);
+ const OatFile::OatClass oat_class = oat_dex_file.GetOatClass(class_def_index);
os << StringPrintf("%zd: %s (type_idx=%d)", class_def_index, descriptor, class_def.class_idx_)
- << " (" << oat_class->GetStatus() << ")"
- << " (" << oat_class->GetType() << ")\n";
- // TODO: include bitmap here if type is kOatClassBitmap?
+ << " (" << oat_class.GetStatus() << ")"
+ << " (" << oat_class.GetType() << ")\n";
+ // TODO: include bitmap here if type is kOatClassSomeCompiled?
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indented_os(&indent_filter);
- DumpOatClass(indented_os, *oat_class.get(), *(dex_file.get()), class_def);
+ DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def);
}
os << std::flush;
@@ -864,7 +863,7 @@ class ImageDumper {
}
}
// Dump the large objects separately.
- heap->GetLargeObjectsSpace()->GetLiveObjects()->Walk(ImageDumper::Callback, this);
+ heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
indent_os << "\n";
os_ = saved_os;
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index d433fd5b86..bc971a9d8d 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -36,10 +36,10 @@ LIBART_COMMON_SRC_FILES := \
base/unix_file/string_file.cc \
check_jni.cc \
catch_block_stack_visitor.cc \
- catch_finder.cc \
class_linker.cc \
common_throws.cc \
debugger.cc \
+ deoptimize_stack_visitor.cc \
dex_file.cc \
dex_file_verifier.cc \
dex_instruction.cc \
@@ -129,6 +129,7 @@ LIBART_COMMON_SRC_FILES := \
os_linux.cc \
parsed_options.cc \
primitive.cc \
+ quick_exception_handler.cc \
quick/inline_method_analyser.cc \
reference_table.cc \
reflection.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
new file mode 100644
index 0000000000..47c6d28002
--- /dev/null
+++ b/runtime/arch/arch_test.cc
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "common_runtime_test.h"
+#include "mirror/art_method.h"
+
+namespace art {
+
+class ArchTest : public CommonRuntimeTest {
+ protected:
+ static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
+ NO_THREAD_SAFETY_ANALYSIS {
+ Runtime* r = Runtime::Current();
+
+ Thread* t = Thread::Current();
+ t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
+
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(isa, type);
+    EXPECT_EQ(save_method->GetFrameSizeInBytes(), save_size) << "Expected and real size differ for "
+ << type << " core spills=" << std::hex << save_method->GetCoreSpillMask() << " fp spills="
+ << save_method->GetFpSpillMask() << std::dec;
+
+ t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
+ }
+};
+
+
+TEST_F(ArchTest, ARM) {
+#include "arch/arm/asm_support_arm.h"
+#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
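+// Note: the include guard is #undef'ed so the per-architecture tests below can
+// each include their own asm_support header; every macro it defines is
+// likewise #undef'ed at the end of each test.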
+
+
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kArm, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for SaveAll";
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kArm, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsOnly";
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kArm, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsAndArgs";
+#endif
+
+
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+
+TEST_F(ArchTest, ARM64) {
+#include "arch/arm64/asm_support_arm64.h"
+#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
+
+
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kArm64, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for SaveAll";
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsOnly";
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kArm64, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsAndArgs";
+#endif
+
+
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+
+TEST_F(ArchTest, MIPS) {
+#include "arch/mips/asm_support_mips.h"
+#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+
+
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kMips, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for SaveAll";
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kMips, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsOnly";
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kMips, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsAndArgs";
+#endif
+
+
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+
+TEST_F(ArchTest, X86) {
+#include "arch/x86/asm_support_x86.h"
+#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+
+
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kX86, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for SaveAll";
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kX86, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsOnly";
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kX86, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsAndArgs";
+#endif
+
+
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+
+TEST_F(ArchTest, X86_64) {
+#include "arch/x86_64/asm_support_x86_64.h"
+#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+
+
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kX86_64, Runtime::kSaveAll, FRAME_SIZE_SAVE_ALL_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for SaveAll";
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsOnly, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsOnly";
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+ CheckFrameSize(InstructionSet::kX86_64, Runtime::kRefsAndArgs, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE);
+#else
+ LOG(WARNING) << "No frame size for RefsAndArgs";
+#endif
+
+
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+
+TEST_F(ArchTest, ThreadOffsets) {
+#if defined(__arm__)
+#include "arch/arm/asm_support_arm.h"
+#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
+#elif defined(__aarch64__)
+#include "arch/arm64/asm_support_arm64.h"
+#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
+#elif defined(__mips__)
+#include "arch/mips/asm_support_mips.h"
+#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+#elif defined(__i386__)
+#include "arch/x86/asm_support_x86.h"
+#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+#elif defined(__x86_64__)
+#include "arch/x86_64/asm_support_x86_64.h"
+#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+#else
+ // This happens for the host test.
+#ifdef __LP64__
+#include "arch/x86_64/asm_support_x86_64.h"
+#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+#else
+#include "arch/x86/asm_support_x86.h"
+#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+#endif
+#endif
+
+ // Ugly hack, change when possible.
+#ifdef __LP64__
+#define POINTER_SIZE 8
+#else
+#define POINTER_SIZE 4
+#endif
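+  // POINTER_SIZE mirrors sizeof(void*); it is used below as the template
+  // argument of the ThreadOffset<> accessors.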
+
+#if defined(THREAD_SELF_OFFSET)
+ ThreadOffset<POINTER_SIZE> self_offset = Thread::SelfOffset<POINTER_SIZE>();
+ EXPECT_EQ(self_offset.Int32Value(), THREAD_SELF_OFFSET);
+#else
+ LOG(INFO) << "No Thread Self Offset found.";
+#endif
+
+#if defined(THREAD_CARD_TABLE_OFFSET)
+ ThreadOffset<POINTER_SIZE> card_offset = Thread::CardTableOffset<POINTER_SIZE>();
+ EXPECT_EQ(card_offset.Int32Value(), THREAD_CARD_TABLE_OFFSET);
+#else
+ LOG(INFO) << "No Thread Card Table Offset found.";
+#endif
+
+#if defined(THREAD_EXCEPTION_OFFSET)
+ ThreadOffset<POINTER_SIZE> exc_offset = Thread::ExceptionOffset<POINTER_SIZE>();
+ EXPECT_EQ(exc_offset.Int32Value(), THREAD_EXCEPTION_OFFSET);
+#else
+ LOG(INFO) << "No Thread Exception Offset found.";
+#endif
+
+#if defined(THREAD_ID_OFFSET)
+ ThreadOffset<POINTER_SIZE> id_offset = Thread::ThinLockIdOffset<POINTER_SIZE>();
+ EXPECT_EQ(id_offset.Int32Value(), THREAD_ID_OFFSET);
+#else
+ LOG(INFO) << "No Thread ID Offset found.";
+#endif
+
+
+ // Undefine everything for the next test
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+
+TEST_F(ArchTest, CalleeSaveMethodOffsets) {
+#if defined(__arm__)
+#include "arch/arm/asm_support_arm.h"
+#undef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
+#elif defined(__aarch64__)
+#include "arch/arm64/asm_support_arm64.h"
+#undef ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
+#elif defined(__mips__)
+#include "arch/mips/asm_support_mips.h"
+#undef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+#elif defined(__i386__)
+#include "arch/x86/asm_support_x86.h"
+#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+#elif defined(__x86_64__)
+#include "arch/x86_64/asm_support_x86_64.h"
+#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+#else
+ // This happens for the host test.
+#ifdef __LP64__
+#include "arch/x86_64/asm_support_x86_64.h"
+#undef ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
+#else
+#include "arch/x86/asm_support_x86.h"
+#undef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+#endif
+#endif
+
+
+#if defined(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET)
+ EXPECT_EQ(Runtime::GetCalleeSaveMethodOffset(Runtime::kSaveAll),
+ static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET));
+#else
+ LOG(INFO) << "No Runtime Save-all Offset found.";
+#endif
+
+#if defined(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET)
+ EXPECT_EQ(Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsOnly),
+ static_cast<size_t>(RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET));
+#else
+ LOG(INFO) << "No Runtime Refs-only Offset found.";
+#endif
+
+#if defined(RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET)
+ EXPECT_EQ(Runtime::GetCalleeSaveMethodOffset(Runtime::kRefsAndArgs),
+ static_cast<size_t>(RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET));
+#else
+ LOG(INFO) << "No Runtime Refs-and-Args Offset found.";
+#endif
+
+
+ // Undefine everything for the next test
+#ifdef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#undef RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET
+#endif
+#ifdef THREAD_SELF_OFFSET
+#undef THREAD_SELF_OFFSET
+#endif
+#ifdef THREAD_CARD_TABLE_OFFSET
+#undef THREAD_CARD_TABLE_OFFSET
+#endif
+#ifdef THREAD_EXCEPTION_OFFSET
+#undef THREAD_EXCEPTION_OFFSET
+#endif
+#ifdef THREAD_ID_OFFSET
+#undef THREAD_ID_OFFSET
+#endif
+#ifdef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_ONLY_CALLEE_SAVE
+#endif
+#ifdef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#undef FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE
+#endif
+}
+
+} // namespace art
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index fb85feb34f..594252a1c2 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -19,6 +19,13 @@
#include "asm_support_arm.h"
+// Define special registers.
+
+// Register holding suspend check count down.
+#define rSUSPEND r4
+// Register holding Thread::Current().
+#define rSELF r9
+
.cfi_sections .debug_frame
.syntax unified
.arch armv7-a
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 4b64076953..a73d5228d8 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -19,10 +19,6 @@
#include "asm_support.h"
-// Register holding suspend check count down.
-#define rSUSPEND r4
-// Register holding Thread::Current().
-#define rSELF r9
// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::tls32_.thin_lock_thread_id verified in InitCpu
@@ -32,4 +28,8 @@
// Offset of field Thread::tlsPtr_.exception verified in InitCpu
#define THREAD_EXCEPTION_OFFSET 116
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 48
+
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 4a69644b6c..1be34ba80e 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -41,27 +41,3 @@ ENTRY art_jni_dlsym_lookup_stub
pop {r0, r1, r2, r3, pc} @ restore regs and return to caller to handle exception
.cfi_adjust_cfa_offset -20
END art_jni_dlsym_lookup_stub
-
- /*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_work_around_app_jni_bugs
- @ save registers that may contain arguments and LR that will be crushed by a call
- push {r0-r3, lr}
- .save {r0-r3, lr}
- .cfi_adjust_cfa_offset 16
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r1, 4
- .cfi_rel_offset r2, 8
- .cfi_rel_offset r3, 12
- sub sp, #12 @ 3 words of space for alignment
- mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- bl artWorkAroundAppJniBugs @ (Thread*, SP)
- add sp, #12 @ rewind stack
- mov r12, r0 @ save target address
- pop {r0-r3, lr} @ restore possibly modified argument registers
- .cfi_adjust_cfa_offset -16
- bx r12 @ tail call into JNI routine
-END art_work_around_app_jni_bugs
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 4903732a7c..bc80644945 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -46,6 +46,11 @@
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.pad #12
.cfi_adjust_cfa_offset 12
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 36 + 128 + 12)
+#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#endif
.endm
/*
@@ -66,6 +71,11 @@
sub sp, #4 @ bottom word will hold Method*
.pad #4
.cfi_adjust_cfa_offset 4
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
+#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM) size not as expected."
+#endif
.endm
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
@@ -114,6 +124,11 @@
sub sp, #8 @ 2 words of space, bottom word will hold Method*
.pad #8
.cfi_adjust_cfa_offset 8
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 40 + 8)
+#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
.endm
.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
@@ -492,11 +507,22 @@ ENTRY art_quick_aput_obj
blx lr
.Lcheck_assignability:
push {r0-r2, lr} @ save arguments
+ .save {r0-r2, lr}
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset lr, 12
mov r1, ip
mov r0, r3
bl artIsAssignableFromCode
cbz r0, .Lthrow_array_store_exception
pop {r0-r2, lr}
+ .cfi_restore r0
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -16
add r3, r0, #OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
@@ -505,6 +531,11 @@ ENTRY art_quick_aput_obj
blx lr
.Lthrow_array_store_exception:
pop {r0-r2, lr}
+ .cfi_restore r0
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -16
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
mov r1, r2
mov r2, r9 @ pass Thread::Current
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 634f77791d..9614c29b81 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -19,6 +19,19 @@
#include "asm_support_arm64.h"
+// Define special registers.
+
+// Register holding Thread::Current().
+#define xSELF x18
+// Frame Pointer
+#define xFP x29
+// Link Register
+#define xLR x30
+// Define the intraprocedural linkage temporary registers.
+#define xIP0 x16
+#define xIP1 x17
+
+
.cfi_sections .debug_frame
.macro ENTRY name
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index a7e68edf03..b18e41589b 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -28,15 +28,6 @@
// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
-// Register holding Thread::Current().
-#define xSELF x18
-// Frame Pointer
-#define xFP x29
-// Link Register
-#define xLR x30
-// Define the intraprocedural linkage temporary registers.
-#define xIP0 x16
-#define xIP1 x17
// Offset of field Thread::suspend_count_ verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::card_table_ verified in InitCpu
@@ -46,4 +37,8 @@
// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
#define THREAD_ID_OFFSET 12
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 368
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 176
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 304
+
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/jni_entrypoints_arm64.S b/runtime/arch/arm64/jni_entrypoints_arm64.S
index ba783ab820..c59a304fae 100644
--- a/runtime/arch/arm64/jni_entrypoints_arm64.S
+++ b/runtime/arch/arm64/jni_entrypoints_arm64.S
@@ -55,41 +55,3 @@ ENTRY art_jni_dlsym_lookup_stub
1:
ret // restore regs and return to caller to handle exception.
END art_jni_dlsym_lookup_stub
-
- /*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_work_around_app_jni_bugs
- // spill regs.
- stp x29, x30, [sp, #-16]!
- mov x29, sp
- stp d6, d7, [sp, #-16]!
- stp d4, d5, [sp, #-16]!
- stp d2, d3, [sp, #-16]!
- stp d0, d1, [sp, #-16]!
- stp x6, x7, [sp, #-16]!
- stp x4, x5, [sp, #-16]!
- stp x2, x3, [sp, #-16]!
- stp x0, x1, [sp, #-16]!
-
- mov x0, x19 // Thread::Current.
- mov x1, sp // SP.
- bl artWorkAroundAppJniBugs // (Thread*, SP).
- mov x17, x0 // save target return.
-
- // load spill regs.
- ldp x0, x1, [sp], #16
- ldp x2, x3, [sp], #16
- ldp x4, x5, [sp], #16
- ldp x6, x7, [sp], #16
- ldp d0, d1, [sp], #16
- ldp d2, d3, [sp], #16
- ldp d4, d5, [sp], #16
- ldp d6, d7, [sp], #16
- ldp x29, x30, [sp], #16
-
- //tail call into JNI routine.
- br x17
-END art_work_around_app_jni_bugs
-
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 6ce5d06f0e..71f5bf7ef2 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -36,6 +36,11 @@
sub sp, sp, #368
.cfi_adjust_cfa_offset 368
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 368)
+#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
+
// FP args
stp d1, d2, [sp, #8]
stp d2, d3, [sp, #24]
@@ -95,8 +100,61 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
+// WIP.
.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- brk 0
+ adrp x9, :got:_ZN3art7Runtime9instance_E
+ ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
+
+    // Our registers aren't intermixed - just spill in order.
+    ldr x9, [x9]    // x9 = art::Runtime::instance_ (Runtime*).
+
+    // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefsOnly] .
+    ldr x9, [x9, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+
+ sub sp, sp, #176
+ .cfi_adjust_cfa_offset 176
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 176)
+#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
+
+ // FP callee-saves
+ stp d8, d9, [sp, #8]
+ stp d10, d11, [sp, #24]
+ stp d12, d13, [sp, #40]
+ stp d14, d15, [sp, #56]
+
+ // Callee saved.
+ stp xSELF, x19, [sp, #72]
+ .cfi_rel_offset x18, 72
+ .cfi_rel_offset x19, 80
+
+ stp x20, x21, [sp, #88]
+ .cfi_rel_offset x20, 88
+ .cfi_rel_offset x21, 96
+
+ stp x22, x23, [sp, #104]
+ .cfi_rel_offset x22, 104
+ .cfi_rel_offset x23, 112
+
+ stp x24, x25, [sp, #120]
+ .cfi_rel_offset x24, 120
+ .cfi_rel_offset x25, 128
+
+ stp x26, x27, [sp, #136]
+ .cfi_rel_offset x26, 136
+ .cfi_rel_offset x27, 144
+
+ stp x28, xFP, [sp, #152] // Save FP.
+ .cfi_rel_offset x28, 152
+ .cfi_rel_offset x29, 160
+
+ str xLR, [sp, #168]
+ .cfi_rel_offset x30, 168
+
+    // Store the appropriate callee-save method.
+    str x9, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly]
.endm
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
@@ -112,6 +170,11 @@
sub sp, sp, #304
.cfi_adjust_cfa_offset 304
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 304)
+#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
+#endif
+
stp d0, d1, [sp, #16]
stp d2, d3, [sp, #32]
stp d4, d5, [sp, #48]
@@ -325,10 +388,14 @@
DELIVER_PENDING_EXCEPTION
.endm
+// FIXME: Temporary fix for TR(XSELF).
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- brk 0
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov x0, x19 // pass Thread::Current
+ mov x1, sp // pass SP
+ b \cxx_name // \cxx_name(Thread*, SP)
END \c_name
.endm
@@ -339,15 +406,19 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context.
mov x1, x19 // pass Thread::Current.
mov x2, sp // pass SP.
- b \cxx_name // \cxx_name(Thread*, SP).
+ b \cxx_name // \cxx_name(arg, Thread*, SP).
brk 0
END \c_name
.endm
+// FIXME: Temporary fix for TR(XSELF).
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov x2, x19 // pass Thread::Current
+ mov x3, sp // pass SP
+ b \cxx_name // \cxx_name(arg1, arg2, Thread*, SP)
brk 0
END \c_name
.endm
@@ -864,7 +935,58 @@ UNIMPLEMENTED art_quick_handle_fill_data
UNIMPLEMENTED art_quick_lock_object
UNIMPLEMENTED art_quick_unlock_object
-UNIMPLEMENTED art_quick_check_cast
+
+ /*
+ * Entry from managed code that calls artIsAssignableFromCode and on failure calls
+ * artThrowClassCastException.
+ */
+ .extern artThrowClassCastException
+ENTRY art_quick_check_cast
+ // Store arguments and link register
+    sub sp, sp, #32                     // Stack needs to be 16B aligned on calls
+ .cfi_adjust_cfa_offset 32
+ stp x0, x1, [sp]
+ .cfi_rel_offset x0, 0
+ .cfi_rel_offset x1, 8
+ stp xSELF, xLR, [sp, #16]
+ .cfi_rel_offset x18, 16
+ .cfi_rel_offset x30, 24
+
+ // Call runtime code
+ bl artIsAssignableFromCode
+
+ // Check for exception
+ cbz x0, .Lthrow_class_cast_exception
+
+ // Restore and return
+ ldp x0, x1, [sp]
+ .cfi_restore x0
+ .cfi_restore x1
+ ldp xSELF, xLR, [sp, #16]
+ .cfi_restore x18
+ .cfi_restore x30
+ add sp, sp, #32
+ .cfi_adjust_cfa_offset -32
+ ret
+
+.Lthrow_class_cast_exception:
+ // Restore
+ ldp x0, x1, [sp]
+ .cfi_restore x0
+ .cfi_restore x1
+ ldp xSELF, xLR, [sp, #16]
+ .cfi_restore x18
+ .cfi_restore x30
+ add sp, sp, #32
+ .cfi_adjust_cfa_offset -32
+
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov x2, xSELF // pass Thread::Current
+ mov x3, sp // pass SP
+ b artThrowClassCastException // (Class*, Class*, Thread*, SP)
+ brk 0 // We should not return here...
+END art_quick_check_cast
+
UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
UNIMPLEMENTED art_quick_aput_obj_with_bound_check
UNIMPLEMENTED art_quick_aput_obj
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index d110b953cf..d8ec9cd6c9 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -19,6 +19,14 @@
#include "asm_support_mips.h"
+// Define special registers.
+
+// Register holding suspend check count down.
+#define rSUSPEND $s0
+// Register holding Thread::Current().
+#define rSELF $s1
+
+
/* Cache alignment for function entry */
.macro ENTRY name
.type \name, %function
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 36ce1b6f57..2b4a745ad1 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -19,10 +19,6 @@
#include "asm_support.h"
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
@@ -30,4 +26,8 @@
// Offset of field Thread::tlsPtr_.exception verified in InitCpu
#define THREAD_EXCEPTION_OFFSET 116
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 64
+
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index f9ca7df763..e5f4a79237 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -54,36 +54,3 @@ ENTRY art_jni_dlsym_lookup_stub
jr $ra
nop
END art_jni_dlsym_lookup_stub
-
- /*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_work_around_app_jni_bugs
- GENERATE_GLOBAL_POINTER
- # save registers that may contain arguments and LR that will be crushed by a call
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $ra, 28($sp)
- .cfi_rel_offset 31, 28
- sw $a3, 24($sp)
- .cfi_rel_offset 7, 28
- sw $a2, 20($sp)
- .cfi_rel_offset 6, 28
- sw $a1, 16($sp)
- .cfi_rel_offset 5, 28
- sw $a0, 12($sp)
- .cfi_rel_offset 4, 28
- move $a0, rSELF # pass Thread::Current
- jal artWorkAroundAppJniBugs # (Thread*, $sp)
- move $a1, $sp # pass $sp
- move $t9, $v0 # save target address
- lw $a0, 12($sp)
- lw $a1, 16($sp)
- lw $a2, 20($sp)
- lw $a3, 24($sp)
- lw $ra, 28($sp)
- jr $t9 # tail call into JNI routine
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-END art_work_around_app_jni_bugs
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c3ae5630d4..95fcd7389c 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -34,6 +34,12 @@
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
addiu $sp, $sp, -64
.cfi_adjust_cfa_offset 64
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 64)
+#error "SAVE_ALL_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#endif
+
sw $ra, 60($sp)
.cfi_rel_offset 31, 60
sw $s8, 56($sp)
@@ -68,6 +74,12 @@
.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
addiu $sp, $sp, -64
.cfi_adjust_cfa_offset 64
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 64)
+#error "REFS_ONLY_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#endif
+
sw $ra, 60($sp)
.cfi_rel_offset 31, 60
sw $s8, 56($sp)
@@ -144,6 +156,12 @@
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
addiu $sp, $sp, -64
.cfi_adjust_cfa_offset 64
+
+ // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 64)
+#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(MIPS) size not as expected."
+#endif
+
sw $ra, 60($sp)
.cfi_rel_offset 31, 60
sw $s8, 56($sp)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
new file mode 100644
index 0000000000..543e69581e
--- /dev/null
+++ b/runtime/arch/stub_test.cc
@@ -0,0 +1,357 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_runtime_test.h"
+
+#include <cstdio>
+
+namespace art {
+
+
+class StubTest : public CommonRuntimeTest {
+ protected:
+ // We need callee-save methods set up in the Runtime for exceptions.
+ void SetUp() OVERRIDE {
+ // Do the normal setup.
+ CommonRuntimeTest::SetUp();
+
+ {
+ // Create callee-save methods
+ ScopedObjectAccess soa(Thread::Current());
+ for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
+ Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+ if (!runtime_->HasCalleeSaveMethod(type)) {
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(kRuntimeISA, type), type);
+ }
+ }
+ }
+ }
+
+
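+  // Invokes a stub with three word-sized arguments. Each architecture uses
+  // inline assembly to place the arguments per the quick ABI, pushes a null
+  // "method*" to terminate the quick stack walk, and sets up the thread
+  // register where the ABI requires one.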
+ size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
+ // Push a transition back into managed code onto the linked list in thread.
+ ManagedStack fragment;
+ self->PushManagedStackFragment(&fragment);
+
+ size_t result;
+#if defined(__i386__)
+ // TODO: Set the thread?
+ __asm__ __volatile__(
+ "pushl $0\n\t" // Push nullptr to terminate quick stack
+ "call *%%edi\n\t" // Call the stub
+ "addl $4, %%esp" // Pop nullptr
+ : "=a" (result)
+ // Use the result from eax
+ : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code)
+ // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
+ : ); // clobber.
+ // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
+ // but compilation fails when declaring that.
+#elif defined(__arm__)
+ __asm__ __volatile__(
+ "push {r1-r2,r9, lr}\n\t" // Save the link and thread register
+ ".cfi_adjust_cfa_offset 16\n\t"
+ "mov r0, %[arg0]\n\t" // Set arg0-arg2
+ "mov r1, %[arg1]\n\t" // TODO: Any way to use constraints like on x86?
+ "mov r2, %[arg2]\n\t"
+ // Use r9 last as we don't know whether it was used for arg0-arg2
+ "mov r9, #0\n\t" // Push nullptr to terminate stack
+ "push {r9}\n\t"
+ ".cfi_adjust_cfa_offset 4\n\t"
+ "mov r9, %[self]\n\t" // Set the thread
+ "blx %[code]\n\t" // Call the stub
+ "pop {r1}\n\t" // Pop nullptr
+ ".cfi_adjust_cfa_offset -4\n\t"
+ "pop {r1-r2,r9, lr}\n\t" // Restore the link and thread register
+ ".cfi_adjust_cfa_offset -16\n\t"
+ "mov %[result], r0\n\t" // Save the result
+ : [result] "=r" (result)
+ // Use the result from r0
+ : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
+ : ); // clobber.
+#elif defined(__aarch64__)
+ __asm__ __volatile__(
+ "sub sp, sp, #48\n\t" // Reserve stack space, 16B aligned
+      "stp xzr, x1, [sp]\n\t"        // nullptr (end of quick stack), save x1
+ "stp x2, x18, [sp, #16]\n\t" // Save x2, x18(xSELF)
+ "str x30, [sp, #32]\n\t" // Save xLR
+ "mov x0, %[arg0]\n\t" // Set arg0-arg2
+ "mov x1, %[arg1]\n\t" // TODO: Any way to use constraints like on x86?
+ "mov x2, %[arg2]\n\t"
+      // Use x18 last as we don't know whether it was used for arg0-arg2
+ "mov x18, %[self]\n\t" // Set the thread
+ "blr %[code]\n\t" // Call the stub
+ "ldp x1, x2, [sp, #8]\n\t" // Restore x1, x2
+ "ldp x18, x30, [sp, #24]\n\t" // Restore xSELF, xLR
+ "add sp, sp, #48\n\t" // Free stack space
+ "mov %[result], x0\n\t" // Save the result
+ : [result] "=r" (result)
+        // Use the result from x0
+ : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self)
+ : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17"); // clobber.
+#elif defined(__x86_64__)
+ // Note: Uses the native convention
+ // TODO: Set the thread?
+ __asm__ __volatile__(
+ "pushq $0\n\t" // Push nullptr to terminate quick stack
+ "pushq $0\n\t" // 16B alignment padding
+ "call *%%rax\n\t" // Call the stub
+ "addq $16, %%rsp" // Pop nullptr and padding
+ : "=a" (result)
+ // Use the result from rax
+ : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code)
+ // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
+ : "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"); // clobber all
+  // TODO: Should we clobber the other registers?
+#else
+ LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
+ result = 0;
+#endif
+ // Pop transition.
+ self->PopManagedStackFragment(fragment);
+ return result;
+ }
+};
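Everything in this file funnels through the Invoke3 trampoline above: it pushes a managed-stack fragment, null-terminates the quick stack, moves the three word-sized arguments into the registers the stub expects, and hands back whatever the stub leaves in the result register. A minimal usage sketch, mirroring the Memcpy test below (art_quick_memcpy is declared further down in this file):

    // Inside a StubTest test body:
    uint32_t src[4] = {1, 2, 3, 4};
    uint32_t dst[4] = {0, 0, 0, 0};
    Invoke3(reinterpret_cast<size_t>(dst),  // arg0: destination
            reinterpret_cast<size_t>(src),  // arg1: source
            sizeof(src),                    // arg2: byte count
            reinterpret_cast<uintptr_t>(&art_quick_memcpy),
            Thread::Current());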
+
+
+#if defined(__i386__) || defined(__x86_64__)
+extern "C" void art_quick_memcpy(void);
+#endif
+
+TEST_F(StubTest, Memcpy) {
+#if defined(__i386__) || defined(__x86_64__)
+ Thread* self = Thread::Current();
+
+ uint32_t orig[20];
+ uint32_t trg[20];
+ for (size_t i = 0; i < 20; ++i) {
+ orig[i] = i;
+ trg[i] = 0;
+ }
+
+ Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
+ 10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);
+
+ EXPECT_EQ(orig[0], trg[0]);
+
+ for (size_t i = 1; i < 4; ++i) {
+ EXPECT_NE(orig[i], trg[i]);
+ }
+
+ for (size_t i = 4; i < 14; ++i) {
+ EXPECT_EQ(orig[i], trg[i]);
+ }
+
+ for (size_t i = 14; i < 20; ++i) {
+ EXPECT_NE(orig[i], trg[i]);
+ }
+
+ // TODO: Test overlapping?
+
+#else
+ LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+
+#if defined(__i386__) || defined(__arm__)
+extern "C" void art_quick_lock_object(void);
+#endif
+
+TEST_F(StubTest, LockObject) {
+#if defined(__i386__) || defined(__arm__)
+ Thread* self = Thread::Current();
+ // Create an object
+ ScopedObjectAccess soa(self);
+ // garbage is created during ClassLinker::Init
+
+ SirtRef<mirror::String> obj(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+ LockWord lock = obj->GetLockWord(false);
+ LockWord::LockState old_state = lock.GetState();
+ EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
+
+ Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+
+ LockWord lock_after = obj->GetLockWord(false);
+ LockWord::LockState new_state = lock_after.GetState();
+ EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
+
+ Invoke3(reinterpret_cast<size_t>(obj.get()), 0U, 0U,
+ reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+
+ LockWord lock_after2 = obj->GetLockWord(false);
+ LockWord::LockState new_state2 = lock_after2.GetState();
+ EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
+
+ // TODO: Improve this test. Somehow force it to go to fat locked. But that needs another thread.
+
+#else
+ LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+extern "C" void art_quick_check_cast(void);
+#endif
+
+TEST_F(StubTest, CheckCast) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__x86_64__)
+ Thread* self = Thread::Current();
+ // Find some classes.
+ ScopedObjectAccess soa(self);
+ // garbage is created during ClassLinker::Init
+
+ SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
+ "[Ljava/lang/Object;"));
+ SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
+ "[Ljava/lang/String;"));
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c.get()), 0U,
+ reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c2.get()), 0U,
+ reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ Invoke3(reinterpret_cast<size_t>(c.get()), reinterpret_cast<size_t>(c2.get()), 0U,
+ reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ // TODO: Make the following work. But that would require correct managed frames.
+
+ Invoke3(reinterpret_cast<size_t>(c2.get()), reinterpret_cast<size_t>(c.get()), 0U,
+ reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+
+ EXPECT_TRUE(self->IsExceptionPending());
+ self->ClearException();
+
+#else
+ LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+
+#if defined(__i386__) || defined(__arm__)
+extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
+// Do not check non-checked ones, we'd need handlers and stuff...
+#endif
+
+TEST_F(StubTest, APutObj) {
+#if defined(__i386__) || defined(__arm__)
+ Thread* self = Thread::Current();
+ // Create an object
+ ScopedObjectAccess soa(self);
+ // garbage is created during ClassLinker::Init
+
+ SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
+ "Ljava/lang/Object;"));
+ SirtRef<mirror::Class> c2(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
+ "Ljava/lang/String;"));
+ SirtRef<mirror::Class> ca(soa.Self(), class_linker_->FindSystemClass(soa.Self(),
+ "[Ljava/lang/String;"));
+
+ // Build a string array of size 1
+ SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.get(), 1));
+
+ // Build a string -> should be assignable
+ SirtRef<mirror::Object> str_obj(soa.Self(),
+ mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+
+ // Build a generic object -> should fail assigning
+ SirtRef<mirror::Object> obj_obj(soa.Self(), c->AllocObject(soa.Self()));
+
+ // Play with it...
+
+ // 1) Success cases
+ // 1.1) Assign str_obj to array[0]
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(str_obj.get()),
+ reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ // 1.2) Assign null to array[0]
+
+ Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(nullptr),
+ reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.
+
+ // 2) Failure cases (str into str[])
+ // 2.1) Array = null
+ // TODO: Throwing NPE needs actual DEX code
+
+// Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.get()),
+// reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+//
+// EXPECT_TRUE(self->IsExceptionPending());
+// self->ClearException();
+
+ // 2.2) Index < 0
+
+ Invoke3(reinterpret_cast<size_t>(array.get()), static_cast<size_t>(-1),
+ reinterpret_cast<size_t>(str_obj.get()),
+ reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+
+ EXPECT_TRUE(self->IsExceptionPending());
+ self->ClearException();
+
+  // 2.3) Index >= array length
+
+ Invoke3(reinterpret_cast<size_t>(array.get()), 1U, reinterpret_cast<size_t>(str_obj.get()),
+ reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+
+ EXPECT_TRUE(self->IsExceptionPending());
+ self->ClearException();
+
+ // 3) Failure cases (obj into str[])
+
+ Invoke3(reinterpret_cast<size_t>(array.get()), 0U, reinterpret_cast<size_t>(obj_obj.get()),
+ reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+
+ EXPECT_TRUE(self->IsExceptionPending());
+ self->ClearException();
+
+ // Tests done.
+#else
+ LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
+ // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+} // namespace art
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index e329530e83..642d9a3dd4 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -19,8 +19,8 @@
#include "asm_support_x86.h"
-#if defined(__clang__)
- // Clang's as(1) doesn't let you name macro parameters.
+#if defined(__clang__) && ((__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 5)))
+    // Clang's as(1) doesn't let you name macro parameters prior to 3.5.
#define MACRO0(macro_name) .macro macro_name
#define MACRO1(macro_name, macro_arg1) .macro macro_name
#define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
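Version-gating with the preprocessor is easy to get wrong: a plain conjunction such as (__clang_major__ < 4) && (__clang_minor__ < 5) would wrongly exclude hypothetical 2.x releases whose minor version is 5 or greater, even though they also predate 3.5. The corrected two-clause predicate, as a standalone sketch:

    // True exactly for clang releases older than 3.5.
    #if defined(__clang__) && \
        ((__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 5)))
      // pre-3.5 as(1) workarounds go here
    #endif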
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index e986c415e9..fd5ed5abb8 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -28,4 +28,8 @@
// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
#define THREAD_ID_OFFSET 12
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 32
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 32
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 32
+
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index fd2cfeb403..339ed2eaf0 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -28,6 +28,11 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
PUSH ebp
subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
CFI_ADJUST_CFA_OFFSET(16)
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +4: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 3*4 + 16 + 4)
+#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86) size not as expected."
+#endif
END_MACRO
/*
@@ -40,10 +45,16 @@ MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
PUSH ebp
subl MACRO_LITERAL(16), %esp // Grow stack by 4 words, bottom word will hold Method*
CFI_ADJUST_CFA_OFFSET(16)
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +4: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 3*4 + 16 + 4)
+#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86) size not as expected."
+#endif
END_MACRO
MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
- addl MACRO_LITERAL(16), %esp // Unwind stack up to return address
+ addl MACRO_LITERAL(16), %esp // Unwind stack up to saved values
CFI_ADJUST_CFA_OFFSET(-16)
POP ebp // Restore callee saves (ebx is saved/restored by the upcall)
POP esi
@@ -62,6 +73,12 @@ MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
PUSH edx
PUSH ecx
PUSH eax // Align stack, eax will be clobbered by Method*
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +4: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 7*4 + 4)
+#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86) size not as expected."
+#endif
END_MACRO
MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index d03a4746ca..ad65033fdb 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -19,15 +19,15 @@
#include "asm_support_x86_64.h"
-#if defined(__clang__)
- // Clang's as(1) doesn't let you name macro parameters.
+#if defined(__clang__) && ((__clang_major__ < 3) || ((__clang_major__ == 3) && (__clang_minor__ < 5)))
+    // Clang's as(1) doesn't let you name macro parameters prior to 3.5.
#define MACRO0(macro_name) .macro macro_name
#define MACRO1(macro_name, macro_arg1) .macro macro_name
#define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
#define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
#define END_MACRO .endmacro
- // Clang's as(1) uses $0, $1, and so on for macro arguments.
+ // Clang's as(1) uses $0, $1, and so on for macro arguments prior to 3.5.
#define VAR(name,index) SYMBOL($index)
#define PLT_VAR(name, index) SYMBOL($index)@PLT
#define REG_VAR(name,index) %$index
@@ -137,4 +137,12 @@ VAR(name, 0):
SIZE(\name, 0)
END_MACRO
+MACRO0(UNREACHABLE)
+ int3
+END_MACRO
+
+MACRO0(UNTESTED)
+ int3
+END_MACRO
+
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 70ef3ef253..109533be16 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -35,4 +35,8 @@
// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
#define THREAD_ID_OFFSET 12
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 176
+
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
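These constants are what the new #if/#error checks in the .S files verify at assembly time, and the new arch_test.cc re-checks them from the C++ side. The arithmetic for the largest frame, written out as a C++ sketch (the constant name is illustrative only):

    // REFS_AND_ARGS on x86-64: 11 eight-byte slots (callee saves, GPR args and
    // the Method* slot), 80 bytes of XMM argument spills, plus the
    // caller-pushed return address.
    constexpr size_t kRefsAndArgsFrameSizeX86_64 = 11 * 8 + 80 + 8;
    static_assert(kRefsAndArgsFrameSizeX86_64 == 176,
                  "must agree with FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE");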
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 17b8556792..a31ea5858c 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -26,7 +26,7 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
// R10 := Runtime::Current()
movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
movq (%r10), %r10
- // Save callee and GPR args, mixed together to agree with core spills bitmap.
+ // Save callee save registers to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
PUSH r13 // Callee save.
@@ -35,10 +35,16 @@ MACRO0(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME)
PUSH rbx // Callee save.
subq MACRO_LITERAL(8), %rsp // Space for Method* (also aligns the frame).
CFI_ADJUST_CFA_OFFSET(8)
- // R10 := ArtMethod* for ref and args callee save frame method.
+ // R10 := ArtMethod* for save all callee save frame method.
movq RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 6*8 + 8 + 8)
+#error "SAVE_ALL_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#endif
END_MACRO
/*
@@ -46,13 +52,42 @@ END_MACRO
* Runtime::CreateCalleeSaveMethod(kRefsOnly)
*/
MACRO0(SETUP_REF_ONLY_CALLEE_SAVE_FRAME)
- int3
- int3
+ UNTESTED
+ // R10 := Runtime::Current()
+ movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+ movq (%r10), %r10
+    // Save callee save registers to agree with core spills bitmap.
+ PUSH r15 // Callee save.
+ PUSH r14 // Callee save.
+ PUSH r13 // Callee save.
+ PUSH r12 // Callee save.
+ PUSH rbp // Callee save.
+ PUSH rbx // Callee save.
+ subq MACRO_LITERAL(8), %rsp // Space for Method* (also aligns the frame).
+ CFI_ADJUST_CFA_OFFSET(8)
+ // R10 := ArtMethod* for refs only callee save frame method.
+ movq RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(%r10), %r10
+ // Store ArtMethod* to bottom of stack.
+ movq %r10, 0(%rsp)
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 6*8 + 8 + 8)
+#error "REFS_ONLY_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#endif
END_MACRO
MACRO0(RESTORE_REF_ONLY_CALLEE_SAVE_FRAME)
- int3
- int3
+ UNTESTED
+ addq MACRO_LITERAL(8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // TODO: optimize by not restoring callee-saves restored by the ABI
+ POP rbx
+ POP rbp
+ POP r12
+ POP r13
+ POP r14
+ POP r15
END_MACRO
/*
@@ -91,6 +126,12 @@ MACRO0(SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME)
movq %xmm7, 72(%rsp)
// Store ArtMethod* to bottom of stack.
movq %r10, 0(%rsp)
+
+ // Ugly compile-time check, but we only have the preprocessor.
+ // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 11*8 + 80 + 8)
+#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(X86_64) size not as expected."
+#endif
END_MACRO
MACRO0(RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME)
@@ -130,13 +171,18 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rsp, %rsi
call PLT_SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
- int3 // unreached
+ UNREACHABLE
END_MACRO
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ UNTESTED
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ // Outgoing argument set up
+ movq %rsp, %rsi // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
+ call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -144,17 +190,22 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name, 0)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
- mov %rsp, %rdx // pass SP
- mov %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ movq %rsp, %rdx // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
- int3 // unreached
+ UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ UNTESTED
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ // Outgoing argument set up
+ movq %rsp, %rcx // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+    call PLT_VAR(cxx_name, 1)          // cxx_name(arg1, arg2, Thread*, SP)
+ UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -314,7 +365,7 @@ DEFINE_FUNCTION art_quick_invoke_stub
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
- mov %rsp, %rbp // Copy value of stack pointer into base pointer.
+ movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d
addl LITERAL(64), %edx // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
@@ -385,7 +436,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
- mov %rsp, %rbp // Copy value of stack pointer into base pointer.
+ movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d
addl LITERAL(64), %edx // Reserve space for return addr, method*, rbp, r8 and r9 in frame.
@@ -429,43 +480,67 @@ END_FUNCTION art_quick_invoke_static_stub
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ UNTESTED
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %rsp, %rsi // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
+ call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ UNTESTED
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %rsp, %rdx // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg0, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ UNTESTED
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %rsp, %rcx // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ UNTESTED
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ // Outgoing argument set up
+ movq %rsp, %r8 // pass SP
+ movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
END_MACRO
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
- int3
- testl %eax, %eax // eax == 0 ?
- jz 1f // if eax == 0 goto 1
+ UNTESTED
+ testq %rax, %rax // rax == 0 ?
+ jz 1f // if rax == 0 goto 1
ret // return
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
MACRO0(RETURN_IF_EAX_ZERO)
- int3
+ UNTESTED
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
ret // return
@@ -609,8 +684,22 @@ DEFINE_FUNCTION art_quick_is_assignable
END_FUNCTION art_quick_is_assignable
DEFINE_FUNCTION art_quick_check_cast
- int3
- int3
+ PUSH rdi // Save args for exc
+ PUSH rsi
+ call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ testq %rax, %rax
+ jz 1f // jump forward if not assignable
+ addq LITERAL(16), %rsp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ ret
+1:
+ POP rsi // Pop arguments
+ POP rdi
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+    movq %rsp, %rcx                       // pass SP
+    movq %gs:THREAD_SELF_OFFSET, %rdx     // pass Thread::Current()
+ call PLT_SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+    UNREACHABLE
END_FUNCTION art_quick_check_cast
/*
@@ -621,7 +710,12 @@ END_FUNCTION art_quick_check_cast
UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
UNIMPLEMENTED art_quick_aput_obj_with_bound_check
UNIMPLEMENTED art_quick_aput_obj
-UNIMPLEMENTED art_quick_memcpy
+
+// TODO: This is quite silly on X86_64 now.
+DEFINE_FUNCTION art_quick_memcpy
+ call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
+ ret
+END_FUNCTION art_quick_memcpy
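With the stub reduced to a plain call into libc, its behavior is equivalent to the following C++, shown only to make the convention explicit (the function name is hypothetical):

    #include <cstring>
    // On x86-64 the three arguments already arrive in the native System V
    // registers (rdi, rsi, rdx), so the stub can forward them unchanged.
    extern "C" void* quick_memcpy_equivalent(void* dst, const void* src, size_t n) {
      return memcpy(dst, src, n);
    }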
NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 795f917841..6867fefb05 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <vector>
+#include "base/logging.h"
#include "base/macros.h"
namespace art {
@@ -110,18 +111,76 @@ class QuasiAtomic {
// Reads the 64-bit value at "addr" without tearing.
static int64_t Read64(volatile const int64_t* addr) {
if (!kNeedSwapMutexes) {
- return *addr;
+ int64_t value;
+#if defined(__LP64__)
+ value = *addr;
+#else
+#if defined(__arm__)
+#if defined(__ARM_FEATURE_LPAE)
+      // With LPAE support (such as on Cortex-A15), ldrd is defined not to tear.
+ __asm__ __volatile__("@ QuasiAtomic::Read64\n"
+ "ldrd %0, %H0, %1"
+ : "=r" (value)
+ : "m" (*addr));
+#else
+ // Exclusive loads are defined not to tear, clearing the exclusive state isn't necessary.
+ __asm__ __volatile__("@ QuasiAtomic::Read64\n"
+ "ldrexd %0, %H0, %1"
+ : "=r" (value)
+ : "Q" (*addr));
+#endif
+#elif defined(__i386__)
+ __asm__ __volatile__(
+ "movq %1, %0\n"
+ : "=x" (value)
+ : "m" (*addr));
+#else
+ LOG(FATAL) << "Unsupported architecture";
+#endif
+#endif // defined(__LP64__)
+ return value;
} else {
return SwapMutexRead64(addr);
}
}
// Writes to the 64-bit value at "addr" without tearing.
- static void Write64(volatile int64_t* addr, int64_t val) {
+ static void Write64(volatile int64_t* addr, int64_t value) {
if (!kNeedSwapMutexes) {
- *addr = val;
+#if defined(__LP64__)
+ *addr = value;
+#else
+#if defined(__arm__)
+#if defined(__ARM_FEATURE_LPAE)
+      // With LPAE support (such as on Cortex-A15), strd is defined not to tear.
+ __asm__ __volatile__("@ QuasiAtomic::Write64\n"
+ "strd %1, %H1, %0"
+ : "=m"(*addr)
+ : "r" (value));
+#else
+ // The write is done as a swap so that the cache-line is in the exclusive state for the store.
+ int64_t prev;
+ int status;
+ do {
+ __asm__ __volatile__("@ QuasiAtomic::Write64\n"
+ "ldrexd %0, %H0, %2\n"
+ "strexd %1, %3, %H3, %2"
+ : "=&r" (prev), "=&r" (status), "+Q"(*addr)
+ : "r" (value)
+ : "cc");
+ } while (UNLIKELY(status != 0));
+#endif
+#elif defined(__i386__)
+ __asm__ __volatile__(
+ "movq %1, %0"
+ : "=m" (*addr)
+ : "x" (value));
+#else
+ LOG(FATAL) << "Unsupported architecture";
+#endif
+#endif // defined(__LP64__)
} else {
- SwapMutexWrite64(addr, val);
+ SwapMutexWrite64(addr, value);
}
}
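Read64 and Write64 now guarantee tear-free 64-bit accesses even on 32-bit targets, via ldrexd/strexd (or ldrd/strd with LPAE) on ARM and an SSE movq on x86. They promise only atomicity of the single access, not ordering; callers needing happens-before must still add barriers. A usage sketch:

    // Publish a 64-bit value that another thread may read concurrently
    // without ever observing a torn (half-old, half-new) value.
    volatile int64_t shared_value = 0;
    QuasiAtomic::Write64(&shared_value, INT64_C(0x0123456789abcdef));
    int64_t copy = QuasiAtomic::Read64(&shared_value);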
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 590835e059..12c0352ef5 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -65,8 +65,7 @@ bool BitVector::IsBitSet(uint32_t num) const {
return false;
}
- uint32_t val = storage_[num >> 5] & check_masks[num & 0x1f];
- return (val != 0);
+ return IsBitSet(storage_, num);
}
// Mark all bits bit as "clear".
@@ -158,33 +157,27 @@ void BitVector::Intersect(const BitVector* src) {
* Union with another bit vector.
*/
void BitVector::Union(const BitVector* src) {
- uint32_t src_size = src->storage_size_;
+ // Get the highest bit to determine how much we need to expand.
+ int highest_bit = src->GetHighestBitSet();
+
+ // If src has no bit set, we are done: there is no need for a union with src.
+ if (highest_bit == -1) {
+ return;
+ }
- // Get our size, we use this variable for the last loop of the method:
- // - It can change in the if block if src is of a different size.
- uint32_t size = storage_size_;
+  // Set src_size to the number of words we actually care about:
+  // the word holding the highest set bit, plus one.
+ uint32_t src_size = BitsToWords(highest_bit + 1);
// Is the storage size smaller than src's?
if (storage_size_ < src_size) {
- // Get the highest bit to determine how much we need to expand.
- int highest_bit = src->GetHighestBitSet();
-
- // If src has no bit set, we are done: there is no need for a union with src.
- if (highest_bit == -1) {
- return;
- }
-
// Set it to reallocate.
SetBit(highest_bit);
// Paranoid: storage size should be big enough to hold this bit now.
DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * sizeof(*(storage_)) * 8);
-
- // Update the size, our size can now not be bigger than the src size
- size = storage_size_;
}
- for (uint32_t idx = 0; idx < size; idx++) {
+ for (uint32_t idx = 0; idx < src_size; idx++) {
storage_[idx] |= src->GetRawStorageWord(idx);
}
}
@@ -213,27 +206,10 @@ uint32_t BitVector::NumSetBits() const {
return count;
}
-// Count the number of bits that are set up through and including num.
-uint32_t BitVector::NumSetBits(uint32_t num) const {
- DCHECK_LT(num, storage_size_ * sizeof(*storage_) * 8);
- uint32_t last_word = num >> 5;
- uint32_t partial_word_bits = num & 0x1f;
-
- // partial_word_bits | # | | | partial_word_mask
- // 00000 | 0 | 0xffffffff >> (31 - 0) | (1 << (0 + 1)) - 1 | 0x00000001
- // 00001 | 1 | 0xffffffff >> (31 - 1) | (1 << (1 + 1)) - 1 | 0x00000003
- // 00010 | 2 | 0xffffffff >> (31 - 2) | (1 << (2 + 1)) - 1 | 0x00000007
- // ..... |
- // 11110 | 30 | 0xffffffff >> (31 - 30) | (1 << (30 + 1)) - 1 | 0x7fffffff
- // 11111 | 31 | 0xffffffff >> (31 - 31) | last_full_word++ | 0xffffffff
- uint32_t partial_word_mask = 0xffffffff >> (0x1f - partial_word_bits);
-
- uint32_t count = 0;
- for (uint32_t word = 0; word < last_word; word++) {
- count += __builtin_popcount(storage_[word]);
- }
- count += __builtin_popcount(storage_[last_word] & partial_word_mask);
- return count;
+// Count the number of bits that are set in range [0, end).
+uint32_t BitVector::NumSetBits(uint32_t end) const {
+ DCHECK_LE(end, storage_size_ * sizeof(*storage_) * 8);
+ return NumSetBits(storage_, end);
}
BitVector::Iterator* BitVector::GetIterator() const {
@@ -302,6 +278,23 @@ int BitVector::GetHighestBitSet() const {
return -1;
}
+bool BitVector::EnsureSizeAndClear(unsigned int num) {
+ // Check if the bitvector is expandable.
+  if (!IsExpandable()) {
+ return false;
+ }
+
+ if (num > 0) {
+ // Now try to expand by setting the last bit.
+ SetBit(num - 1);
+ }
+
+ // We must clear all bits as per our specification.
+ ClearAllBits();
+
+ return true;
+}
+
void BitVector::Copy(const BitVector *src) {
// Get highest bit set, we only need to copy till then.
int highest_bit = src->GetHighestBitSet();
@@ -327,4 +320,60 @@ void BitVector::Copy(const BitVector *src) {
}
}
+bool BitVector::IsBitSet(const uint32_t* storage, uint32_t num) {
+ uint32_t val = storage[num >> 5] & check_masks[num & 0x1f];
+ return (val != 0);
+}
+
+uint32_t BitVector::NumSetBits(const uint32_t* storage, uint32_t end) {
+ uint32_t word_end = end >> 5;
+ uint32_t partial_word_bits = end & 0x1f;
+
+ uint32_t count = 0u;
+ for (uint32_t word = 0u; word < word_end; word++) {
+ count += __builtin_popcount(storage[word]);
+ }
+ if (partial_word_bits != 0u) {
+ count += __builtin_popcount(storage[word_end] & ~(0xffffffffu << partial_word_bits));
+ }
+ return count;
+}
+
+void BitVector::Dump(std::ostream& os, const char *prefix) {
+ std::ostringstream buffer;
+ DumpHelper(buffer, prefix);
+  os << buffer.str() << std::endl;
+}
+
+void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) {
+ std::ostringstream buffer;
+  DumpHelper(buffer, prefix);
+
+ // Now print it to the file.
+ fprintf(file, " {%s}", buffer.str().c_str());
+
+ // If it isn't the last entry, add a |.
+  if (!last_entry) {
+ fprintf(file, "|");
+ }
+
+  // Terminate the entry with a trailing backslash and newline.
+ fprintf(file, "\\\n");
+}
+
+void BitVector::DumpHelper(std::ostringstream& buffer, const char* prefix) {
+ // Initialize it.
+ if (prefix != nullptr) {
+ buffer << prefix;
+ }
+
+ int max = GetHighestBitSet();
+
+ for (int i = 0; i <= max; i++) {
+ if (IsBitSet(i)) {
+ buffer << i << " ";
+ }
+ }
+}
+
} // namespace art
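NumSetBits now takes a half-open range, which removes the off-by-one ambiguity of the old inclusive API; the adjusted expectations in bit_vector_test.cc below follow directly. A sketch of the new contract (obtaining 'allocator' as the tests do):

    BitVector bv(32u, false, allocator);  // 32 bits, not expandable
    bv.SetBit(0);
    bv.SetBit(8);
    CHECK_EQ(1u, bv.NumSetBits(8));   // [0, 8) contains only bit 0
    CHECK_EQ(2u, bv.NumSetBits(9));   // [0, 9) also contains bit 8
    CHECK_EQ(2u, bv.NumSetBits());    // the whole vector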
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index c8f285e879..db29c4969e 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -119,7 +119,9 @@ class BitVector {
bool SameBitsSet(const BitVector *src);
uint32_t NumSetBits() const;
- uint32_t NumSetBits(uint32_t num) const;
+
+ // Number of bits set in range [0, end).
+ uint32_t NumSetBits(uint32_t end) const;
Iterator* GetIterator() const;
@@ -135,6 +137,19 @@ class BitVector {
*/
int GetHighestBitSet() const;
+ // Is bit set in storage. (No range check.)
+ static bool IsBitSet(const uint32_t* storage, uint32_t num);
+ // Number of bits set in range [0, end) in storage. (No range check.)
+ static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+
+ bool EnsureSizeAndClear(unsigned int num);
+
+ void Dump(std::ostream& os, const char* prefix);
+ void DumpDot(FILE* file, const char* prefix, bool last_entry = false);
+
+ protected:
+ void DumpHelper(std::ostringstream& buffer, const char* prefix);
+
private:
Allocator* const allocator_;
const bool expandable_; // expand bitmap if we run out?
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index a67fb332b1..2ff55cbb3e 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -29,8 +29,8 @@ TEST(BitVector, Test) {
EXPECT_FALSE(bv.IsExpandable());
EXPECT_EQ(0U, bv.NumSetBits());
- EXPECT_EQ(0U, bv.NumSetBits(0));
- EXPECT_EQ(0U, bv.NumSetBits(kBits - 1));
+ EXPECT_EQ(0U, bv.NumSetBits(1));
+ EXPECT_EQ(0U, bv.NumSetBits(kBits));
for (size_t i = 0; i < kBits; i++) {
EXPECT_FALSE(bv.IsBitSet(i));
}
@@ -46,8 +46,8 @@ TEST(BitVector, Test) {
bv.SetBit(0);
bv.SetBit(kBits - 1);
EXPECT_EQ(2U, bv.NumSetBits());
- EXPECT_EQ(1U, bv.NumSetBits(0));
- EXPECT_EQ(2U, bv.NumSetBits(kBits - 1));
+ EXPECT_EQ(1U, bv.NumSetBits(1));
+ EXPECT_EQ(2U, bv.NumSetBits(kBits));
EXPECT_TRUE(bv.IsBitSet(0));
for (size_t i = 1; i < kBits - 1; i++) {
EXPECT_FALSE(bv.IsBitSet(i));
@@ -98,25 +98,25 @@ TEST(BitVector, NoopAllocator) {
EXPECT_EQ(0x00010001U, bv.GetRawStorageWord(1));
EXPECT_EQ(4U, bv.NumSetBits());
- EXPECT_EQ(0U, bv.NumSetBits(0));
+ EXPECT_EQ(0U, bv.NumSetBits(1));
- EXPECT_EQ(0U, bv.NumSetBits(7));
- EXPECT_EQ(1U, bv.NumSetBits(8));
+ EXPECT_EQ(0U, bv.NumSetBits(8));
EXPECT_EQ(1U, bv.NumSetBits(9));
+ EXPECT_EQ(1U, bv.NumSetBits(10));
- EXPECT_EQ(1U, bv.NumSetBits(15));
- EXPECT_EQ(2U, bv.NumSetBits(16));
+ EXPECT_EQ(1U, bv.NumSetBits(16));
EXPECT_EQ(2U, bv.NumSetBits(17));
+ EXPECT_EQ(2U, bv.NumSetBits(18));
- EXPECT_EQ(2U, bv.NumSetBits(31));
- EXPECT_EQ(3U, bv.NumSetBits(32));
+ EXPECT_EQ(2U, bv.NumSetBits(32));
EXPECT_EQ(3U, bv.NumSetBits(33));
+ EXPECT_EQ(3U, bv.NumSetBits(34));
- EXPECT_EQ(3U, bv.NumSetBits(47));
- EXPECT_EQ(4U, bv.NumSetBits(48));
+ EXPECT_EQ(3U, bv.NumSetBits(48));
EXPECT_EQ(4U, bv.NumSetBits(49));
+ EXPECT_EQ(4U, bv.NumSetBits(50));
- EXPECT_EQ(4U, bv.NumSetBits(63));
+ EXPECT_EQ(4U, bv.NumSetBits(64));
}
TEST(BitVector, SetInitialBits) {
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 46b8ff28fe..730a2c2cb4 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -26,6 +26,8 @@ namespace art {
LogVerbosity gLogVerbosity;
+std::vector<std::string> gVerboseMethods;
+
unsigned int gAborting = 0;
static LogSeverity gMinimumLogSeverity = INFO;
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index fcec733868..bd5ae85f5c 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -22,6 +22,7 @@
#include <iostream> // NOLINT
#include <sstream>
#include <signal.h>
+#include <vector>
#include "base/macros.h"
#include "log_severity.h"
#include "UniquePtr.h"
@@ -299,6 +300,8 @@ struct LogVerbosity {
extern LogVerbosity gLogVerbosity;
+extern std::vector<std::string> gVerboseMethods;
+
// Used on fatal exit. Prevents recursive aborts. Allows us to disable
// some error checking to ensure fatal shutdown makes forward progress.
extern unsigned int gAborting;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index fdd0249a6a..2bc17bf403 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -206,16 +206,16 @@ void BaseMutex::DumpContention(std::ostream& os) const {
os << "never contended";
} else {
os << "contended " << contention_count
- << " times, average wait of contender " << PrettyDuration(wait_time / contention_count);
+       << " times, total wait of contender " << PrettyDuration(wait_time)
+       << ", average " << PrettyDuration(wait_time / contention_count);
SafeMap<uint64_t, size_t> most_common_blocker;
SafeMap<uint64_t, size_t> most_common_blocked;
- typedef SafeMap<uint64_t, size_t>::const_iterator It;
for (size_t i = 0; i < kContentionLogSize; ++i) {
uint64_t blocked_tid = log[i].blocked_tid;
uint64_t owner_tid = log[i].owner_tid;
uint32_t count = log[i].count;
if (count > 0) {
- It it = most_common_blocked.find(blocked_tid);
+ auto it = most_common_blocked.find(blocked_tid);
if (it != most_common_blocked.end()) {
most_common_blocked.Overwrite(blocked_tid, it->second + count);
} else {
@@ -231,10 +231,10 @@ void BaseMutex::DumpContention(std::ostream& os) const {
}
uint64_t max_tid = 0;
size_t max_tid_count = 0;
- for (It it = most_common_blocked.begin(); it != most_common_blocked.end(); ++it) {
- if (it->second > max_tid_count) {
- max_tid = it->first;
- max_tid_count = it->second;
+ for (const auto& pair : most_common_blocked) {
+ if (pair.second > max_tid_count) {
+ max_tid = pair.first;
+ max_tid_count = pair.second;
}
}
if (max_tid != 0) {
@@ -242,10 +242,10 @@ void BaseMutex::DumpContention(std::ostream& os) const {
}
max_tid = 0;
max_tid_count = 0;
- for (It it = most_common_blocker.begin(); it != most_common_blocker.end(); ++it) {
- if (it->second > max_tid_count) {
- max_tid = it->first;
- max_tid_count = it->second;
+ for (const auto& pair : most_common_blocker) {
+ if (pair.second > max_tid_count) {
+ max_tid = pair.first;
+ max_tid_count = pair.second;
}
}
if (max_tid != 0) {
diff --git a/runtime/catch_block_stack_visitor.cc b/runtime/catch_block_stack_visitor.cc
index f9acffbbf6..8d10a97b68 100644
--- a/runtime/catch_block_stack_visitor.cc
+++ b/runtime/catch_block_stack_visitor.cc
@@ -17,27 +17,26 @@
#include "catch_block_stack_visitor.h"
#include "dex_instruction.h"
-#include "catch_finder.h"
+#include "mirror/art_method-inl.h"
+#include "quick_exception_handler.h"
#include "sirt_ref.h"
#include "verifier/method_verifier.h"
namespace art {
bool CatchBlockStackVisitor::VisitFrame() {
- catch_finder_->SetHandlerFrameId(GetFrameId());
+ exception_handler_->SetHandlerFrameId(GetFrameId());
mirror::ArtMethod* method = GetMethod();
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
- catch_finder_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
- catch_finder_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
return false; // End stack walk.
} else {
if (method->IsRuntimeMethod()) {
// Ignore callee save method.
DCHECK(method->IsCalleeSaveMethod());
return true;
- } else if (is_deoptimization_) {
- return HandleDeoptimization(method);
} else {
return HandleTryItems(method);
}
@@ -46,66 +45,21 @@ bool CatchBlockStackVisitor::VisitFrame() {
bool CatchBlockStackVisitor::HandleTryItems(mirror::ArtMethod* method) {
uint32_t dex_pc = DexFile::kDexNoIndex;
- if (method->IsNative()) {
- ++native_method_count_;
- } else {
+ if (!method->IsNative()) {
dex_pc = GetDexPc();
}
if (dex_pc != DexFile::kDexNoIndex) {
bool clear_exception = false;
uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception);
- catch_finder_->SetClearException(clear_exception);
+ exception_handler_->SetClearException(clear_exception);
if (found_dex_pc != DexFile::kDexNoIndex) {
- catch_finder_->SetHandlerDexPc(found_dex_pc);
- catch_finder_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
- catch_finder_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerDexPc(found_dex_pc);
+ exception_handler_->SetHandlerQuickFramePc(method->ToNativePc(found_dex_pc));
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
return false; // End stack walk.
}
}
return true; // Continue stack walk.
}
-bool CatchBlockStackVisitor::HandleDeoptimization(mirror::ArtMethod* m) {
- MethodHelper mh(m);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- CHECK(code_item != nullptr);
- uint16_t num_regs = code_item->registers_size_;
- uint32_t dex_pc = GetDexPc();
- const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
- uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
- SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
- SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
- verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
- &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
- m->GetAccessFlags(), false, true);
- verifier.Verify();
- std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
- for (uint16_t reg = 0; reg < num_regs; ++reg) {
- VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
- switch (kind) {
- case kUndefined:
- new_frame->SetVReg(reg, 0xEBADDE09);
- break;
- case kConstant:
- new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
- break;
- case kReferenceVReg:
- new_frame->SetVRegReference(reg,
- reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
- break;
- default:
- new_frame->SetVReg(reg, GetVReg(m, reg, kind));
- break;
- }
- }
- if (prev_shadow_frame_ != nullptr) {
- prev_shadow_frame_->SetLink(new_frame);
- } else {
- catch_finder_->SetTopShadowFrame(new_frame);
- }
- prev_shadow_frame_ = new_frame;
- return true;
-}
-
} // namespace art
diff --git a/runtime/catch_block_stack_visitor.h b/runtime/catch_block_stack_visitor.h
index 175ad7dd94..6f0fe11e75 100644
--- a/runtime/catch_block_stack_visitor.h
+++ b/runtime/catch_block_stack_visitor.h
@@ -17,39 +17,39 @@
#ifndef ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
#define ART_RUNTIME_CATCH_BLOCK_STACK_VISITOR_H_
-#include "mirror/throwable.h"
-#include "thread.h"
+#include "mirror/object-inl.h"
+#include "stack.h"
+#include "sirt_ref-inl.h"
namespace art {
-class CatchFinder;
+
+namespace mirror {
+class Throwable;
+} // namespace mirror
+class Context;
+class QuickExceptionHandler;
+class Thread;
class ThrowLocation;
// Finds catch handler or prepares deoptimization.
-class CatchBlockStackVisitor : public StackVisitor {
+class CatchBlockStackVisitor FINAL : public StackVisitor {
public:
- CatchBlockStackVisitor(Thread* self, Context* context, mirror::Throwable* exception,
- bool is_deoptimization, CatchFinder* catch_finder)
+ CatchBlockStackVisitor(Thread* self, Context* context, SirtRef<mirror::Throwable>& exception,
+ QuickExceptionHandler* exception_handler)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, context),
- self_(self), is_deoptimization_(is_deoptimization),
- to_find_(is_deoptimization ? nullptr : exception->GetClass()),
- catch_finder_(catch_finder), native_method_count_(0), prev_shadow_frame_(nullptr) {
+ : StackVisitor(self, context), self_(self), to_find_(self, exception->GetClass()),
+ exception_handler_(exception_handler) {
}
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Thread* const self_;
- const bool is_deoptimization_;
// The type of the exception catch block to find.
- mirror::Class* const to_find_;
- CatchFinder* const catch_finder_;
- // Number of native methods passed in crawl (equates to number of SIRTs to pop)
- uint32_t native_method_count_;
- ShadowFrame* prev_shadow_frame_;
+ SirtRef<mirror::Class> to_find_;
+ QuickExceptionHandler* const exception_handler_;
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 960c26dfde..b52941b258 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -765,14 +765,9 @@ class ScopedCheck {
// Verify that the current thread is (a) attached and (b) associated with
// this particular instance of JNIEnv.
if (soa_.Env() != threadEnv) {
- if (soa_.Vm()->work_around_app_jni_bugs) {
- // If we're keeping broken code limping along, we need to suppress the abort...
- LOG(ERROR) << "APP BUG DETECTED: thread " << *self << " using JNIEnv* from thread " << *soa_.Self();
- } else {
- JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
- ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
- return;
- }
+ JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
+ ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
+ return;
}
// Verify that, if this thread previously made a critical "get" call, we
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index e690b30783..338133c22a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -185,7 +185,8 @@ ClassLinker::ClassLinker(InternTable* intern_table)
quick_resolution_trampoline_(nullptr),
portable_imt_conflict_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
- quick_generic_jni_trampoline_(nullptr) {
+ quick_generic_jni_trampoline_(nullptr),
+ quick_to_interpreter_bridge_trampoline_(nullptr) {
CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
}
@@ -1002,6 +1003,7 @@ void ClassLinker::InitFromImage() {
portable_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetPortableImtConflictTrampoline();
quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
quick_generic_jni_trampoline_ = oat_file.GetOatHeader().GetQuickGenericJniTrampoline();
+ quick_to_interpreter_bridge_trampoline_ = oat_file.GetOatHeader().GetQuickToInterpreterBridge();
mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
mirror::ObjectArray<mirror::DexCache>* dex_caches =
dex_caches_object->AsObjectArray<mirror::DexCache>();
@@ -1523,7 +1525,7 @@ uint32_t ClassLinker::SizeOfClass(const DexFile& dex_file,
return size;
}
-const OatFile::OatClass* ClassLinker::GetOatClass(const DexFile& dex_file, uint16_t class_def_idx) {
+OatFile::OatClass ClassLinker::GetOatClass(const DexFile& dex_file, uint16_t class_def_idx) {
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
CHECK(oat_file != NULL) << dex_file.GetLocation();
@@ -1531,9 +1533,7 @@ const OatFile::OatClass* ClassLinker::GetOatClass(const DexFile& dex_file, uint1
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
- const OatFile::OatClass* oat_class = oat_dex_file->GetOatClass(class_def_idx);
- CHECK(oat_class != NULL) << dex_file.GetLocation() << " " << class_def_idx;
- return oat_class;
+ return oat_dex_file->GetOatClass(class_def_idx);
}
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
@@ -1593,16 +1593,14 @@ const OatFile::OatMethod ClassLinker::GetOatMethodFor(mirror::ArtMethod* method)
}
CHECK(found) << "Didn't find oat method index for virtual method: " << PrettyMethod(method);
}
- UniquePtr<const OatFile::OatClass>
- oat_class(GetOatClass(*declaring_class->GetDexCache()->GetDexFile(),
- declaring_class->GetDexClassDefIndex()));
- CHECK(oat_class.get() != NULL);
DCHECK_EQ(oat_method_index,
GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
method->GetDeclaringClass()->GetDexClassDefIndex(),
method->GetDexMethodIndex()));
+ const OatFile::OatClass oat_class = GetOatClass(*declaring_class->GetDexCache()->GetDexFile(),
+ declaring_class->GetDexClassDefIndex());
- return oat_class->GetOatMethod(oat_method_index);
+ return oat_class.GetOatMethod(oat_method_index);
}
// Special case to get oat code without overwriting a trampoline.
@@ -1634,9 +1632,10 @@ const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
if (method->IsProxyMethod()) {
return GetPortableProxyInvokeHandler();
}
- const void* result = GetOatMethodFor(method).GetPortableCode();
+ const OatFile::OatMethod oat_method = GetOatMethodFor(method);
+ const void* result = oat_method.GetPortableCode();
if (result == nullptr) {
- if (GetOatMethodFor(method).GetQuickCode() == nullptr) {
+ if (oat_method.GetQuickCode() == nullptr) {
// No code? You must mean to go into the interpreter.
result = GetPortableToInterpreterBridge();
} else {
@@ -1651,18 +1650,16 @@ const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- UniquePtr<const OatFile::OatClass> oat_class(GetOatClass(dex_file, class_def_idx));
- CHECK(oat_class.get() != nullptr);
+ const OatFile::OatClass oat_class = GetOatClass(dex_file, class_def_idx);
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
- return oat_class->GetOatMethod(oat_method_idx).GetQuickCode();
+ return oat_class.GetOatMethod(oat_method_idx).GetQuickCode();
}
const void* ClassLinker::GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- UniquePtr<const OatFile::OatClass> oat_class(GetOatClass(dex_file, class_def_idx));
- CHECK(oat_class.get() != nullptr);
+ const OatFile::OatClass oat_class = GetOatClass(dex_file, class_def_idx);
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
- return oat_class->GetOatMethod(oat_method_idx).GetPortableCode();
+ return oat_class.GetOatMethod(oat_method_idx).GetPortableCode();
}
// Returns true if the method must run with interpreter, false otherwise.
@@ -1703,8 +1700,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
const byte* class_data = dex_file.GetClassData(*dex_class_def);
// There should always be class data if there were direct methods.
CHECK(class_data != nullptr) << PrettyDescriptor(klass);
- UniquePtr<const OatFile::OatClass> oat_class(GetOatClass(dex_file, klass->GetDexClassDefIndex()));
- CHECK(oat_class.get() != nullptr);
+ const OatFile::OatClass oat_class = GetOatClass(dex_file, klass->GetDexClassDefIndex());
ClassDataItemIterator it(dex_file, class_data);
// Skip fields
while (it.HasNextStaticField()) {
@@ -1720,8 +1716,8 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
// Only update static methods.
continue;
}
- const void* portable_code = oat_class->GetOatMethod(method_index).GetPortableCode();
- const void* quick_code = oat_class->GetOatMethod(method_index).GetQuickCode();
+ const void* portable_code = oat_class.GetOatMethod(method_index).GetPortableCode();
+ const void* quick_code = oat_class.GetOatMethod(method_index).GetQuickCode();
const bool enter_interpreter = NeedsInterpreter(method, quick_code, portable_code);
bool have_portable_code = false;
if (enter_interpreter) {
@@ -1869,11 +1865,25 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
klass->SetDexClassDefIndex(dex_file.GetIndexForClassDef(dex_class_def));
klass->SetDexTypeIndex(dex_class_def.class_idx_);
- // Load fields fields.
const byte* class_data = dex_file.GetClassData(dex_class_def);
if (class_data == NULL) {
return; // no fields or methods - for example a marker interface
}
+
+ if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
+ const OatFile::OatClass oat_class = GetOatClass(dex_file, klass->GetDexClassDefIndex());
+ LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
+ } else {
+ LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
+ }
+}
+
+void ClassLinker::LoadClassMembers(const DexFile& dex_file,
+ const byte* class_data,
+ const SirtRef<mirror::Class>& klass,
+ mirror::ClassLoader* class_loader,
+ const OatFile::OatClass* oat_class) {
+ // Load fields.
ClassDataItemIterator it(dex_file, class_data);
Thread* self = Thread::Current();
if (it.NumStaticFields() != 0) {
@@ -1912,11 +1922,6 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
LoadField(dex_file, it, klass, ifield);
}
- UniquePtr<const OatFile::OatClass> oat_class;
- if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
- oat_class.reset(GetOatClass(dex_file, klass->GetDexClassDefIndex()));
- }
-
// Load methods.
if (it.NumDirectMethods() != 0) {
// TODO: append direct methods to class object
@@ -1946,8 +1951,8 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
return;
}
klass->SetDirectMethod(i, method.get());
- if (oat_class.get() != NULL) {
- LinkCode(method, oat_class.get(), dex_file, it.GetMemberIndex(), class_def_method_index);
+ if (oat_class != nullptr) {
+ LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
}
method->SetMethodIndex(class_def_method_index);
class_def_method_index++;
@@ -1960,8 +1965,8 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
}
klass->SetVirtualMethod(i, method.get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- if (oat_class.get() != NULL) {
- LinkCode(method, oat_class.get(), dex_file, it.GetMemberIndex(), class_def_method_index);
+ if (oat_class != nullptr) {
+ LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
}
class_def_method_index++;
}
@@ -2691,11 +2696,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation() << " " << PrettyClass(klass);
uint16_t class_def_index = klass->GetDexClassDefIndex();
- UniquePtr<const OatFile::OatClass> oat_class(oat_dex_file->GetOatClass(class_def_index));
- CHECK(oat_class.get() != NULL)
- << dex_file.GetLocation() << " " << PrettyClass(klass) << " "
- << ClassHelper(klass).GetDescriptor();
- oat_file_class_status = oat_class->GetStatus();
+ oat_file_class_status = oat_dex_file->GetOatClass(class_def_index).GetStatus();
if (oat_file_class_status == mirror::Class::kStatusVerified ||
oat_file_class_status == mirror::Class::kStatusInitialized) {
return true;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d684ad5689..9771318d49 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -369,6 +369,10 @@ class ClassLinker {
return quick_imt_conflict_trampoline_;
}
+ const void* GetQuickToInterpreterBridgeTrampoline() const {
+ return quick_to_interpreter_bridge_trampoline_;
+ }
+
InternTable* GetInternTable() const {
return intern_table_;
}
@@ -433,6 +437,12 @@ class ClassLinker {
const SirtRef<mirror::Class>& klass,
mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LoadClassMembers(const DexFile& dex_file,
+ const byte* class_data,
+ const SirtRef<mirror::Class>& klass,
+ mirror::ClassLoader* class_loader,
+ const OatFile::OatClass* oat_class)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
const SirtRef<mirror::Class>& klass, const SirtRef<mirror::ArtField>& dst)
@@ -446,7 +456,7 @@ class ClassLinker {
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Finds the associated oat class for a dex_file and descriptor
- const OatFile::OatClass* GetOatClass(const DexFile& dex_file, uint16_t class_def_idx)
+ OatFile::OatClass GetOatClass(const DexFile& dex_file, uint16_t class_def_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
@@ -652,6 +662,7 @@ class ClassLinker {
const void* portable_imt_conflict_trampoline_;
const void* quick_imt_conflict_trampoline_;
const void* quick_generic_jni_trampoline_;
+ const void* quick_to_interpreter_bridge_trampoline_;
friend class ImageWriter; // for GetClassRoots
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 5b72a4496f..121835713a 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -485,8 +485,6 @@ struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_), "entryPointFromQuickCompiledCode"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_), "gcMap"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_mapping_table_), "quickMappingTable"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, quick_vmap_table_), "quickVmapTable"));
// alphabetical 32-bit
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, access_flags_), "accessFlags"));
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c52a5887aa..07d3a2a5b8 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -225,6 +225,7 @@ size_t Dbg::alloc_record_count_ = 0;
Mutex* Dbg::deoptimization_lock_ = nullptr;
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
+size_t Dbg::delayed_full_undeoptimization_count_ = 0;
// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
@@ -665,6 +666,7 @@ void Dbg::GoActive() {
MutexLock mu(Thread::Current(), *deoptimization_lock_);
CHECK_EQ(deoptimization_requests_.size(), 0U);
CHECK_EQ(full_deoptimization_event_count_, 0U);
+ CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
}
Runtime* runtime = Runtime::Current();
@@ -703,6 +705,7 @@ void Dbg::Disconnected() {
MutexLock mu(Thread::Current(), *deoptimization_lock_);
deoptimization_requests_.clear();
full_deoptimization_event_count_ = 0U;
+ delayed_full_undeoptimization_count_ = 0U;
}
runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, kListenerEvents);
runtime->GetInstrumentation()->DisableDeoptimization();
@@ -2668,20 +2671,24 @@ void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
LOG(WARNING) << "Ignoring empty deoptimization request.";
break;
case DeoptimizationRequest::kFullDeoptimization:
- VLOG(jdwp) << "Deoptimize the world";
+ VLOG(jdwp) << "Deoptimize the world ...";
instrumentation->DeoptimizeEverything();
+ VLOG(jdwp) << "Deoptimize the world DONE";
break;
case DeoptimizationRequest::kFullUndeoptimization:
- VLOG(jdwp) << "Undeoptimize the world";
+ VLOG(jdwp) << "Undeoptimize the world ...";
instrumentation->UndeoptimizeEverything();
+ VLOG(jdwp) << "Undeoptimize the world DONE";
break;
case DeoptimizationRequest::kSelectiveDeoptimization:
- VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method);
+ VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " ...";
instrumentation->Deoptimize(request.method);
+ VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " DONE";
break;
case DeoptimizationRequest::kSelectiveUndeoptimization:
- VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method);
+ VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " ...";
instrumentation->Undeoptimize(request.method);
+ VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " DONE";
break;
default:
LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
@@ -2689,17 +2696,43 @@ void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
}
}
+void Dbg::DelayFullUndeoptimization() {
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ ++delayed_full_undeoptimization_count_;
+ DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
+}
+
+void Dbg::ProcessDelayedFullUndeoptimizations() {
+ // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
+ {
+ MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ while (delayed_full_undeoptimization_count_ > 0) {
+ DeoptimizationRequest req;
+ req.kind = DeoptimizationRequest::kFullUndeoptimization;
+ req.method = nullptr;
+ RequestDeoptimizationLocked(req);
+ --delayed_full_undeoptimization_count_;
+ }
+ }
+ ManageDeoptimization();
+}
+
void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
if (req.kind == DeoptimizationRequest::kNothing) {
// Nothing to do.
return;
}
MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ RequestDeoptimizationLocked(req);
+}
+
+void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
switch (req.kind) {
case DeoptimizationRequest::kFullDeoptimization: {
DCHECK(req.method == nullptr);
if (full_deoptimization_event_count_ == 0) {
- VLOG(jdwp) << "Request full deoptimization";
+ VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
+ << " for full deoptimization";
deoptimization_requests_.push_back(req);
}
++full_deoptimization_event_count_;
@@ -2710,20 +2743,23 @@ void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
DCHECK_GT(full_deoptimization_event_count_, 0U);
--full_deoptimization_event_count_;
if (full_deoptimization_event_count_ == 0) {
- VLOG(jdwp) << "Request full undeoptimization";
+ VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
+ << " for full undeoptimization";
deoptimization_requests_.push_back(req);
}
break;
}
case DeoptimizationRequest::kSelectiveDeoptimization: {
DCHECK(req.method != nullptr);
- VLOG(jdwp) << "Request deoptimization of " << PrettyMethod(req.method);
+ VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
+ << " for deoptimization of " << PrettyMethod(req.method);
deoptimization_requests_.push_back(req);
break;
}
case DeoptimizationRequest::kSelectiveUndeoptimization: {
DCHECK(req.method != nullptr);
- VLOG(jdwp) << "Request undeoptimization of " << PrettyMethod(req.method);
+ VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
+ << " for undeoptimization of " << PrettyMethod(req.method);
deoptimization_requests_.push_back(req);
break;
}
@@ -2751,7 +2787,9 @@ void Dbg::ManageDeoptimization() {
const ThreadState old_state = self->SetStateUnsafe(kRunnable);
{
MutexLock mu(self, *deoptimization_lock_);
+ size_t req_index = 0;
for (const DeoptimizationRequest& request : deoptimization_requests_) {
+ VLOG(jdwp) << "Process deoptimization request #" << req_index++;
ProcessDeoptimizationRequest(request);
}
deoptimization_requests_.clear();
@@ -4177,6 +4215,13 @@ class StringTable {
DISALLOW_COPY_AND_ASSIGN(StringTable);
};
+static const char* GetMethodSourceFile(MethodHelper* mh)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(mh != nullptr);
+ const char* source_file = mh->GetDeclaringClassSourceFile();
+ return (source_file != nullptr) ? source_file : "";
+}
+
/*
* The data we send to DDMS contains everything we have recorded.
*
@@ -4249,7 +4294,7 @@ jbyteArray Dbg::GetRecentAllocations() {
mh.ChangeMethod(m);
class_names.Add(mh.GetDeclaringClassDescriptor());
method_names.Add(mh.GetName());
- filenames.Add(mh.GetDeclaringClassSourceFile());
+ filenames.Add(GetMethodSourceFile(&mh));
}
}
@@ -4311,7 +4356,7 @@ jbyteArray Dbg::GetRecentAllocations() {
mh.ChangeMethod(record->stack[stack_frame].method);
size_t class_name_index = class_names.IndexOf(mh.GetDeclaringClassDescriptor());
size_t method_name_index = method_names.IndexOf(mh.GetName());
- size_t file_name_index = filenames.IndexOf(mh.GetDeclaringClassSourceFile());
+ size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(&mh));
JDWP::Append2BE(bytes, class_name_index);
JDWP::Append2BE(bytes, method_name_index);
JDWP::Append2BE(bytes, file_name_index);
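
The DelayFullUndeoptimization/ProcessDelayedFullUndeoptimizations additions above implement a simple pattern: single-step events must not undeoptimize the world while the debugger still needs it, so full-undeoptimization requests are counted under the lock and drained into the regular request queue later. A minimal sketch of that counter-drain pattern (names hypothetical, std::mutex in place of ART's Mutex):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    enum class Kind { kFullUndeoptimization };
    struct Request { Kind kind; };

    class Debugger {
     public:
      // Called when a single-step event ends: don't undeoptimize yet,
      // just remember that one full undeoptimization is owed.
      void DelayFullUndeoptimization() {
        std::lock_guard<std::mutex> lock(mu_);
        ++delayed_count_;
      }

      // Called on resume/disconnect: convert the owed undeoptimizations
      // into ordinary queued requests, then process the queue.
      void ProcessDelayedFullUndeoptimizations() {
        {
          std::lock_guard<std::mutex> lock(mu_);
          while (delayed_count_ > 0) {
            requests_.push_back(Request{Kind::kFullUndeoptimization});
            --delayed_count_;
          }
        }
        ProcessQueue();  // Suspends threads, applies requests, resumes.
      }

     private:
      void ProcessQueue() { /* elided */ }
      std::mutex mu_;
      size_t delayed_count_ = 0;
      std::vector<Request> requests_;
    };
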
diff --git a/runtime/debugger.h b/runtime/debugger.h
index b3e94c35b2..bef708cdc3 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -448,6 +448,13 @@ class Dbg {
LOCKS_EXCLUDED(deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Support delayed full undeoptimization requests. This is currently only used for single-step
+ // events.
+ static void DelayFullUndeoptimization() LOCKS_EXCLUDED(deoptimization_lock_);
+ static void ProcessDelayedFullUndeoptimizations()
+ LOCKS_EXCLUDED(deoptimization_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
// request and finally resumes all threads.
static void ManageDeoptimization()
@@ -560,6 +567,10 @@ class Dbg {
static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
+ EXCLUSIVE_LOCKS_REQUIRED(deoptimization_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
static Mutex* alloc_tracker_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(alloc_tracker_lock_);
@@ -581,6 +592,10 @@ class Dbg {
// undeoptimize when the last event is unregistered (when the counter is set to 0).
static size_t full_deoptimization_event_count_ GUARDED_BY(deoptimization_lock_);
+ // Count the number of full undeoptimization requests delayed to next resume or end of debug
+ // session.
+ static size_t delayed_full_undeoptimization_count_ GUARDED_BY(deoptimization_lock_);
+
DISALLOW_COPY_AND_ASSIGN(Dbg);
};
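
The GUARDED_BY / EXCLUSIVE_LOCKS_REQUIRED macros used throughout these declarations wrap Clang's thread-safety-analysis attributes, letting the compiler check at build time that delayed_full_undeoptimization_count_ is only touched with deoptimization_lock_ held. A toy, self-contained sketch of the underlying attributes; ART's macros and Mutex differ in detail, and the exact spellings here (exclusive_locks_required is the older attribute name) are an assumption about a plain Clang toolchain:

    // Compile with: clang++ -Wthread-safety -c example.cc
    #include <pthread.h>
    #include <cstddef>

    #define CAPABILITY(x) __attribute__((capability(x)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #define EXCLUSIVE_LOCKS_REQUIRED(x) __attribute__((exclusive_locks_required(x)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { pthread_mutex_lock(&mu_); }
      void Unlock() RELEASE() { pthread_mutex_unlock(&mu_); }
     private:
      pthread_mutex_t mu_ = PTHREAD_MUTEX_INITIALIZER;
    };

    class Dbg {
     public:
      void Delay() {
        lock_.Lock();
        DelayLocked();  // OK: the analysis knows lock_ is held here.
        lock_.Unlock();
      }
     private:
      // Rejected at compile time if any caller does not hold lock_.
      void DelayLocked() EXCLUSIVE_LOCKS_REQUIRED(lock_) { ++count_; }
      Mutex lock_;
      size_t count_ GUARDED_BY(lock_) = 0;
    };
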
diff --git a/runtime/deoptimize_stack_visitor.cc b/runtime/deoptimize_stack_visitor.cc
new file mode 100644
index 0000000000..3eb1792563
--- /dev/null
+++ b/runtime/deoptimize_stack_visitor.cc
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "deoptimize_stack_visitor.h"
+
+#include "mirror/art_method-inl.h"
+#include "object_utils.h"
+#include "quick_exception_handler.h"
+#include "sirt_ref-inl.h"
+#include "verifier/method_verifier.h"
+
+namespace art {
+
+bool DeoptimizeStackVisitor::VisitFrame() {
+ exception_handler_->SetHandlerFrameId(GetFrameId());
+ mirror::ArtMethod* method = GetMethod();
+ if (method == nullptr) {
+ // This is the upcall, we remember the frame and last pc so that we may long jump to them.
+ exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ return false; // End stack walk.
+ } else if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ } else {
+ return HandleDeoptimization(method);
+ }
+}
+
+bool DeoptimizeStackVisitor::HandleDeoptimization(mirror::ArtMethod* m) {
+ MethodHelper mh(m);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ CHECK(code_item != nullptr);
+ uint16_t num_regs = code_item->registers_size_;
+ uint32_t dex_pc = GetDexPc();
+ const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
+ uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
+ ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
+ SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
+ SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
+ verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
+ &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+ m->GetAccessFlags(), false, true);
+ verifier.Verify();
+ std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
+ for (uint16_t reg = 0; reg < num_regs; ++reg) {
+ VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
+ switch (kind) {
+ case kUndefined:
+ new_frame->SetVReg(reg, 0xEBADDE09);
+ break;
+ case kConstant:
+ new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
+ break;
+ case kReferenceVReg:
+ new_frame->SetVRegReference(reg,
+ reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
+ break;
+ default:
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ break;
+ }
+ }
+ if (prev_shadow_frame_ != nullptr) {
+ prev_shadow_frame_->SetLink(new_frame);
+ } else {
+ self_->SetDeoptimizationShadowFrame(new_frame);
+ }
+ prev_shadow_frame_ = new_frame;
+ return true;
+}
+
+} // namespace art
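
HandleDeoptimization above relies on the layout of MethodVerifier::DescribeVRegs' result: for each register r, kinds[2*r] holds the VRegKind and kinds[2*r+1] holds a payload (the constant value for kConstant). A small sketch decoding such an array into a shadow-frame-like vector, with the enum reduced to the cases used above and read_vreg standing in for StackVisitor::GetVReg:

    #include <cstdint>
    #include <vector>

    enum VRegKind { kUndefined, kConstant, kReferenceVReg, kIntVReg };

    // Decodes the (kind, payload) pairs produced per dex register.
    std::vector<int32_t> DecodeVRegs(const std::vector<int32_t>& kinds,
                                     uint16_t num_regs,
                                     int32_t (*read_vreg)(uint16_t, VRegKind)) {
      std::vector<int32_t> frame(num_regs);
      for (uint16_t reg = 0; reg < num_regs; ++reg) {
        VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
        switch (kind) {
          case kUndefined:
            frame[reg] = static_cast<int32_t>(0xEBADDE09);  // Poison value, as above.
            break;
          case kConstant:
            frame[reg] = kinds.at(reg * 2 + 1);  // Payload slot holds the value.
            break;
          default:
            frame[reg] = read_vreg(reg, kind);  // Read from the physical frame.
            break;
        }
      }
      return frame;
    }
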
diff --git a/runtime/deoptimize_stack_visitor.h b/runtime/deoptimize_stack_visitor.h
new file mode 100644
index 0000000000..c41b80364b
--- /dev/null
+++ b/runtime/deoptimize_stack_visitor.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
+#define ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
+
+#include "base/mutex.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace mirror {
+class ArtMethod;
+} // namespace mirror
+class QuickExceptionHandler;
+class Thread;
+
+// Prepares deoptimization.
+class DeoptimizeStackVisitor FINAL : public StackVisitor {
+ public:
+ DeoptimizeStackVisitor(Thread* self, Context* context, QuickExceptionHandler* exception_handler)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(self, context), self_(self), exception_handler_(exception_handler),
+ prev_shadow_frame_(nullptr) {
+ CHECK(!self_->HasDeoptimizationShadowFrame());
+ }
+
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+ bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Thread* const self_;
+ QuickExceptionHandler* const exception_handler_;
+ ShadowFrame* prev_shadow_frame_;
+
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
+};
+
+} // namespace art
+#endif // ART_RUNTIME_DEOPTIMIZE_STACK_VISITOR_H_
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 8b48b3647f..05912bfed8 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -761,6 +761,10 @@ static inline const void* GetQuickGenericJniTrampoline(ClassLinker* class_linker
return class_linker->GetQuickGenericJniTrampoline();
}
+static inline const void* GetQuickToInterpreterBridgeTrampoline(ClassLinker* class_linker) {
+ return class_linker->GetQuickToInterpreterBridgeTrampoline();
+}
+
extern "C" void art_portable_proxy_invoke_handler();
static inline const void* GetPortableProxyInvokeHandler() {
return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index c0304eb8a1..bea7d960d4 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -46,79 +46,4 @@ extern "C" void* artFindNativeMethod() {
}
}
-static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- intptr_t value = *arg_ptr;
- mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
- mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
- CHECK(Runtime::Current()->GetHeap()->IsValidObjectAddress(value_as_work_around_rep))
- << value_as_work_around_rep;
- *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
-}
-
-extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(Thread::Current() == self);
- // TODO: this code is specific to ARM
- // On entry the stack pointed by sp is:
- // | arg3 | <- Calling JNI method's frame (and extra bit for out args)
- // | LR |
- // | R3 | arg2
- // | R2 | arg1
- // | R1 | jclass/jobject
- // | R0 | JNIEnv
- // | unused |
- // | unused |
- // | unused | <- sp
- mirror::ArtMethod* jni_method = self->GetCurrentMethod(NULL);
- DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
- intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack
- // Fix up this/jclass argument
- WorkAroundJniBugsForJobject(arg_ptr);
- arg_ptr++;
- // Fix up jobject arguments
- MethodHelper mh(jni_method);
- int reg_num = 2; // Current register being processed, -1 for stack arguments.
- for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
- char shorty_char = mh.GetShorty()[i];
- if (shorty_char == 'L') {
- WorkAroundJniBugsForJobject(arg_ptr);
- }
- if (shorty_char == 'J' || shorty_char == 'D') {
- if (reg_num == 2) {
- arg_ptr = sp + 8; // skip to out arguments
- reg_num = -1;
- } else if (reg_num == 3) {
- arg_ptr = sp + 10; // skip to out arguments plus 2 slots as long must be aligned
- reg_num = -1;
- } else {
- DCHECK_EQ(reg_num, -1);
- if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
- arg_ptr += 3; // unaligned, pad and move through stack arguments
- } else {
- arg_ptr += 2; // aligned, move through stack arguments
- }
- }
- } else {
- if (reg_num == 2) {
- arg_ptr++; // move through register arguments
- reg_num++;
- } else if (reg_num == 3) {
- arg_ptr = sp + 8; // skip to outgoing stack arguments
- reg_num = -1;
- } else {
- DCHECK_EQ(reg_num, -1);
- arg_ptr++; // move through stack arguments
- }
- }
- }
- // Load expected destination, see Method::RegisterNative
- const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
- if (UNLIKELY(code == NULL)) {
- code = GetJniDlsymLookupStub();
- jni_method->RegisterNative(self, code, false);
- }
- return code;
-}
-
} // namespace art
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
index 4f19964e8c..9e62e0e9ff 100644
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -78,7 +78,7 @@ extern "C" void art_portable_test_suspend_from_code(Thread* self)
visitor.WalkStack(true);
self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
self->SetDeoptimizationReturnValue(JValue());
- self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+ self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
}
}
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 51c647adf1..6448045e27 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -31,7 +31,7 @@ namespace art {
extern "C" void artDeoptimize(Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
- self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+ self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
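
Both entrypoints now call Thread::GetDeoptimizationException() instead of casting -1 inline: a single well-known sentinel pointer that exception delivery can compare against to detect "this is a deoptimization, not a real throw". A generic sketch of the sentinel-pointer idiom, with hypothetical names standing in for ART's:

    // Sentinel-pointer idiom: a distinguished address that can never be a
    // real object, used to tag a special case in a pointer-typed channel.
    struct Throwable;  // Opaque; the sentinel is never dereferenced.

    inline Throwable* GetDeoptimizationSentinel() {
      // The magic bit pattern lives in exactly one place, behind an
      // accessor, so call sites never hard-code it.
      return reinterpret_cast<Throwable*>(-1);
    }

    inline bool IsDeoptimizationRequest(const Throwable* exception) {
      return exception == GetDeoptimizationSentinel();
    }

Centralizing the cast behind an accessor is the point of the change: scattered reinterpret_cast<mirror::Throwable*>(-1) sites are easy to get out of sync.
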
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 633f580bd6..60c5377f86 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -32,6 +32,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
const void* result = instrumentation->GetQuickCodeFor(method);
+ DCHECK(result != GetQuickToInterpreterBridgeTrampoline(Runtime::Current()->GetClassLinker()));
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object,
method, lr, interpreter_entry);
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 208eb74a01..97a8367688 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -49,10 +49,6 @@ class ExceptionTest : public CommonRuntimeTest {
dex_ = my_klass_->GetDexCache()->GetDexFile();
uint32_t code_size = 12;
- fake_code_.push_back((code_size >> 24) & 0xFF);
- fake_code_.push_back((code_size >> 16) & 0xFF);
- fake_code_.push_back((code_size >> 8) & 0xFF);
- fake_code_.push_back((code_size >> 0) & 0xFF);
for (size_t i = 0 ; i < code_size; i++) {
fake_code_.push_back(0x70 | i);
}
@@ -74,20 +70,35 @@ class ExceptionTest : public CommonRuntimeTest {
fake_gc_map_.push_back(0); // 0 entries.
fake_gc_map_.push_back(0);
+ const std::vector<uint8_t>& fake_vmap_table_data = fake_vmap_table_data_.GetData();
+ const std::vector<uint8_t>& fake_mapping_data = fake_mapping_data_.GetData();
+ uint32_t vmap_table_offset = sizeof(OatMethodHeader) + fake_vmap_table_data.size();
+ uint32_t mapping_table_offset = vmap_table_offset + fake_mapping_data.size();
+ OatMethodHeader method_header(vmap_table_offset, mapping_table_offset, code_size);
+ fake_header_code_and_maps_.resize(sizeof(method_header));
+ memcpy(&fake_header_code_and_maps_[0], &method_header, sizeof(method_header));
+ fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
+ fake_vmap_table_data.begin(), fake_vmap_table_data.end());
+ fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(),
+ fake_mapping_data.begin(), fake_mapping_data.end());
+ fake_header_code_and_maps_.insert(fake_header_code_and_maps_.end(),
+ fake_code_.begin(), fake_code_.end());
+
+ // NOTE: Don't align the code (it will not be executed) but check that the Thumb2
+ // adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
+ CHECK_EQ(mapping_table_offset & 1u, 0u);
+ const uint8_t* code_ptr = &fake_header_code_and_maps_[mapping_table_offset];
+
method_f_ = my_klass_->FindVirtualMethod("f", "()I");
ASSERT_TRUE(method_f_ != NULL);
method_f_->SetFrameSizeInBytes(4 * kPointerSize);
- method_f_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
- method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
- method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
+ method_f_->SetEntryPointFromQuickCompiledCode(code_ptr);
method_f_->SetNativeGcMap(&fake_gc_map_[0]);
method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
ASSERT_TRUE(method_g_ != NULL);
method_g_->SetFrameSizeInBytes(4 * kPointerSize);
- method_g_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
- method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
- method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
+ method_g_->SetEntryPointFromQuickCompiledCode(code_ptr);
method_g_->SetNativeGcMap(&fake_gc_map_[0]);
}
@@ -97,6 +108,7 @@ class ExceptionTest : public CommonRuntimeTest {
Leb128EncodingVector fake_mapping_data_;
Leb128EncodingVector fake_vmap_table_data_;
std::vector<uint8_t> fake_gc_map_;
+ std::vector<uint8_t> fake_header_code_and_maps_;
mirror::ArtMethod* method_f_;
mirror::ArtMethod* method_g_;
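
The test now packs [mapping table][vmap table][OatMethodHeader][code] into one buffer and points the method entry point at the code; the header fields record offsets counted backwards from the first code byte. A sketch of the same offset arithmetic with a hypothetical header struct (field names assumed for illustration):

    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for ART's OatMethodHeader.
    struct MethodHeader {
      uint32_t vmap_table_offset_;
      uint32_t mapping_table_offset_;
      uint32_t code_size_;
    };

    // Packs maps, header and code into one contiguous region.
    // Layout: [mapping table][vmap table][MethodHeader][code ...]
    std::vector<uint8_t> PackMethod(const std::vector<uint8_t>& mapping,
                                    const std::vector<uint8_t>& vmap,
                                    const std::vector<uint8_t>& code) {
      MethodHeader header;
      header.vmap_table_offset_ =
          static_cast<uint32_t>(sizeof(MethodHeader) + vmap.size());
      header.mapping_table_offset_ =
          static_cast<uint32_t>(header.vmap_table_offset_ + mapping.size());
      header.code_size_ = static_cast<uint32_t>(code.size());

      std::vector<uint8_t> buf;
      buf.insert(buf.end(), mapping.begin(), mapping.end());
      buf.insert(buf.end(), vmap.begin(), vmap.end());
      const uint8_t* h = reinterpret_cast<const uint8_t*>(&header);
      buf.insert(buf.end(), h, h + sizeof(header));
      buf.insert(buf.end(), code.begin(), code.end());

      // Code begins at buf[header.mapping_table_offset_]; subtracting a
      // stored offset from that address recovers the table's start.
      return buf;
    }
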
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index ed7b427a5f..c67542f484 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -30,9 +30,8 @@ inline void HeapBitmap::Visit(const Visitor& visitor) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
- DCHECK(!discontinuous_space_sets_.empty());
- for (const auto& space_set : discontinuous_space_sets_) {
- space_set->Visit(visitor);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
}
}
@@ -40,46 +39,67 @@ inline bool HeapBitmap::Test(const mirror::Object* obj) {
ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
if (LIKELY(bitmap != nullptr)) {
return bitmap->Test(obj);
- } else {
- return GetDiscontinuousSpaceObjectSet(obj) != nullptr;
}
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ return bitmap->Test(obj);
+ }
+ }
+ LOG(FATAL) << "Invalid object " << obj;
+ return false;
}
inline void HeapBitmap::Clear(const mirror::Object* obj) {
ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
if (LIKELY(bitmap != nullptr)) {
bitmap->Clear(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Clear(obj);
+ return;
+ }
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ bitmap->Clear(obj);
+ return;
+ }
}
+ LOG(FATAL) << "Invalid object " << obj;
}
-inline void HeapBitmap::Set(const mirror::Object* obj) {
+template<typename LargeObjectSetVisitor>
+inline bool HeapBitmap::Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor) {
ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
- if (LIKELY(bitmap != NULL)) {
- bitmap->Set(obj);
- } else {
- ObjectSet* set = GetDiscontinuousSpaceObjectSet(obj);
- DCHECK(set != NULL);
- set->Set(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ return bitmap->Set(obj);
+ }
+ visitor(obj);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ return bitmap->Set(obj);
+ }
}
+ LOG(FATAL) << "Invalid object " << obj;
+ return false;
}
-inline ContinuousSpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
- for (const auto& bitmap : continuous_space_bitmaps_) {
- if (bitmap->HasAddress(obj)) {
- return bitmap;
+template<typename LargeObjectSetVisitor>
+inline bool HeapBitmap::AtomicTestAndSet(const mirror::Object* obj,
+ const LargeObjectSetVisitor& visitor) {
+ ContinuousSpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+ if (LIKELY(bitmap != nullptr)) {
+ return bitmap->AtomicTestAndSet(obj);
+ }
+ visitor(obj);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ if (LIKELY(bitmap->HasAddress(obj))) {
+ return bitmap->AtomicTestAndSet(obj);
}
}
- return nullptr;
+ LOG(FATAL) << "Invalid object " << obj;
+ return false;
}
-inline ObjectSet* HeapBitmap::GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const {
- for (const auto& space_set : discontinuous_space_sets_) {
- if (space_set->Test(obj)) {
- return space_set;
+inline ContinuousSpaceBitmap* HeapBitmap::GetContinuousSpaceBitmap(const mirror::Object* obj) const {
+ for (const auto& bitmap : continuous_space_bitmaps_) {
+ if (bitmap->HasAddress(obj)) {
+ return bitmap;
}
}
return nullptr;
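
The rewritten accessors above share one shape: try the continuous-space bitmap covering the address (fast path), fall back to scanning the short list of large-object bitmaps, and treat "no bitmap covers obj" as fatal. A condensed sketch of that dispatch, including the visitor hook from Set(); the Bitmap type here is a toy stand-in:

    #include <cstdint>
    #include <cstdlib>
    #include <vector>

    struct Object {};

    // Minimal covering bitmap: an address interval plus an (elided) bit store.
    struct Bitmap {
      uintptr_t begin = 0;
      uintptr_t end = 0;
      bool HasAddress(const Object* obj) const {
        const uintptr_t p = reinterpret_cast<uintptr_t>(obj);
        return begin <= p && p < end;
      }
      bool Set(const Object* /*obj*/) { return false; }  // Returns the old bit.
    };

    // Shape of the new HeapBitmap::Set: the visitor only runs for objects
    // outside every continuous space, i.e. large objects.
    template <typename LargeObjectVisitor>
    bool SetInHeapBitmap(const std::vector<Bitmap*>& continuous,
                         const std::vector<Bitmap*>& large_object,
                         const Object* obj, const LargeObjectVisitor& visitor) {
      for (Bitmap* bitmap : continuous) {
        if (bitmap->HasAddress(obj)) {
          return bitmap->Set(obj);
        }
      }
      visitor(obj);
      for (Bitmap* bitmap : large_object) {
        if (bitmap->HasAddress(obj)) {
          return bitmap->Set(obj);
        }
      }
      std::abort();  // LOG(FATAL) in ART: every object must be covered.
    }
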
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index 1db886c06a..a5d59bfec2 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -25,61 +25,58 @@ namespace accounting {
void HeapBitmap::ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap,
ContinuousSpaceBitmap* new_bitmap) {
- for (auto& bitmap : continuous_space_bitmaps_) {
- if (bitmap == old_bitmap) {
- bitmap = new_bitmap;
- return;
- }
- }
- LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
+ auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(),
+ old_bitmap);
+ CHECK(it != continuous_space_bitmaps_.end()) << " continuous space bitmap " << old_bitmap
+ << " not found";
+ *it = new_bitmap;
}
-void HeapBitmap::ReplaceObjectSet(ObjectSet* old_set, ObjectSet* new_set) {
- for (auto& space_set : discontinuous_space_sets_) {
- if (space_set == old_set) {
- space_set = new_set;
- return;
- }
- }
- LOG(FATAL) << "object set " << static_cast<const void*>(old_set) << " not found";
+void HeapBitmap::ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap,
+ LargeObjectBitmap* new_bitmap) {
+ auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), old_bitmap);
+ CHECK(it != large_object_bitmaps_.end()) << " large object bitmap " << old_bitmap
+ << " not found";
+ *it = new_bitmap;
}
void HeapBitmap::AddContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
- DCHECK(bitmap != NULL);
-
- // Check for interval overlap.
+ DCHECK(bitmap != nullptr);
+ // Check that there is no bitmap overlap.
for (const auto& cur_bitmap : continuous_space_bitmaps_) {
- CHECK(!(
- bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
- bitmap->HeapLimit() > cur_bitmap->HeapBegin()))
- << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
+ CHECK(bitmap->HeapBegin() >= cur_bitmap->HeapLimit() ||
+ bitmap->HeapLimit() <= cur_bitmap->HeapBegin())
+ << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap "
+ << cur_bitmap->Dump();
}
continuous_space_bitmaps_.push_back(bitmap);
}
void HeapBitmap::RemoveContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(), bitmap);
DCHECK(it != continuous_space_bitmaps_.end());
continuous_space_bitmaps_.erase(it);
}
-void HeapBitmap::AddDiscontinuousObjectSet(ObjectSet* set) {
- DCHECK(set != nullptr);
- discontinuous_space_sets_.push_back(set);
+void HeapBitmap::AddLargeObjectBitmap(LargeObjectBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
+ large_object_bitmaps_.push_back(bitmap);
}
-void HeapBitmap::RemoveDiscontinuousObjectSet(ObjectSet* set) {
- auto it = std::find(discontinuous_space_sets_.begin(), discontinuous_space_sets_.end(), set);
- DCHECK(it != discontinuous_space_sets_.end());
- discontinuous_space_sets_.erase(it);
+void HeapBitmap::RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap) {
+ DCHECK(bitmap != nullptr);
+ auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), bitmap);
+ DCHECK(it != large_object_bitmaps_.end());
+ large_object_bitmaps_.erase(it);
}
void HeapBitmap::Walk(ObjectCallback* callback, void* arg) {
for (const auto& bitmap : continuous_space_bitmaps_) {
bitmap->Walk(callback, arg);
}
- for (const auto& space_set : discontinuous_space_sets_) {
- space_set->Walk(callback, arg);
+ for (const auto& bitmap : large_object_bitmaps_) {
+ bitmap->Walk(callback, arg);
}
}
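
ReplaceBitmap and ReplaceLargeObjectBitmap now use the find-then-assign idiom instead of a hand-rolled loop: locate the old pointer, assert it exists, overwrite in place. A minimal sketch with assert() in place of CHECK():

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Replace old_value with new_value in place; the element must exist.
    template <typename T>
    void ReplaceExisting(std::vector<T*>& v, T* old_value, T* new_value) {
      auto it = std::find(v.begin(), v.end(), old_value);
      assert(it != v.end() && "old_value not found");
      *it = new_value;
    }
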
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 61a24298d7..814dc0632b 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -33,9 +33,13 @@ class HeapBitmap {
public:
bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ template<typename LargeObjectSetVisitor>
+ bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
+ template<typename LargeObjectSetVisitor>
+ bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
- ObjectSet* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -50,7 +54,7 @@ class HeapBitmap {
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Find and replace an object set pointer; this is used for the bitmap swapping in the GC.
- void ReplaceObjectSet(ObjectSet* old_set, ObjectSet* new_set)
+ void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
explicit HeapBitmap(Heap* heap) : heap_(heap) {}
@@ -60,15 +64,15 @@ class HeapBitmap {
void AddContinuousSpaceBitmap(ContinuousSpaceBitmap* bitmap);
void RemoveContinuousSpaceBitmap(ContinuousSpaceBitmap* bitmap);
- void AddDiscontinuousObjectSet(ObjectSet* set);
- void RemoveDiscontinuousObjectSet(ObjectSet* set);
+ void AddLargeObjectBitmap(LargeObjectBitmap* bitmap);
+ void RemoveLargeObjectBitmap(LargeObjectBitmap* bitmap);
// Bitmaps covering continuous spaces.
std::vector<ContinuousSpaceBitmap*, GcAllocator<ContinuousSpaceBitmap*>>
continuous_space_bitmaps_;
// Sets covering discontinuous spaces.
- std::vector<ObjectSet*, GcAllocator<ObjectSet*>> discontinuous_space_sets_;
+ std::vector<LargeObjectBitmap*, GcAllocator<LargeObjectBitmap*>> large_object_bitmaps_;
friend class art::gc::Heap;
};
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 044216e0e4..bbbd1ed055 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -61,9 +61,10 @@ void RememberedSet::ClearCards() {
class RememberedSetReferenceVisitor {
public:
RememberedSetReferenceVisitor(MarkHeapReferenceCallback* callback,
+ DelayReferenceReferentCallback* ref_callback,
space::ContinuousSpace* target_space,
bool* const contains_reference_to_target_space, void* arg)
- : callback_(callback), target_space_(target_space), arg_(arg),
+ : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
@@ -77,8 +78,18 @@ class RememberedSetReferenceVisitor {
}
}
+ void operator()(mirror::Class* klass, mirror::Reference* ref) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ if (target_space_->HasAddress(ref->GetReferent())) {
+ *contains_reference_to_target_space_ = true;
+ ref_callback_(klass, ref, arg_);
+ }
+ }
+
private:
MarkHeapReferenceCallback* const callback_;
+ DelayReferenceReferentCallback* const ref_callback_;
space::ContinuousSpace* const target_space_;
void* const arg_;
bool* const contains_reference_to_target_space_;
@@ -87,30 +98,33 @@ class RememberedSetReferenceVisitor {
class RememberedSetObjectVisitor {
public:
RememberedSetObjectVisitor(MarkHeapReferenceCallback* callback,
+ DelayReferenceReferentCallback* ref_callback,
space::ContinuousSpace* target_space,
bool* const contains_reference_to_target_space, void* arg)
- : callback_(callback), target_space_(target_space), arg_(arg),
+ : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- RememberedSetReferenceVisitor ref_visitor(callback_, target_space_,
- contains_reference_to_target_space_, arg_);
- obj->VisitReferences<kMovingClasses>(ref_visitor);
+ RememberedSetReferenceVisitor visitor(callback_, ref_callback_, target_space_,
+ contains_reference_to_target_space_, arg_);
+ obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
private:
MarkHeapReferenceCallback* const callback_;
+ DelayReferenceReferentCallback* const ref_callback_;
space::ContinuousSpace* const target_space_;
void* const arg_;
bool* const contains_reference_to_target_space_;
};
void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
+ DelayReferenceReferentCallback* ref_callback,
space::ContinuousSpace* target_space, void* arg) {
CardTable* card_table = heap_->GetCardTable();
bool contains_reference_to_target_space = false;
- RememberedSetObjectVisitor obj_visitor(callback, target_space,
+ RememberedSetObjectVisitor obj_visitor(callback, ref_callback, target_space,
&contains_reference_to_target_space, arg);
ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
CardSet remove_card_set;
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 4ed20ddc82..e3d853742f 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -53,6 +53,7 @@ class RememberedSet {
// Mark through all references to the target space.
void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
+ DelayReferenceReferentCallback* ref_callback,
space::ContinuousSpace* target_space, void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
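
RememberedSetReferenceVisitor above gains a second operator() overload: one for plain heap references (fields) and one for java.lang.ref.Reference referents, and VisitReferences dispatches to whichever signature matches. A reduced sketch of the overloaded-functor pattern, with ART's mirror types replaced by stand-ins:

    #include <iostream>

    struct Object {};
    struct Class {};
    struct Reference {};  // Stand-in for mirror::Reference.

    // One functor, two overloads: overload resolution does the dispatch.
    class EdgeVisitor {
     public:
      void operator()(Object*, int field_offset) const {
        std::cout << "plain reference field at offset " << field_offset << "\n";
      }
      void operator()(Class*, Reference*) const {
        std::cout << "Reference referent edge\n";
      }
    };

    // Sketch of the visitee side: the same visitor object is passed for
    // both edge kinds, as in obj->VisitReferences(visitor, visitor).
    template <typename Visitor>
    void VisitReferences(Object* obj, const Visitor& visitor) {
      visitor(obj, 8);                  // An ordinary reference field.
      static Class klass;
      static Reference referent;
      visitor(&klass, &referent);       // The java.lang.ref.Reference edge.
    }

    int main() {
      Object obj;
      VisitReferences(&obj, EdgeVisitor());
    }
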
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 7eed05a339..66f9a3a9ce 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -21,26 +21,41 @@ namespace gc {
namespace accounting {
template<size_t kAlignment>
+size_t SpaceBitmap<kAlignment>::ComputeBitmapSize(uint64_t capacity) {
+ const uint64_t kBytesCoveredPerWord = kAlignment * kBitsPerWord;
+ return (RoundUp(capacity, kBytesCoveredPerWord) / kBytesCoveredPerWord) * kWordSize;
+}
+
+template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
const std::string& name, MemMap* mem_map, byte* heap_begin, size_t heap_capacity) {
CHECK(mem_map != nullptr);
uword* bitmap_begin = reinterpret_cast<uword*>(mem_map->Begin());
- size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+ const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
return new SpaceBitmap(name, mem_map, bitmap_begin, bitmap_size, heap_begin);
}
template<size_t kAlignment>
+SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin,
+ size_t bitmap_size, const void* heap_begin)
+ : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
+ heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
+ name_(name) {
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK_NE(bitmap_size, 0U);
+}
+
+template<size_t kAlignment>
SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
const std::string& name, byte* heap_begin, size_t heap_capacity) {
- CHECK(heap_begin != NULL);
// Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
- size_t bitmap_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+ const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), NULL, bitmap_size,
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
PROT_READ | PROT_WRITE, false, &error_msg));
if (UNLIKELY(mem_map.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
- return NULL;
+ return nullptr;
}
return CreateFromMemMap(name, mem_map.release(), heap_begin, heap_capacity);
}
@@ -68,13 +83,13 @@ void SpaceBitmap<kAlignment>::Clear() {
}
template<size_t kAlignment>
-inline void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
+void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
DCHECK_EQ(Size(), source_bitmap->Size());
std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / kWordSize, Begin());
}
template<size_t kAlignment>
-inline void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
+void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
CHECK(bitmap_begin_ != NULL);
CHECK(callback != NULL);
@@ -96,11 +111,11 @@ inline void SpaceBitmap<kAlignment>::Walk(ObjectCallback* callback, void* arg) {
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitmap,
- const SpaceBitmap<kAlignment>& mark_bitmap,
- uintptr_t sweep_begin, uintptr_t sweep_end,
- SpaceBitmap::SweepCallback* callback, void* arg) {
- CHECK(live_bitmap.bitmap_begin_ != NULL);
- CHECK(mark_bitmap.bitmap_begin_ != NULL);
+ const SpaceBitmap<kAlignment>& mark_bitmap,
+ uintptr_t sweep_begin, uintptr_t sweep_end,
+ SpaceBitmap::SweepCallback* callback, void* arg) {
+ CHECK(live_bitmap.bitmap_begin_ != nullptr);
+ CHECK(mark_bitmap.bitmap_begin_ != nullptr);
CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_);
CHECK_EQ(live_bitmap.bitmap_size_, mark_bitmap.bitmap_size_);
CHECK(callback != NULL);
@@ -170,8 +185,8 @@ void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visite
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited,
- ObjectCallback* callback,
- mirror::Object* obj, void* arg) {
+ ObjectCallback* callback, mirror::Object* obj,
+ void* arg) {
if (visited->Test(obj)) {
return;
}
@@ -232,12 +247,6 @@ void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
}
}
-void ObjectSet::Walk(ObjectCallback* callback, void* arg) {
- for (const mirror::Object* obj : contained_) {
- callback(const_cast<mirror::Object*>(obj), arg);
- }
-}
-
template class SpaceBitmap<kObjectAlignment>;
template class SpaceBitmap<kPageSize>;
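
ComputeBitmapSize factors out the size formula: one mark bit per kAlignment bytes of heap, packed into machine words. For example, with 8-byte alignment each bit covers 8 bytes, so a 16 MB heap needs 16 MB / 64 = 256 KB of bitmap regardless of word width (wider words cover proportionally more bytes each). A standalone sketch of the computation, verified at compile time:

    #include <cstdint>

    constexpr uint64_t kBitsPerByte = 8;

    constexpr uint64_t RoundUp(uint64_t x, uint64_t n) {
      return ((x + n - 1) / n) * n;
    }

    // One mark bit per `alignment` bytes, rounded up to whole words; the
    // byte size is essentially capacity / (alignment * 8).
    constexpr uint64_t ComputeBitmapSize(uint64_t capacity, uint64_t alignment,
                                         uint64_t word_size) {
      return RoundUp(capacity, alignment * kBitsPerByte * word_size)
          / (alignment * kBitsPerByte * word_size) * word_size;
    }

    int main() {
      // 16 MB heap, 8-byte alignment, 8-byte words -> 256 KB of bitmap.
      static_assert(ComputeBitmapSize(16ull << 20, 8, 8) == 256 * 1024, "");
    }
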
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index b90a7998a3..5c7cce24da 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -198,10 +198,10 @@ class SpaceBitmap {
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
SpaceBitmap(const std::string& name, MemMap* mem_map, uword* bitmap_begin, size_t bitmap_size,
- const void* heap_begin)
- : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
- heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
- name_(name) {}
+ const void* heap_begin);
+
+ // Helper function for computing bitmap size based on a 64 bit capacity.
+ static size_t ComputeBitmapSize(uint64_t capacity);
template<bool kSetBit>
bool Modify(const mirror::Object* obj);
@@ -232,71 +232,7 @@ class SpaceBitmap {
std::string name_;
};
-// Like a bitmap except it keeps track of objects using sets.
-class ObjectSet {
- public:
- typedef std::set<
- const mirror::Object*, std::less<const mirror::Object*>,
- GcAllocator<const mirror::Object*> > Objects;
-
- bool IsEmpty() const {
- return contained_.empty();
- }
-
- inline void Set(const mirror::Object* obj) {
- contained_.insert(obj);
- }
-
- inline void Clear(const mirror::Object* obj) {
- Objects::iterator found = contained_.find(obj);
- if (found != contained_.end()) {
- contained_.erase(found);
- }
- }
-
- void Clear() {
- contained_.clear();
- }
-
- inline bool Test(const mirror::Object* obj) const {
- return contained_.find(obj) != contained_.end();
- }
-
- const std::string& GetName() const {
- return name_;
- }
-
- void SetName(const std::string& name) {
- name_ = name;
- }
-
- void CopyFrom(const ObjectSet& space_set) {
- contained_ = space_set.contained_;
- }
-
- void Walk(ObjectCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- template <typename Visitor>
- void Visit(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS {
- for (const mirror::Object* obj : contained_) {
- visitor(const_cast<mirror::Object*>(obj));
- }
- }
-
- explicit ObjectSet(const std::string& name) : name_(name) {}
- ~ObjectSet() {}
-
- Objects& GetObjects() {
- return contained_;
- }
-
- private:
- std::string name_;
- Objects contained_;
-};
-
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
-// TODO: Replace usage of ObjectSet with LargeObjectBitmap.
typedef SpaceBitmap<kLargeObjectAlignment> LargeObjectBitmap;
template<size_t kAlignment>
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 7c180527d0..972f94d6db 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -110,7 +110,8 @@ class RandGen {
uint32_t val_;
};
-void compat_test() NO_THREAD_SAFETY_ANALYSIS {
+template <size_t kAlignment>
+void RunTest() NO_THREAD_SAFETY_ANALYSIS {
byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
size_t heap_capacity = 16 * MB;
@@ -123,7 +124,7 @@ void compat_test() NO_THREAD_SAFETY_ANALYSIS {
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
for (int j = 0; j < 10000; ++j) {
- size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
bool set = r.next() % 2 == 1;
if (set) {
@@ -137,15 +138,15 @@ void compat_test() NO_THREAD_SAFETY_ANALYSIS {
size_t count = 0;
SimpleCounter c(&count);
- size_t offset = (r.next() % heap_capacity) & ~(0x7);
+ size_t offset = RoundDown(r.next() % heap_capacity, kAlignment);
size_t remain = heap_capacity - offset;
- size_t end = offset + ((r.next() % (remain + 1)) & ~(0x7));
+ size_t end = offset + RoundDown(r.next() % (remain + 1), kAlignment);
space_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(heap_begin) + offset,
reinterpret_cast<uintptr_t>(heap_begin) + end, c);
size_t manual = 0;
- for (uintptr_t k = offset; k < end; k += kObjectAlignment) {
+ for (uintptr_t k = offset; k < end; k += kAlignment) {
if (space_bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
manual++;
}
@@ -156,8 +157,12 @@ void compat_test() NO_THREAD_SAFETY_ANALYSIS {
}
}
-TEST_F(SpaceBitmapTest, Visitor) {
- compat_test();
+TEST_F(SpaceBitmapTest, VisitorObjectAlignment) {
+ RunTest<kObjectAlignment>();
+}
+
+TEST_F(SpaceBitmapTest, VisitorPageAlignment) {
+ RunTest<kPageSize>();
}
} // namespace accounting
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index f395314a4d..ac0f67bd69 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -29,7 +29,7 @@ inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* by
}
void* m = AllocFromRun(self, size, bytes_allocated);
// Check if the returned memory is really all zero.
- if (kCheckZeroMemory && m != NULL) {
+ if (kCheckZeroMemory && m != nullptr) {
byte* bytes = reinterpret_cast<byte*>(m);
for (size_t i = 0; i < size; ++i) {
DCHECK_EQ(bytes[i], 0);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index cbefa6aec2..ff59016423 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -32,6 +32,10 @@ namespace allocator {
extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);
+static constexpr bool kUsePrefetchDuringAllocRun = true;
+static constexpr bool kPrefetchNewRunDataByZeroing = false;
+static constexpr size_t kPrefetchStride = 64;
+
size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
@@ -39,6 +43,9 @@ size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
+size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
+RosAlloc::Run* RosAlloc::dedicated_full_run_ =
+ reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
PageReleaseMode page_release_mode, size_t page_release_size_threshold)
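
The hunk above introduces dedicated_full_run_: a statically allocated run that is permanently "full". Instead of a null thread-local run pointer, every thread points at it by default, so the allocation fast path needs no null check; allocation simply fails and falls into the refill path. A generic sketch of the always-fails sentinel (the Run layout here is invented for illustration):

    #include <cstddef>

    struct Run {
      // Returns a slot pointer, or nullptr when the run is full.
      void* AllocSlot() {
        if (next_free_ >= num_slots_) return nullptr;
        return &slots_[next_free_++];
      }
      size_t next_free_ = 0;
      size_t num_slots_ = 0;   // 0 => permanently full.
      void* slots_[16] = {};
    };

    // The sentinel: zero slots, so AllocSlot() always returns nullptr.
    // Threads point at this instead of nullptr, removing a branch from
    // the fast path (compare RosAlloc::dedicated_full_run_ above).
    static Run dedicated_full_run;

    void* AllocFast(Run*& thread_local_run) {
      void* slot = thread_local_run->AllocSlot();  // No null check needed.
      if (slot == nullptr) {
        // Slow path: refill thread_local_run and retry (elided).
      }
      return slot;
    }
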
@@ -62,8 +69,9 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
<< ", max_capacity=" << std::dec << max_capacity_;
memset(current_runs_, 0, sizeof(current_runs_));
for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
- size_bracket_locks_[i] = new Mutex("an rosalloc size bracket lock",
- kRosAllocBracketLock);
+ size_bracket_lock_names[i] =
+ StringPrintf("an rosalloc size bracket %d lock", static_cast<int>(i));
+ size_bracket_locks_[i] = new Mutex(size_bracket_lock_names[i].c_str(), kRosAllocBracketLock);
}
DCHECK_EQ(footprint_, capacity_);
size_t num_of_pages = footprint_ / kPageSize;
@@ -71,7 +79,7 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
std::string error_msg;
page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL, RoundUp(max_num_of_pages, kPageSize),
PROT_READ | PROT_WRITE, false, &error_msg));
- CHECK(page_map_mem_map_.get() != NULL) << "Couldn't allocate the page map : " << error_msg;
+ CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
page_map_ = page_map_mem_map_->Begin();
page_map_size_ = num_of_pages;
max_page_map_size_ = max_num_of_pages;
@@ -103,7 +111,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
lock_.AssertHeld(self);
DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
FreePageRun* res = NULL;
- size_t req_byte_size = num_pages * kPageSize;
+ const size_t req_byte_size = num_pages * kPageSize;
// Find the lowest address free page run that's large enough.
for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
FreePageRun* fpr = *it;
@@ -260,8 +268,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
break;
}
if (kIsDebugBuild) {
- // Clear the first page which isn't madvised away in the debug
- // build for the magic number.
+ // Clear the first page, which debug builds keep resident for the magic number.
memset(res, 0, kPageSize);
}
if (kTraceRosAlloc) {
@@ -279,7 +286,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
return nullptr;
}
-void RosAlloc::FreePages(Thread* self, void* ptr) {
+size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
lock_.AssertHeld(self);
size_t pm_idx = ToPageMapIndex(ptr);
DCHECK_LT(pm_idx, page_map_size_);
@@ -298,7 +305,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
<< static_cast<int>(pm_type) << ", ptr=" << std::hex
<< reinterpret_cast<intptr_t>(ptr);
- return;
+ return 0;
}
// Update the page map and count the number of pages.
size_t num_pages = 1;
@@ -310,10 +317,21 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
num_pages++;
idx++;
}
+ const size_t byte_size = num_pages * kPageSize;
+ if (already_zero) {
+ if (kCheckZeroMemory) {
+ const uword* word_ptr = reinterpret_cast<uword*>(ptr);
+ for (size_t i = 0; i < byte_size / sizeof(uword); ++i) {
+ CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
+ }
+ }
+ } else if (!DoesReleaseAllPages()) {
+ memset(ptr, 0, byte_size);
+ }
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
- << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
+ << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
<< "(" << std::dec << (num_pages * kPageSize) << ")";
}
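
FreePages now takes an already_zero flag and returns the freed byte count. The zeroing policy in the hunks above: if the caller guarantees the pages are zero, checked builds verify it word by word; otherwise the pages are zeroed at free time unless the release mode will madvise them away (madvised pages come back zero-filled from the kernel anyway). A compact sketch of that policy:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    constexpr bool kCheckZeroMemory = true;  // Debug-build style checking.

    // Zero-on-free policy: skip the memset when the kernel will hand the
    // pages back zeroed (releases_all_pages), or when the caller already
    // zeroed them (then just verify in checked builds).
    void ApplyFreeZeroPolicy(void* ptr, size_t byte_size,
                             bool already_zero, bool releases_all_pages) {
      if (already_zero) {
        if (kCheckZeroMemory) {
          const uintptr_t* words = static_cast<const uintptr_t*>(ptr);
          for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
            assert(words[i] == 0 && "caller promised zeroed pages");
          }
        }
      } else if (!releases_all_pages) {
        std::memset(ptr, 0, byte_size);  // Pay the cost at free, not at alloc.
      }
    }
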
@@ -322,8 +340,8 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
if (kIsDebugBuild) {
fpr->magic_num_ = kMagicNumFree;
}
- fpr->SetByteSize(this, num_pages * kPageSize);
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ fpr->SetByteSize(this, byte_size);
+ DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));
DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
if (!free_page_runs_.empty()) {
@@ -349,6 +367,10 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
if (kTraceRosAlloc) {
LOG(INFO) << "Success";
}
+ // Clear magic num since this is no longer the start of a free page run.
+ if (kIsDebugBuild) {
+ h->magic_num_ = 0;
+ }
free_page_runs_.erase(it++);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
@@ -395,6 +417,10 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
}
l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ // Clear magic num since this is no longer the start of a free page run.
+ if (kIsDebugBuild) {
+ fpr->magic_num_ = 0;
+ }
fpr = l;
} else {
// Not adjacent. Stop.
@@ -422,6 +448,7 @@ void RosAlloc::FreePages(Thread* self, void* ptr) {
LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
<< " into free_page_runs_";
}
+ return byte_size;
}
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
@@ -438,34 +465,29 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
return nullptr;
}
- if (bytes_allocated != NULL) {
- *bytes_allocated = num_pages * kPageSize;
- }
+ const size_t total_bytes = num_pages * kPageSize;
+ *bytes_allocated = total_bytes;
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
<< "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
<< "(" << std::dec << (num_pages * kPageSize) << ")";
}
- if (!DoesReleaseAllPages()) {
- // If it does not release all pages, pages may not be zeroed out.
- memset(r, 0, size);
- }
// Check if the returned memory is really all zero.
if (kCheckZeroMemory) {
- byte* bytes = reinterpret_cast<byte*>(r);
- for (size_t i = 0; i < size; ++i) {
- DCHECK_EQ(bytes[i], 0);
+ CHECK_EQ(total_bytes % sizeof(uword), 0U);
+ const uword* words = reinterpret_cast<uword*>(r);
+ for (size_t i = 0; i < total_bytes / sizeof(uword); ++i) {
+ CHECK_EQ(words[i], 0U);
}
}
return r;
}
-void RosAlloc::FreeInternal(Thread* self, void* ptr) {
+size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
DCHECK_LE(base_, ptr);
DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
- bool free_from_run = false;
- Run* run = NULL;
+ Run* run = nullptr;
{
MutexLock mu(self, lock_);
DCHECK_LT(pm_idx, page_map_size_);
@@ -477,16 +499,14 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
switch (page_map_[pm_idx]) {
case kPageMapEmpty:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
case kPageMapLargeObject:
- FreePages(self, ptr);
- return;
+ return FreePages(self, ptr, false);
case kPageMapLargeObjectPart:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
case kPageMapRun:
case kPageMapRunPart: {
- free_from_run = true;
size_t pi = pm_idx;
DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
// Find the beginning of the run.
@@ -501,56 +521,69 @@ void RosAlloc::FreeInternal(Thread* self, void* ptr) {
}
default:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
- return;
+ return 0;
}
}
- if (LIKELY(free_from_run)) {
- DCHECK(run != NULL);
- FreeFromRun(self, ptr, run);
- }
+ DCHECK(run != nullptr);
+ return FreeFromRun(self, ptr, run);
}
-void RosAlloc::Free(Thread* self, void* ptr) {
+size_t RosAlloc::Free(Thread* self, void* ptr) {
ReaderMutexLock rmu(self, bulk_free_lock_);
- FreeInternal(self, ptr);
+ return FreeInternal(self, ptr);
}
-RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
- Run* new_run;
- size_t num_pages = numOfPages[idx];
- // Get the lowest address non-full run from the binary tree.
- Run* temp = NULL;
- std::set<Run*>* bt = &non_full_runs_[idx];
- std::set<Run*>::iterator found = bt->lower_bound(temp);
- if (found != bt->end()) {
- // If there's one, use it as the current run.
- Run* non_full_run = *found;
- DCHECK(non_full_run != NULL);
- new_run = non_full_run;
- DCHECK_EQ(new_run->is_thread_local_, 0);
- bt->erase(found);
- DCHECK_EQ(non_full_run->is_thread_local_, 0);
- } else {
- // If there's none, allocate a new run and use it as the
- // current run.
- {
- MutexLock mu(self, lock_);
- new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
- }
- if (new_run == NULL) {
- return NULL;
- }
+RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
+ RosAlloc::Run* new_run = nullptr;
+ {
+ MutexLock mu(self, lock_);
+ new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
+ }
+ if (LIKELY(new_run != nullptr)) {
if (kIsDebugBuild) {
new_run->magic_num_ = kMagicNum;
}
new_run->size_bracket_idx_ = idx;
- new_run->top_slot_idx_ = 0;
- new_run->ClearBitMaps();
- new_run->to_be_bulk_freed_ = false;
+ new_run->SetAllocBitMapBitsForInvalidSlots();
+ DCHECK(!new_run->IsThreadLocal());
+ DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
+ DCHECK(!new_run->to_be_bulk_freed_);
+ if (kUsePrefetchDuringAllocRun && idx <= kMaxThreadLocalSizeBracketIdx) {
+ // Take ownership of the cache lines if we are likely to be thread local run.
+ if (kPrefetchNewRunDataByZeroing) {
+ // Zeroing the data is sometimes faster than prefetching but it increases memory usage
+ // since we end up dirtying zero pages which may have been madvised.
+ new_run->ZeroData();
+ } else {
+ const size_t num_of_slots = numOfSlots[idx];
+ const size_t bracket_size = bracketSizes[idx];
+ const size_t num_of_bytes = num_of_slots * bracket_size;
+ byte* begin = reinterpret_cast<byte*>(new_run) + headerSizes[idx];
+ for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
+ __builtin_prefetch(begin + i);
+ }
+ }
+ }
}
return new_run;
}
+RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
+ // Get the lowest address non-full run from the binary tree.
+ std::set<Run*>* const bt = &non_full_runs_[idx];
+ if (!bt->empty()) {
+ // If there's one, use it as the current run.
+ auto it = bt->begin();
+ Run* non_full_run = *it;
+ DCHECK(non_full_run != nullptr);
+ DCHECK(!non_full_run->IsThreadLocal());
+ bt->erase(it);
+ return non_full_run;
+ }
+ // If there's none, allocate a new run and use it as the current run.
+ return AllocRun(self, idx);
+}
+
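
RefillRun() relies on std::set<Run*> with the default comparator being ordered by address, so begin() is the lowest-address non-full run; reusing low runs first keeps live data packed toward the start of the space. A compilable toy of that property:

  #include <cassert>
  #include <set>

  int main() {
    int pages[3];  // stand-ins for runs at increasing addresses
    std::set<int*> non_full_runs = {&pages[2], &pages[0], &pages[1]};
    // std::less on pointers into the same array orders them by address,
    // so begin() yields the lowest-address "run", as RefillRun() expects.
    assert(*non_full_runs.begin() == &pages[0]);
    return 0;
  }
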
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
@@ -566,66 +599,66 @@ void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
// Use a thread-local run.
Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
- if (UNLIKELY(thread_local_run == NULL)) {
+ // The run may be the invalid dedicated full run; that's fine, since allocating from it always fails.
+ if (kIsDebugBuild) {
+ // Need the lock to prevent race conditions.
MutexLock mu(self, *size_bracket_locks_[idx]);
- thread_local_run = RefillRun(self, idx);
- if (UNLIKELY(thread_local_run == NULL)) {
- return NULL;
- }
- DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
- DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
- thread_local_run->is_thread_local_ = 1;
- self->SetRosAllocRun(idx, thread_local_run);
- DCHECK(!thread_local_run->IsFull());
+ CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+ CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
}
-
- DCHECK(thread_local_run != NULL);
- DCHECK_NE(thread_local_run->is_thread_local_, 0);
+ DCHECK(thread_local_run != nullptr);
+ DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
slot_addr = thread_local_run->AllocSlot();
-
- if (UNLIKELY(slot_addr == NULL)) {
+ // The allocation must fail if the run is invalid.
+ DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
+ << "allocated from an invalid run";
+ if (UNLIKELY(slot_addr == nullptr)) {
// The run got full. Try to free slots.
DCHECK(thread_local_run->IsFull());
MutexLock mu(self, *size_bracket_locks_[idx]);
bool is_all_free_after_merge;
+ // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
+ DCHECK_NE(thread_local_run, dedicated_full_run_);
// Some slot got freed. Keep it.
DCHECK(!thread_local_run->IsFull());
DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
if (is_all_free_after_merge) {
- // Reinstate the bump index mode if it's all free.
- DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
- thread_local_run->top_slot_idx_ = 0;
+ // Check that the bitmap idx is back at 0 if it's all free.
+ DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
}
} else {
// No slots got freed. Try to refill the thread-local run.
DCHECK(thread_local_run->IsFull());
- self->SetRosAllocRun(idx, nullptr);
- thread_local_run->is_thread_local_ = 0;
- if (kIsDebugBuild) {
- full_runs_[idx].insert(thread_local_run);
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
- << reinterpret_cast<intptr_t>(thread_local_run)
- << " into full_runs_[" << std::dec << idx << "]";
+ if (thread_local_run != dedicated_full_run_) {
+ self->SetRosAllocRun(idx, dedicated_full_run_);
+ thread_local_run->SetIsThreadLocal(false);
+ if (kIsDebugBuild) {
+ full_runs_[idx].insert(thread_local_run);
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
+ << reinterpret_cast<intptr_t>(thread_local_run)
+ << " into full_runs_[" << std::dec << idx << "]";
+ }
}
+ DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+ DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
}
- DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
- DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
+
thread_local_run = RefillRun(self, idx);
if (UNLIKELY(thread_local_run == NULL)) {
return NULL;
}
DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
- thread_local_run->is_thread_local_ = 1;
+ thread_local_run->SetIsThreadLocal(true);
self->SetRosAllocRun(idx, thread_local_run);
DCHECK(!thread_local_run->IsFull());
}
DCHECK(thread_local_run != NULL);
DCHECK(!thread_local_run->IsFull());
- DCHECK_NE(thread_local_run->is_thread_local_, 0);
+ DCHECK(thread_local_run->IsThreadLocal());
slot_addr = thread_local_run->AllocSlot();
// Must succeed now with a new run.
DCHECK(slot_addr != NULL);
@@ -646,7 +679,7 @@ void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
}
DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
- current_run->is_thread_local_ = 0;
+ current_run->SetIsThreadLocal(false);
current_runs_[idx] = current_run;
DCHECK(!current_run->IsFull());
}
@@ -673,7 +706,7 @@ void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
DCHECK(current_run != NULL);
DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
- current_run->is_thread_local_ = 0;
+ current_run->SetIsThreadLocal(false);
current_runs_[idx] = current_run;
DCHECK(!current_run->IsFull());
slot_addr = current_run->AllocSlot();
@@ -686,27 +719,27 @@ void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
<< "(" << std::dec << (bracket_size) << ")";
}
}
- if (LIKELY(bytes_allocated != NULL)) {
- *bytes_allocated = bracket_size;
- }
- memset(slot_addr, 0, size);
+ DCHECK(bytes_allocated != nullptr);
+ *bytes_allocated = bracket_size;
+ // Caller verifies that it is all 0.
return slot_addr;
}
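
Note that *bytes_allocated reports the bracket size rather than the requested size, since the caller owns the whole slot. A toy version of the rounding involved; the 16-byte quantum is an assumption, as the real bracket spacing isn't shown in this hunk:

  #include <cassert>
  #include <cstddef>

  // Illustrative only: round a request up to a 16-byte bracket, the value
  // a caller would then see in *bytes_allocated.
  size_t ToyBracketSize(size_t size) {
    const size_t kQuantum = 16;  // assumed bracket spacing
    return (size + kQuantum - 1) & ~(kQuantum - 1);
  }

  int main() {
    assert(ToyBracketSize(1) == 16);
    assert(ToyBracketSize(17) == 32);  // a 17-byte request consumes a 32-byte slot
    return 0;
  }
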
-void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
+size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
DCHECK_EQ(run->magic_num_, kMagicNum);
DCHECK_LT(run, ptr);
DCHECK_LT(ptr, run->End());
- size_t idx = run->size_bracket_idx_;
- MutexLock mu(self, *size_bracket_locks_[idx]);
+ const size_t idx = run->size_bracket_idx_;
+ const size_t bracket_size = bracketSizes[idx];
bool run_was_full = false;
+ MutexLock mu(self, *size_bracket_locks_[idx]);
if (kIsDebugBuild) {
run_was_full = run->IsFull();
}
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
}
- if (LIKELY(run->is_thread_local_ != 0)) {
+ if (LIKELY(run->IsThreadLocal())) {
// It's a thread-local run. Just mark the thread-local free bit map and return.
DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
@@ -717,7 +750,7 @@ void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
<< reinterpret_cast<intptr_t>(run);
}
// A thread local run will be kept as a thread local even if it's become all free.
- return;
+ return bracket_size;
}
// Free the slot in the run.
run->FreeSlot(ptr);
@@ -737,9 +770,10 @@ void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
}
DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
+ run->ZeroHeader();
{
MutexLock mu(self, lock_);
- FreePages(self, run);
+ FreePages(self, run, true);
}
} else {
// It is not completely free. If it wasn't the current run or
@@ -769,6 +803,7 @@ void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
}
}
}
+ return bracket_size;
}
std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
@@ -794,7 +829,7 @@ std::string RosAlloc::Run::Dump() {
<< " size_bracket_idx=" << idx
<< " is_thread_local=" << static_cast<int>(is_thread_local_)
<< " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
- << " top_slot_idx=" << top_slot_idx_
+ << " first_search_vec_idx=" << first_search_vec_idx_
<< " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
<< " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
<< " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
@@ -802,64 +837,52 @@ std::string RosAlloc::Run::Dump() {
return stream.str();
}
-void* RosAlloc::Run::AllocSlot() {
- size_t idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- DCHECK_LE(top_slot_idx_, num_slots);
- if (LIKELY(top_slot_idx_ < num_slots)) {
- // If it's in bump index mode, grab the top slot and increment the top index.
- size_t slot_idx = top_slot_idx_;
- byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
- << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
- }
- top_slot_idx_++;
- size_t vec_idx = slot_idx / 32;
- size_t vec_off = slot_idx % 32;
- uint32_t* vec = &alloc_bit_map_[vec_idx];
- DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
- *vec |= 1 << vec_off;
- DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
- return slot_addr;
- }
- // Not in bump index mode. Search the alloc bit map for an empty slot.
- size_t num_vec = RoundUp(num_slots, 32) / 32;
- size_t slot_idx = 0;
- bool found_slot = false;
- for (size_t v = 0; v < num_vec; v++) {
- uint32_t *vecp = &alloc_bit_map_[v];
- uint32_t ffz1 = __builtin_ffs(~*vecp);
- uint32_t ffz;
- // TODO: Use LIKELY or UNLIKELY here?
- if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
+inline void* RosAlloc::Run::AllocSlot() {
+ const size_t idx = size_bracket_idx_;
+ while (true) {
+ if (kIsDebugBuild) {
+ // Make sure that no slots leaked; the bitmap should be full for all vectors before this index.
+ for (size_t i = 0; i < first_search_vec_idx_; ++i) {
+ CHECK_EQ(~alloc_bit_map_[i], 0U);
+ }
+ }
+ uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
+ uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
+ if (LIKELY(ffz1 != 0)) {
+ const uint32_t ffz = ffz1 - 1;
+ const uint32_t slot_idx = ffz + first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
+ const uint32_t mask = 1U << ffz;
+ DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
// Found an empty slot. Set the bit.
- DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
- *vecp |= (1 << ffz);
- DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
- slot_idx = ffz + v * 32;
- found_slot = true;
- break;
+ DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
+ *alloc_bitmap_ptr |= mask;
+ DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
+ byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
+ << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
+ }
+ return slot_addr;
}
- }
- if (LIKELY(found_slot)) {
- byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
- << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
+ const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
+ if (first_search_vec_idx_ + 1 >= num_words) {
+ DCHECK(IsFull());
+ // Already at the last word, return null.
+ return nullptr;
}
- return slot_addr;
+ // Increase the index to the next word and try again.
+ ++first_search_vec_idx_;
}
- return NULL;
}
-inline void RosAlloc::Run::FreeSlot(void* ptr) {
- DCHECK_EQ(is_thread_local_, 0);
- byte idx = size_bracket_idx_;
- size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
+void RosAlloc::Run::FreeSlot(void* ptr) {
+ DCHECK(!IsThreadLocal());
+ const byte idx = size_bracket_idx_;
+ const size_t bracket_size = bracketSizes[idx];
+ const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- (reinterpret_cast<byte*>(this) + headerSizes[idx]);
- DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
- size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
+ DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
+ size_t slot_idx = offset_from_slot_base / bracket_size;
DCHECK_LT(slot_idx, numOfSlots[idx]);
size_t vec_idx = slot_idx / 32;
if (kIsDebugBuild) {
@@ -868,9 +891,14 @@ inline void RosAlloc::Run::FreeSlot(void* ptr) {
}
size_t vec_off = slot_idx % 32;
uint32_t* vec = &alloc_bit_map_[vec_idx];
- DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
- *vec &= ~(1 << vec_off);
- DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
+ first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
+ const uint32_t mask = 1U << vec_off;
+ DCHECK_NE(*vec & mask, 0U);
+ *vec &= ~mask;
+ DCHECK_EQ(*vec & mask, 0U);
+ // Zero out the memory.
+ // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
+ memset(ptr, 0, bracket_size);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
<< ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
@@ -878,11 +906,11 @@ inline void RosAlloc::Run::FreeSlot(void* ptr) {
}
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
- DCHECK_NE(is_thread_local_, 0);
+ DCHECK(IsThreadLocal());
// Free slots in the alloc bit map based on the thread local free bit map.
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
+ const size_t idx = size_bracket_idx_;
+ const size_t num_of_slots = numOfSlots[idx];
+ const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
bool changed = false;
uint32_t* vecp = &alloc_bit_map_[0];
uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
@@ -892,6 +920,7 @@ inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_
uint32_t vec_before = *vecp;
uint32_t vec_after;
if (tl_free_vec != 0) {
+ first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
vec_after = vec_before & ~tl_free_vec;
*vecp = vec_after;
changed = true;
@@ -900,7 +929,13 @@ inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_
vec_after = vec_before;
}
if (vec_after != 0) {
- is_all_free_after = false;
+ if (v == num_vec - 1) {
+ // Only not all free if a bit other than the mask bits is set.
+ is_all_free_after =
+ is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
+ } else {
+ is_all_free_after = false;
+ }
}
DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
}
@@ -911,16 +946,15 @@ inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_
}
inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
- DCHECK_EQ(is_thread_local_, 0);
+ DCHECK(!IsThreadLocal());
// Free slots in the alloc bit map based on the bulk free bit map.
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
+ const size_t num_vec = NumberOfBitmapVectors();
uint32_t* vecp = &alloc_bit_map_[0];
uint32_t* free_vecp = &BulkFreeBitMap()[0];
for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
uint32_t free_vec = *free_vecp;
if (free_vec != 0) {
+ first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
*vecp &= ~free_vec;
*free_vecp = 0; // clear the bulk free bit map.
}
@@ -929,11 +963,9 @@ inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
}
inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
- DCHECK_NE(is_thread_local_, 0);
+ DCHECK(IsThreadLocal());
// Union the thread local bit map with the bulk free bit map.
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
+ size_t num_vec = NumberOfBitmapVectors();
uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
uint32_t* from_vecp = &BulkFreeBitMap()[0];
for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
@@ -947,66 +979,71 @@ inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
}
inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
- DCHECK_NE(is_thread_local_, 0);
+ DCHECK(IsThreadLocal());
MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}
-inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
- MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkFreeBitMap");
+inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
+ return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkFreeBitMap");
}
-inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
- const char* caller_name) {
- byte idx = size_bracket_idx_;
- size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
+inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
+ const char* caller_name) {
+ const byte idx = size_bracket_idx_;
+ const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- (reinterpret_cast<byte*>(this) + headerSizes[idx]);
- DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
- size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
+ const size_t bracket_size = bracketSizes[idx];
+ memset(ptr, 0, bracket_size);
+ DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
+ size_t slot_idx = offset_from_slot_base / bracket_size;
DCHECK_LT(slot_idx, numOfSlots[idx]);
size_t vec_idx = slot_idx / 32;
if (kIsDebugBuild) {
- size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
+ size_t num_vec = NumberOfBitmapVectors();
DCHECK_LT(vec_idx, num_vec);
}
size_t vec_off = slot_idx % 32;
uint32_t* vec = &free_bit_map_base[vec_idx];
- DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
- *vec |= 1 << vec_off;
- DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
+ const uint32_t mask = 1U << vec_off;
+ DCHECK_EQ(*vec & mask, 0U);
+ *vec |= mask;
+ DCHECK_NE(*vec & mask, 0U);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
<< reinterpret_cast<intptr_t>(ptr)
<< ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
}
+ return bracket_size;
+}
+
+inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
+ const size_t kBitsPerVec = 32;
+ DCHECK_GE(num_vec * kBitsPerVec, num_slots);
+ size_t remain = num_vec * kBitsPerVec - num_slots;
+ DCHECK_NE(remain, kBitsPerVec);
+ return ((1U << remain) - 1) << (kBitsPerVec - remain);
}
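
A worked example of the mask: with 50 slots a run needs two 32-bit vectors, and the last one covers slot indices 32-63, so the 14 bits for nonexistent slots 50-63 must stay set so that AllocSlot() never returns them.

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t num_slots = 50, num_vec = 2, kBitsPerVec = 32;
    const uint32_t remain = num_vec * kBitsPerVec - num_slots;  // 14 invalid slots
    const uint32_t mask = ((1u << remain) - 1) << (kBitsPerVec - remain);
    assert(remain == 14u);
    assert(mask == 0xFFFC0000u);  // top 14 bits of the last vector stay "allocated"
    return 0;
  }
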
inline bool RosAlloc::Run::IsAllFree() {
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
- for (size_t v = 0; v < num_vec; v++) {
+ const byte idx = size_bracket_idx_;
+ const size_t num_slots = numOfSlots[idx];
+ const size_t num_vec = NumberOfBitmapVectors();
+ DCHECK_NE(num_vec, 0U);
+ // Check the last vector after the loop since it uses a special case for the masked bits.
+ for (size_t v = 0; v < num_vec - 1; v++) {
uint32_t vec = alloc_bit_map_[v];
if (vec != 0) {
return false;
}
}
- return true;
+ // Make sure the last word is equal to the mask, all other bits must be 0.
+ return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}
inline bool RosAlloc::Run::IsFull() {
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
- size_t slots = 0;
- for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK_GE(num_slots, slots);
- uint32_t vec = alloc_bit_map_[v];
- uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
- : (1 << (num_slots - slots)) - 1;
- if ((num_slots - slots) >= 32) {
- DCHECK_EQ(mask, static_cast<uint32_t>(-1));
- }
- if (vec != mask) {
+ const size_t num_vec = NumberOfBitmapVectors();
+ for (size_t v = 0; v < num_vec; ++v) {
+ if (~alloc_bit_map_[v] != 0) {
return false;
}
}
@@ -1014,9 +1051,7 @@ inline bool RosAlloc::Run::IsFull() {
}
inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
+ const size_t num_vec = NumberOfBitmapVectors();
for (size_t v = 0; v < num_vec; v++) {
uint32_t vec = BulkFreeBitMap()[v];
if (vec != 0) {
@@ -1027,9 +1062,7 @@ inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
}
inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
+ const size_t num_vec = NumberOfBitmapVectors();
for (size_t v = 0; v < num_vec; v++) {
uint32_t vec = ThreadLocalFreeBitMap()[v];
if (vec != 0) {
@@ -1039,11 +1072,31 @@ inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
return true;
}
-inline void RosAlloc::Run::ClearBitMaps() {
- byte idx = size_bracket_idx_;
- size_t num_slots = numOfSlots[idx];
- size_t num_vec = RoundUp(num_slots, 32) / 32;
- memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
+inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
+ const size_t idx = size_bracket_idx_;
+ const size_t num_slots = numOfSlots[idx];
+ const size_t num_vec = RoundUp(num_slots, 32) / 32;
+ DCHECK_NE(num_vec, 0U);
+ // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
+ // don't represent valid slots.
+ alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
+}
+
+inline void RosAlloc::Run::ZeroHeader() {
+ const byte idx = size_bracket_idx_;
+ memset(this, 0, headerSizes[idx]);
+}
+
+inline void RosAlloc::Run::ZeroData() {
+ const byte idx = size_bracket_idx_;
+ byte* slot_begin = reinterpret_cast<byte*>(this) + headerSizes[idx];
+ memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
+}
+
+inline void RosAlloc::Run::FillAllocBitMap() {
+ size_t num_vec = NumberOfBitmapVectors();
+ memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
+ first_search_vec_idx_ = num_vec - 1; // No free bits in any of the bitmap words.
}
void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
@@ -1075,15 +1128,16 @@ void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change.
-static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;
+static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;
-void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
+size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
+ size_t freed_bytes = 0;
if (false) {
// Used only to test Free() as GC uses only BulkFree().
for (size_t i = 0; i < num_ptrs; ++i) {
- FreeInternal(self, ptrs[i]);
+ freed_bytes += FreeInternal(self, ptrs[i]);
}
- return;
+ return freed_bytes;
}
WriterMutexLock wmu(self, bulk_free_lock_);
@@ -1097,11 +1151,10 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
#endif
for (size_t i = 0; i < num_ptrs; i++) {
void* ptr = ptrs[i];
- ptrs[i] = NULL;
DCHECK_LE(base_, ptr);
DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
- Run* run = NULL;
+ Run* run = nullptr;
if (kReadPageMapEntryWithoutLockInBulkFree) {
// Read the page map entries without locking the lock.
byte page_map_entry = page_map_[pm_idx];
@@ -1112,104 +1165,74 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
}
if (LIKELY(page_map_entry == kPageMapRun)) {
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
size_t pi = pm_idx;
- DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
// Find the beginning of the run.
- while (page_map_[pi] != kPageMapRun) {
- pi--;
+ do {
+ --pi;
DCHECK_LT(pi, capacity_ / kPageSize);
- }
- DCHECK_EQ(page_map_[pi], kPageMapRun);
+ } while (page_map_[pi] != kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
MutexLock mu(self, lock_);
- FreePages(self, ptr);
+ freed_bytes += FreePages(self, ptr, false);
continue;
} else {
LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
}
- DCHECK(run != NULL);
- // Set the bit in the bulk free bit map.
- run->MarkBulkFreeBitMap(ptr);
-#ifdef HAVE_ANDROID_OS
- if (!run->to_be_bulk_freed_) {
- run->to_be_bulk_freed_ = true;
- runs.push_back(run);
- }
-#else
- runs.insert(run);
-#endif
} else {
// Read the page map entries with a lock.
- bool free_from_run = false;
- {
- MutexLock mu(self, lock_);
- DCHECK_LT(pm_idx, page_map_size_);
- byte page_map_entry = page_map_[pm_idx];
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
- << std::dec << pm_idx
- << ", page_map_entry=" << static_cast<int>(page_map_entry);
- }
- if (LIKELY(page_map_entry == kPageMapRun)) {
- free_from_run = true;
- run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK_EQ(run->magic_num_, kMagicNum);
- } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
- free_from_run = true;
- size_t pi = pm_idx;
- DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
- // Find the beginning of the run.
- while (page_map_[pi] != kPageMapRun) {
- pi--;
- DCHECK_LT(pi, capacity_ / kPageSize);
- }
- DCHECK_EQ(page_map_[pi], kPageMapRun);
- run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK_EQ(run->magic_num_, kMagicNum);
- } else if (page_map_entry == kPageMapLargeObject) {
- FreePages(self, ptr);
- } else {
- LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
- }
+ MutexLock mu(self, lock_);
+ DCHECK_LT(pm_idx, page_map_size_);
+ byte page_map_entry = page_map_[pm_idx];
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
+ << std::dec << pm_idx
+ << ", page_map_entry=" << static_cast<int>(page_map_entry);
+ }
+ if (LIKELY(page_map_entry == kPageMapRun)) {
+ run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
+ } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
+ size_t pi = pm_idx;
+ // Find the beginning of the run.
+ do {
+ --pi;
+ DCHECK_LT(pi, capacity_ / kPageSize);
+ } while (page_map_[pi] != kPageMapRun);
+ run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
+ } else if (page_map_entry == kPageMapLargeObject) {
+ freed_bytes += FreePages(self, ptr, false);
+ continue;
+ } else {
+ LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
}
- if (LIKELY(free_from_run)) {
- DCHECK(run != NULL);
- // Set the bit in the bulk free bit map.
- run->MarkBulkFreeBitMap(ptr);
+ }
+ DCHECK(run != nullptr);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
+ // Set the bit in the bulk free bit map.
+ freed_bytes += run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
- if (!run->to_be_bulk_freed_) {
- run->to_be_bulk_freed_ = true;
- runs.push_back(run);
- }
+ if (!run->to_be_bulk_freed_) {
+ run->to_be_bulk_freed_ = true;
+ runs.push_back(run);
+ }
#else
- runs.insert(run);
+ runs.insert(run);
#endif
- }
- }
}
// Now, iterate over the affected runs and update the alloc bit map
// based on the bulk free bit map (for non-thread-local runs) and
// union the bulk free bit map into the thread-local free bit map
// (for thread-local runs.)
-#ifdef HAVE_ANDROID_OS
- typedef std::vector<Run*>::iterator It;
-#else
- typedef hash_set<Run*, hash_run, eq_run>::iterator It;
-#endif
- for (It it = runs.begin(); it != runs.end(); ++it) {
- Run* run = *it;
+ for (Run* run : runs) {
#ifdef HAVE_ANDROID_OS
DCHECK(run->to_be_bulk_freed_);
run->to_be_bulk_freed_ = false;
#endif
size_t idx = run->size_bracket_idx_;
MutexLock mu(self, *size_bracket_locks_[idx]);
- if (run->is_thread_local_ != 0) {
+ if (run->IsThreadLocal()) {
DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
@@ -1218,7 +1241,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
<< std::hex << reinterpret_cast<intptr_t>(run);
}
- DCHECK_NE(run->is_thread_local_, 0);
+ DCHECK(run->IsThreadLocal());
// A thread local run will be kept as a thread local even if
// it's become all free.
} else {
@@ -1268,8 +1291,9 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
DCHECK(non_full_runs->find(run) == non_full_runs->end());
}
if (!run_was_current) {
+ run->ZeroHeader();
MutexLock mu(self, lock_);
- FreePages(self, run);
+ FreePages(self, run, true);
}
} else {
// It is not completely free. If it wasn't the current run or
@@ -1306,6 +1330,7 @@ void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
}
}
}
+ return freed_bytes;
}
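
BulkFree() likewise reports the total bytes reclaimed, so a sweep can account for an entire batch with one call. A sketch of that pattern under the same stand-in names as the Free() example above:

  // Sketch only: ptrs holds garbage objects gathered during sweeping.
  size_t freed_bytes = rosalloc->BulkFree(self, ptrs, num_ptrs);
  heap->RecordFree(num_ptrs, freed_bytes);  // one accounting call per batch
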
std::string RosAlloc::DumpPageMap() {
@@ -1379,7 +1404,7 @@ std::string RosAlloc::DumpPageMap() {
stream << "[" << i << "]=Run (start)"
<< " idx=" << idx
<< " numOfPages=" << numOfPages[idx]
- << " thread_local=" << static_cast<int>(run->is_thread_local_)
+ << " is_thread_local=" << run->is_thread_local_
<< " is_all_free=" << (run->IsAllFree() ? 1 : 0)
<< std::endl;
break;
@@ -1554,6 +1579,8 @@ void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_by
// The start of a run.
Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
+ // The dedicated full run lives outside the page map and holds no real allocations, so its
+ // slots are never visited here.
run->InspectAllSlots(handler, arg);
size_t num_pages = numOfPages[run->size_bracket_idx_];
if (kIsDebugBuild) {
@@ -1603,14 +1630,16 @@ void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
MutexLock mu(self, *size_bracket_locks_[idx]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
- if (thread_local_run != NULL) {
+ CHECK(thread_local_run != nullptr);
+ // Finding the dedicated full (invalid) run here means the bracket was already revoked.
+ DCHECK(thread_local_run->IsThreadLocal());
+ if (thread_local_run != dedicated_full_run_) {
+ thread->SetRosAllocRun(idx, dedicated_full_run_);
DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
- DCHECK_NE(thread_local_run->is_thread_local_, 0);
- thread->SetRosAllocRun(idx, nullptr);
// Note the thread local run may not be full here.
bool dont_care;
thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
- thread_local_run->is_thread_local_ = 0;
+ thread_local_run->SetIsThreadLocal(false);
thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
@@ -1626,7 +1655,8 @@ void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
}
} else if (thread_local_run->IsAllFree()) {
MutexLock mu(self, lock_);
- FreePages(self, thread_local_run);
+ thread_local_run->ZeroHeader();
+ FreePages(self, thread_local_run, true);
} else {
non_full_runs_[idx].insert(thread_local_run);
DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
@@ -1646,9 +1676,8 @@ void RosAlloc::RevokeAllThreadLocalRuns() {
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
- for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
- Thread* t = *it;
- RevokeThreadLocalRuns(t);
+ for (Thread* thread : thread_list) {
+ RevokeThreadLocalRuns(thread);
}
}
@@ -1660,7 +1689,7 @@ void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
MutexLock mu(self, *size_bracket_locks_[idx]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
- DCHECK(thread_local_run == nullptr);
+ DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
}
}
}
@@ -1768,6 +1797,15 @@ void RosAlloc::Initialize() {
<< ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];;
}
}
+ // Fill the alloc bitmap so nobody can successfully allocate from it.
+ if (kIsDebugBuild) {
+ dedicated_full_run_->magic_num_ = kMagicNum;
+ }
+ // It doesn't matter which size bracket we use since the main goal is to have allocation
+ // fail every single time anyone attempts to allocate from the dedicated full run.
+ dedicated_full_run_->size_bracket_idx_ = 0;
+ dedicated_full_run_->FillAllocBitMap();
+ dedicated_full_run_->SetIsThreadLocal(true);
}
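
The dedicated full run replaces nullptr as the sentinel for a revoked thread-local run: its bitmap is permanently full, so AllocSlot() on it fails naturally and the fast path needs no null check. A sketch of the resulting shape, where AllocSlowPath() is a hypothetical name for the refill logic in AllocFromRun() above:

  Run* run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));  // never nullptr now
  void* slot = run->AllocSlot();  // the dedicated full run always yields nullptr here
  if (UNLIKELY(slot == nullptr)) {
    slot = AllocSlowPath(self, idx);  // hypothetical: merge free bitmaps or refill
  }
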
void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
@@ -1865,6 +1903,7 @@ void RosAlloc::Verify() {
<< " and the run size : page index range " << i << " to " << (i + num_pages)
<< std::endl << DumpPageMap();
}
+ // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
runs.push_back(run);
i += num_pages;
CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
@@ -1889,34 +1928,25 @@ void RosAlloc::Verify() {
void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
- size_t idx = size_bracket_idx_;
+ const size_t idx = size_bracket_idx_;
CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
- size_t num_slots = numOfSlots[idx];
+ const size_t num_slots = numOfSlots[idx];
+ const size_t num_vec = RoundUp(num_slots, 32) / 32;
+ CHECK_GT(num_vec, 0U);
size_t bracket_size = IndexToBracketSize(idx);
CHECK_EQ(slot_base + num_slots * bracket_size,
reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
<< "Mismatch in the end address of the run " << Dump();
// Check that the bulk free bitmap is clean. It's only used during BulkFree().
CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
- // Check the bump index mode, if it's on.
- if (top_slot_idx_ < num_slots) {
- // If the bump index mode is on (top_slot_idx_ < num_slots), then
- // all of the slots after the top index must be free.
- for (size_t i = top_slot_idx_; i < num_slots; ++i) {
- size_t vec_idx = i / 32;
- size_t vec_off = i % 32;
- uint32_t vec = alloc_bit_map_[vec_idx];
- CHECK_EQ((vec & (1 << vec_off)), static_cast<uint32_t>(0))
- << "A slot >= top_slot_idx_ isn't free " << Dump();
- }
- } else {
- CHECK_EQ(top_slot_idx_, num_slots)
- << "If the bump index mode is off, the top index == the number of slots "
- << Dump();
- }
+ uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
+ // Make sure all the bits at the end of the run are set so that we don't allocate there.
+ CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
+ // Ensure that the first bitmap index is valid.
+ CHECK_LT(first_search_vec_idx_, num_vec);
// Check the thread local runs, the current runs, and the run sets.
- if (is_thread_local_) {
+ if (IsThreadLocal()) {
// If it's a thread local run, then it must be pointed to by an owner thread.
bool owner_found = false;
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
@@ -1978,7 +2008,6 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
}
}
// Check each slot.
- size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
DCHECK_GE(num_slots, slots) << "Out of bounds";
@@ -1989,7 +2018,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
bool is_allocated = ((vec >> i) & 0x1) != 0;
// If a thread local run, slots may be marked freed in the
// thread local free bitmap.
- bool is_thread_local_freed = is_thread_local_ && ((thread_local_free_vec >> i) & 0x1) != 0;
+ bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
if (is_allocated && !is_thread_local_freed) {
byte* slot_addr = slot_base + (slots + i) * bracket_size;
mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 5d9d75c22a..f7fa2da236 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -155,7 +155,7 @@ class RosAlloc {
// +-------------------+
// | to_be_bulk_freed |
// +-------------------+
- // | top_slot_idx |
+ // | first_search_vec_idx |
// +-------------------+
// | |
// | alloc bit map |
@@ -186,12 +186,12 @@ class RosAlloc {
//
class Run {
public:
- byte magic_num_; // The magic number used for debugging.
- byte size_bracket_idx_; // The index of the size bracket of this run.
- byte is_thread_local_; // True if this run is used as a thread-local run.
- byte to_be_bulk_freed_; // Used within BulkFree() to flag a run that's involved with a bulk free.
- uint32_t top_slot_idx_; // The top slot index when this run is in bump index mode.
- uint32_t alloc_bit_map_[0]; // The bit map that allocates if each slot is in use.
+ byte magic_num_; // The magic number used for debugging.
+ byte size_bracket_idx_; // The index of the size bracket of this run.
+ byte is_thread_local_; // True if this run is used as a thread-local run.
+ byte to_be_bulk_freed_; // Used within BulkFree() to flag a run that's involved with a bulk free.
+ uint32_t first_search_vec_idx_; // The index of the first bitmap vector which may contain an available slot.
+ uint32_t alloc_bit_map_[0]; // The bit map that allocates if each slot is in use.
// bulk_free_bit_map_[] : The bit map that is used for GC to
// temporarily mark the slots to free without using a lock. After
@@ -225,6 +225,16 @@ class RosAlloc {
void* End() {
return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
}
+ // Returns the number of bitmap words per run.
+ size_t NumberOfBitmapVectors() const {
+ return RoundUp(numOfSlots[size_bracket_idx_], 32) / 32;
+ }
+ void SetIsThreadLocal(bool is_thread_local) {
+ is_thread_local_ = is_thread_local ? 1 : 0;
+ }
+ bool IsThreadLocal() const {
+ return is_thread_local_ != 0;
+ }
// Frees slots in the allocation bit map with regard to the
// thread-local free bit map. Used when a thread-local run becomes
// full.
@@ -243,10 +253,13 @@ class RosAlloc {
void* AllocSlot();
// Frees a slot in a run. This is used in a non-bulk free.
void FreeSlot(void* ptr);
- // Marks the slots to free in the bulk free bit map.
- void MarkBulkFreeBitMap(void* ptr);
+ // Marks the slots to free in the bulk free bit map. Returns the bracket size.
+ size_t MarkBulkFreeBitMap(void* ptr);
// Marks the slots to free in the thread-local free bit map.
void MarkThreadLocalFreeBitMap(void* ptr);
+ // Returns the mask for the last bitmap vector: every bit that doesn't correspond to a valid
+ // slot is set, which keeps the allocation path from ever handing those bits out.
+ static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
// Returns true if all the slots in the run are not in use.
bool IsAllFree();
// Returns true if all the slots in the run are in use.
@@ -255,8 +268,14 @@ class RosAlloc {
bool IsBulkFreeBitmapClean();
// Returns true if the thread local free bit map is clean.
bool IsThreadLocalFreeBitmapClean();
- // Clear all the bit maps.
- void ClearBitMaps();
+ // Set the alloc_bit_map_ bits for slots that are past the end of the run.
+ void SetAllocBitMapBitsForInvalidSlots();
+ // Zero the run's data.
+ void ZeroData();
+ // Zero the run's header.
+ void ZeroHeader();
+ // Fill the alloc bitmap with 1s.
+ void FillAllocBitMap();
// Iterate over all the slots and apply the given function.
void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
// Dump the run metadata for debugging.
@@ -267,8 +286,9 @@ class RosAlloc {
EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
private:
- // The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap().
- void MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
+ // The common part of MarkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the bracket
+ // size.
+ size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
// Turns the bit map into a string for debugging.
static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
};
@@ -376,7 +396,7 @@ class RosAlloc {
return byte_offset / kPageSize;
}
// Returns the page map index from an address with rounding.
- size_t RoundDownToPageMapIndex(void* addr) {
+ size_t RoundDownToPageMapIndex(void* addr) const {
DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
}
@@ -446,12 +466,19 @@ class RosAlloc {
hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
// The set of free pages.
std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
+ // The dedicated full run is always full and is shared by all threads when revoking happens.
+ // This is an optimization since it enables us to avoid a null check for revoked runs.
+ static Run* dedicated_full_run_;
+ // Using size_t to ensure that it is at least word aligned.
+ static size_t dedicated_full_run_storage_[];
// The current runs where the allocations are first attempted for
// the size brackes that do not use thread-local
// runs. current_runs_[i] is guarded by size_bracket_locks_[i].
Run* current_runs_[kNumOfSizeBrackets];
// The mutexes, one per size bracket.
Mutex* size_bracket_locks_[kNumOfSizeBrackets];
+ // Bracket lock names (since locks only have char* names).
+ std::string size_bracket_lock_names[kNumOfSizeBrackets];
// The types of page map entries.
enum {
kPageMapEmpty = 0, // Not allocated.
@@ -493,20 +520,25 @@ class RosAlloc {
// Page-granularity alloc/free
void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Returns how many bytes were freed.
+ size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Allocate/free a run slot.
void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
LOCKS_EXCLUDED(lock_);
- void FreeFromRun(Thread* self, void* ptr, Run* run)
+ // Returns the bracket size.
+ size_t FreeFromRun(Thread* self, void* ptr, Run* run)
LOCKS_EXCLUDED(lock_);
+ // Used to allocate a new thread local run for a size bracket.
+ Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
+
// Used to acquire a new/reused run for a size bracket. Used when a
// thread-local or current run gets full.
Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);
// The internal of non-bulk Free().
- void FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
+ size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
// Allocates large objects.
void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
@@ -518,9 +550,9 @@ class RosAlloc {
~RosAlloc();
void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
LOCKS_EXCLUDED(lock_);
- void Free(Thread* self, void* ptr)
+ size_t Free(Thread* self, void* ptr)
LOCKS_EXCLUDED(bulk_free_lock_);
- void BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
+ size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(void* ptr);
@@ -557,6 +589,9 @@ class RosAlloc {
void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
// Dumps the page map for debugging.
std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ static Run* GetDedicatedFullRun() {
+ return dedicated_full_run_;
+ }
// Callbacks for InspectAll that will count the number of bytes
// allocated and objects allocated, respectively.
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index d99136a4ef..615ec980c2 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -65,6 +65,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
uint64_t start_time = NanoTime();
+ timings_.Reset();
pause_times_.clear();
duration_ns_ = 0;
clear_soft_references_ = clear_soft_references;
@@ -185,12 +186,12 @@ void GarbageCollector::SwapBitmaps() {
}
}
for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
- space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(disc_space);
- accounting::ObjectSet* live_set = space->GetLiveObjects();
- accounting::ObjectSet* mark_set = space->GetMarkObjects();
- heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
- heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
- down_cast<space::LargeObjectSpace*>(space)->SwapBitmaps();
+ space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
+ accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
+ heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
+ heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
+ space->SwapBitmaps();
}
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index f07e6f1c3a..007eb23d52 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -99,13 +99,11 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre
name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
gc_barrier_(new Barrier(0)),
- large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
is_concurrent_(is_concurrent) {
}
void MarkSweep::InitializePhase() {
- timings_.Reset();
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
mark_stack_ = heap_->mark_stack_.get();
DCHECK(mark_stack_ != nullptr);
@@ -293,14 +291,20 @@ void MarkSweep::FindDefaultSpaceBitmap() {
TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
+ // Prefer the main space bitmap over the non-moving space bitmap if possible.
if (bitmap != nullptr &&
space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
current_space_bitmap_ = bitmap;
- return;
+ // If this is not the non-moving space, exit the loop early since this bitmap is good enough.
+ if (space != heap_->GetNonMovingSpace()) {
+ break;
+ }
}
}
- GetHeap()->DumpSpaces();
- LOG(FATAL) << "Could not find a default mark bitmap";
+ if (current_space_bitmap_ == nullptr) {
+ heap_->DumpSpaces();
+ LOG(FATAL) << "Could not find a default mark bitmap";
+ }
}
void MarkSweep::ExpandMarkStack() {
@@ -322,7 +326,7 @@ void MarkSweep::ResizeMarkStack(size_t new_size) {
}
inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
- DCHECK(obj != NULL);
+ DCHECK(obj != nullptr);
if (MarkObjectParallel(obj)) {
MutexLock mu(Thread::Current(), mark_stack_lock_);
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
@@ -343,6 +347,31 @@ void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*
reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
}
+class MarkSweepMarkObjectSlowPath {
+ public:
+ explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
+ }
+
+ void operator()(const Object* obj) const ALWAYS_INLINE {
+ if (kProfileLargeObjects) {
+ // TODO: Differentiate between marking and testing somehow.
+ ++mark_sweep_->large_object_test_;
+ ++mark_sweep_->large_object_mark_;
+ }
+ space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
+ if (UNLIKELY(!IsAligned<kPageSize>(obj) ||
+ (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+ LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
+ LOG(ERROR) << "Attempting see if it's a bad root";
+ mark_sweep_->VerifyRoots();
+ LOG(FATAL) << "Can't mark invalid object";
+ }
+ }
+
+ private:
+ MarkSweep* const mark_sweep_;
+};
+
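
MarkSweepMarkObjectSlowPath packages the old MarkLargeObject() error handling as a callback: the bitmap handles in-range addresses itself and invokes the visitor only on a miss, which keeps the hot marking path branch-light. A compilable toy rendering of the idiom; the types are illustrative, not ART's:

  #include <cassert>
  #include <cstdint>

  // Toy bitmap with a caller-supplied slow path, mirroring the shape of the
  // mark_bitmap_->Set(obj, visitor) call below.
  struct ToyBitmap {
    uintptr_t begin, end;
    uint64_t bits;  // one word of coverage is enough for the sketch

    template <typename SlowPathVisitor>
    bool Set(uintptr_t addr, const SlowPathVisitor& visitor) {
      if (addr < begin || addr >= end) {
        visitor(addr);  // out of range: validate, log, or abort
        return false;   // treated as "was not previously marked"
      }
      const uint64_t mask = uint64_t{1} << ((addr - begin) / 8);
      const bool was_set = (bits & mask) != 0;
      bits |= mask;
      return was_set;  // true means already marked, so skip the mark stack
    }
  };

  int main() {
    ToyBitmap bm{0x1000, 0x1000 + 64 * 8, 0};
    bool missed = false;
    auto slow_path = [&missed](uintptr_t) { missed = true; };
    bool already = bm.Set(0x1008, slow_path);
    assert(!already && !missed);  // first mark: would push on the mark stack
    already = bm.Set(0x1008, slow_path);
    assert(already && !missed);   // second mark: skip it
    bm.Set(0x9000, slow_path);
    assert(missed);               // out of range: the slow path ran
    return 0;
  }
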
inline void MarkSweep::MarkObjectNonNull(Object* obj) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
@@ -353,27 +382,24 @@ inline void MarkSweep::MarkObjectNonNull(Object* obj) {
if (kCountMarkedObjects) {
++mark_immune_count_;
}
- DCHECK(IsMarked(obj));
- return;
- }
- // Try to take advantage of locality of references within a space, failing this find the space
- // the hard way.
- accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
- if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- object_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
+ DCHECK(mark_bitmap_->Test(obj));
+ } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
+ if (kCountMarkedObjects) {
+ ++mark_fastpath_count_;
+ }
+ if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
+ PushOnMarkStack(obj); // This object was not previously marked.
+ }
+ } else {
if (kCountMarkedObjects) {
++mark_slowpath_count_;
}
- if (UNLIKELY(object_bitmap == nullptr)) {
- MarkLargeObject(obj, true);
- return;
+ MarkSweepMarkObjectSlowPath visitor(this);
+ // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
+ // will check again.
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ PushOnMarkStack(obj); // Was not already marked, push.
}
- } else if (kCountMarkedObjects) {
- ++mark_fastpath_count_;
- }
- // This object was not previously marked.
- if (!object_bitmap->Set(obj)) {
- PushOnMarkStack(obj);
}
}
@@ -387,34 +413,6 @@ inline void MarkSweep::PushOnMarkStack(Object* obj) {
mark_stack_->PushBack(obj);
}
-// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
- // TODO: support >1 discontinuous space.
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
- if (kProfileLargeObjects) {
- ++large_object_test_;
- }
- if (UNLIKELY(!large_objects->Test(obj))) {
- if (!large_object_space->Contains(obj)) {
- LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
- LOG(ERROR) << "Attempting see if it's a bad root";
- VerifyRoots();
- LOG(FATAL) << "Can't mark bad root";
- }
- if (kProfileLargeObjects) {
- ++large_object_mark_;
- }
- if (set) {
- large_objects->Set(obj);
- } else {
- large_objects->Clear(obj);
- }
- return true;
- }
- return false;
-}
-
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
@@ -428,19 +426,11 @@ inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
// Try to take advantage of locality of references within a space, failing this find the space
// the hard way.
accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
- if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
- accounting::ContinuousSpaceBitmap* new_bitmap = mark_bitmap_->GetContinuousSpaceBitmap(obj);
- if (new_bitmap != NULL) {
- object_bitmap = new_bitmap;
- } else {
- // TODO: Remove the Thread::Current here?
- // TODO: Convert this to some kind of atomic marking?
- MutexLock mu(Thread::Current(), large_object_lock_);
- return MarkLargeObject(obj, true);
- }
+ if (LIKELY(object_bitmap->HasAddress(obj))) {
+ return !object_bitmap->AtomicTestAndSet(obj);
}
- // Return true if the object was not previously marked.
- return !object_bitmap->AtomicTestAndSet(obj);
+ MarkSweepMarkObjectSlowPath visitor(this);
+ return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
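
MarkObjectParallel() now funnels both paths through an atomic test-and-set, pushing the object only when the calling thread is the one that flipped the bit. A sketch of the primitive with std::atomic; ART uses its own bitmap atomics, so fetch_or here shows the shape, not the implementation:

  #include <atomic>
  #include <cstdint>

  // Claim one mark bit; return true if another thread had already claimed it.
  bool AtomicTestAndSet(std::atomic<uint32_t>* word, uint32_t bit_index) {
    const uint32_t mask = 1U << bit_index;
    return (word->fetch_or(mask, std::memory_order_relaxed) & mask) != 0;
  }
  // A marking thread pushes the object on its mark stack only when this
  // returns false, so each object is scanned exactly once.
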
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
@@ -719,7 +709,7 @@ class CardScanTask : public MarkStackTask<false> {
size_t MarkSweep::GetThreadCount(bool paused) const {
if (heap_->GetThreadPool() == nullptr || !heap_->CareAboutPauseTimes()) {
- return 0;
+ return 1;
}
if (paused) {
return heap_->GetParallelGCThreadCount() + 1;
@@ -733,7 +723,7 @@ void MarkSweep::ScanGrayObjects(bool paused, byte minimum_age) {
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
size_t thread_count = GetThreadCount(paused);
// The parallel version with only one thread is faster for card scanning, TODO: fix.
- if (kParallelCardScan && thread_count > 0) {
+ if (kParallelCardScan && thread_count > 1) {
Thread* self = Thread::Current();
// Can't have a different split for each space since multiple spaces can have their cards being
// scanned at the same time.
@@ -944,14 +934,11 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
- if (!large_object_space->GetLiveObjects()->Test(obj)) {
- if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
- heap_->allocation_stack_->End()) {
- // Object not found!
- heap_->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
- }
+ if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
+ heap_->allocation_stack_->End()) {
+ // Object not found!
+ heap_->DumpSpaces();
+ LOG(FATAL) << "Found dead object " << obj;
}
}
}
@@ -1086,8 +1073,8 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
}
// Handle the large object space.
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
- accounting::ObjectSet* large_mark_objects = large_object_space->GetMarkObjects();
+ accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+ accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
if (swap_bitmaps) {
std::swap(large_live_objects, large_mark_objects);
}
@@ -1131,7 +1118,6 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
timings_.EndSplit();
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit("Sweep", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -1149,13 +1135,13 @@ void MarkSweep::Sweep(bool swap_bitmaps) {
}
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
- TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
size_t freed_objects = 0;
size_t freed_bytes = 0;
- GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
freed_large_objects_.FetchAndAdd(freed_objects);
freed_large_object_bytes_.FetchAndAdd(freed_bytes);
- GetHeap()->RecordFree(freed_objects, freed_bytes);
+ heap_->RecordFree(freed_objects, freed_bytes);
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 6dbb2709d2..41a7764ca8 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -227,11 +227,6 @@ class MarkSweep : public GarbageCollector {
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj);
- // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
- // mark, otherwise we unmark.
- bool MarkLargeObject(const mirror::Object* obj, bool set)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) LOCKS_EXCLUDED(large_object_lock_);
-
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -315,7 +310,6 @@ class MarkSweep : public GarbageCollector {
size_t live_stack_freeze_size_;
UniquePtr<Barrier> gc_barrier_;
- Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
const bool is_concurrent_;
@@ -326,8 +320,6 @@ class MarkSweep : public GarbageCollector {
friend class CheckBitmapVisitor;
friend class CheckReferenceVisitor;
friend class art::gc::Heap;
- friend class InternTableEntryIsUnmarked;
- friend class MarkIfReachesAllocspaceVisitor;
friend class MarkObjectVisitor;
friend class ModUnionCheckReferences;
friend class ModUnionClearCardVisitor;
@@ -336,10 +328,9 @@ class MarkSweep : public GarbageCollector {
friend class ModUnionTableBitmap;
friend class ModUnionTableReferenceCache;
friend class ModUnionScanImageRootVisitor;
- friend class ScanBitmapVisitor;
- friend class ScanImageRootVisitor;
template<bool kUseFinger> friend class MarkStackTask;
friend class FifoMarkStackChunk;
+ friend class MarkSweepMarkObjectSlowPath;
DISALLOW_COPY_AND_ASSIGN(MarkSweep);
};
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 8a9611f499..55140f613a 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -26,9 +26,24 @@ namespace art {
namespace gc {
namespace collector {
+class BitmapSetSlowPathVisitor {
+ public:
+ explicit BitmapSetSlowPathVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
+ }
+
+ void operator()(const mirror::Object* obj) const {
+ CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
+ // Marking a large object; make sure it's aligned as a sanity check.
+ CHECK(IsAligned<kPageSize>(obj));
+ }
+
+ private:
+ SemiSpace* const semi_space_;
+};
+
inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
DCHECK(from_space_->HasAddress(obj));
- LockWord lock_word = obj->GetLockWord();
+ LockWord lock_word = obj->GetLockWord(false);
if (lock_word.GetState() != LockWord::kForwardingAddress) {
return nullptr;
}
@@ -53,37 +68,29 @@ inline void SemiSpace::MarkObject(
if (from_space_->HasAddress(obj)) {
mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
// If the object has already been moved, return the new forward address.
- if (forward_address == nullptr) {
+ if (UNLIKELY(forward_address == nullptr)) {
forward_address = MarkNonForwardedObject(obj);
DCHECK(forward_address != nullptr);
// Make sure to only update the forwarding address AFTER you copy the object so that the
// monitor word doesn't get stomped over.
- obj->SetLockWord(LockWord::FromForwardingAddress(
- reinterpret_cast<size_t>(forward_address)));
+ obj->SetLockWord(
+ LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
// Push the object onto the mark stack for later processing.
MarkStackPush(forward_address);
}
obj_ptr->Assign(forward_address);
} else {
- accounting::ContinuousSpaceBitmap* object_bitmap =
- heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
- if (LIKELY(object_bitmap != nullptr)) {
- if (generational_) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(whole_heap_collection_);
- }
- if (!object_bitmap->Set(obj)) {
- // This object was not previously marked.
- MarkStackPush(obj);
- }
- } else {
- CHECK(!to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
- if (MarkLargeObject(obj)) {
- MarkStackPush(obj);
- }
+ BitmapSetSlowPathVisitor visitor(this);
+ if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
+ // If this is a bump pointer space only collection, we should not
+ // reach here, as we don't/won't mark the objects in the
+ // non-moving space (except for the promoted objects). Note that
+ // the non-moving space is added to the immune space.
+ DCHECK(!generational_ || whole_heap_collection_);
+ }
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
}
}
}
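The MarkObject rewrite above preserves the moving-collector rule that a forwarding address is published through the object's lock word only after the copy completes. A hedged standalone sketch of that protocol, using a made-up tag value and a simplified header rather than ART's real LockWord encoding:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Object {
      uintptr_t lock_word;  // Either a normal lock word or a tagged forwarding address.
    };

    constexpr uintptr_t kForwardingTag = 0x3;  // Assumed tag; ART encodes state differently.

    bool IsForwarded(const Object* obj) {
      return (obj->lock_word & kForwardingTag) == kForwardingTag;
    }

    Object* GetForwardingAddress(const Object* obj) {
      return reinterpret_cast<Object*>(obj->lock_word & ~kForwardingTag);
    }

    Object* Forward(Object* from, Object* to, size_t size) {
      // Copy first, publish second: setting the forwarding address before the copy
      // would let the copy stomp over the to-space object's own header.
      std::memcpy(to, from, size);
      from->lock_word = reinterpret_cast<uintptr_t>(to) | kForwardingTag;
      return to;
    }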
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c0e172e815..3b9e853bc7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -110,13 +110,14 @@ SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_pref
}
void SemiSpace::InitializePhase() {
- timings_.Reset();
TimingLogger::ScopedSplit split("InitializePhase", &timings_);
mark_stack_ = heap_->mark_stack_.get();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
is_large_object_space_immune_ = false;
saved_bytes_ = 0;
+ bytes_moved_ = 0;
+ objects_moved_ = 0;
self_ = Thread::Current();
// Do any pre GC verification.
timings_.NewSplit("PreGcVerification");
@@ -124,6 +125,11 @@ void SemiSpace::InitializePhase() {
CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
// Set the initial bitmap.
to_space_live_bitmap_ = to_space_->GetLiveBitmap();
+ {
+ // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
+ ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ mark_bitmap_ = heap_->GetMarkBitmap();
+ }
}
void SemiSpace::ProcessReferences(Thread* self) {
@@ -312,8 +318,8 @@ void SemiSpace::MarkReachableObjects() {
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
- timings_.EndSplit();
+ timings_.NewSplit("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
// If the space is immune and has no mod union table (the
// non-moving space when the bump pointer space only collection is
@@ -327,7 +333,8 @@ void SemiSpace::MarkReachableObjects() {
accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
if (kUseRememberedSet) {
DCHECK(rem_set != nullptr);
- rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
+ rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
+ from_space_, this);
if (kIsDebugBuild) {
// Verify that there are no from-space references that
// remain in the space, that is, the remembered set (and the
@@ -351,6 +358,7 @@ void SemiSpace::MarkReachableObjects() {
}
if (is_large_object_space_immune_) {
+ timings_.NewSplit("VisitLargeObjects");
DCHECK(generational_ && !whole_heap_collection_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
@@ -362,13 +370,13 @@ void SemiSpace::MarkReachableObjects() {
// classes (primitive array classes) that could move though they
// don't contain any other references.
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
+ accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
- for (const Object* obj : large_live_objects->GetObjects()) {
- visitor(const_cast<Object*>(obj));
- }
+ large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
+ reinterpret_cast<uintptr_t>(large_object_space->End()),
+ visitor);
}
-
+ timings_.EndSplit();
// Recursively process the mark stack.
ProcessMarkStack();
}
@@ -382,9 +390,9 @@ void SemiSpace::ReclaimPhase() {
}
// Record freed memory.
uint64_t from_bytes = from_space_->GetBytesAllocated();
- uint64_t to_bytes = to_space_->GetBytesAllocated();
+ uint64_t to_bytes = bytes_moved_;
uint64_t from_objects = from_space_->GetObjectsAllocated();
- uint64_t to_objects = to_space_->GetObjectsAllocated();
+ uint64_t to_objects = objects_moved_;
CHECK_LE(to_objects, from_objects);
int64_t freed_bytes = from_bytes - to_bytes;
int64_t freed_objects = from_objects - to_objects;
@@ -450,19 +458,6 @@ inline void SemiSpace::MarkStackPush(Object* obj) {
mark_stack_->PushBack(obj);
}
-// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool SemiSpace::MarkLargeObject(const Object* obj) {
- // TODO: support >1 discontinuous space.
- space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- DCHECK(large_object_space->Contains(obj));
- accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
- if (UNLIKELY(!large_objects->Test(obj))) {
- large_objects->Set(obj);
- return true;
- }
- return false;
-}
-
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
// We will dirty the current page and somewhere in the middle of the next page. This means
@@ -521,15 +516,13 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
- size_t bytes_promoted;
space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
- if (forward_address == nullptr) {
+ forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_allocated, nullptr);
+ if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
} else {
- GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
- bytes_promoted_ += bytes_promoted;
+ bytes_promoted_ += bytes_allocated;
// Dirty the card at the destination as it may contain
// references (including the class pointer) to the bump pointer
// space.
@@ -573,6 +566,8 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// If it's allocated after the last GC (younger), copy it to the to-space.
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
}
+ ++objects_moved_;
+ bytes_moved_ += bytes_allocated;
// Copy over the object and add it to the mark stack since we still need to update its
// references.
saved_bytes_ +=
@@ -608,6 +603,11 @@ void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*
reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}
+void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+ void* arg) {
+ reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
+}
+
void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
RootType /*root_type*/) {
auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
@@ -619,10 +619,9 @@ void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*
// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
- timings_.StartSplit("MarkRoots");
+ timings_.NewSplit("MarkRoots");
// TODO: Visit up image roots as well?
Runtime::Current()->VisitRoots(MarkRootCallback, this);
- timings_.EndSplit();
}
mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
@@ -641,7 +640,7 @@ bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
void SemiSpace::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
- TimingLogger::ScopedSplit("Sweep", &timings_);
+ TimingLogger::ScopedSplit split("Sweep", &timings_);
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
if (space->IsContinuousMemMapAllocSpace()) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
@@ -665,13 +664,13 @@ void SemiSpace::Sweep(bool swap_bitmaps) {
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
DCHECK(!is_large_object_space_immune_);
- TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+ TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
size_t freed_objects = 0;
size_t freed_bytes = 0;
- GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
freed_large_objects_.FetchAndAdd(freed_objects);
freed_large_object_bytes_.FetchAndAdd(freed_bytes);
- GetHeap()->RecordFree(freed_objects, freed_bytes);
+ heap_->RecordFree(freed_objects, freed_bytes);
}
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4169ca92c6..51b08699b8 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -138,6 +138,10 @@ class SemiSpace : public GarbageCollector {
static void ProcessMarkStackCallback(void* arg)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+ void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -201,6 +205,8 @@ class SemiSpace : public GarbageCollector {
// Cached live bitmap as an optimization.
accounting::ContinuousSpaceBitmap* to_space_live_bitmap_;
space::ContinuousMemMapAllocSpace* from_space_;
+ // Cached mark bitmap as an optimization.
+ accounting::HeapBitmap* mark_bitmap_;
Thread* self_;
@@ -231,6 +237,11 @@ class SemiSpace : public GarbageCollector {
// whole_heap_collection_ once per interval.
int whole_heap_collection_interval_counter_;
+ // How many objects and bytes we moved; used so that we don't need to query the size of the
+ // to_space_ when calculating how many objects and bytes we freed.
+ size_t bytes_moved_;
+ size_t objects_moved_;
+
// How many bytes we avoided dirtying.
size_t saved_bytes_;
@@ -243,6 +254,7 @@ class SemiSpace : public GarbageCollector {
static constexpr int kDefaultWholeHeapCollectionInterval = 5;
private:
+ friend class BitmapSetSlowPathVisitor;
DISALLOW_COPY_AND_ASSIGN(SemiSpace);
};
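The DelayReferenceReferentCallback declared above follows the C-style trampoline pattern used throughout these collectors: a static function takes the collector as an opaque void* and forwards to the member function. A generic sketch with placeholder argument types:

    class Collector {
     public:
      void DelayReferenceReferent(void* klass, void* ref) {
        // Real work would queue the reference for later processing.
      }

      // Static trampoline: callers hold only a function pointer plus a void*
      // context, so the collector is recovered from the opaque argument.
      static void DelayReferenceReferentCallback(void* klass, void* ref, void* arg) {
        reinterpret_cast<Collector*>(arg)->DelayReferenceReferent(klass, ref);
      }
    };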
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index de7d0b85b0..a9799b9679 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -78,9 +78,9 @@ static constexpr size_t kGcAlotInterval = KB;
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
// Sticky GC throughput adjustment, divided by 4. Increasing this causes sticky GC to occur more
-// relative to partial/full GC. This is desirable since sticky GCs interfere less with mutator
+// relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
// threads (lower pauses, use less memory bandwidth).
-static constexpr double kStickyGcThroughputAdjustment = 1.25;
+static constexpr double kStickyGcThroughputAdjustment = 1.0;
// Whether or not we use the free list large object space.
static constexpr bool kUseFreeListSpaceForLOS = false;
// Whether or not we compact the zygote in PreZygoteFork.
@@ -355,15 +355,15 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
}
if (kUseRosAlloc) {
main_space_ = space::RosAllocSpace::CreateFromMemMap(mem_map, "main rosalloc space",
- kDefaultStartingSize, initial_size,
- growth_limit, capacity, low_memory_mode_,
- can_move_objects);
+ kDefaultStartingSize, initial_size,
+ growth_limit, capacity, low_memory_mode_,
+ can_move_objects);
CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
} else {
main_space_ = space::DlMallocSpace::CreateFromMemMap(mem_map, "main dlmalloc space",
- kDefaultStartingSize, initial_size,
- growth_limit, capacity,
- can_move_objects);
+ kDefaultStartingSize, initial_size,
+ growth_limit, capacity,
+ can_move_objects);
CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
}
main_space_->SetFootprintLimit(main_space_->Capacity());
@@ -569,7 +569,7 @@ void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
space2 = space1;
}
MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
- large_object_space_->GetLiveObjects(), stack);
+ large_object_space_->GetLiveBitmap(), stack);
}
void Heap::DeleteThreadPool() {
@@ -595,6 +595,11 @@ void Heap::AddSpace(space::Space* space, bool set_as_default) {
if (continuous_space->IsDlMallocSpace()) {
dlmalloc_space_ = continuous_space->AsDlMallocSpace();
} else if (continuous_space->IsRosAllocSpace()) {
+ // If we already have a rosalloc_space_, revoke its thread-local buffers now so that we
+ // don't end up with non-full runs from the previous space during the revoke that follows.
+ if (rosalloc_space_ != nullptr) {
+ rosalloc_space_->RevokeAllThreadLocalBuffers();
+ }
rosalloc_space_ = continuous_space->AsRosAllocSpace();
}
}
@@ -606,10 +611,8 @@ void Heap::AddSpace(space::Space* space, bool set_as_default) {
} else {
DCHECK(space->IsDiscontinuousSpace());
space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
- DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
- live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
- DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
- mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+ live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
+ mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
discontinuous_spaces_.push_back(discontinuous_space);
}
if (space->IsAllocSpace()) {
@@ -617,7 +620,7 @@ void Heap::AddSpace(space::Space* space, bool set_as_default) {
}
}
-void Heap::RemoveSpace(space::Space* space) {
+void Heap::RemoveSpace(space::Space* space, bool unset_as_default) {
DCHECK(space != nullptr);
WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
if (space->IsContinuousSpace()) {
@@ -634,25 +637,25 @@ void Heap::RemoveSpace(space::Space* space) {
auto it = std::find(continuous_spaces_.begin(), continuous_spaces_.end(), continuous_space);
DCHECK(it != continuous_spaces_.end());
continuous_spaces_.erase(it);
- if (continuous_space == dlmalloc_space_) {
- dlmalloc_space_ = nullptr;
- } else if (continuous_space == rosalloc_space_) {
- rosalloc_space_ = nullptr;
- }
- if (continuous_space == main_space_) {
- main_space_ = nullptr;
- } else if (continuous_space == bump_pointer_space_) {
- bump_pointer_space_ = nullptr;
- } else if (continuous_space == temp_space_) {
- temp_space_ = nullptr;
+ if (unset_as_default) {
+ if (continuous_space == dlmalloc_space_) {
+ dlmalloc_space_ = nullptr;
+ } else if (continuous_space == rosalloc_space_) {
+ rosalloc_space_ = nullptr;
+ }
+ if (continuous_space == main_space_) {
+ main_space_ = nullptr;
+ } else if (continuous_space == bump_pointer_space_) {
+ bump_pointer_space_ = nullptr;
+ } else if (continuous_space == temp_space_) {
+ temp_space_ = nullptr;
+ }
}
} else {
DCHECK(space->IsDiscontinuousSpace());
space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
- DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
- live_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
- DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
- mark_bitmap_->RemoveDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+ live_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
+ mark_bitmap_->RemoveLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
auto it = std::find(discontinuous_spaces_.begin(), discontinuous_spaces_.end(),
discontinuous_space);
DCHECK(it != discontinuous_spaces_.end());
@@ -729,6 +732,7 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
+ BaseMutex::DumpAll(os);
}
Heap::~Heap() {
@@ -1050,7 +1054,7 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
return temp_space_->Contains(obj);
}
space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
- space::DiscontinuousSpace* d_space = NULL;
+ space::DiscontinuousSpace* d_space = nullptr;
if (c_space != nullptr) {
if (c_space->GetLiveBitmap()->Test(obj)) {
return true;
@@ -1058,7 +1062,7 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
if (d_space != nullptr) {
- if (d_space->GetLiveObjects()->Test(obj)) {
+ if (d_space->GetLiveBitmap()->Test(obj)) {
return true;
}
}
@@ -1096,7 +1100,7 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
}
} else {
d_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (d_space != nullptr && d_space->GetLiveObjects()->Test(obj)) {
+ if (d_space != nullptr && d_space->GetLiveBitmap()->Test(obj)) {
return true;
}
}
@@ -1436,6 +1440,11 @@ void Heap::TransitionCollector(CollectorType collector_type) {
if (collector_type == collector_type_) {
return;
}
+ if (Runtime::Current()->IsShuttingDown(self)) {
+ // Don't allow heap transitions to happen if the runtime is shutting down since these can
+ // cause objects to get finalized.
+ return;
+ }
// GC can be disabled if someone has a used GetPrimitiveArrayCritical but not yet released.
if (!copying_transition || disable_moving_gc_count_ == 0) {
// TODO: Not hard code in semi-space collector?
@@ -1456,6 +1465,10 @@ void Heap::TransitionCollector(CollectorType collector_type) {
// pointer space last transition it will be protected.
bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Compact(bump_pointer_space_, main_space_);
+ // Remove the main space so that we don't try to trim it; trimming doesn't work for debug
+ // builds since RosAlloc attempts to read the magic number from a protected page.
+ // TODO: Clean this up by getting rid of the remove_as_default parameter.
+ RemoveSpace(main_space_, false);
}
break;
}
@@ -1464,6 +1477,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
case kCollectorTypeCMS: {
if (IsMovingGc(collector_type_)) {
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
+ AddSpace(main_space_, false);
main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Compact(main_space_, bump_pointer_space_);
}
@@ -1761,7 +1775,7 @@ void Heap::FlushAllocStack() {
void Heap::MarkAllocStack(accounting::ContinuousSpaceBitmap* bitmap1,
accounting::ContinuousSpaceBitmap* bitmap2,
- accounting::ObjectSet* large_objects,
+ accounting::LargeObjectBitmap* large_objects,
accounting::ObjectStack* stack) {
DCHECK(bitmap1 != nullptr);
DCHECK(bitmap2 != nullptr);
@@ -1896,36 +1910,37 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
EnqueueClearedReferences();
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector);
- if (CareAboutPauseTimes()) {
- const size_t duration = collector->GetDurationNs();
- std::vector<uint64_t> pauses = collector->GetPauseTimes();
+ const size_t duration = collector->GetDurationNs();
+ const std::vector<uint64_t>& pause_times = collector->GetPauseTimes();
+ // Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
+ // (mutator time blocked >= long_pause_log_threshold_).
+ bool log_gc = gc_cause == kGcCauseExplicit;
+ if (!log_gc && CareAboutPauseTimes()) {
// GC for alloc pauses the allocating thread, so consider it as a pause.
- bool was_slow = duration > long_gc_log_threshold_ ||
+ log_gc = duration > long_gc_log_threshold_ ||
(gc_cause == kGcCauseForAlloc && duration > long_pause_log_threshold_);
- if (!was_slow) {
- for (uint64_t pause : pauses) {
- was_slow = was_slow || pause > long_pause_log_threshold_;
- }
- }
- if (was_slow) {
- const size_t percent_free = GetPercentFree();
- const size_t current_heap_size = GetBytesAllocated();
- const size_t total_memory = GetTotalMemory();
- std::ostringstream pause_string;
- for (size_t i = 0; i < pauses.size(); ++i) {
- pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
- << ((i != pauses.size() - 1) ? ", " : "");
- }
- LOG(INFO) << gc_cause << " " << collector->GetName()
- << " GC freed " << collector->GetFreedObjects() << "("
- << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
- << collector->GetFreedLargeObjects() << "("
- << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
- << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
- << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
- << " total " << PrettyDuration((duration / 1000) * 1000);
- VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
- }
+ for (uint64_t pause : pause_times) {
+ log_gc = log_gc || pause >= long_pause_log_threshold_;
+ }
+ }
+ if (log_gc) {
+ const size_t percent_free = GetPercentFree();
+ const size_t current_heap_size = GetBytesAllocated();
+ const size_t total_memory = GetTotalMemory();
+ std::ostringstream pause_string;
+ for (size_t i = 0; i < pause_times.size(); ++i) {
+ pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
+ << ((i != pause_times.size() - 1) ? ", " : "");
+ }
+ LOG(INFO) << gc_cause << " " << collector->GetName()
+ << " GC freed " << collector->GetFreedObjects() << "("
+ << PrettySize(collector->GetFreedBytes()) << ") AllocSpace objects, "
+ << collector->GetFreedLargeObjects() << "("
+ << PrettySize(collector->GetFreedLargeObjectBytes()) << ") LOS objects, "
+ << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
+ << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
+ << " total " << PrettyDuration((duration / 1000) * 1000);
+ VLOG(heap) << ConstDumpable<TimingLogger>(collector->GetTimings());
}
FinishGC(self, gc_type);
ATRACE_END();
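The restructured logging above folds the old was_slow logic into a single log_gc flag. Isolated as a standalone predicate (hypothetical parameter names), the decision reads:

    #include <cstdint>
    #include <vector>

    // Log explicit GCs unconditionally; log other GCs only when the process cares
    // about pause times and the duration or any individual pause crossed a threshold.
    bool ShouldLogGc(bool explicit_gc, bool care_about_pauses, bool for_alloc,
                     uint64_t duration_ns, const std::vector<uint64_t>& pauses_ns,
                     uint64_t long_gc_ns, uint64_t long_pause_ns) {
      bool log_gc = explicit_gc;
      if (!log_gc && care_about_pauses) {
        log_gc = duration_ns > long_gc_ns ||
                 (for_alloc && duration_ns > long_pause_ns);
        for (uint64_t pause : pauses_ns) {
          log_gc = log_gc || pause >= long_pause_ns;
        }
      }
      return log_gc;
    }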
@@ -2888,7 +2903,7 @@ void Heap::ClearMarkedObjects() {
}
// Clear the marked objects in the discontinuous space object sets.
for (const auto& space : GetDiscontinuousSpaces()) {
- space->GetMarkObjects()->Clear();
+ space->GetMarkBitmap()->Clear();
}
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 35724e3b5f..c37bb05b2f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -55,7 +55,6 @@ namespace gc {
namespace accounting {
class HeapBitmap;
class ModUnionTable;
- class ObjectSet;
class RememberedSet;
} // namespace accounting
@@ -290,6 +289,12 @@ class Heap {
void RegisterGCAllocation(size_t bytes);
void RegisterGCDeAllocation(size_t bytes);
+ // Public due to usage by tests.
+ void AddSpace(space::Space* space, bool set_as_default = true)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+ void RemoveSpace(space::Space* space, bool unset_as_default = true)
+ LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+
// Set target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.setTargetHeapUtilization.
void SetTargetHeapUtilization(float target);
@@ -477,7 +482,8 @@ class Heap {
// TODO: Refactor?
void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
- accounting::ObjectSet* large_objects, accounting::ObjectStack* stack)
+ accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
+ accounting::ObjectStack* stack)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Mark the specified allocation stack as live.
@@ -684,10 +690,6 @@ class Heap {
size_t GetPercentFree();
- void AddSpace(space::Space* space, bool set_as_default = true)
- LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
- void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-
static void VerificationCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 0b353c7f17..ce11b3d72c 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,12 +16,14 @@
#include "large_object_space.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
#include "os.h"
+#include "space-inl.h"
#include "thread-inl.h"
#include "utils.h"
@@ -74,26 +76,27 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
};
void LargeObjectSpace::SwapBitmaps() {
- live_objects_.swap(mark_objects_);
+ live_bitmap_.swap(mark_bitmap_);
// Swap names to get more descriptive diagnostics.
- std::string temp_name = live_objects_->GetName();
- live_objects_->SetName(mark_objects_->GetName());
- mark_objects_->SetName(temp_name);
+ std::string temp_name = live_bitmap_->GetName();
+ live_bitmap_->SetName(mark_bitmap_->GetName());
+ mark_bitmap_->SetName(temp_name);
}
-LargeObjectSpace::LargeObjectSpace(const std::string& name)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
: DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
- total_objects_allocated_(0) {
+ total_objects_allocated_(0), begin_(begin), end_(end) {
}
void LargeObjectSpace::CopyLiveToMarked() {
- mark_objects_->CopyFrom(*live_objects_.get());
+ mark_bitmap_->CopyFrom(live_bitmap_.get());
}
+// TODO: Use something cleaner than 0xFFFFFFFF.
LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
- : LargeObjectSpace(name),
+ : LargeObjectSpace(name, reinterpret_cast<byte*>(0xFFFFFFFF), nullptr),
lock_("large object map space lock", kAllocSpaceLock) {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
@@ -118,7 +121,9 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
size_t allocation_size = mem_map->Size();
- DCHECK(bytes_allocated != NULL);
+ DCHECK(bytes_allocated != nullptr);
+ begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
+ end_ = std::max(end_, reinterpret_cast<byte*>(obj) + allocation_size);
*bytes_allocated = allocation_size;
if (usable_size != nullptr) {
*usable_size = allocation_size;
@@ -191,9 +196,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_be
}
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
- : LargeObjectSpace(name),
- begin_(begin),
- end_(end),
+ : LargeObjectSpace(name, begin, end),
mem_map_(mem_map),
lock_("free list space lock", kAllocSpaceLock) {
free_end_ = end - begin;
@@ -389,27 +392,41 @@ void FreeListSpace::Dump(std::ostream& os) const {
}
}
-void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
- // Sweep large objects
- accounting::ObjectSet* large_live_objects = GetLiveObjects();
- accounting::ObjectSet* large_mark_objects = GetMarkObjects();
- if (swap_bitmaps) {
- std::swap(large_live_objects, large_mark_objects);
- }
- DCHECK(freed_objects != nullptr);
- DCHECK(freed_bytes != nullptr);
- // O(n*log(n)) but hopefully there are not too many large objects.
- size_t objects = 0;
- size_t bytes = 0;
- Thread* self = Thread::Current();
- for (const mirror::Object* obj : large_live_objects->GetObjects()) {
- if (!large_mark_objects->Test(obj)) {
- bytes += Free(self, const_cast<mirror::Object*>(obj));
- ++objects;
+void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
+ Thread* self = context->self;
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+ // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
+ // the bitmaps as an optimization.
+ if (!context->swap_bitmaps) {
+ accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ bitmap->Clear(ptrs[i]);
}
}
- *freed_objects += objects;
- *freed_bytes += bytes;
+ context->freed_objects += num_ptrs;
+ context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+}
+
+void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* out_freed_objects,
+ size_t* out_freed_bytes) {
+ if (Begin() >= End()) {
+ return;
+ }
+ accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
+ accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
+ if (swap_bitmaps) {
+ std::swap(live_bitmap, mark_bitmap);
+ }
+ DCHECK(out_freed_objects != nullptr);
+ DCHECK(out_freed_bytes != nullptr);
+ SweepCallbackContext scc(swap_bitmaps, this);
+ accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
+ reinterpret_cast<uintptr_t>(Begin()),
+ reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
+ *out_freed_objects += scc.freed_objects;
+ *out_freed_bytes += scc.freed_bytes;
}
} // namespace space
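Sweep now frees by bitmap difference: anything set in the live bitmap but clear in the mark bitmap is dead and is handed to SweepCallback. A toy version of that walk over flat bit arrays; the real SweepWalk presumably works on word-sized chunks and batches pointers for the callback, but the predicate is the same:

    #include <cstddef>
    #include <vector>

    template <typename Callback>
    void SweepWalk(const std::vector<bool>& live, const std::vector<bool>& mark,
                   Callback&& callback) {
      for (size_t i = 0; i < live.size(); ++i) {
        if (live[i] && !mark[i]) {
          callback(i);  // Slot was live at the last GC but is unmarked now: garbage.
        }
      }
    }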
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 18e518f4cd..0daefba0d9 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -49,11 +49,11 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return num_objects_allocated_;
}
- uint64_t GetTotalBytesAllocated() {
+ uint64_t GetTotalBytesAllocated() const {
return total_bytes_allocated_;
}
- uint64_t GetTotalObjectsAllocated() {
+ uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
@@ -73,20 +73,36 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return this;
}
- void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+ void Sweep(bool swap_bitmaps, size_t* out_freed_objects, size_t* out_freed_bytes);
virtual bool CanMoveObjects() const OVERRIDE {
return false;
}
+ // Current address at which the space begins, which may vary as the space is filled.
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Current address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
protected:
- explicit LargeObjectSpace(const std::string& name);
+ explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
+
+ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
// Approximate number of bytes which have been allocated into the space.
- size_t num_bytes_allocated_;
- size_t num_objects_allocated_;
- size_t total_bytes_allocated_;
- size_t total_objects_allocated_;
+ uint64_t num_bytes_allocated_;
+ uint64_t num_objects_allocated_;
+ uint64_t total_bytes_allocated_;
+ uint64_t total_objects_allocated_;
+
+ // Begin and end, may change as more large objects are allocated.
+ byte* begin_;
+ byte* end_;
friend class Space;
@@ -242,9 +258,6 @@ class FreeListSpace FINAL : public LargeObjectSpace {
typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
accounting::GcAllocator<AllocationHeader*> > FreeBlocks;
- byte* const begin_;
- byte* const end_;
-
// There is no footer for any allocations at the end of the space, so we keep track of how much
// free space there is at the end manually.
UniquePtr<MemMap> mem_map_;
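Because LargeObjectMapSpace has no contiguous backing, its begin_/end_ now start inverted (0xFFFFFFFF and nullptr) and widen on every allocation, per the std::min/std::max updates in Alloc. A minimal sketch of that bookkeeping:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    struct AddressRange {
      // Start inverted so the first Record() establishes both bounds.
      uint8_t* begin = reinterpret_cast<uint8_t*>(UINTPTR_MAX);
      uint8_t* end = nullptr;

      void Record(uint8_t* obj, size_t bytes) {
        begin = std::min(begin, obj);
        end = std::max(end, obj + bytes);
      }

      bool Empty() const { return begin >= end; }  // Matches Sweep's early return.
    };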
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index eaf14fb7d0..7493c19a94 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -226,7 +226,6 @@ void MallocSpace::Dump(std::ostream& os) const {
void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
- DCHECK(context->space->IsMallocSpace());
space::MallocSpace* space = context->space->AsMallocSpace();
Thread* self = context->self;
Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 5a7d941b4a..f5c0e9495f 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -32,7 +32,11 @@ namespace art {
namespace gc {
namespace space {
-static constexpr bool kPrefetchDuringRosAllocFreeList = true;
+static constexpr bool kPrefetchDuringRosAllocFreeList = false;
+static constexpr size_t kPrefetchLookAhead = 8;
+// Use this only for verification; it is not safe to use otherwise since the class of the
+// object may have been freed.
+static constexpr bool kVerifyFreedBytes = false;
// TODO: Fix
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
@@ -172,27 +176,24 @@ size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
CHECK(ptr != NULL);
CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
}
- const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
if (kRecentFreeCount > 0) {
MutexLock mu(self, lock_);
RegisterRecentFree(ptr);
}
- rosalloc_->Free(self, ptr);
- return bytes_freed;
+ return rosalloc_->Free(self, ptr);
}
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- DCHECK(ptrs != NULL);
+ DCHECK(ptrs != nullptr);
- // Don't need the lock to calculate the size of the freed pointers.
- size_t bytes_freed = 0;
+ size_t verify_bytes = 0;
for (size_t i = 0; i < num_ptrs; i++) {
- mirror::Object* ptr = ptrs[i];
- const size_t look_ahead = 8;
- if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
- __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
+ if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
+ __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
+ }
+ if (kVerifyFreedBytes) {
+ verify_bytes += AllocationSizeNonvirtual(ptrs[i], nullptr);
}
- bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
}
if (kRecentFreeCount > 0) {
@@ -216,7 +217,10 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
CHECK_EQ(num_broken_ptrs, 0u);
}
- rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
+ const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
+ if (kVerifyFreedBytes) {
+ CHECK_EQ(verify_bytes, bytes_freed);
+ }
return bytes_freed;
}
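The kVerifyFreedBytes flag replaces the old unconditional per-pointer size walk with an opt-in cross-check: sum the expected sizes before the bulk free, then compare against what the allocator reports. A generic sketch of the pattern, with std::function placeholders standing in for AllocationSizeNonvirtual and RosAlloc::BulkFree:

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <vector>

    constexpr bool kVerifyFreedBytes = false;  // Debug-only cross-check.

    size_t FreeListChecked(std::vector<void*>& ptrs,
                           const std::function<size_t(void*)>& size_of,
                           const std::function<size_t(std::vector<void*>&)>& bulk_free) {
      size_t verify_bytes = 0;
      if (kVerifyFreedBytes) {
        // Only safe before the free: the object's class may itself be reclaimed.
        for (void* p : ptrs) {
          verify_bytes += size_of(p);
        }
      }
      const size_t bytes_freed = bulk_free(ptrs);
      if (kVerifyFreedBytes) {
        assert(verify_bytes == bytes_freed);
      }
      return bytes_freed;
    }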
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 01e8b044ba..4e2841691e 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -70,9 +70,15 @@ ContinuousMemMapAllocSpace* Space::AsContinuousMemMapAllocSpace() {
DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
GcRetentionPolicy gc_retention_policy) :
- Space(name, gc_retention_policy),
- live_objects_(new accounting::ObjectSet("large live objects")),
- mark_objects_(new accounting::ObjectSet("large marked objects")) {
+ Space(name, gc_retention_policy) {
+ // TODO: Fix this if we ever support objects not in the low 32 bits.
+ const size_t capacity = static_cast<size_t>(std::numeric_limits<uint32_t>::max());
+ live_bitmap_.reset(accounting::LargeObjectBitmap::Create("large live objects", nullptr,
+ capacity));
+ CHECK(live_bitmap_.get() != nullptr);
+ mark_bitmap_.reset(accounting::LargeObjectBitmap::Create("large marked objects", nullptr,
+ capacity));
+ CHECK(mark_bitmap_.get() != nullptr);
}
void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
@@ -84,13 +90,7 @@ void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects,
if (live_bitmap == mark_bitmap) {
return;
}
- SweepCallbackContext scc;
- scc.swap_bitmaps = swap_bitmaps;
- scc.heap = Runtime::Current()->GetHeap();
- scc.self = Thread::Current();
- scc.space = this;
- scc.freed_objects = 0;
- scc.freed_bytes = 0;
+ SweepCallbackContext scc(swap_bitmaps, this);
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
@@ -136,6 +136,11 @@ void ContinuousMemMapAllocSpace::SwapBitmaps() {
mark_bitmap_->SetName(temp_name);
}
+Space::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space)
+ : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()), freed_objects(0),
+ freed_bytes(0) {
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 2b27f8747f..0a87a160b3 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -173,10 +173,11 @@ class Space {
protected:
struct SweepCallbackContext {
- bool swap_bitmaps;
- Heap* heap;
- space::Space* space;
- Thread* self;
+ public:
+ SweepCallbackContext(bool swap_bitmaps, space::Space* space);
+ const bool swap_bitmaps;
+ space::Space* const space;
+ Thread* const self;
size_t freed_objects;
size_t freed_bytes;
};
@@ -313,15 +314,15 @@ class ContinuousSpace : public Space {
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
public:
- accounting::ObjectSet* GetLiveObjects() const {
- return live_objects_.get();
+ accounting::LargeObjectBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
}
- accounting::ObjectSet* GetMarkObjects() const {
- return mark_objects_.get();
+ accounting::LargeObjectBitmap* GetMarkBitmap() const {
+ return mark_bitmap_.get();
}
- virtual bool IsDiscontinuousSpace() const {
+ virtual bool IsDiscontinuousSpace() const OVERRIDE {
return true;
}
@@ -330,8 +331,8 @@ class DiscontinuousSpace : public Space {
protected:
DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
- UniquePtr<accounting::ObjectSet> live_objects_;
- UniquePtr<accounting::ObjectSet> mark_objects_;
+ UniquePtr<accounting::LargeObjectBitmap> live_bitmap_;
+ UniquePtr<accounting::LargeObjectBitmap> mark_bitmap_;
private:
DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
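Replacing ObjectSet with LargeObjectBitmap means large objects now reuse the SpaceBitmap machinery of continuous spaces, just with a coarser alignment parameter (page-sized slots, judging by the IsAligned<kPageSize> check earlier in this commit). The core index math is a single divide; a toy sketch with std::vector<bool> in place of the real packed word array:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    template <size_t kAlignment>
    class ToySpaceBitmap {
     public:
      ToySpaceBitmap(uintptr_t begin, size_t capacity)
          : begin_(begin), bits_(capacity / kAlignment, false) {}

      // Returns whether the bit was already set, like the Set() used in MarkObject.
      bool Set(uintptr_t addr) {
        const size_t index = (addr - begin_) / kAlignment;
        const bool old = bits_[index];
        bits_[index] = true;
        return old;
      }

      bool Test(uintptr_t addr) const { return bits_[(addr - begin_) / kAlignment]; }

     private:
      const uintptr_t begin_;
      std::vector<bool> bits_;
    };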
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 9896a4833c..28200dfbfc 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -39,10 +39,8 @@ class SpaceTest : public CommonRuntimeTest {
}
void AddSpace(ContinuousSpace* space) {
- // For RosAlloc, revoke the thread local runs before moving onto a
- // new alloc space.
- Runtime::Current()->GetHeap()->RevokeAllThreadLocalBuffers();
- Runtime::Current()->GetHeap()->AddSpace(space);
+ // By passing true, AddSpace() does the revoke.
+ Runtime::Current()->GetHeap()->AddSpace(space, true);
}
mirror::Class* GetByteArrayClass(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -349,11 +347,8 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
EXPECT_EQ(usable_size, computed_usable_size);
}
- // Release memory and check pointers are nullptr.
+ // Release memory.
space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
- for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- EXPECT_TRUE(lots_of_objects[i] == nullptr);
- }
// Succeeds, fits by adjusting the max allowed footprint.
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
@@ -367,12 +362,8 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
EXPECT_EQ(usable_size, computed_usable_size);
}
- // Release memory and check pointers are nullptr
- // TODO: This isn't compaction safe, fix.
+ // Release memory.
space->FreeList(self, arraysize(lots_of_objects), lots_of_objects);
- for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- EXPECT_TRUE(lots_of_objects[i] == nullptr);
- }
}
void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t object_size,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 1b06b63afa..046641362d 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -101,7 +101,7 @@ void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* ar
DCHECK(context->space->IsZygoteSpace());
ZygoteSpace* zygote_space = context->space->AsZygoteSpace();
Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
- accounting::CardTable* card_table = context->heap->GetCardTable();
+ accounting::CardTable* card_table = Runtime::Current()->GetHeap()->GetCardTable();
// If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
// the bitmaps as an optimization.
if (!context->swap_bitmaps) {
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index bbad88495e..987df91ead 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -254,20 +254,11 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
int idx = ExtractIndex(iref);
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
if (GetIndirectRefKind(iref) == kSirtOrInvalid &&
Thread::Current()->SirtContains(reinterpret_cast<jobject>(iref))) {
LOG(WARNING) << "Attempt to remove local SIRT entry from IRT, ignoring";
return true;
}
- if (GetIndirectRefKind(iref) == kSirtOrInvalid && vm->work_around_app_jni_bugs) {
- mirror::Object* direct_pointer = reinterpret_cast<mirror::Object*>(iref);
- idx = Find(direct_pointer, bottomIndex, topIndex, table_);
- if (idx == -1) {
- LOG(WARNING) << "Trying to work around app JNI bugs, but didn't find " << iref << " in table!";
- return false;
- }
- }
if (idx < bottomIndex) {
// Wrong segment.
@@ -285,7 +276,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
if (idx == topIndex-1) {
// Top-most entry. Scan up and consume holes.
- if (!vm->work_around_app_jni_bugs && !CheckEntry("remove", iref, idx)) {
+ if (!CheckEntry("remove", iref, idx)) {
return false;
}
@@ -321,7 +312,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
}
- if (!vm->work_around_app_jni_bugs && !CheckEntry("remove", iref, idx)) {
+ if (!CheckEntry("remove", iref, idx)) {
return false;
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index bcde9e5a2e..77d29ddf3f 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -94,6 +94,7 @@ static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code,
}
if (!method->IsResolutionMethod()) {
if (quick_code == GetQuickToInterpreterBridge() ||
+ quick_code == GetQuickToInterpreterBridgeTrampoline(Runtime::Current()->GetClassLinker()) ||
(quick_code == GetQuickResolutionTrampoline(Runtime::Current()->GetClassLinker()) &&
Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()
&& !method->IsNative() && !method->IsProxyMethod())) {
@@ -147,6 +148,7 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
// Do not overwrite the interpreter to prevent posting method entry/exit events twice.
new_portable_code = class_linker->GetPortableOatCodeFor(method, &have_portable_code);
new_quick_code = class_linker->GetQuickOatCodeFor(method);
+ DCHECK(new_quick_code != GetQuickToInterpreterBridgeTrampoline(class_linker));
if (entry_exit_stubs_installed_ && new_quick_code != GetQuickToInterpreterBridge()) {
DCHECK(new_portable_code != GetPortableToInterpreterBridge());
new_portable_code = GetPortableToInterpreterBridge();
@@ -256,7 +258,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
visitor.WalkStack(true);
CHECK_EQ(visitor.dex_pcs_.size(), thread->GetInstrumentationStack()->size());
- if (!instrumentation->ShouldNotifyMethodEnterExitEvents()) {
+ if (instrumentation->ShouldNotifyMethodEnterExitEvents()) {
// Create method enter events for all methods currently on the thread's stack. We only do this
// if no debugger is attached, to prevent posting events twice.
typedef std::deque<InstrumentationStackFrame>::const_reverse_iterator It;
@@ -303,8 +305,9 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
}
bool removed_stub = false;
// TODO: make this search more efficient?
- for (InstrumentationStackFrame instrumentation_frame : *instrumentation_stack_) {
- if (instrumentation_frame.frame_id_ == GetFrameId()) {
+ const size_t frameId = GetFrameId();
+ for (const InstrumentationStackFrame& instrumentation_frame : *instrumentation_stack_) {
+ if (instrumentation_frame.frame_id_ == frameId) {
if (kVerboseInstrumentation) {
LOG(INFO) << " Removing exit stub in " << DescribeLocation();
}
@@ -314,7 +317,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
CHECK(m == instrumentation_frame.method_) << PrettyMethod(m);
}
SetReturnPc(instrumentation_frame.return_pc_);
- if (!instrumentation_->ShouldNotifyMethodEnterExitEvents()) {
+ if (instrumentation_->ShouldNotifyMethodEnterExitEvents()) {
// Create the method exit events. As the methods didn't really exit, the result is 0.
// We only do this if no debugger is attached, to prevent posting events twice.
instrumentation_->MethodExitEvent(thread_, instrumentation_frame.this_object_, m,
@@ -464,7 +467,7 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
// We're already set.
return;
}
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
Runtime* runtime = Runtime::Current();
Locks::thread_list_lock_->AssertNotHeld(self);
if (desired_level > 0) {
@@ -476,7 +479,7 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
}
runtime->GetClassLinker()->VisitClasses(InstallStubsClassVisitor, this);
instrumentation_stubs_installed_ = true;
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ MutexLock mu(self, *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(InstrumentationInstallStack, this);
} else {
interpreter_stubs_installed_ = false;
@@ -561,7 +564,8 @@ void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* q
new_quick_code = GetQuickToInterpreterBridge();
new_have_portable_code = false;
} else if (quick_code == GetQuickResolutionTrampoline(Runtime::Current()->GetClassLinker()) ||
- quick_code == GetQuickToInterpreterBridge()) {
+ quick_code == GetQuickToInterpreterBridgeTrampoline(Runtime::Current()->GetClassLinker()) ||
+ quick_code == GetQuickToInterpreterBridge()) {
DCHECK((portable_code == GetPortableResolutionTrampoline(Runtime::Current()->GetClassLinker())) ||
(portable_code == GetPortableToInterpreterBridge()));
new_portable_code = portable_code;
@@ -682,7 +686,7 @@ void Instrumentation::DisableDeoptimization() {
// Indicates if instrumentation should notify method enter/exit events to the listeners.
bool Instrumentation::ShouldNotifyMethodEnterExitEvents() const {
- return deoptimization_enabled_ || interpreter_stubs_installed_;
+ return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
void Instrumentation::DeoptimizeEverything() {
@@ -708,9 +712,10 @@ const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method) const {
Runtime* runtime = Runtime::Current();
if (LIKELY(!instrumentation_stubs_installed_)) {
const void* code = method->GetEntryPointFromQuickCompiledCode();
- DCHECK(code != NULL);
- if (LIKELY(code != GetQuickResolutionTrampoline(runtime->GetClassLinker()) &&
- code != GetQuickToInterpreterBridge())) {
+ DCHECK(code != nullptr);
+ if (LIKELY(code != GetQuickResolutionTrampoline(runtime->GetClassLinker())) &&
+ LIKELY(code != GetQuickToInterpreterBridgeTrampoline(runtime->GetClassLinker())) &&
+ LIKELY(code != GetQuickToInterpreterBridge())) {
return code;
}
}
@@ -799,7 +804,10 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation&
if (have_exception_caught_listeners_) {
DCHECK_EQ(thread->GetException(NULL), exception_object);
thread->ClearException();
- for (InstrumentationListener* listener : exception_caught_listeners_) {
+ // TODO: The copy below is needed because the debug listener can remove itself from the
+ // listener list inside its callback, which would break the iterator. The copy only works
+ // around the problem.
+ std::list<InstrumentationListener*> copy(exception_caught_listeners_);
+ for (InstrumentationListener* listener : copy) {
listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
}
thread->SetException(throw_location, exception_object);
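The exception_caught_listeners_ copy above is the usual copy-then-iterate idiom for callback lists whose callbacks may unregister themselves. Reduced to its essentials:

    #include <functional>
    #include <list>

    std::list<std::function<void()>> listeners;

    void NotifyAll() {
      // Iterate a snapshot: a listener that erases itself from `listeners`
      // inside its callback cannot invalidate this loop's iterator.
      const std::list<std::function<void()>> copy(listeners);
      for (const auto& listener : copy) {
        listener();
      }
    }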
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 1bf007864d..e3f3cd0abe 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -302,19 +302,19 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
exit(0);
}
// Explicit definitions of ExecuteGotoImpl.
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
JValue ExecuteGotoImpl<true, false>(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
JValue ExecuteGotoImpl<false, false>(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
JValue ExecuteGotoImpl<true, true>(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+template<> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
JValue ExecuteGotoImpl<false, true>(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
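The interpreter.cc hunk swaps a bare template prefix for template<> on these four declarations. The two spellings mean different things in standard C++, as this toy example shows:

    template <bool kFlag>
    int Execute(int x) { return kFlag ? x + 1 : x - 1; }

    // Explicit instantiation: forces the compiler to emit Execute<true> in this
    // translation unit using the primary template's definition.
    template int Execute<true>(int x);

    // Explicit specialization: supplies a distinct definition for Execute<false>,
    // overriding the primary template for that argument.
    template <>
    int Execute<false>(int x) { return 0; }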
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 21eeafa9d9..cc1fa0c94f 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -169,6 +169,13 @@ static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
return false;
}
}
+ // Report this field access to instrumentation if needed.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
+ Object* this_object = f->IsStatic() ? nullptr : obj;
+ instrumentation->FieldReadEvent(self, this_object, shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(), f);
+ }
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -210,6 +217,17 @@ static inline bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* ins
return false;
}
MemberOffset field_offset(inst->VRegC_22c());
+ // Report this field access to instrumentation if needed. Since we only have the offset of
+ // the field from the base of the object, we need to look for it first.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
+ ArtField* f = ArtField::FindInstanceFieldWithOffset(obj->GetClass(),
+ field_offset.Uint32Value());
+ DCHECK(f != nullptr);
+ DCHECK(!f->IsStatic());
+ instrumentation->FieldReadEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(), f);
+ }
const bool is_volatile = false; // iget-x-quick only on non volatile fields.
const uint32_t vregA = inst->VRegA_22c(inst_data);
switch (field_type) {
@@ -228,6 +246,39 @@ static inline bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* ins
return true;
}
+template<Primitive::Type field_type>
+static inline JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ JValue field_value;
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ field_value.SetZ(static_cast<uint8_t>(shadow_frame.GetVReg(vreg)));
+ break;
+ case Primitive::kPrimByte:
+ field_value.SetB(static_cast<int8_t>(shadow_frame.GetVReg(vreg)));
+ break;
+ case Primitive::kPrimChar:
+ field_value.SetC(static_cast<uint16_t>(shadow_frame.GetVReg(vreg)));
+ break;
+ case Primitive::kPrimShort:
+ field_value.SetS(static_cast<int16_t>(shadow_frame.GetVReg(vreg)));
+ break;
+ case Primitive::kPrimInt:
+ field_value.SetI(shadow_frame.GetVReg(vreg));
+ break;
+ case Primitive::kPrimLong:
+ field_value.SetJ(shadow_frame.GetVRegLong(vreg));
+ break;
+ case Primitive::kPrimNot:
+ field_value.SetL(shadow_frame.GetVRegReference(vreg));
+ break;
+ default:
+ LOG(FATAL) << "Unreachable: " << field_type;
+ break;
+ }
+ return field_value;
+}
+
// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check, bool transaction_active>
@@ -254,6 +305,15 @@ static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
}
}
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ // Report this field modification to instrumentation if needed.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
+ JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
+ Object* this_object = f->IsStatic() ? nullptr : obj;
+ instrumentation->FieldWriteEvent(self, this_object, shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(), f, field_value);
+ }
switch (field_type) {
case Primitive::kPrimBoolean:
f->SetBoolean<transaction_active>(obj, shadow_frame.GetVReg(vregA));
@@ -309,8 +369,20 @@ static inline bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instructio
return false;
}
MemberOffset field_offset(inst->VRegC_22c());
- const bool is_volatile = false; // iput-x-quick only on non volatile fields.
const uint32_t vregA = inst->VRegA_22c(inst_data);
+ // Report this field modification to instrumentation if needed. Since we only have the offset of
+ // the field from the base of the object, we need to look for it first.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
+ ArtField* f = ArtField::FindInstanceFieldWithOffset(obj->GetClass(),
+ field_offset.Uint32Value());
+ DCHECK(f != nullptr);
+ DCHECK(!f->IsStatic());
+ JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
+ instrumentation->FieldWriteEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
+ shadow_frame.GetDexPC(), f, field_value);
+ }
+ const bool is_volatile = false; // iput-x-quick only on non volatile fields.
switch (field_type) {
case Primitive::kPrimInt:
obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
@@ -498,7 +570,8 @@ static inline uint32_t FindNextInstructionFollowingException(Thread* self,
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
bool clear_exception = false;
- uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
+ SirtRef<mirror::Class> exception_class(self, exception->GetClass());
+ uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception_class, dex_pc,
&clear_exception);
if (found_dex_pc == DexFile::kDexNoIndex) {
instrumentation->MethodUnwindEvent(self, this_object,
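The SirtRef change above is a GC-safety fix: FindCatchBlock may resolve catch-handler classes, which can allocate and suspend the thread, so a raw mirror::Class* could dangle if a moving collector runs in the meantime. Rooting the class in the stack indirect reference table keeps the pointer visible to (and updated by) the GC across the call. The pattern, annotated (sketch, assuming SirtRef behaves here as elsewhere in this tree):

SirtRef<mirror::Class> exception_class(self, exception->GetClass());
// FindCatchBlock may resolve handler classes and suspend the thread; the
// SirtRef keeps exception_class rooted, and its pointer updated, if the GC
// moves the underlying object.
uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception_class, dex_pc,
                                                                 &clear_exception);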
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 69080477b0..223b7a1124 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -108,7 +108,7 @@ namespace JDWP {
*/
struct ModBasket {
ModBasket() : pLoc(NULL), threadId(0), classId(0), excepClassId(0),
- caught(false), field(0), thisPtr(0) { }
+ caught(false), fieldTypeID(0), fieldId(0), thisPtr(0) { }
const JdwpLocation* pLoc; /* LocationOnly */
std::string className; /* ClassMatch/ClassExclude */
@@ -116,7 +116,8 @@ struct ModBasket {
RefTypeId classId; /* ClassOnly */
RefTypeId excepClassId; /* ExceptionOnly */
bool caught; /* ExceptionOnly */
- FieldId field; /* FieldOnly */
+ RefTypeId fieldTypeID; /* FieldOnly */
+ FieldId fieldId; /* FieldOnly */
ObjectId thisPtr; /* InstanceOnly */
/* nothing for StepOnly -- handled differently */
};
@@ -233,7 +234,16 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
Dbg::UnconfigureStep(pMod->step.threadId);
}
}
- if (NeedsFullDeoptimization(pEvent->eventKind)) {
+ if (pEvent->eventKind == EK_SINGLE_STEP) {
+ // Special case for single-steps where we want to avoid the slow deoptimize/undeoptimize
+ // loop between each single-step. In an IDE, this would happen each time the user clicks
+ // the "single-step" button. Here we delay the full undeoptimization to the next resume
+ // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
+ // runtime shutdown).
+ // Therefore, in a single-stepping sequence, only the first single-step will trigger a full
+ // deoptimization and only the last single-step will trigger a full undeoptimization.
+ Dbg::DelayFullUndeoptimization();
+ } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
CHECK(req.method == nullptr);
req.kind = DeoptimizationRequest::kFullUndeoptimization;
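A conceptual sketch of the delayed-undeoptimization bookkeeping this relies on (the lock, counter, and helper names below are hypothetical; the real state lives in Dbg):

// Unregistering a single-step event: defer instead of undeoptimizing now.
void Dbg::DelayFullUndeoptimization() {
  MutexLock mu(Thread::Current(), *deoptimization_lock_);  // hypothetical lock
  ++delayed_full_undeoptimization_count_;                  // hypothetical counter
}

// On VM.Resume, ThreadReference.Resume or debugger disposal: apply every
// deferred full undeoptimization in one batch.
void Dbg::ProcessDelayedFullUndeoptimizations() {
  MutexLock mu(Thread::Current(), *deoptimization_lock_);
  while (delayed_full_undeoptimization_count_ > 0) {
    --delayed_full_undeoptimization_count_;
    RequestFullUndeoptimization();                         // hypothetical helper
  }
}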
@@ -448,7 +458,10 @@ static bool ModsMatch(JdwpEvent* pEvent, ModBasket* basket)
}
break;
case MK_FIELD_ONLY:
- if (!Dbg::MatchType(basket->classId, pMod->fieldOnly.refTypeId) || pMod->fieldOnly.fieldId != basket->field) {
+ if (pMod->fieldOnly.fieldId != basket->fieldId) {
+ return false;
+ }
+ if (!Dbg::MatchType(basket->fieldTypeID, pMod->fieldOnly.refTypeId)) {
return false;
}
break;
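The reordered MK_FIELD_ONLY check is both a correctness and a cost fix: the old condition always ran the type walk first, and it anchored the walk on basket->classId (the class at the event location) rather than on the type that declares the field. Reading of the new order (the hunk's own lines, annotated):

// O(1) reject on the field id before any type-hierarchy walk.
if (pMod->fieldOnly.fieldId != basket->fieldId) {
  return false;
}
// Then match the field's declaring type against the modifier's refTypeId.
if (!Dbg::MatchType(basket->fieldTypeID, pMod->fieldOnly.refTypeId)) {
  return false;
}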
@@ -839,7 +852,8 @@ bool JdwpState::PostFieldEvent(const JdwpLocation* pLoc, RefTypeId typeId, Field
basket.thisPtr = thisPtr;
basket.threadId = Dbg::GetThreadSelfId();
basket.className = Dbg::GetClassName(pLoc->class_id);
- basket.field = fieldId;
+ basket.fieldTypeID = typeId;
+ basket.fieldId = fieldId;
if (InvokeInProgress()) {
VLOG(jdwp) << "Not posting field event during invoke";
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 5ffe753dbb..4843c2b5ec 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -291,6 +291,7 @@ static JdwpError VM_Suspend(JdwpState*, Request&, ExpandBuf*)
*/
static JdwpError VM_Resume(JdwpState*, Request&, ExpandBuf*)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Dbg::ProcessDelayedFullUndeoptimizations();
Dbg::ResumeVM();
return ERR_NONE;
}
@@ -353,8 +354,8 @@ static JdwpError VM_DisposeObjects(JdwpState*, Request& request, ExpandBuf*)
static JdwpError VM_Capabilities(JdwpState*, Request&, ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- expandBufAdd1(reply, false); // canWatchFieldModification
- expandBufAdd1(reply, false); // canWatchFieldAccess
+ expandBufAdd1(reply, true); // canWatchFieldModification
+ expandBufAdd1(reply, true); // canWatchFieldAccess
expandBufAdd1(reply, true); // canGetBytecodes
expandBufAdd1(reply, true); // canGetSyntheticAttribute
expandBufAdd1(reply, true); // canGetOwnedMonitorInfo
@@ -980,6 +981,8 @@ static JdwpError TR_Resume(JdwpState*, Request& request, ExpandBuf*)
return ERR_NONE;
}
+ Dbg::ProcessDelayedFullUndeoptimizations();
+
Dbg::ResumeThread(thread_id);
return ERR_NONE;
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 8e22c1df62..f480256b6a 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -318,6 +318,8 @@ void JdwpState::ResetState() {
CHECK(event_list_ == NULL);
}
+ Dbg::ProcessDelayedFullUndeoptimizations();
+
/*
* Should not have one of these in progress. If the debugger went away
* mid-request, though, we could see this.
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f7aeffd36c..fd9c40be13 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -513,14 +513,16 @@ class Libraries {
SafeMap<std::string, SharedLibrary*> libraries_;
};
-#define CHECK_NON_NULL_ARGUMENT(fn, value) \
+#define CHECK_NON_NULL_ARGUMENT(value) CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value)
+
+#define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value) \
if (UNLIKELY(value == nullptr)) { \
- JniAbortF(#fn, #value " == null"); \
+ JniAbortF(name, #value " == null"); \
}
-#define CHECK_NON_NULL_MEMCPY_ARGUMENT(fn, length, value) \
+#define CHECK_NON_NULL_MEMCPY_ARGUMENT(length, value) \
if (UNLIKELY(length != 0 && value == nullptr)) { \
- JniAbortF(#fn, #value " == null"); \
+ JniAbortF(__FUNCTION__, #value " == null"); \
}
class JNI {
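A standalone demo of the macro refactor's core trick (not ART code): __FUNCTION__ is a compiler-provided name for the enclosing function (a GCC/Clang/MSVC extension; standard C++11 spells it __func__), so the one-argument macro reports the correct call site without every caller repeating its own name:

#include <cstdio>

// Checking macro that picks up the enclosing function name via __FUNCTION__.
#define CHECK_NON_NULL(value)                                        \
  if ((value) == nullptr) {                                          \
    std::fprintf(stderr, "%s: %s == null\n", __FUNCTION__, #value);  \
  }

static void FindThing(const char* name) {
  CHECK_NON_NULL(name);  // prints "FindThing: name == null" when name is null
}

int main() {
  FindThing(nullptr);
  return 0;
}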
@@ -535,7 +537,7 @@ class JNI {
}
static jclass FindClass(JNIEnv* env, const char* name) {
- CHECK_NON_NULL_ARGUMENT(FindClass, name);
+ CHECK_NON_NULL_ARGUMENT(name);
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
std::string descriptor(NormalizeJniClassDescriptor(name));
@@ -551,19 +553,19 @@ class JNI {
}
static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) {
- CHECK_NON_NULL_ARGUMENT(FromReflectedMethod, jlr_method);
+ CHECK_NON_NULL_ARGUMENT(jlr_method);
ScopedObjectAccess soa(env);
return soa.EncodeMethod(mirror::ArtMethod::FromReflectedMethod(soa, jlr_method));
}
static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
- CHECK_NON_NULL_ARGUMENT(FromReflectedField, jlr_field);
+ CHECK_NON_NULL_ARGUMENT(jlr_field);
ScopedObjectAccess soa(env);
return soa.EncodeField(mirror::ArtField::FromReflectedField(soa, jlr_field));
}
static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
- CHECK_NON_NULL_ARGUMENT(ToReflectedMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
mirror::ArtMethod* m = soa.DecodeMethod(mid);
CHECK(!kMovingMethods);
@@ -578,7 +580,7 @@ class JNI {
}
static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) {
- CHECK_NON_NULL_ARGUMENT(ToReflectedField, fid);
+ CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::ArtField* f = soa.DecodeField(fid);
jobject art_field = soa.AddLocalReference<jobject>(f);
@@ -592,22 +594,22 @@ class JNI {
}
static jclass GetObjectClass(JNIEnv* env, jobject java_object) {
- CHECK_NON_NULL_ARGUMENT(GetObjectClass, java_object);
+ CHECK_NON_NULL_ARGUMENT(java_object);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
return soa.AddLocalReference<jclass>(o->GetClass());
}
static jclass GetSuperclass(JNIEnv* env, jclass java_class) {
- CHECK_NON_NULL_ARGUMENT(GetSuperclass, java_class);
+ CHECK_NON_NULL_ARGUMENT(java_class);
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
return soa.AddLocalReference<jclass>(c->GetSuperClass());
}
static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
- CHECK_NON_NULL_ARGUMENT(IsAssignableFrom, java_class1);
- CHECK_NON_NULL_ARGUMENT(IsAssignableFrom, java_class2);
+ CHECK_NON_NULL_ARGUMENT(java_class1);
+ CHECK_NON_NULL_ARGUMENT(java_class2);
ScopedObjectAccess soa(env);
mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
@@ -615,7 +617,7 @@ class JNI {
}
static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
- CHECK_NON_NULL_ARGUMENT(IsInstanceOf, java_class);
+ CHECK_NON_NULL_ARGUMENT(java_class);
if (jobj == nullptr) {
// Note: JNI is different from regular Java instanceof in this respect
return JNI_TRUE;
@@ -639,7 +641,7 @@ class JNI {
}
static jint ThrowNew(JNIEnv* env, jclass c, const char* msg) {
- CHECK_NON_NULL_ARGUMENT(ThrowNew, c);
+ CHECK_NON_NULL_ARGUMENT(c);
return ThrowNewException(env, c, msg, nullptr);
}
@@ -797,7 +799,7 @@ class JNI {
}
static jobject AllocObject(JNIEnv* env, jclass java_class) {
- CHECK_NON_NULL_ARGUMENT(AllocObject, java_class);
+ CHECK_NON_NULL_ARGUMENT(java_class);
ScopedObjectAccess soa(env);
mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(java_class));
if (c == nullptr) {
@@ -809,16 +811,16 @@ class JNI {
static jobject NewObject(JNIEnv* env, jclass java_class, jmethodID mid, ...) {
va_list args;
va_start(args, mid);
- CHECK_NON_NULL_ARGUMENT(NewObject, java_class);
- CHECK_NON_NULL_ARGUMENT(NewObject, mid);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(mid);
jobject result = NewObjectV(env, java_class, mid, args);
va_end(args);
return result;
}
static jobject NewObjectV(JNIEnv* env, jclass java_class, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(NewObjectV, java_class);
- CHECK_NON_NULL_ARGUMENT(NewObjectV, mid);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(java_class));
if (c == nullptr) {
@@ -837,8 +839,8 @@ class JNI {
}
static jobject NewObjectA(JNIEnv* env, jclass java_class, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(NewObjectA, java_class);
- CHECK_NON_NULL_ARGUMENT(NewObjectA, mid);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(java_class));
if (c == nullptr) {
@@ -857,18 +859,18 @@ class JNI {
}
static jmethodID GetMethodID(JNIEnv* env, jclass java_class, const char* name, const char* sig) {
- CHECK_NON_NULL_ARGUMENT(GetMethodID, java_class);
- CHECK_NON_NULL_ARGUMENT(GetMethodID, name);
- CHECK_NON_NULL_ARGUMENT(GetMethodID, sig);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(name);
+ CHECK_NON_NULL_ARGUMENT(sig);
ScopedObjectAccess soa(env);
return FindMethodID(soa, java_class, name, sig, false);
}
static jmethodID GetStaticMethodID(JNIEnv* env, jclass java_class, const char* name,
const char* sig) {
- CHECK_NON_NULL_ARGUMENT(GetStaticMethodID, java_class);
- CHECK_NON_NULL_ARGUMENT(GetStaticMethodID, name);
- CHECK_NON_NULL_ARGUMENT(GetStaticMethodID, sig);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(name);
+ CHECK_NON_NULL_ARGUMENT(sig);
ScopedObjectAccess soa(env);
return FindMethodID(soa, java_class, name, sig, true);
}
@@ -876,8 +878,8 @@ class JNI {
static jobject CallObjectMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallObjectMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallObjectMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -885,16 +887,16 @@ class JNI {
}
static jobject CallObjectMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallObjectMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallObjectMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
}
static jobject CallObjectMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallObjectMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallObjectMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args));
@@ -904,8 +906,8 @@ class JNI {
static jboolean CallBooleanMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallBooleanMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallBooleanMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -913,15 +915,15 @@ class JNI {
}
static jboolean CallBooleanMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallBooleanMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallBooleanMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetZ();
}
static jboolean CallBooleanMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallBooleanMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallBooleanMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetZ();
@@ -930,8 +932,8 @@ class JNI {
static jbyte CallByteMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallByteMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallByteMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -939,15 +941,15 @@ class JNI {
}
static jbyte CallByteMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallByteMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallByteMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetB();
}
static jbyte CallByteMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallByteMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallByteMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetB();
@@ -956,8 +958,8 @@ class JNI {
static jchar CallCharMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallCharMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallCharMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -965,15 +967,15 @@ class JNI {
}
static jchar CallCharMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallCharMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallCharMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetC();
}
static jchar CallCharMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallCharMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallCharMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetC();
@@ -982,8 +984,8 @@ class JNI {
static jdouble CallDoubleMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallDoubleMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallDoubleMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -991,15 +993,15 @@ class JNI {
}
static jdouble CallDoubleMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallDoubleMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallDoubleMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetD();
}
static jdouble CallDoubleMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallDoubleMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallDoubleMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetD();
@@ -1008,8 +1010,8 @@ class JNI {
static jfloat CallFloatMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallFloatMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallFloatMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1017,15 +1019,15 @@ class JNI {
}
static jfloat CallFloatMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallFloatMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallFloatMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetF();
}
static jfloat CallFloatMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallFloatMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallFloatMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetF();
@@ -1034,8 +1036,8 @@ class JNI {
static jint CallIntMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallIntMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallIntMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1043,15 +1045,15 @@ class JNI {
}
static jint CallIntMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallIntMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallIntMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetI();
}
static jint CallIntMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallIntMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallIntMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetI();
@@ -1060,8 +1062,8 @@ class JNI {
static jlong CallLongMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallLongMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallLongMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1069,15 +1071,15 @@ class JNI {
}
static jlong CallLongMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallLongMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallLongMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetJ();
}
static jlong CallLongMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallLongMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallLongMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetJ();
@@ -1086,8 +1088,8 @@ class JNI {
static jshort CallShortMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallShortMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallShortMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1095,15 +1097,15 @@ class JNI {
}
static jshort CallShortMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallShortMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallShortMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetS();
}
static jshort CallShortMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallShortMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallShortMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid,
args).GetS();
@@ -1112,23 +1114,23 @@ class JNI {
static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallVoidMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallVoidMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, ap);
va_end(ap);
}
static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallVoidMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallVoidMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args);
}
static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallVoidMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallVoidMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeVirtualOrInterfaceWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
}
@@ -1136,8 +1138,8 @@ class JNI {
static jobject CallNonvirtualObjectMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
jobject local_result = soa.AddLocalReference<jobject>(result.GetL());
@@ -1147,8 +1149,8 @@ class JNI {
static jobject CallNonvirtualObjectMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
@@ -1156,8 +1158,8 @@ class JNI {
static jobject CallNonvirtualObjectMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualObjectMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
@@ -1167,8 +1169,8 @@ class JNI {
...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1177,16 +1179,16 @@ class JNI {
static jboolean CallNonvirtualBooleanMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetZ();
}
static jboolean CallNonvirtualBooleanMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualBooleanMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetZ();
}
@@ -1194,8 +1196,8 @@ class JNI {
static jbyte CallNonvirtualByteMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1204,16 +1206,16 @@ class JNI {
static jbyte CallNonvirtualByteMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetB();
}
static jbyte CallNonvirtualByteMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualByteMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetB();
}
@@ -1221,8 +1223,8 @@ class JNI {
static jchar CallNonvirtualCharMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1231,16 +1233,16 @@ class JNI {
static jchar CallNonvirtualCharMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetC();
}
static jchar CallNonvirtualCharMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualCharMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetC();
}
@@ -1248,8 +1250,8 @@ class JNI {
static jshort CallNonvirtualShortMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1258,16 +1260,16 @@ class JNI {
static jshort CallNonvirtualShortMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetS();
}
static jshort CallNonvirtualShortMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualShortMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetS();
}
@@ -1275,8 +1277,8 @@ class JNI {
static jint CallNonvirtualIntMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1285,16 +1287,16 @@ class JNI {
static jint CallNonvirtualIntMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetI();
}
static jint CallNonvirtualIntMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualIntMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetI();
}
@@ -1302,8 +1304,8 @@ class JNI {
static jlong CallNonvirtualLongMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1312,16 +1314,16 @@ class JNI {
static jlong CallNonvirtualLongMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetJ();
}
static jlong CallNonvirtualLongMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualLongMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetJ();
}
@@ -1329,8 +1331,8 @@ class JNI {
static jfloat CallNonvirtualFloatMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1339,16 +1341,16 @@ class JNI {
static jfloat CallNonvirtualFloatMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetF();
}
static jfloat CallNonvirtualFloatMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualFloatMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetF();
}
@@ -1356,8 +1358,8 @@ class JNI {
static jdouble CallNonvirtualDoubleMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, obj, mid, ap));
va_end(ap);
@@ -1366,16 +1368,16 @@ class JNI {
static jdouble CallNonvirtualDoubleMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, obj, mid, args).GetD();
}
static jdouble CallNonvirtualDoubleMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualDoubleMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args).GetD();
}
@@ -1383,8 +1385,8 @@ class JNI {
static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethod, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, obj, mid, ap);
va_end(ap);
@@ -1392,40 +1394,40 @@ class JNI {
static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass, jmethodID mid,
va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethodV, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, obj, mid, args);
}
static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethodA, obj);
- CHECK_NON_NULL_ARGUMENT(CallNonvirtualVoidMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeWithJValues(soa, soa.Decode<mirror::Object*>(obj), mid, args);
}
static jfieldID GetFieldID(JNIEnv* env, jclass java_class, const char* name, const char* sig) {
- CHECK_NON_NULL_ARGUMENT(GetFieldID, java_class);
- CHECK_NON_NULL_ARGUMENT(GetFieldID, name);
- CHECK_NON_NULL_ARGUMENT(GetFieldID, sig);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(name);
+ CHECK_NON_NULL_ARGUMENT(sig);
ScopedObjectAccess soa(env);
return FindFieldID(soa, java_class, name, sig, false);
}
static jfieldID GetStaticFieldID(JNIEnv* env, jclass java_class, const char* name,
const char* sig) {
- CHECK_NON_NULL_ARGUMENT(GetStaticFieldID, java_class);
- CHECK_NON_NULL_ARGUMENT(GetStaticFieldID, name);
- CHECK_NON_NULL_ARGUMENT(GetFieldID, sig);
+ CHECK_NON_NULL_ARGUMENT(java_class);
+ CHECK_NON_NULL_ARGUMENT(name);
+ CHECK_NON_NULL_ARGUMENT(sig);
ScopedObjectAccess soa(env);
return FindFieldID(soa, java_class, name, sig, true);
}
static jobject GetObjectField(JNIEnv* env, jobject obj, jfieldID fid) {
- CHECK_NON_NULL_ARGUMENT(GetObjectField, obj);
- CHECK_NON_NULL_ARGUMENT(GetObjectField, fid);
+ CHECK_NON_NULL_ARGUMENT(obj);
+ CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(obj);
mirror::ArtField* f = soa.DecodeField(fid);
@@ -1433,15 +1435,15 @@ class JNI {
}
static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
- CHECK_NON_NULL_ARGUMENT(GetStaticObjectField, fid);
+ CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::ArtField* f = soa.DecodeField(fid);
return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
}
static void SetObjectField(JNIEnv* env, jobject java_object, jfieldID fid, jobject java_value) {
- CHECK_NON_NULL_ARGUMENT(SetObjectField, java_object);
- CHECK_NON_NULL_ARGUMENT(SetObjectField, fid);
+ CHECK_NON_NULL_ARGUMENT(java_object);
+ CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
mirror::Object* v = soa.Decode<mirror::Object*>(java_value);
@@ -1450,7 +1452,7 @@ class JNI {
}
static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) {
- CHECK_NON_NULL_ARGUMENT(SetStaticObjectField, fid);
+ CHECK_NON_NULL_ARGUMENT(fid);
ScopedObjectAccess soa(env);
mirror::Object* v = soa.Decode<mirror::Object*>(java_value);
mirror::ArtField* f = soa.DecodeField(fid);
@@ -1458,29 +1460,29 @@ class JNI {
}
#define GET_PRIMITIVE_FIELD(fn, instance) \
- CHECK_NON_NULL_ARGUMENT(Get #fn Field, instance); \
- CHECK_NON_NULL_ARGUMENT(Get #fn Field, fid); \
+ CHECK_NON_NULL_ARGUMENT(instance); \
+ CHECK_NON_NULL_ARGUMENT(fid); \
ScopedObjectAccess soa(env); \
mirror::Object* o = soa.Decode<mirror::Object*>(instance); \
mirror::ArtField* f = soa.DecodeField(fid); \
return f->Get ##fn (o)
#define GET_STATIC_PRIMITIVE_FIELD(fn) \
- CHECK_NON_NULL_ARGUMENT(GetStatic #fn Field, fid); \
+ CHECK_NON_NULL_ARGUMENT(fid); \
ScopedObjectAccess soa(env); \
mirror::ArtField* f = soa.DecodeField(fid); \
return f->Get ##fn (f->GetDeclaringClass())
#define SET_PRIMITIVE_FIELD(fn, instance, value) \
- CHECK_NON_NULL_ARGUMENT(Set #fn Field, instance); \
- CHECK_NON_NULL_ARGUMENT(Set #fn Field, fid); \
+ CHECK_NON_NULL_ARGUMENT(instance); \
+ CHECK_NON_NULL_ARGUMENT(fid); \
ScopedObjectAccess soa(env); \
mirror::Object* o = soa.Decode<mirror::Object*>(instance); \
mirror::ArtField* f = soa.DecodeField(fid); \
f->Set ##fn <false>(o, value)
#define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
- CHECK_NON_NULL_ARGUMENT(SetStatic #fn Field, fid); \
+ CHECK_NON_NULL_ARGUMENT(fid); \
ScopedObjectAccess soa(env); \
mirror::ArtField* f = soa.DecodeField(fid); \
f->Set ##fn <false>(f->GetDeclaringClass(), value)
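The ## in these macros is preprocessor token pasting: "Get ##fn" glues the literal token Get onto the macro argument, so GET_PRIMITIVE_FIELD(Int, obj) ends in a call to f->GetInt(o). A standalone demo of the same idea (not ART code):

#include <cstdio>

struct Field {
  int GetInt() const { return 42; }
  long GetLong() const { return 7L; }
};

// The macro argument selects the accessor by pasting it onto "Get".
#define GET_PRIMITIVE(fn, field) (field).Get##fn()

int main() {
  std::printf("%d %ld\n", GET_PRIMITIVE(Int, Field()), GET_PRIMITIVE(Long, Field()));
  return 0;
}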
@@ -1616,7 +1618,7 @@ class JNI {
static jobject CallStaticObjectMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticObjectMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
jobject local_result = soa.AddLocalReference<jobject>(result.GetL());
@@ -1625,14 +1627,14 @@ class JNI {
}
static jobject CallStaticObjectMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticObjectMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
}
static jobject CallStaticObjectMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticObjectMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithJValues(soa, nullptr, mid, args));
return soa.AddLocalReference<jobject>(result.GetL());
@@ -1641,7 +1643,7 @@ class JNI {
static jboolean CallStaticBooleanMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticBooleanMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1649,13 +1651,13 @@ class JNI {
}
static jboolean CallStaticBooleanMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticBooleanMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetZ();
}
static jboolean CallStaticBooleanMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticBooleanMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetZ();
}
@@ -1663,7 +1665,7 @@ class JNI {
static jbyte CallStaticByteMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticByteMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1671,13 +1673,13 @@ class JNI {
}
static jbyte CallStaticByteMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticByteMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetB();
}
static jbyte CallStaticByteMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticByteMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetB();
}
@@ -1685,7 +1687,7 @@ class JNI {
static jchar CallStaticCharMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticCharMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1693,13 +1695,13 @@ class JNI {
}
static jchar CallStaticCharMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticCharMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetC();
}
static jchar CallStaticCharMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticCharMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetC();
}
@@ -1707,7 +1709,7 @@ class JNI {
static jshort CallStaticShortMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticShortMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1715,13 +1717,13 @@ class JNI {
}
static jshort CallStaticShortMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticShortMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetS();
}
static jshort CallStaticShortMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticShortMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetS();
}
@@ -1729,7 +1731,7 @@ class JNI {
static jint CallStaticIntMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticIntMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1737,13 +1739,13 @@ class JNI {
}
static jint CallStaticIntMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticIntMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetI();
}
static jint CallStaticIntMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticIntMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetI();
}
@@ -1751,7 +1753,7 @@ class JNI {
static jlong CallStaticLongMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticLongMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1759,13 +1761,13 @@ class JNI {
}
static jlong CallStaticLongMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticLongMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetJ();
}
static jlong CallStaticLongMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticLongMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetJ();
}
@@ -1773,7 +1775,7 @@ class JNI {
static jfloat CallStaticFloatMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticFloatMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1781,13 +1783,13 @@ class JNI {
}
static jfloat CallStaticFloatMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticFloatMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetF();
}
static jfloat CallStaticFloatMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticFloatMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetF();
}
@@ -1795,7 +1797,7 @@ class JNI {
static jdouble CallStaticDoubleMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticDoubleMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithVarArgs(soa, nullptr, mid, ap));
va_end(ap);
@@ -1803,13 +1805,13 @@ class JNI {
}
static jdouble CallStaticDoubleMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticDoubleMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithVarArgs(soa, nullptr, mid, args).GetD();
}
static jdouble CallStaticDoubleMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticDoubleMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetD();
}
@@ -1817,20 +1819,20 @@ class JNI {
static void CallStaticVoidMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
- CHECK_NON_NULL_ARGUMENT(CallStaticVoidMethod, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, nullptr, mid, ap);
va_end(ap);
}
static void CallStaticVoidMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticVoidMethodV, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeWithVarArgs(soa, nullptr, mid, args);
}
static void CallStaticVoidMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
- CHECK_NON_NULL_ARGUMENT(CallStaticVoidMethodA, mid);
+ CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
InvokeWithJValues(soa, nullptr, mid, args);
}
@@ -1859,26 +1861,26 @@ class JNI {
}
static jsize GetStringLength(JNIEnv* env, jstring java_string) {
- CHECK_NON_NULL_ARGUMENT(GetStringLength, java_string);
+ CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
return soa.Decode<mirror::String*>(java_string)->GetLength();
}
static jsize GetStringUTFLength(JNIEnv* env, jstring java_string) {
- CHECK_NON_NULL_ARGUMENT(GetStringLength, java_string);
+ CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
return soa.Decode<mirror::String*>(java_string)->GetUtfLength();
}
static void GetStringRegion(JNIEnv* env, jstring java_string, jsize start, jsize length,
jchar* buf) {
- CHECK_NON_NULL_ARGUMENT(GetStringRegion, java_string);
+ CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
if (start < 0 || length < 0 || start + length > s->GetLength()) {
ThrowSIOOBE(soa, start, length, s->GetLength());
} else {
- CHECK_NON_NULL_MEMCPY_ARGUMENT(GetStringRegion, length, buf);
+ CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
const jchar* chars = s->GetCharArray()->GetData() + s->GetOffset();
memcpy(buf, chars + start, length * sizeof(jchar));
}
@@ -1886,20 +1888,20 @@ class JNI {
static void GetStringUTFRegion(JNIEnv* env, jstring java_string, jsize start, jsize length,
char* buf) {
- CHECK_NON_NULL_ARGUMENT(GetStringUTFRegion, java_string);
+ CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
if (start < 0 || length < 0 || start + length > s->GetLength()) {
ThrowSIOOBE(soa, start, length, s->GetLength());
} else {
- CHECK_NON_NULL_MEMCPY_ARGUMENT(GetStringUTFRegion, length, buf);
+ CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
const jchar* chars = s->GetCharArray()->GetData() + s->GetOffset();
ConvertUtf16ToModifiedUtf8(buf, chars + start, length);
}
}
static const jchar* GetStringChars(JNIEnv* env, jstring java_string, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetStringChars, java_string);
+ CHECK_NON_NULL_ARGUMENT(java_string);
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* chars = s->GetCharArray();
@@ -1918,7 +1920,7 @@ class JNI {
}
static void ReleaseStringChars(JNIEnv* env, jstring java_string, const jchar* chars) {
- CHECK_NON_NULL_ARGUMENT(ReleaseStringChars, java_string);
+ CHECK_NON_NULL_ARGUMENT(java_string);
delete[] chars;
ScopedObjectAccess soa(env);
UnpinPrimitiveArray(soa, soa.Decode<mirror::String*>(java_string)->GetCharArray());
@@ -1955,7 +1957,7 @@ class JNI {
}
static jsize GetArrayLength(JNIEnv* env, jarray java_array) {
- CHECK_NON_NULL_ARGUMENT(GetArrayLength, java_array);
+ CHECK_NON_NULL_ARGUMENT(java_array);
ScopedObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(java_array);
if (UNLIKELY(!obj->IsArrayInstance())) {
@@ -1966,7 +1968,7 @@ class JNI {
}
static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index) {
- CHECK_NON_NULL_ARGUMENT(GetObjectArrayElement, java_array);
+ CHECK_NON_NULL_ARGUMENT(java_array);
ScopedObjectAccess soa(env);
mirror::ObjectArray<mirror::Object>* array =
soa.Decode<mirror::ObjectArray<mirror::Object>*>(java_array);
@@ -1975,7 +1977,7 @@ class JNI {
static void SetObjectArrayElement(JNIEnv* env, jobjectArray java_array, jsize index,
jobject java_value) {
- CHECK_NON_NULL_ARGUMENT(SetObjectArrayElement, java_array);
+ CHECK_NON_NULL_ARGUMENT(java_array);
ScopedObjectAccess soa(env);
mirror::ObjectArray<mirror::Object>* array =
soa.Decode<mirror::ObjectArray<mirror::Object>*>(java_array);
@@ -2070,7 +2072,7 @@ class JNI {
}
static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray java_array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetPrimitiveArrayCritical, java_array);
+ CHECK_NON_NULL_ARGUMENT(java_array);
ScopedObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -2087,54 +2089,54 @@ class JNI {
}
static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* elements, jint mode) {
- CHECK_NON_NULL_ARGUMENT(ReleasePrimitiveArrayCritical, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ReleasePrimitiveArray(env, array, elements, mode);
}
static jboolean* GetBooleanArrayElements(JNIEnv* env, jbooleanArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetBooleanArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jbooleanArray, jboolean*, mirror::BooleanArray>(soa, array, is_copy);
}
static jbyte* GetByteArrayElements(JNIEnv* env, jbyteArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetByteArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jbyteArray, jbyte*, mirror::ByteArray>(soa, array, is_copy);
}
static jchar* GetCharArrayElements(JNIEnv* env, jcharArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetCharArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jcharArray, jchar*, mirror::CharArray>(soa, array, is_copy);
}
static jdouble* GetDoubleArrayElements(JNIEnv* env, jdoubleArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetDoubleArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jdoubleArray, jdouble*, mirror::DoubleArray>(soa, array, is_copy);
}
static jfloat* GetFloatArrayElements(JNIEnv* env, jfloatArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetFloatArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jfloatArray, jfloat*, mirror::FloatArray>(soa, array, is_copy);
}
static jint* GetIntArrayElements(JNIEnv* env, jintArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetIntArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jintArray, jint*, mirror::IntArray>(soa, array, is_copy);
}
static jlong* GetLongArrayElements(JNIEnv* env, jlongArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetLongArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jlongArray, jlong*, mirror::LongArray>(soa, array, is_copy);
}
static jshort* GetShortArrayElements(JNIEnv* env, jshortArray array, jboolean* is_copy) {
- CHECK_NON_NULL_ARGUMENT(GetShortArrayElements, array);
+ CHECK_NON_NULL_ARGUMENT(array);
ScopedObjectAccess soa(env);
return GetPrimitiveArray<jshortArray, jshort*, mirror::ShortArray>(soa, array, is_copy);
}
@@ -2290,7 +2292,7 @@ class JNI {
JniAbortF("RegisterNatives", "negative method count: %d", method_count);
return JNI_ERR; // Not reached.
}
- CHECK_NON_NULL_ARGUMENT(RegisterNatives, java_class);
+ CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class);
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
if (UNLIKELY(method_count == 0)) {
@@ -2298,7 +2300,7 @@ class JNI {
<< PrettyDescriptor(c);
return JNI_OK;
}
- CHECK_NON_NULL_ARGUMENT(RegisterNatives, methods);
+ CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", methods);
for (jint i = 0; i < method_count; ++i) {
const char* name = methods[i].name;
const char* sig = methods[i].signature;
@@ -2335,7 +2337,7 @@ class JNI {
}
static jint UnregisterNatives(JNIEnv* env, jclass java_class) {
- CHECK_NON_NULL_ARGUMENT(UnregisterNatives, java_class);
+ CHECK_NON_NULL_ARGUMENT(java_class);
ScopedObjectAccess soa(env);
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
@@ -2358,7 +2360,7 @@ class JNI {
}
static jint MonitorEnter(JNIEnv* env, jobject java_object) NO_THREAD_SAFETY_ANALYSIS {
- CHECK_NON_NULL_ARGUMENT(MonitorEnter, java_object);
+ CHECK_NON_NULL_ARGUMENT(java_object);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
o = o->MonitorEnter(soa.Self());
@@ -2370,7 +2372,7 @@ class JNI {
}
static jint MonitorExit(JNIEnv* env, jobject java_object) NO_THREAD_SAFETY_ANALYSIS {
- CHECK_NON_NULL_ARGUMENT(MonitorExit, java_object);
+ CHECK_NON_NULL_ARGUMENT(java_object);
ScopedObjectAccess soa(env);
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
o->MonitorExit(soa.Self());
@@ -2382,7 +2384,7 @@ class JNI {
}
static jint GetJavaVM(JNIEnv* env, JavaVM** vm) {
- CHECK_NON_NULL_ARGUMENT(GetJavaVM, vm);
+ CHECK_NON_NULL_ARGUMENT(vm);
Runtime* runtime = Runtime::Current();
if (runtime != nullptr) {
*vm = runtime->GetJavaVM();
@@ -2422,7 +2424,7 @@ class JNI {
}
static jobjectRefType GetObjectRefType(JNIEnv* env, jobject java_object) {
- CHECK_NON_NULL_ARGUMENT(GetObjectRefType, java_object);
+ CHECK_NON_NULL_ARGUMENT(java_object);
// Do we definitely know what kind of reference this is?
IndirectRef ref = reinterpret_cast<IndirectRef>(java_object);
@@ -2444,23 +2446,6 @@ class JNI {
if (static_cast<JNIEnvExt*>(env)->self->SirtContains(java_object)) {
return JNILocalRefType;
}
-
- if (!static_cast<JNIEnvExt*>(env)->vm->work_around_app_jni_bugs) {
- return JNIInvalidRefType;
- }
-
- // If we're handing out direct pointers, check whether it's a direct pointer to a local
- // reference.
- {
- ScopedObjectAccess soa(env);
- if (soa.Decode<mirror::Object*>(java_object) ==
- reinterpret_cast<mirror::Object*>(java_object)) {
- mirror::Object* object = reinterpret_cast<mirror::Object*>(java_object);
- if (soa.Env()->locals.ContainsDirectPointer(object)) {
- return JNILocalRefType;
- }
- }
- }
return JNIInvalidRefType;
}
LOG(FATAL) << "IndirectRefKind[" << kind << "]";
@@ -2560,12 +2545,12 @@ class JNI {
static void GetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array,
jsize start, jsize length, JavaT* buf)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK_NON_NULL_ARGUMENT(GetPrimitiveArrayRegion, java_array);
+ CHECK_NON_NULL_ARGUMENT(java_array);
ArrayT* array = soa.Decode<ArrayT*>(java_array);
if (start < 0 || length < 0 || start + length > array->GetLength()) {
ThrowAIOOBE(soa, array, start, length, "src");
} else {
- CHECK_NON_NULL_MEMCPY_ARGUMENT(GetStringRegion, length, buf);
+ CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
JavaT* data = array->GetData();
memcpy(buf, data + start, length * sizeof(JavaT));
}
@@ -2575,12 +2560,12 @@ class JNI {
static void SetPrimitiveArrayRegion(ScopedObjectAccess& soa, JavaArrayT java_array,
jsize start, jsize length, const JavaT* buf)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK_NON_NULL_ARGUMENT(SetPrimitiveArrayRegion, java_array);
+ CHECK_NON_NULL_ARGUMENT(java_array);
ArrayT* array = soa.Decode<ArrayT*>(java_array);
if (start < 0 || length < 0 || start + length > array->GetLength()) {
ThrowAIOOBE(soa, array, start, length, "dst");
} else {
- CHECK_NON_NULL_MEMCPY_ARGUMENT(GetStringRegion, length, buf);
+ CHECK_NON_NULL_MEMCPY_ARGUMENT(length, buf);
JavaT* data = array->GetData();
memcpy(data + start, buf, length * sizeof(JavaT));
}
@@ -2993,7 +2978,6 @@ JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
check_jni(false),
force_copy(false), // TODO: add a way to enable this
trace(options->jni_trace_),
- work_around_app_jni_bugs(false),
pins_lock("JNI pin table lock", kPinTableLock),
pin_table("pin table", kPinTableInitial, kPinTableMax),
globals_lock("JNI global reference table lock"),
@@ -3044,7 +3028,6 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
if (force_copy) {
os << " (with forcecopy)";
}
- os << "; workarounds are " << (work_around_app_jni_bugs ? "on" : "off");
Thread* self = Thread::Current();
{
MutexLock mu(self, pins_lock);
@@ -3162,7 +3145,7 @@ bool JavaVMExt::LoadNativeLibrary(const std::string& path,
if (handle == nullptr) {
*detail = dlerror();
- LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << detail;
+ LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *detail;
return false;
}
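
Note on the jni_internal.cc hunks above: the per-call-site function names are dropped because the null-check macro can derive the name itself, while RegisterNatives keeps an explicit string since its checks run where __FUNCTION__ would not name the public JNI entry point (the JniAbortF("RegisterNatives", ...) call above shows the same convention). The macro definitions are outside these hunks; one plausible shape, assuming the JniAbortF and UNLIKELY helpers already used in this file:

    #define CHECK_NON_NULL_ARGUMENT(value) \
        CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value)

    #define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value) \
        if (UNLIKELY((value) == nullptr)) { \
          JniAbortF(name, #value " == null"); \
        }
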
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index 42796dbe79..ec911b204b 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -110,9 +110,6 @@ class JavaVMExt : public JavaVM {
// Extra diagnostics.
std::string trace;
- // Used to provide compatibility for apps that assumed direct references.
- bool work_around_app_jni_bugs;
-
// Used to hold references to pinned primitive arrays.
Mutex pins_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
ReferenceTable pin_table GUARDED_BY(pins_lock);
@@ -149,7 +146,7 @@ struct JNIEnvExt : public JNIEnv {
void PopFrame();
template<typename T>
- T AddLocalReference(mirror::Object* obj, bool jni_work_arounds)
+ T AddLocalReference(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Offset SegmentStateOffset();
@@ -216,7 +213,7 @@ class ScopedJniEnvLocalRefState {
};
template<typename T>
-inline T JNIEnvExt::AddLocalReference(mirror::Object* obj, bool jni_work_arounds) {
+inline T JNIEnvExt::AddLocalReference(mirror::Object* obj) {
IndirectRef ref = locals.Add(local_ref_cookie, obj);
// TODO: fix this to understand PushLocalFrame, so we can turn it on.
@@ -231,9 +228,6 @@ inline T JNIEnvExt::AddLocalReference(mirror::Object* obj, bool jni_work_arounds
}
}
- if (jni_work_arounds) {
- return reinterpret_cast<T>(obj);
- }
return reinterpret_cast<T>(ref);
}
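
With work_around_app_jni_bugs gone, AddLocalReference unconditionally hands out an indirect reference, so the boolean threads out of every caller. A before/after sketch of a hypothetical call site:

    // Before: each caller had to fetch and pass the VM-wide workaround flag.
    //   return env->AddLocalReference<jobject>(obj, env->vm->work_around_app_jni_bugs);
    // After: only the indirect-reference path remains.
    return env->AddLocalReference<jobject>(obj);
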
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 3d2fd7b0ae..7f974d0cf0 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -27,10 +27,11 @@
namespace art {
namespace mirror {
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline size_t Array::SizeOf() {
// This is safe from overflow because the array was already allocated, so we know it's sane.
- size_t component_size = GetClass<kVerifyFlags>()->GetComponentSize();
+ size_t component_size =
+ GetClass<kVerifyFlags, kDoReadBarrier>()->template GetComponentSize<kDoReadBarrier>();
// Don't need to check this since we already check this in GetClass.
int32_t component_count =
GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>();
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 772d303360..6bfd5c890f 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -41,7 +41,7 @@ class MANAGED Array : public Object {
const SirtRef<IntArray>& dimensions)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index f91cab1036..7b0b94cd78 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -19,6 +19,7 @@
#include "art_field-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "object-inl.h"
+#include "object_array-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -69,5 +70,25 @@ void ArtField::VisitRoots(RootCallback* callback, void* arg) {
}
}
+// TODO: we could speed up the search if fields are ordered by offsets.
+ArtField* ArtField::FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset) {
+ DCHECK(klass != nullptr);
+ ObjectArray<ArtField>* instance_fields = klass->GetIFields();
+ if (instance_fields != nullptr) {
+ for (int32_t i = 0, e = instance_fields->GetLength(); i < e; ++i) {
+ mirror::ArtField* field = instance_fields->GetWithoutChecks(i);
+ if (field->GetOffset().Uint32Value() == field_offset) {
+ return field;
+ }
+ }
+ }
+  // We did not find the field in this class; look in the superclass.
+  if (klass->GetSuperClass() != nullptr) {
+ return FindInstanceFieldWithOffset(klass->GetSuperClass(), field_offset);
+ } else {
+ return nullptr;
+ }
+}
+
} // namespace mirror
} // namespace art
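
FindInstanceFieldWithOffset gives the runtime a way to map a raw instance-field offset back to its ArtField, recursing up the superclass chain. A minimal usage sketch (the object and offset here are hypothetical):

    // E.g. diagnosing a field access at a known byte offset:
    mirror::ArtField* field =
        mirror::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), field_offset);
    if (field != nullptr) {
      LOG(INFO) << "offset " << field_offset << " is " << PrettyField(field);
    }
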
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index 0daa838a2c..ba70cc64e3 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -132,6 +132,10 @@ class MANAGED ArtField : public Object {
return (GetAccessFlags() & kAccVolatile) != 0;
}
+ // Returns an instance field with this offset in the given class or nullptr if not found.
+ static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
private:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index d5eccaffdc..6e1f0623bb 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -22,6 +22,7 @@
#include "dex_file.h"
#include "entrypoints/entrypoint_utils.h"
#include "object_array.h"
+#include "oat.h"
#include "runtime.h"
namespace art {
@@ -77,13 +78,11 @@ inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() {
inline uint32_t ArtMethod::GetCodeSize() {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
- uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode());
- if (code == 0) {
- return 0;
+ const void* code = EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode());
+ if (code == nullptr) {
+ return 0u;
}
- // TODO: make this Thumb2 specific
- code &= ~0x1;
- return reinterpret_cast<uint32_t*>(code)[-1];
+ return reinterpret_cast<const OatMethodHeader*>(code)[-1].code_size_;
}
inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
@@ -123,7 +122,8 @@ inline void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
return;
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (code == GetQuickResolutionTrampoline(class_linker)) {
+ if (code == GetQuickResolutionTrampoline(class_linker) ||
+ code == GetQuickToInterpreterBridgeTrampoline(class_linker)) {
return;
}
DCHECK(IsWithinQuickCode(pc))
@@ -153,26 +153,6 @@ inline void ArtMethod::SetPortableOatCodeOffset(uint32_t code_offset) {
SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
}
-inline uint32_t ArtMethod::GetOatMappingTableOffset() {
- DCHECK(!Runtime::Current()->IsStarted());
- return PointerToLowMemUInt32(GetMappingTable());
-}
-
-inline void ArtMethod::SetOatMappingTableOffset(uint32_t mapping_table_offset) {
- DCHECK(!Runtime::Current()->IsStarted());
- SetMappingTable(reinterpret_cast<const uint8_t*>(mapping_table_offset));
-}
-
-inline uint32_t ArtMethod::GetOatVmapTableOffset() {
- DCHECK(!Runtime::Current()->IsStarted());
- return PointerToLowMemUInt32(GetVmapTable());
-}
-
-inline void ArtMethod::SetOatVmapTableOffset(uint32_t vmap_table_offset) {
- DCHECK(!Runtime::Current()->IsStarted());
- SetVmapTable(reinterpret_cast<uint8_t*>(vmap_table_offset));
-}
-
inline void ArtMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) {
DCHECK(!Runtime::Current()->IsStarted());
SetNativeGcMap(reinterpret_cast<uint8_t*>(gc_map_offset));
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index ee5a0a4060..90bcbabdc5 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -230,10 +230,15 @@ uintptr_t ArtMethod::ToNativePc(const uint32_t dex_pc) {
return 0;
}
-uint32_t ArtMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc,
+uint32_t ArtMethod::FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_pc,
bool* has_no_move_exception) {
MethodHelper mh(this);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ // Set aside the exception while we resolve its type.
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location;
+ SirtRef<mirror::Throwable> exception(self, self->GetException(&throw_location));
+ self->ClearException();
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
// Iterate over the catch handlers associated with dex_pc.
@@ -245,21 +250,25 @@ uint32_t ArtMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc,
break;
}
// Does this catch exception type apply?
- Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
- if (iter_exception_type == NULL) {
- // The verifier should take care of resolving all exception classes early
+ Class* iter_exception_type = mh.GetClassFromTypeIdx(iter_type_idx);
+      if (iter_exception_type == nullptr) {
+ self->ClearException();
LOG(WARNING) << "Unresolved exception class when finding catch block: "
<< mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
- } else if (iter_exception_type->IsAssignableFrom(exception_type)) {
+ } else if (iter_exception_type->IsAssignableFrom(exception_type.get())) {
found_dex_pc = it.GetHandlerAddress();
break;
}
}
if (found_dex_pc != DexFile::kDexNoIndex) {
const Instruction* first_catch_instr =
- Instruction::At(&mh.GetCodeItem()->insns_[found_dex_pc]);
+ Instruction::At(&code_item->insns_[found_dex_pc]);
*has_no_move_exception = (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION);
}
+ // Put the exception back.
+ if (exception.get() != nullptr) {
+ self->SetException(throw_location, exception.get());
+ }
return found_dex_pc;
}
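
The FindCatchBlock rewrite hinges on one invariant: resolving a handler's type via GetClassFromTypeIdx may allocate and throw, so the pending exception must be parked first and reinstated afterwards. Distilled, using the hunk's own names:

    SirtRef<mirror::Throwable> exception(self, self->GetException(&throw_location));
    self->ClearException();                  // resolution below may itself throw
    Class* handler_type = mh.GetClassFromTypeIdx(iter_type_idx);
    if (handler_type == nullptr) {
      self->ClearException();                // drop e.g. NoClassDefFoundError, keep searching
    }
    // ... on exit, put the original exception back:
    if (exception.get() != nullptr) {
      self->SetException(throw_location, exception.get());
    }
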
@@ -306,13 +315,13 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
} else {
(*art_portable_invoke_stub)(this, args, args_size, self, result, shorty[0]);
}
- if (UNLIKELY(reinterpret_cast<intptr_t>(self->GetException(NULL)) == -1)) {
- // Unusual case where we were running LLVM generated code and an
+ if (UNLIKELY(self->GetException(nullptr) == Thread::GetDeoptimizationException())) {
+ // Unusual case where we were running generated code and an
// exception was thrown to force the activations to be removed from the
// stack. Continue execution in the interpreter.
self->ClearException();
ShadowFrame* shadow_frame = self->GetAndClearDeoptimizationShadowFrame(result);
- self->SetTopOfStack(NULL, 0);
+ self->SetTopOfStack(nullptr, 0);
self->SetTopOfShadowStack(shadow_frame);
interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
}
@@ -342,30 +351,15 @@ bool ArtMethod::IsRegistered() {
return native_method != jni_stub;
}
-extern "C" void art_work_around_app_jni_bugs(JNIEnv*, jobject);
void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_fast) {
DCHECK(Thread::Current() == self);
CHECK(IsNative()) << PrettyMethod(this);
CHECK(!IsFastNative()) << PrettyMethod(this);
CHECK(native_method != NULL) << PrettyMethod(this);
- if (!self->GetJniEnv()->vm->work_around_app_jni_bugs) {
- if (is_fast) {
- SetAccessFlags(GetAccessFlags() | kAccFastNative);
- }
- SetNativeMethod(native_method);
- } else {
- // We've been asked to associate this method with the given native method but are working
- // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct
- // the native method to runtime support and store the target somewhere runtime support will
- // find it.
-#if defined(__i386__) || defined(__x86_64__)
- UNIMPLEMENTED(FATAL);
-#else
- SetNativeMethod(reinterpret_cast<void*>(art_work_around_app_jni_bugs));
-#endif
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_),
- reinterpret_cast<const uint8_t*>(native_method), false);
+ if (is_fast) {
+ SetAccessFlags(GetAccessFlags() | kAccFastNative);
}
+ SetNativeMethod(native_method);
}
void ArtMethod::UnregisterNative(Thread* self) {
@@ -374,5 +368,43 @@ void ArtMethod::UnregisterNative(Thread* self) {
RegisterNative(self, GetJniDlsymLookupStub(), false);
}
+const void* ArtMethod::GetOatCodePointer() {
+ if (IsPortableCompiled() || IsNative() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
+ return nullptr;
+ }
+ Runtime* runtime = Runtime::Current();
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ // On failure, instead of nullptr we get the quick-to-interpreter-bridge (but not the trampoline).
+ DCHECK(entry_point != GetQuickToInterpreterBridgeTrampoline(runtime->GetClassLinker()));
+ if (entry_point == GetQuickToInterpreterBridge()) {
+ return nullptr;
+ }
+ return EntryPointToCodePointer(entry_point);
+}
+
+const uint8_t* ArtMethod::GetMappingTable() {
+ const void* code = GetOatCodePointer();
+ if (code == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code) - offset;
+}
+
+const uint8_t* ArtMethod::GetVmapTable() {
+ const void* code = GetOatCodePointer();
+ if (code == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code) - offset;
+}
+
} // namespace mirror
} // namespace art
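
GetCodeSize, GetMappingTable and GetVmapTable all index [-1] from the code pointer, which works only because the OatMethodHeader is emitted immediately before each method's code (see the oat.h hunk later in this change). Schematically:

    //   ...gap... | OatMethodHeader | code bytes ...
    //                               ^ code pointer (entry point, Thumb bit cleared)
    const OatMethodHeader& hdr = reinterpret_cast<const OatMethodHeader*>(code)[-1];
    uint32_t size = hdr.code_size_;  // in bytes
    // Table offsets count backwards from the code start; 0 means "no table".
    const uint8_t* mapping = (hdr.mapping_table_offset_ != 0u)
        ? reinterpret_cast<const uint8_t*>(code) - hdr.mapping_table_offset_
        : nullptr;
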
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index fd5ac19435..b3b9ca7dc1 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -21,6 +21,7 @@
#include "dex_file.h"
#include "invoke_type.h"
#include "modifiers.h"
+#include "oat.h"
#include "object.h"
#include "object_callbacks.h"
@@ -261,7 +262,6 @@ class MANAGED ArtMethod : public Object {
EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code, false);
}
-
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -270,9 +270,11 @@ class MANAGED ArtMethod : public Object {
return pc == 0;
}
/*
- * During a stack walk, a return PC may point to the end of the code + 1
- * (in the case that the last instruction is a call that isn't expected to
+ * During a stack walk, a return PC may point past-the-end of the code
+ * in the case that the last instruction is a call that isn't expected to
* return. Thus, we check <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
*/
return (code <= pc && pc <= code + GetCodeSize());
}
@@ -284,37 +286,20 @@ class MANAGED ArtMethod : public Object {
void SetQuickOatCodeOffset(uint32_t code_offset);
void SetPortableOatCodeOffset(uint32_t code_offset);
- // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
- const uint8_t* GetMappingTable() {
- return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_mapping_table_),
- false);
- }
-
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetMappingTable(const uint8_t* mapping_table) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_mapping_table_), mapping_table, false);
+ static const void* EntryPointToCodePointer(const void* entry_point) ALWAYS_INLINE {
+ uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
+ code &= ~0x1; // TODO: Make this Thumb2 specific.
+ return reinterpret_cast<const void*>(code);
}
- uint32_t GetOatMappingTableOffset();
+ // Actual pointer to compiled oat code or nullptr.
+ const void* GetOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetOatMappingTableOffset(uint32_t mapping_table_offset);
+ // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
+ const uint8_t* GetMappingTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
- const uint8_t* GetVmapTable() {
- return GetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_vmap_table_),
- false);
- }
-
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetVmapTable(const uint8_t* vmap_table) {
- SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_vmap_table_), vmap_table, false);
- }
-
- uint32_t GetOatVmapTableOffset();
-
- void SetOatVmapTableOffset(uint32_t vmap_table_offset);
+ const uint8_t* GetVmapTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap() {
return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), false);
@@ -418,12 +403,15 @@ class MANAGED ArtMethod : public Object {
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
- uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc, bool* has_no_move_exception)
+ uint32_t FindCatchBlock(SirtRef<Class>& exception_type, uint32_t dex_pc,
+ bool* has_no_move_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetClass(Class* java_lang_reflect_ArtMethod);
+ template <bool kDoReadBarrier = true>
static Class* GetJavaLangReflectArtMethod() {
+ // This does not need a RB because it is a root.
return java_lang_reflect_ArtMethod_;
}
@@ -466,20 +454,6 @@ class MANAGED ArtMethod : public Object {
// offsets for the quick compiler and dex PCs for the portable.
uint64_t gc_map_;
- // --- Quick compiler meta-data. ---
- // TODO: merge and place in native heap, such as done with the code size.
-
- // Pointer to a data structure created by the quick compiler to map between dex PCs and native
- // PCs, and vice-versa.
- uint64_t quick_mapping_table_;
-
- // When a register is promoted into a register, the spill mask holds which registers hold dex
- // registers. The first promoted register's corresponding dex register is vmap_table_[1], the Nth
- // is vmap_table_[N]. vmap_table_[0] holds the length of the table.
- uint64_t quick_vmap_table_;
-
- // --- End of quick compiler meta-data. ---
-
// Access flags; low 16 bits are defined by spec.
uint32_t access_flags_;
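
EntryPointToCodePointer exists because Thumb2 entry points carry the instruction-set mode in bit 0; clearing it recovers the address the OatMethodHeader arithmetic needs. A worked example with a made-up address:

    // Hypothetical Thumb2 entry point: 0x70001235 executes code stored at 0x70001234.
    const void* entry = reinterpret_cast<const void*>(0x70001235);
    const void* code = mirror::ArtMethod::EntryPointToCodePointer(entry);
    // code == (void*)0x70001234; ARM/x86/MIPS entry points already have bit 0
    // clear, so the call is a no-op there.
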
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 025e62a5cb..3c02aa0b49 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -478,6 +478,19 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor)
VisitStaticFieldsReferences<kVisitClass>(this, visitor);
}
+template<bool kDoReadBarrier>
+bool Class::IsArtFieldClass() {
+ Class* java_lang_Class = GetClass<kVerifyNone, kDoReadBarrier>();
+ Class* java_lang_reflect_ArtField =
+ java_lang_Class->GetInstanceField(0)->GetClass<kVerifyNone, kDoReadBarrier>();
+ return this == java_lang_reflect_ArtField;
+}
+
+template<bool kDoReadBarrier>
+bool Class::IsArtMethodClass() {
+ return this == ArtMethod::GetJavaLangReflectArtMethod<kDoReadBarrier>();
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 6dbb29dae3..ad86e1fc88 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -328,16 +328,6 @@ bool Class::IsThrowableClass() {
return WellKnownClasses::ToClass(WellKnownClasses::java_lang_Throwable)->IsAssignableFrom(this);
}
-bool Class::IsArtFieldClass() {
- Class* java_lang_Class = GetClass();
- Class* java_lang_reflect_ArtField = java_lang_Class->GetInstanceField(0)->GetClass();
- return this == java_lang_reflect_ArtField;
-}
-
-bool Class::IsArtMethodClass() {
- return this == ArtMethod::GetJavaLangReflectArtMethod();
-}
-
void Class::SetClassLoader(ClassLoader* new_class_loader) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d955b9791f..226dee0c9a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -364,9 +364,9 @@ class MANAGED Class : public Object {
return depth;
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
bool IsArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetComponentType<kVerifyFlags>() != NULL;
+ return GetComponentType<kVerifyFlags, kDoReadBarrier>() != NULL;
}
bool IsClassClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -375,17 +375,19 @@ class MANAGED Class : public Object {
bool IsThrowableClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<bool kDoReadBarrier = true>
bool IsArtFieldClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template<bool kDoReadBarrier = true>
bool IsArtMethodClass();
static MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Class, kVerifyFlags>(ComponentTypeOffset(), false);
+ return GetFieldObject<Class, kVerifyFlags, kDoReadBarrier>(ComponentTypeOffset(), false);
}
void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -395,8 +397,10 @@ class MANAGED Class : public Object {
SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type, false);
}
+ template<bool kDoReadBarrier = true>
size_t GetComponentSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return Primitive::ComponentSize(GetComponentType()->GetPrimitiveType());
+ return Primitive::ComponentSize(
+ GetComponentType<kDefaultVerifyFlags, kDoReadBarrier>()->GetPrimitiveType());
}
bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -427,7 +431,7 @@ class MANAGED Class : public Object {
return IsClassClass() || IsArrayClass();
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index a6db387a08..04517ec28b 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -34,9 +34,10 @@
namespace art {
namespace mirror {
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline Class* Object::GetClass() {
- return GetFieldObject<Class, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
+ return GetFieldObject<Class, kVerifyFlags, kDoReadBarrier>(
+ OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
}
template<VerifyObjectFlags kVerifyFlags>
@@ -51,13 +52,14 @@ inline void Object::SetClass(Class* new_klass) {
OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false);
}
-inline LockWord Object::GetLockWord() {
- return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), true));
+inline LockWord Object::GetLockWord(bool as_volatile) {
+ return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), as_volatile));
}
-inline void Object::SetLockWord(LockWord new_val) {
+inline void Object::SetLockWord(LockWord new_val, bool as_volatile) {
// Force use of non-transactional mode and do not check.
- SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue(), true);
+ SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue(),
+ as_volatile);
}
inline bool Object::CasLockWord(LockWord old_val, LockWord new_val) {
@@ -104,15 +106,42 @@ inline Object* Object::GetReadBarrierPointer() {
#endif
}
-inline void Object::SetReadBarrierPointer(Object* rb_pointer) {
+inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
DCHECK(kUseBakerOrBrooksReadBarrier);
// We don't mark the card as this occurs as part of object allocation. Not all objects have
// backing cards, such as large objects.
SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
- OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), rb_pointer, false);
+ OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_), rb_ptr, false);
+#else
+ LOG(FATAL) << "Unreachable";
+#endif
+}
+
+inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr) {
+#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
+ DCHECK(kUseBakerOrBrooksReadBarrier);
+ MemberOffset offset = OFFSET_OF_OBJECT_MEMBER(Object, x_rb_ptr_);
+ byte* raw_addr = reinterpret_cast<byte*>(this) + offset.SizeValue();
+ HeapReference<Object>* ref = reinterpret_cast<HeapReference<Object>*>(raw_addr);
+ HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_rb_ptr));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(rb_ptr));
+ uint32_t expected_val = expected_ref.reference_;
+ uint32_t new_val;
+ do {
+ uint32_t old_val = ref->reference_;
+ if (old_val != expected_val) {
+ // Lost the race.
+ return false;
+ }
+ new_val = new_ref.reference_;
+ } while (!__sync_bool_compare_and_swap(
+ reinterpret_cast<uint32_t*>(raw_addr), expected_val, new_val));
+ DCHECK_EQ(new_val, ref->reference_);
+ return true;
#else
LOG(FATAL) << "Unreachable";
+ return false;
#endif
}
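
AtomicSetReadBarrierPointer is a single-field compare-and-swap: it installs rb_ptr only while the field still holds expected_rb_ptr, and a false return means another thread published first. A hypothetical caller (names invented for illustration):

    // Try to flip the pointer from the from-space value to the forwarded one exactly once.
    if (obj->AtomicSetReadBarrierPointer(from_ref, to_ref)) {
      // This thread won the race and owns any follow-up bookkeeping.
    } else {
      // Lost the race: another thread already updated the field; re-read and move on.
    }
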
@@ -146,16 +175,17 @@ inline bool Object::InstanceOf(Class* klass) {
return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline bool Object::IsClass() {
- Class* java_lang_Class = GetClass<kVerifyFlags>()->GetClass();
- return GetClass<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ==
+ Class* java_lang_Class =
+ GetClass<kVerifyFlags, kDoReadBarrier>()->template GetClass<kVerifyFlags, kDoReadBarrier>();
+ return GetClass<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis), kDoReadBarrier>() ==
java_lang_Class;
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline Class* Object::AsClass() {
- DCHECK(IsClass<kVerifyFlags>());
+ DCHECK((IsClass<kVerifyFlags, kDoReadBarrier>()));
return down_cast<Class*>(this);
}
@@ -172,14 +202,15 @@ inline ObjectArray<T>* Object::AsObjectArray() {
return down_cast<ObjectArray<T>*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline bool Object::IsArrayInstance() {
- return GetClass<kVerifyFlags>()->IsArrayClass();
+ return GetClass<kVerifyFlags, kDoReadBarrier>()->
+ template IsArrayClass<kVerifyFlags, kDoReadBarrier>();
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline bool Object::IsArtField() {
- return GetClass<kVerifyFlags>()->IsArtFieldClass();
+ return GetClass<kVerifyFlags, kDoReadBarrier>()->template IsArtFieldClass<kDoReadBarrier>();
}
template<VerifyObjectFlags kVerifyFlags>
@@ -188,9 +219,9 @@ inline ArtField* Object::AsArtField() {
return down_cast<ArtField*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline bool Object::IsArtMethod() {
- return GetClass<kVerifyFlags>()->IsArtMethodClass();
+ return GetClass<kVerifyFlags, kDoReadBarrier>()->template IsArtMethodClass<kDoReadBarrier>();
}
template<VerifyObjectFlags kVerifyFlags>
@@ -210,9 +241,9 @@ inline Reference* Object::AsReference() {
return down_cast<Reference*>(this);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline Array* Object::AsArray() {
- DCHECK(IsArrayInstance<kVerifyFlags>());
+ DCHECK((IsArrayInstance<kVerifyFlags, kDoReadBarrier>()));
return down_cast<Array*>(this);
}
@@ -338,20 +369,21 @@ inline bool Object::IsPhantomReferenceInstance() {
return GetClass<kVerifyFlags>()->IsPhantomReferenceClass();
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline size_t Object::SizeOf() {
size_t result;
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- if (IsArrayInstance<kVerifyFlags>()) {
- result = AsArray<kNewFlags>()->template SizeOf<kNewFlags>();
- } else if (IsClass<kNewFlags>()) {
- result = AsClass<kNewFlags>()->template SizeOf<kNewFlags>();
+ if (IsArrayInstance<kVerifyFlags, kDoReadBarrier>()) {
+ result = AsArray<kNewFlags, kDoReadBarrier>()->template SizeOf<kNewFlags, kDoReadBarrier>();
+ } else if (IsClass<kNewFlags, kDoReadBarrier>()) {
+ result = AsClass<kNewFlags, kDoReadBarrier>()->template SizeOf<kNewFlags, kDoReadBarrier>();
} else {
- result = GetClass<kNewFlags>()->GetObjectSize();
+ result = GetClass<kNewFlags, kDoReadBarrier>()->GetObjectSize();
}
- DCHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(GetClass<kNewFlags>());
- DCHECK(!IsArtField<kNewFlags>() || result == sizeof(ArtField));
- DCHECK(!IsArtMethod<kNewFlags>() || result == sizeof(ArtMethod));
+ DCHECK_GE(result, sizeof(Object))
+ << " class=" << PrettyTypeOf(GetClass<kNewFlags, kDoReadBarrier>());
+ DCHECK(!(IsArtField<kNewFlags, kDoReadBarrier>()) || result == sizeof(ArtField));
+ DCHECK(!(IsArtMethod<kNewFlags, kDoReadBarrier>()) || result == sizeof(ArtMethod));
return result;
}
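
The kDoReadBarrier parameter threaded through object-inl.h lets collector-internal code walk objects without re-entering the very barrier it is implementing. Sketch of the two instantiations (the GC-side form assumes the klass_ field is already known-valid):

    size_t gc_bytes = obj->SizeOf<kVerifyNone, false>();  // barrier skipped: GC internals only
    size_t bytes = obj->SizeOf();                         // defaults: barrier taken as usual
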
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index be7e9f20ec..766bbc9892 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -150,7 +150,7 @@ int32_t Object::GenerateIdentityHashCode() {
int32_t Object::IdentityHashCode() const {
mirror::Object* current_this = const_cast<mirror::Object*>(this);
while (true) {
- LockWord lw = current_this->GetLockWord();
+ LockWord lw = current_this->GetLockWord(false);
switch (lw.GetState()) {
case LockWord::kUnlocked: {
// Try to compare and swap in a new hash, if we succeed we will return the hash on the next
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index f652202999..370b3b89cb 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -72,14 +72,16 @@ class MANAGED LOCKABLE Object {
return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetReadBarrierPointer(Object* rb_pointer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
@@ -89,7 +91,7 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -100,8 +102,10 @@ class MANAGED LOCKABLE Object {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
}
- LockWord GetLockWord();
- void SetLockWord(LockWord new_val);
+ // As volatile can be false if the mutators are suspended. This is an optimization since it
+ // avoids the barriers.
+ LockWord GetLockWord(bool as_volatile);
+ void SetLockWord(LockWord new_val, bool as_volatile);
bool CasLockWord(LockWord old_val, LockWord new_val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
@@ -114,9 +118,9 @@ class MANAGED LOCKABLE Object {
void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -124,9 +128,9 @@ class MANAGED LOCKABLE Object {
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -159,12 +163,12 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
bool IsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* AsArtMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
bool IsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtField* AsArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
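
GetLockWord/SetLockWord now take as_volatile so callers can drop the memory barriers when no mutator can race on the word. The monitor.cc hunks below show the pattern; in miniature:

    LockWord quiet = obj->GetLockWord(false);  // OK: mutators suspended (e.g. during deflation)
    LockWord racy = obj->GetLockWord(true);    // required: word may change under us (MonitorEnter)
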
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 5ff049081f..26b1fd1093 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -83,9 +83,9 @@ class MANAGED ObjectArray : public Array {
template<const bool kVisitClass, typename Visitor>
void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
- private:
static MemberOffset OffsetOfElement(int32_t i);
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray);
};
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index bbc7dd0d82..38b77d19c1 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -111,7 +111,7 @@ bool Monitor::Install(Thread* self) {
MutexLock mu(self, monitor_lock_); // Uncontended mutex acquisition as monitor isn't yet public.
CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
// Propagate the lock state.
- LockWord lw(obj_->GetLockWord());
+ LockWord lw(obj_->GetLockWord(false));
switch (lw.GetState()) {
case LockWord::kThinLocked: {
CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
@@ -574,7 +574,8 @@ void Monitor::NotifyAll(Thread* self) {
bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
DCHECK(obj != nullptr);
- LockWord lw(obj->GetLockWord());
+ // Don't need volatile since we only deflate with mutators suspended.
+ LockWord lw(obj->GetLockWord(false));
// If the lock isn't an inflated monitor, then we don't need to deflate anything.
if (lw.GetState() == LockWord::kFatLocked) {
Monitor* monitor = lw.FatLockMonitor();
@@ -595,14 +596,15 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
return false;
}
// Deflate to a thin lock.
- obj->SetLockWord(LockWord::FromThinLockId(owner->GetThreadId(), monitor->lock_count_));
- VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / " << monitor->lock_count_;
+ obj->SetLockWord(LockWord::FromThinLockId(owner->GetThreadId(), monitor->lock_count_), false);
+ VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / "
+ << monitor->lock_count_;
} else if (monitor->HasHashCode()) {
- obj->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()));
+ obj->SetLockWord(LockWord::FromHashCode(monitor->GetHashCode()), false);
VLOG(monitor) << "Deflated " << obj << " to hash monitor " << monitor->GetHashCode();
} else {
// No lock and no hash, just put an empty lock word inside the object.
- obj->SetLockWord(LockWord());
+ obj->SetLockWord(LockWord(), false);
VLOG(monitor) << "Deflated" << obj << " to empty lock word";
}
// The monitor is deflated, mark the object as nullptr so that we know to delete it during the
@@ -626,7 +628,7 @@ void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t
VLOG(monitor) << "monitor: thread " << owner->GetThreadId()
<< " created monitor " << m.get() << " for object " << obj;
Runtime::Current()->GetMonitorList()->Add(m.release());
- CHECK_EQ(obj->GetLockWord().GetState(), LockWord::kFatLocked);
+ CHECK_EQ(obj->GetLockWord(true).GetState(), LockWord::kFatLocked);
}
}
@@ -642,12 +644,12 @@ void Monitor::InflateThinLocked(Thread* self, SirtRef<mirror::Object>& obj, Lock
// Suspend the owner, inflate. First change to blocked and give up mutator_lock_.
ScopedThreadStateChange tsc(self, kBlocked);
self->SetMonitorEnterObject(obj.get());
- if (lock_word == obj->GetLockWord()) { // If lock word hasn't changed.
+ if (lock_word == obj->GetLockWord(true)) { // If lock word hasn't changed.
bool timed_out;
Thread* owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
if (owner != nullptr) {
// We succeeded in suspending the thread, check the lock's status didn't change.
- lock_word = obj->GetLockWord();
+ lock_word = obj->GetLockWord(true);
if (lock_word.GetState() == LockWord::kThinLocked &&
lock_word.ThinLockOwner() == owner_thread_id) {
// Go ahead and inflate the lock.
@@ -680,7 +682,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
size_t contention_count = 0;
SirtRef<mirror::Object> sirt_obj(self, obj);
while (true) {
- LockWord lock_word = sirt_obj->GetLockWord();
+ LockWord lock_word = sirt_obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kUnlocked: {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0));
@@ -697,7 +699,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
uint32_t new_count = lock_word.ThinLockCount() + 1;
if (LIKELY(new_count <= LockWord::kThinLockMaxCount)) {
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- sirt_obj->SetLockWord(thin_locked);
+ sirt_obj->SetLockWord(thin_locked, true);
return sirt_obj.get(); // Success!
} else {
// We'd overflow the recursion count, so inflate the monitor.
@@ -737,13 +739,13 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
DCHECK(self != NULL);
DCHECK(obj != NULL);
obj = FakeUnlock(obj);
- LockWord lock_word = obj->GetLockWord();
+ LockWord lock_word = obj->GetLockWord(true);
SirtRef<mirror::Object> sirt_obj(self, obj);
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
case LockWord::kUnlocked:
- FailedUnlock(sirt_obj.get(), self, NULL, NULL);
+ FailedUnlock(sirt_obj.get(), self, nullptr, nullptr);
return false; // Failure.
case LockWord::kThinLocked: {
uint32_t thread_id = self->GetThreadId();
@@ -752,16 +754,16 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
// TODO: there's a race here with the owner dying while we unlock.
Thread* owner =
Runtime::Current()->GetThreadList()->FindThreadByThreadId(lock_word.ThinLockOwner());
- FailedUnlock(sirt_obj.get(), self, owner, NULL);
+ FailedUnlock(sirt_obj.get(), self, owner, nullptr);
return false; // Failure.
} else {
// We own the lock, decrease the recursion count.
if (lock_word.ThinLockCount() != 0) {
uint32_t new_count = lock_word.ThinLockCount() - 1;
LockWord thin_locked(LockWord::FromThinLockId(thread_id, new_count));
- sirt_obj->SetLockWord(thin_locked);
+ sirt_obj->SetLockWord(thin_locked, true);
} else {
- sirt_obj->SetLockWord(LockWord());
+ sirt_obj->SetLockWord(LockWord(), true);
}
return true; // Success!
}
@@ -782,10 +784,9 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
*/
void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
-
- LockWord lock_word = obj->GetLockWord();
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
+ LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
@@ -801,7 +802,7 @@ void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
} else {
// We own the lock, inflate to enqueue ourself on the Monitor.
Inflate(self, self, obj, 0);
- lock_word = obj->GetLockWord();
+ lock_word = obj->GetLockWord(true);
}
break;
}
@@ -817,10 +818,9 @@ void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
}
void Monitor::DoNotify(Thread* self, mirror::Object* obj, bool notify_all) {
- DCHECK(self != NULL);
- DCHECK(obj != NULL);
-
- LockWord lock_word = obj->GetLockWord();
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
+ LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
@@ -855,9 +855,8 @@ void Monitor::DoNotify(Thread* self, mirror::Object* obj, bool notify_all) {
}
uint32_t Monitor::GetLockOwnerThreadId(mirror::Object* obj) {
- DCHECK(obj != NULL);
-
- LockWord lock_word = obj->GetLockWord();
+ DCHECK(obj != nullptr);
+ LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kHashCode:
// Fall-through.
@@ -902,7 +901,7 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
if (pretty_object == nullptr) {
os << wait_message << "an unknown object";
} else {
- if ((pretty_object->GetLockWord().GetState() == LockWord::kThinLocked) &&
+ if ((pretty_object->GetLockWord(true).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
// Getting the identity hashcode here would result in lock inflation and suspension of the
// current thread, which isn't safe if this is the only runnable thread.
@@ -1112,7 +1111,7 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
static mirror::Object* MonitorDeflateCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (Monitor::Deflate(reinterpret_cast<Thread*>(arg), object)) {
- DCHECK_NE(object->GetLockWord().GetState(), LockWord::kFatLocked);
+ DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
// If we deflated, return nullptr so that the monitor gets removed from the array.
return nullptr;
}
@@ -1126,9 +1125,8 @@ void MonitorList::DeflateMonitors() {
}
MonitorInfo::MonitorInfo(mirror::Object* obj) : owner_(NULL), entry_count_(0) {
- DCHECK(obj != NULL);
-
- LockWord lock_word = obj->GetLockWord();
+ DCHECK(obj != nullptr);
+ LockWord lock_word = obj->GetLockWord(true);
switch (lock_word.GetState()) {
case LockWord::kUnlocked:
// Fall-through.
diff --git a/runtime/monitor.h b/runtime/monitor.h
index c459278269..0b80892ba9 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -231,6 +231,10 @@ class MonitorList {
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
+ // During sweeping we may free an object and on a separate thread have an object created using
+ // the newly freed memory. That object may then have its lock-word inflated and a monitor created.
+ // If we allow new monitor registration during sweeping this monitor may be incorrectly freed as
+ // the object wasn't marked when sweeping began.
bool allow_new_monitors_ GUARDED_BY(monitor_list_lock_);
Mutex monitor_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ConditionVariable monitor_add_condition_ GUARDED_BY(monitor_list_lock_);
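
The new comment documents why allow_new_monitors_ exists; the gate itself lives in MonitorList::Add, outside this diff. A sketch of the shape the comment implies, assuming the list_ member and the condition-variable API used elsewhere in the runtime:

    void MonitorList::Add(Monitor* m) {
      Thread* self = Thread::Current();
      MutexLock mu(self, monitor_list_lock_);
      while (UNLIKELY(!allow_new_monitors_)) {
        // Block registration until sweeping finishes and re-allows adds.
        monitor_add_condition_.WaitHoldingLocks(self);
      }
      list_.push_back(m);
    }
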
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 6af16f4812..223107075c 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -262,7 +262,7 @@ static double GetDoubleProperty(const char* property, double minValue, double ma
static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
jstring javaPkgname, jboolean defer) {
const bool kVerboseLogging = false; // Spammy logging.
- const bool kDebugLogging = true; // Logging useful for debugging.
+ const bool kReasonLogging = true; // Logging of reason for returning JNI_TRUE.
ScopedUtfChars filename(env, javaFilename);
if ((filename.c_str() == nullptr) || !OS::FileExists(filename.c_str())) {
@@ -312,7 +312,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
int e2 = stat(prev_profile_file.c_str(), &prevstat);
if (e1 < 0) {
// No profile file, need to run dex2oat
- if (kDebugLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded profile file " << profile_file << " doesn't exist";
}
return JNI_TRUE;
@@ -330,12 +330,12 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
bool newOk = ProfileHelper::LoadTopKSamples(newTopK, profile_file, topKThreshold);
bool oldOk = ProfileHelper::LoadTopKSamples(oldTopK, prev_profile_file, topKThreshold);
if (!newOk || !oldOk) {
- if (kDebugLogging) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded Ignoring invalid profiles: "
<< (newOk ? "" : profile_file) << " " << (oldOk ? "" : prev_profile_file);
}
} else if (newTopK.empty()) {
- if (kDebugLogging && kVerboseLogging) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded empty profile: " << profile_file;
}
// If the new topK is empty we shouldn't optimize so we leave the changePercent at 0.0.
@@ -345,7 +345,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
std::inserter(diff, diff.end()));
// TODO: consider using the usedPercentage instead of the plain diff count.
changePercent = 100.0 * static_cast<double>(diff.size()) / static_cast<double>(newTopK.size());
- if (kDebugLogging && kVerboseLogging) {
+ if (kVerboseLogging) {
std::set<std::string>::iterator end = diff.end();
for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
LOG(INFO) << "DexFile_isDexOptNeeded new in topK: " << *it;
@@ -354,7 +354,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
}
if (changePercent > changeThreshold) {
- if (kDebugLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded size of new profile file " << profile_file <<
" is significantly different from old profile file " << prev_profile_file << " (top "
<< topKThreshold << "% samples changed in proportion of " << changePercent << "%)";
@@ -366,13 +366,12 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
}
} else {
// Previous profile does not exist. Make a copy of the current one.
- if (kDebugLogging) {
+ if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded previous profile doesn't exist: " << prev_profile_file;
}
if (!defer) {
CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
}
- return JNI_TRUE;
}
}
@@ -389,7 +388,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
error_msg.clear();
} else {
const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename.c_str(), NULL,
- kDebugLogging);
+ kReasonLogging);
if (oat_dex_file != nullptr) {
uint32_t location_checksum;
// If its not possible to read the classes.dex assume up-to-date as we won't be able to
@@ -423,7 +422,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
std::string cache_location(GetDalvikCacheFilenameOrDie(filename.c_str()));
oat_file.reset(OatFile::Open(cache_location, filename.c_str(), NULL, false, &error_msg));
if (oat_file.get() == nullptr) {
- if (kDebugLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
<< " does not exist for " << filename.c_str() << ": " << error_msg;
}
@@ -436,7 +435,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
const ImageHeader& image_header = space->AsImageSpace()->GetImageHeader();
if (oat_file->GetOatHeader().GetImageFileLocationOatChecksum() !=
image_header.GetOatChecksum()) {
- if (kDebugLogging) {
+ if (kReasonLogging) {
ScopedObjectAccess soa(env);
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
<< " has out-of-date oat checksum compared to "
@@ -446,7 +445,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
}
if (oat_file->GetOatHeader().GetImageFileLocationOatDataBegin()
!= reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin())) {
- if (kDebugLogging) {
+ if (kReasonLogging) {
ScopedObjectAccess soa(env);
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
<< " has out-of-date oat begin compared to "
@@ -459,7 +458,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
uint32_t location_checksum;
if (!DexFile::GetChecksum(filename.c_str(), &location_checksum, &error_msg)) {
- if (kDebugLogging) {
+ if (kReasonLogging) {
LOG(ERROR) << "DexFile_isDexOptNeeded failed to compute checksum of " << filename.c_str()
<< " (error " << error_msg << ")";
}
@@ -468,7 +467,7 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename.c_str(), location_checksum,
&error_msg)) {
- if (kDebugLogging) {
+ if (kReasonLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
<< " has out-of-date checksum compared to " << filename.c_str()
<< " (error " << error_msg << ")";
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 5c5eaa1ad4..76c58662d4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -164,23 +164,12 @@ static jstring VMRuntime_vmLibrary(JNIEnv* env, jobject) {
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv* env, jobject, jint targetSdkVersion) {
- // This is the target SDK version of the app we're about to run.
+  // This is the target SDK version of the app we're about to run. It is intended that this is a place
+ // where workarounds can be enabled.
// Note that targetSdkVersion may be CUR_DEVELOPMENT (10000).
// Note that targetSdkVersion may be 0, meaning "current".
- if (targetSdkVersion > 0 && targetSdkVersion <= 13 /* honeycomb-mr2 */) {
- Runtime* runtime = Runtime::Current();
- JavaVMExt* vm = runtime->GetJavaVM();
- if (vm->check_jni) {
- LOG(INFO) << "CheckJNI enabled: not enabling JNI app bug workarounds.";
- } else {
- LOG(INFO) << "Turning on JNI app bug workarounds for target SDK version "
- << targetSdkVersion << "...";
-
- vm->work_around_app_jni_bugs = true;
- LOG(WARNING) << "Permenantly disabling heap compaction due to jni workarounds";
- Runtime::Current()->GetHeap()->DisableCompaction();
- }
- }
+ UNUSED(env);
+ UNUSED(targetSdkVersion);
}
static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 0b84005f5a..86db893cf8 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -88,6 +88,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
+ LOG(ERROR) << "Unexpected thread state: " << internal_thread_state;
return -1; // Unreachable.
}
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 246e090f22..c1a48e9138 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '2', '0', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '2', '2', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
@@ -348,8 +348,6 @@ OatMethodOffsets::OatMethodOffsets()
frame_size_in_bytes_(0),
core_spill_mask_(0),
fp_spill_mask_(0),
- mapping_table_offset_(0),
- vmap_table_offset_(0),
gc_map_offset_(0)
{}
@@ -357,19 +355,30 @@ OatMethodOffsets::OatMethodOffsets(uint32_t code_offset,
uint32_t frame_size_in_bytes,
uint32_t core_spill_mask,
uint32_t fp_spill_mask,
- uint32_t mapping_table_offset,
- uint32_t vmap_table_offset,
uint32_t gc_map_offset
)
: code_offset_(code_offset),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask),
fp_spill_mask_(fp_spill_mask),
- mapping_table_offset_(mapping_table_offset),
- vmap_table_offset_(vmap_table_offset),
gc_map_offset_(gc_map_offset)
{}
OatMethodOffsets::~OatMethodOffsets() {}
+OatMethodHeader::OatMethodHeader()
+ : mapping_table_offset_(0),
+ vmap_table_offset_(0),
+ code_size_(0)
+{}
+
+OatMethodHeader::OatMethodHeader(uint32_t mapping_table_offset, uint32_t vmap_table_offset,
+ uint32_t code_size)
+ : mapping_table_offset_(mapping_table_offset),
+ vmap_table_offset_(vmap_table_offset),
+ code_size_(code_size)
+{}
+
+OatMethodHeader::~OatMethodHeader() {}
+
} // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index 2851f5c14d..e9dfae993d 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -119,9 +119,9 @@ class PACKED(4) OatHeader {
DISALLOW_COPY_AND_ASSIGN(OatHeader);
};
-// OatMethodOffsets are currently 7x32-bits=224-bits long, so if we can
+// OatMethodOffsets are currently 5x32-bits=160-bits long, so if we can
// save even one OatMethodOffsets struct, the more complicated encoding
-// using a bitmap pays for itself since few classes will have 224
+// using a bitmap pays for itself since few classes will have 160
// methods.
enum OatClassType {
kOatClassAllCompiled = 0, // OatClass is followed by an OatMethodOffsets for each method.
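
To make the size trade-off concrete, here is a minimal sketch of the arithmetic, assuming the 160-bit figure above (BitmapBits is an illustrative helper, not an ART API):

    #include <cstdint>

    // One OatMethodOffsets struct is 5 x 32 = 160 bits, per the comment above.
    constexpr uint32_t kOatMethodOffsetsBits = 5 * 32;

    // The bitmap costs one bit per method, rounded up to a whole 32-bit word.
    constexpr uint32_t BitmapBits(uint32_t num_methods) {
      return ((num_methods + 31u) / 32u) * 32u;
    }

    // For a 100-method class with even one uncompiled method, the bitmap costs
    // 128 bits but saves at least 160 bits, so it already pays for itself.
    static_assert(BitmapBits(100) < kOatMethodOffsetsBits, "bitmap pays off");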
@@ -140,8 +140,6 @@ class PACKED(4) OatMethodOffsets {
uint32_t frame_size_in_bytes,
uint32_t core_spill_mask,
uint32_t fp_spill_mask,
- uint32_t mapping_table_offset,
- uint32_t vmap_table_offset,
uint32_t gc_map_offset);
~OatMethodOffsets();
@@ -150,9 +148,25 @@ class PACKED(4) OatMethodOffsets {
uint32_t frame_size_in_bytes_;
uint32_t core_spill_mask_;
uint32_t fp_spill_mask_;
+ uint32_t gc_map_offset_;
+};
+
+// OatMethodHeader precedes the raw code chunk generated by the Quick compiler.
+class PACKED(4) OatMethodHeader {
+ public:
+ OatMethodHeader();
+
+ explicit OatMethodHeader(uint32_t mapping_table_offset, uint32_t vmap_table_offset,
+ uint32_t code_size);
+
+ ~OatMethodHeader();
+
+ // The offset in bytes from the start of the mapping table to the end of the header.
uint32_t mapping_table_offset_;
+ // The offset in bytes from the start of the vmap table to the end of the header.
uint32_t vmap_table_offset_;
- uint32_t gc_map_offset_;
+ // The code size in bytes.
+ uint32_t code_size_;
};
} // namespace art
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
new file mode 100644
index 0000000000..00ae7977b9
--- /dev/null
+++ b/runtime/oat_file-inl.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_FILE_INL_H_
+#define ART_RUNTIME_OAT_FILE_INL_H_
+
+#include "oat_file.h"
+
+namespace art {
+
+inline uint32_t OatFile::OatMethod::GetMappingTableOffset() const {
+ const uint8_t* mapping_table = GetMappingTable();
+ return static_cast<uint32_t>(mapping_table != nullptr ? mapping_table - begin_ : 0u);
+}
+
+inline uint32_t OatFile::OatMethod::GetVmapTableOffset() const {
+ const uint8_t* vmap_table = GetVmapTable();
+ return static_cast<uint32_t>(vmap_table != nullptr ? vmap_table - begin_ : 0u);
+}
+
+inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].mapping_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code) - offset;
+}
+
+inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
+ const void* code = mirror::ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ if (code == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset = reinterpret_cast<const OatMethodHeader*>(code)[-1].vmap_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code) - offset;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_FILE_INL_H_
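
The accessors above assume the writer laid out [mapping table][vmap table][OatMethodHeader][code] contiguously, recording backward offsets from the code start. A simplified sketch of that invariant — EmitMethod is illustrative, not the actual OatWriter path, and it ignores alignment and the empty-table (offset 0) case:

    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the PACKED(4) OatMethodHeader above.
    struct MethodHeader {
      uint32_t mapping_table_offset_;  // code start minus mapping table start
      uint32_t vmap_table_offset_;     // code start minus vmap table start
      uint32_t code_size_;
    };

    // Lay out [mapping table][vmap table][header][code] in one buffer and
    // record the backward offsets that GetMappingTable()/GetVmapTable() expect.
    std::vector<uint8_t> EmitMethod(const std::vector<uint8_t>& mapping_table,
                                    const std::vector<uint8_t>& vmap_table,
                                    const std::vector<uint8_t>& code) {
      std::vector<uint8_t> out = mapping_table;
      out.insert(out.end(), vmap_table.begin(), vmap_table.end());
      const uint32_t code_start =
          static_cast<uint32_t>(out.size() + sizeof(MethodHeader));
      MethodHeader header;
      header.mapping_table_offset_ = code_start;  // back to buffer offset 0
      header.vmap_table_offset_ =
          code_start - static_cast<uint32_t>(mapping_table.size());
      header.code_size_ = static_cast<uint32_t>(code.size());
      const uint8_t* raw = reinterpret_cast<const uint8_t*>(&header);
      out.insert(out.end(), raw, raw + sizeof(MethodHeader));
      out.insert(out.end(), code.begin(), code.end());
      // A reader holding &out[code_start] recovers each table exactly as the
      // OatFile::OatMethod accessors do: table = code_ptr - offset.
      return out;
    }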
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 1967345c7c..56e1f053cf 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -386,7 +386,7 @@ const DexFile* OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const {
dex_file_location_checksum_, error_msg);
}
-const OatFile::OatClass* OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
+OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
uint32_t oat_class_offset = oat_class_offsets_pointer_[class_def_index];
const byte* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
@@ -419,12 +419,12 @@ const OatFile::OatClass* OatFile::OatDexFile::GetOatClass(uint16_t class_def_ind
}
CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
- return new OatClass(oat_file_,
- status,
- type,
- bitmap_size,
- reinterpret_cast<const uint32_t*>(bitmap_pointer),
- reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
+ return OatClass(oat_file_,
+ status,
+ type,
+ bitmap_size,
+ reinterpret_cast<const uint32_t*>(bitmap_pointer),
+ reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
}
OatFile::OatClass::OatClass(const OatFile* oat_file,
@@ -434,7 +434,7 @@ OatFile::OatClass::OatClass(const OatFile* oat_file,
const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer)
: oat_file_(oat_file), status_(status), type_(type),
- bitmap_(NULL), methods_pointer_(methods_pointer) {
+ bitmap_(bitmap_pointer), methods_pointer_(methods_pointer) {
CHECK(methods_pointer != nullptr);
switch (type_) {
case kOatClassAllCompiled: {
@@ -445,14 +445,12 @@ OatFile::OatClass::OatClass(const OatFile* oat_file,
case kOatClassSomeCompiled: {
CHECK_NE(0U, bitmap_size);
CHECK(bitmap_pointer != nullptr);
- bitmap_ = new BitVector(0, false, Allocator::GetNoopAllocator(), bitmap_size,
- const_cast<uint32_t*>(bitmap_pointer));
break;
}
case kOatClassNoneCompiled: {
CHECK_EQ(0U, bitmap_size);
CHECK(bitmap_pointer == nullptr);
- methods_pointer_ = NULL;
+ methods_pointer_ = nullptr;
break;
}
case kOatClassMax: {
@@ -462,14 +460,11 @@ OatFile::OatClass::OatClass(const OatFile* oat_file,
}
}
-OatFile::OatClass::~OatClass() {
- delete bitmap_;
-}
-
const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index) const {
+ // NOTE: We don't keep the number of methods and cannot do a bounds check for method_index.
if (methods_pointer_ == NULL) {
CHECK_EQ(kOatClassNoneCompiled, type_);
- return OatMethod(NULL, 0, 0, 0, 0, 0, 0, 0);
+ return OatMethod(NULL, 0, 0, 0, 0, 0);
}
size_t methods_pointer_index;
if (bitmap_ == NULL) {
@@ -477,12 +472,11 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
methods_pointer_index = method_index;
} else {
CHECK_EQ(kOatClassSomeCompiled, type_);
- if (!bitmap_->IsBitSet(method_index)) {
- return OatMethod(NULL, 0, 0, 0, 0, 0, 0, 0);
+ if (!BitVector::IsBitSet(bitmap_, method_index)) {
+ return OatMethod(NULL, 0, 0, 0, 0, 0);
}
- size_t num_set_bits = bitmap_->NumSetBits(method_index);
- CHECK_NE(0U, num_set_bits);
- methods_pointer_index = num_set_bits - 1;
+ size_t num_set_bits = BitVector::NumSetBits(bitmap_, method_index);
+ methods_pointer_index = num_set_bits;
}
const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
return OatMethod(
@@ -491,8 +485,6 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
oat_method_offsets.frame_size_in_bytes_,
oat_method_offsets.core_spill_mask_,
oat_method_offsets.fp_spill_mask_,
- oat_method_offsets.mapping_table_offset_,
- oat_method_offsets.vmap_table_offset_,
oat_method_offsets.gc_map_offset_);
}
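
For kOatClassSomeCompiled, the lookup above turns a method index into a dense table index by counting the set bits strictly below it, which is why the old "- 1" adjustment disappears. A sketch of the arithmetic, assuming the BitVector statics count bits in [0, method_index) — illustrative helpers, not the actual BitVector implementation:

    #include <cstdint>

    // One bit per method; compiled methods are stored densely, in order.
    static bool IsBitSet(const uint32_t* bitmap, uint32_t idx) {
      return (bitmap[idx / 32u] & (1u << (idx % 32u))) != 0u;
    }

    // Counts set bits in [0, end): 'end' itself is excluded, so the result is
    // directly usable as methods_pointer_index without the old "- 1".
    static uint32_t NumSetBits(const uint32_t* bitmap, uint32_t end) {
      uint32_t count = 0;
      for (uint32_t i = 0; i < end / 32u; ++i) {
        count += __builtin_popcount(bitmap[i]);
      }
      if (end % 32u != 0u) {
        count += __builtin_popcount(bitmap[end / 32u] & ((1u << (end % 32u)) - 1u));
      }
      return count;
    }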
@@ -501,32 +493,13 @@ OatFile::OatMethod::OatMethod(const byte* base,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const uint32_t mapping_table_offset,
- const uint32_t vmap_table_offset,
const uint32_t gc_map_offset)
: begin_(base),
code_offset_(code_offset),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask),
fp_spill_mask_(fp_spill_mask),
- mapping_table_offset_(mapping_table_offset),
- vmap_table_offset_(vmap_table_offset),
native_gc_map_offset_(gc_map_offset) {
- if (kIsDebugBuild) {
- if (mapping_table_offset_ != 0) { // implies non-native, non-stub code
- if (vmap_table_offset_ == 0) {
- CHECK_EQ(0U, static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
- __builtin_popcount(fp_spill_mask_)));
- } else {
- VmapTable vmap_table(reinterpret_cast<const uint8_t*>(begin_ + vmap_table_offset_));
-
- CHECK_EQ(vmap_table.Size(), static_cast<uint32_t>(__builtin_popcount(core_spill_mask_) +
- __builtin_popcount(fp_spill_mask_)));
- }
- } else {
- CHECK_EQ(vmap_table_offset_, 0U);
- }
- }
}
OatFile::OatMethod::~OatMethod() {}
@@ -549,8 +522,6 @@ void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
method->SetFrameSizeInBytes(frame_size_in_bytes_);
method->SetCoreSpillMask(core_spill_mask_);
method->SetFpSpillMask(fp_spill_mask_);
- method->SetMappingTable(GetMappingTable());
- method->SetVmapTable(GetVmapTable());
method->SetNativeGcMap(GetNativeGcMap()); // Used by native methods in work around JNI mode.
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index d6e8dc07f2..5f6cb1ec31 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -87,12 +87,6 @@ class OatFile {
uint32_t GetFpSpillMask() const {
return fp_spill_mask_;
}
- uint32_t GetMappingTableOffset() const {
- return mapping_table_offset_;
- }
- uint32_t GetVmapTableOffset() const {
- return vmap_table_offset_;
- }
uint32_t GetNativeGcMapOffset() const {
return native_gc_map_offset_;
}
@@ -122,16 +116,15 @@ class OatFile {
}
uint32_t GetQuickCodeSize() const;
- const uint8_t* GetMappingTable() const {
- return GetOatPointer<const uint8_t*>(mapping_table_offset_);
- }
- const uint8_t* GetVmapTable() const {
- return GetOatPointer<const uint8_t*>(vmap_table_offset_);
- }
const uint8_t* GetNativeGcMap() const {
return GetOatPointer<const uint8_t*>(native_gc_map_offset_);
}
+ uint32_t GetMappingTableOffset() const;
+ uint32_t GetVmapTableOffset() const;
+ const uint8_t* GetMappingTable() const;
+ const uint8_t* GetVmapTable() const;
+
~OatMethod();
// Create an OatMethod with offsets relative to the given base address
@@ -140,8 +133,6 @@ class OatFile {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const uint32_t mapping_table_offset,
- const uint32_t vmap_table_offset,
const uint32_t gc_map_offset);
private:
@@ -159,8 +150,6 @@ class OatFile {
size_t frame_size_in_bytes_;
uint32_t core_spill_mask_;
uint32_t fp_spill_mask_;
- uint32_t mapping_table_offset_;
- uint32_t vmap_table_offset_;
uint32_t native_gc_map_offset_;
friend class OatClass;
@@ -181,7 +170,6 @@ class OatFile {
// methods. note that runtime created methods such as miranda
// methods are not included.
const OatMethod GetOatMethod(uint32_t method_index) const;
- ~OatClass();
private:
OatClass(const OatFile* oat_file,
@@ -191,15 +179,13 @@ class OatFile {
const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer);
- const OatFile* oat_file_;
+ const OatFile* const oat_file_;
const mirror::Class::Status status_;
- COMPILE_ASSERT(mirror::Class::Status::kStatusMax < (2 ^ 16), class_status_wont_fit_in_16bits);
- OatClassType type_;
- COMPILE_ASSERT(OatClassType::kOatClassMax < (2 ^ 16), oat_class_type_wont_fit_in_16bits);
+ const OatClassType type_;
- const BitVector* bitmap_;
+ const uint32_t* const bitmap_;
const OatMethodOffsets* methods_pointer_;
@@ -225,7 +211,7 @@ class OatFile {
}
// Returns the OatClass for the class specified by the given DexFile class_def_index.
- const OatClass* GetOatClass(uint16_t class_def_index) const;
+ OatClass GetOatClass(uint16_t class_def_index) const;
~OatDexFile();
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 9198c90d5a..767c197206 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,8 +24,10 @@
namespace art {
namespace mirror {
+class Class;
class Object;
template<class MirrorType> class HeapReference;
+class Reference;
} // namespace mirror
class StackVisitor;
@@ -59,6 +61,7 @@ typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t
const StackVisitor* visitor, RootType root_type);
typedef void (MarkHeapReferenceCallback)(mirror::HeapReference<mirror::Object>* ref, void* arg);
+typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Reference* ref, void* arg);
// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 57d32bb476..084e8f6a0e 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -251,7 +251,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
// TODO: support -Djava.class.path
i++;
if (i == options.size()) {
- Usage("Missing required class path value for %s", option.c_str());
+ Usage("Missing required class path value for %s\n", option.c_str());
return false;
}
const StringPiece& value = options[i].first;
@@ -279,35 +279,35 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
} else if (StartsWith(option, "-Xms")) {
size_t size = ParseMemoryOption(option.substr(strlen("-Xms")).c_str(), 1024);
if (size == 0) {
- Usage("Failed to parse memory option %s", option.c_str());
+ Usage("Failed to parse memory option %s\n", option.c_str());
return false;
}
heap_initial_size_ = size;
} else if (StartsWith(option, "-Xmx")) {
size_t size = ParseMemoryOption(option.substr(strlen("-Xmx")).c_str(), 1024);
if (size == 0) {
- Usage("Failed to parse memory option %s", option.c_str());
+ Usage("Failed to parse memory option %s\n", option.c_str());
return false;
}
heap_maximum_size_ = size;
} else if (StartsWith(option, "-XX:HeapGrowthLimit=")) {
size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapGrowthLimit=")).c_str(), 1024);
if (size == 0) {
- Usage("Failed to parse memory option %s", option.c_str());
+ Usage("Failed to parse memory option %s\n", option.c_str());
return false;
}
heap_growth_limit_ = size;
} else if (StartsWith(option, "-XX:HeapMinFree=")) {
size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapMinFree=")).c_str(), 1024);
if (size == 0) {
- Usage("Failed to parse memory option %s", option.c_str());
+ Usage("Failed to parse memory option %s\n", option.c_str());
return false;
}
heap_min_free_ = size;
} else if (StartsWith(option, "-XX:HeapMaxFree=")) {
size_t size = ParseMemoryOption(option.substr(strlen("-XX:HeapMaxFree=")).c_str(), 1024);
if (size == 0) {
- Usage("Failed to parse memory option %s", option.c_str());
+ Usage("Failed to parse memory option %s\n", option.c_str());
return false;
}
heap_max_free_ = size;
@@ -316,7 +316,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
return false;
}
} else if (StartsWith(option, "-XX:ForegroundHeapGrowthMultiplier=")) {
- if (!ParseDouble(option, '=', 0.1, 0.9, &foreground_heap_growth_multiplier_)) {
+ if (!ParseDouble(option, '=', 0.1, 10.0, &foreground_heap_growth_multiplier_)) {
return false;
}
} else if (StartsWith(option, "-XX:ParallelGCThreads=")) {
@@ -330,7 +330,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
} else if (StartsWith(option, "-Xss")) {
size_t size = ParseMemoryOption(option.substr(strlen("-Xss")).c_str(), 1);
if (size == 0) {
- Usage("Failed to parse memory option %s", option.c_str());
+ Usage("Failed to parse memory option %s\n", option.c_str());
return false;
}
stack_size_ = size;
@@ -398,7 +398,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
(gc_option == "noverifycardtable")) {
// Ignored for backwards compatibility.
} else {
- Usage("Unknown -Xgc option %s", gc_option.c_str());
+ Usage("Unknown -Xgc option %s\n", gc_option.c_str());
return false;
}
}
@@ -411,7 +411,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
if (collector_type != gc::kCollectorTypeNone) {
background_collector_type_ = collector_type;
} else {
- Usage("Unknown -XX:BackgroundGC option %s", substring.c_str());
+ Usage("Unknown -XX:BackgroundGC option %s\n", substring.c_str());
return false;
}
} else if (option == "-XX:+DisableExplicitGC") {
@@ -443,10 +443,13 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
} else if (verbose_options[i] == "threads") {
gLogVerbosity.threads = true;
} else {
- Usage("Unknown -verbose option %s", verbose_options[i].c_str());
+ Usage("Unknown -verbose option %s\n", verbose_options[i].c_str());
return false;
}
}
+ } else if (StartsWith(option, "-verbose-methods:")) {
+ gLogVerbosity.compiler = false;
+ Split(option.substr(strlen("-verbose-methods:")), ',', gVerboseMethods);
} else if (StartsWith(option, "-Xlockprofthreshold:")) {
if (!ParseUnsignedInteger(option, ':', &lock_profiling_threshold_)) {
return false;
@@ -476,7 +479,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
} else if (option == "abort") {
const void* hook = options[i].second;
if (hook == nullptr) {
- Usage("abort was NULL");
+ Usage("abort was NULL\n");
return false;
}
hook_abort_ = reinterpret_cast<void(*)()>(const_cast<void*>(hook));
@@ -568,14 +571,14 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
} else if (option == "-Xcompiler-option") {
i++;
if (i == options.size()) {
- Usage("Missing required compiler option for %s", option.c_str());
+ Usage("Missing required compiler option for %s\n", option.c_str());
return false;
}
compiler_options_.push_back(options[i].first);
} else if (option == "-Ximage-compiler-option") {
i++;
if (i == options.size()) {
- Usage("Missing required compiler option for %s", option.c_str());
+ Usage("Missing required compiler option for %s\n", option.c_str());
return false;
}
image_compiler_options_.push_back(options[i].first);
@@ -586,7 +589,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
} else if (verify_mode == "remote" || verify_mode == "all") {
verify_ = true;
} else {
- Usage("Unknown -Xverify option %s", verify_mode.c_str());
+ Usage("Unknown -Xverify option %s\n", verify_mode.c_str());
return false;
}
} else if (StartsWith(option, "-ea") ||
@@ -626,7 +629,7 @@ bool ParsedOptions::Parse(const Runtime::Options& options, bool ignore_unrecogni
StartsWith(option, "-XX:mainThreadStackSize=")) {
// Ignored for backwards compatibility.
} else if (!ignore_unrecognized) {
- Usage("Unrecognized option %s", option.c_str());
+ Usage("Unrecognized option %s\n", option.c_str());
return false;
}
}
@@ -717,6 +720,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:HeapMinFree=N\n");
UsageMessage(stream, " -XX:HeapMaxFree=N\n");
UsageMessage(stream, " -XX:HeapTargetUtilization=doublevalue\n");
+ UsageMessage(stream, " -XX:ForegroundHeapGrowthMultiplier=doublevalue\n");
UsageMessage(stream, " -XX:LowMemoryMode\n");
UsageMessage(stream, " -Xprofile:{threadcpuclock,wallclock,dualclock}\n");
UsageMessage(stream, "\n");
@@ -789,7 +793,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
bool ParsedOptions::ParseStringAfterChar(const std::string& s, char c, std::string* parsed_value) {
std::string::size_type colon = s.find(c);
if (colon == std::string::npos) {
- Usage("Missing char %c in option %s", c, s.c_str());
+ Usage("Missing char %c in option %s\n", c, s.c_str());
return false;
}
// Add one to remove the char we were trimming until.
@@ -800,14 +804,14 @@ bool ParsedOptions::ParseStringAfterChar(const std::string& s, char c, std::stri
bool ParsedOptions::ParseInteger(const std::string& s, char after_char, int* parsed_value) {
std::string::size_type colon = s.find(after_char);
if (colon == std::string::npos) {
- Usage("Missing char %c in option %s", after_char, s.c_str());
+ Usage("Missing char %c in option %s\n", after_char, s.c_str());
return false;
}
const char* begin = &s[colon + 1];
char* end;
size_t result = strtoul(begin, &end, 10);
if (begin == end || *end != '\0') {
- Usage("Failed to parse integer from %s ", s.c_str());
+ Usage("Failed to parse integer from %s\n", s.c_str());
return false;
}
*parsed_value = result;
@@ -821,7 +825,7 @@ bool ParsedOptions::ParseUnsignedInteger(const std::string& s, char after_char,
return false;
}
if (i < 0) {
- Usage("Negative value %d passed for unsigned option %s", i, s.c_str());
+ Usage("Negative value %d passed for unsigned option %s\n", i, s.c_str());
return false;
}
*parsed_value = i;
@@ -840,7 +844,7 @@ bool ParsedOptions::ParseDouble(const std::string& option, char after_char,
// Ensure that we have a value, there was no cruft after it and it satisfies a sensible range.
const bool sane_val = iss.eof() && (value >= min) && (value <= max);
if (!sane_val) {
- Usage("Invalid double value %s for option %s", option.c_str());
+ Usage("Invalid double value %s for option %s\n", substring.c_str(), option.c_str());
return false;
}
*parsed_value = value;
diff --git a/runtime/catch_finder.cc b/runtime/quick_exception_handler.cc
index f0293d738f..a91fdf17ec 100644
--- a/runtime/catch_finder.cc
+++ b/runtime/quick_exception_handler.cc
@@ -14,37 +14,37 @@
* limitations under the License.
*/
-#include "catch_finder.h"
+#include "quick_exception_handler.h"
+
#include "catch_block_stack_visitor.h"
+#include "deoptimize_stack_visitor.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "sirt_ref-inl.h"
namespace art {
-CatchFinder::CatchFinder(Thread* self, const ThrowLocation& throw_location,
- mirror::Throwable* exception, bool is_deoptimization)
- : self_(self), context_(self->GetLongJumpContext()),
- exception_(exception), is_deoptimization_(is_deoptimization), throw_location_(throw_location),
+QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
+ : self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
- handler_quick_frame_(nullptr), handler_quick_frame_pc_(0),
- handler_dex_pc_(0), clear_exception_(false), top_shadow_frame_(nullptr),
- handler_frame_id_(kInvalidFrameId) {
- // Exception not in root sets, can't allow GC.
- last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
+ handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_dex_pc_(0),
+ clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
}
-void CatchFinder::FindCatch() {
+void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
+ mirror::Throwable* exception) {
+ DCHECK(!is_deoptimization_);
+ SirtRef<mirror::Throwable> exception_ref(self_, exception);
+
// Walk the stack to find catch handler or prepare for deoptimization.
- CatchBlockStackVisitor visitor(self_, context_, exception_, is_deoptimization_, this);
+ CatchBlockStackVisitor visitor(self_, context_, exception_ref, this);
visitor.WalkStack(true);
mirror::ArtMethod* catch_method = *handler_quick_frame_;
- if (catch_method == nullptr) {
- if (kDebugExceptionDelivery) {
+ if (kDebugExceptionDelivery) {
+ if (catch_method == nullptr) {
LOG(INFO) << "Handler is upcall";
- }
- } else {
- CHECK(!is_deoptimization_);
- if (kDebugExceptionDelivery) {
+ } else {
const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
@@ -55,17 +55,23 @@ void CatchFinder::FindCatch() {
DCHECK(!self_->IsExceptionPending());
} else {
// Put exception back in root set with clear throw location.
- self_->SetException(ThrowLocation(), exception_);
- }
- self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
- // Do instrumentation events after allowing thread suspension again.
- if (!is_deoptimization_) {
- // The debugger may suspend this thread and walk its stack. Let's do this before popping
- // instrumentation frames.
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
- exception_);
+ self_->SetException(ThrowLocation(), exception_ref.get());
}
+ // The debugger may suspend this thread and walk its stack. Let's do this before popping
+ // instrumentation frames.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ instrumentation->ExceptionCaughtEvent(self_, throw_location, catch_method, handler_dex_pc_,
+ exception_ref.get());
+}
+
+void QuickExceptionHandler::DeoptimizeStack() {
+ DCHECK(is_deoptimization_);
+
+ DeoptimizeStackVisitor visitor(self_, context_, this);
+ visitor.WalkStack(true);
+
+ // Restore deoptimization exception
+ self_->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
}
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
@@ -105,7 +111,7 @@ class InstrumentationStackVisitor : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
};
-void CatchFinder::UpdateInstrumentationStack() {
+void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_id_);
visitor.WalkStack(true);
@@ -118,11 +124,7 @@ void CatchFinder::UpdateInstrumentationStack() {
}
}
-void CatchFinder::DoLongJump() {
- if (is_deoptimization_) {
- // TODO: proper return value.
- self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
- }
+void QuickExceptionHandler::DoLongJump() {
// Place context back on thread so it will be available when we continue.
self_->ReleaseLongJumpContext(context_);
context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
diff --git a/runtime/catch_finder.h b/runtime/quick_exception_handler.h
index ebbafe2580..ef3766c0a6 100644
--- a/runtime/catch_finder.h
+++ b/runtime/quick_exception_handler.h
@@ -14,29 +14,39 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_CATCH_FINDER_H_
-#define ART_RUNTIME_CATCH_FINDER_H_
+#ifndef ART_RUNTIME_QUICK_EXCEPTION_HANDLER_H_
+#define ART_RUNTIME_QUICK_EXCEPTION_HANDLER_H_
-#include "mirror/art_method-inl.h"
-#include "thread.h"
+#include "base/logging.h"
+#include "base/mutex.h"
namespace art {
+namespace mirror {
+class ArtMethod;
+class Throwable;
+} // namespace mirror
+class Context;
+class Thread;
+class ThrowLocation;
+class ShadowFrame;
+
static constexpr bool kDebugExceptionDelivery = false;
static constexpr size_t kInvalidFrameId = 0xffffffff;
// Manages exception delivery for Quick backend. Not used by Portable backend.
-class CatchFinder {
+class QuickExceptionHandler {
public:
- CatchFinder(Thread* self, const ThrowLocation& throw_location, mirror::Throwable* exception,
- bool is_deoptimization)
+ QuickExceptionHandler(Thread* self, bool is_deoptimization)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ~CatchFinder() {
+ ~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
}
- void FindCatch() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FindCatch(const ThrowLocation& throw_location, mirror::Throwable* exception)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -56,10 +66,6 @@ class CatchFinder {
clear_exception_ = clear_exception;
}
- void SetTopShadowFrame(ShadowFrame* top_shadow_frame) {
- top_shadow_frame_ = top_shadow_frame;
- }
-
void SetHandlerFrameId(size_t frame_id) {
handler_frame_id_ = frame_id;
}
@@ -67,14 +73,9 @@ class CatchFinder {
private:
Thread* const self_;
Context* const context_;
- mirror::Throwable* const exception_;
const bool is_deoptimization_;
- // Location of the throw.
- const ThrowLocation& throw_location_;
// Is method tracing active?
const bool method_tracing_active_;
- // Support for nesting no thread suspension checks.
- const char* last_no_assert_suspension_cause_;
// Quick frame with found handler or last frame if no handler found.
mirror::ArtMethod** handler_quick_frame_;
// PC to branch to for the handler.
@@ -83,13 +84,11 @@ class CatchFinder {
uint32_t handler_dex_pc_;
// Should the exception be cleared as the catch block has no move-exception?
bool clear_exception_;
- // Deoptimization top shadow frame.
- ShadowFrame* top_shadow_frame_;
// Frame id of the catch handler or the upcall.
size_t handler_frame_id_;
- DISALLOW_COPY_AND_ASSIGN(CatchFinder);
+ DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};
} // namespace art
-#endif // ART_RUNTIME_CATCH_FINDER_H_
+#endif // ART_RUNTIME_QUICK_EXCEPTION_HANDLER_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index eb0522ad04..611ce0bb98 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1230,6 +1230,10 @@ void Runtime::SetFaultMessage(const std::string& message) {
void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
const {
+ if (GetInstrumentation()->InterpretOnly()) {
+ argv->push_back("--compiler-filter=interpret-only");
+ }
+
argv->push_back("--runtime-arg");
std::string checkstr = "-implicit-checks";
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 462711ed79..1ee0b1add3 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -359,6 +359,10 @@ class Runtime {
bool InitZygote();
void DidForkFromZygote();
+ const instrumentation::Instrumentation* GetInstrumentation() const {
+ return &instrumentation_;
+ }
+
instrumentation::Instrumentation* GetInstrumentation() {
return &instrumentation_;
}
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index da1b2cac14..960d3324d3 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -33,7 +33,7 @@ static constexpr bool kDumpHeapObjectOnSigsevg = false;
struct Backtrace {
void Dump(std::ostream& os) {
- DumpNativeStack(os, GetTid(), "\t", true);
+ DumpNativeStack(os, GetTid(), "\t");
}
};
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index ebc545250a..7698d6a49b 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -38,23 +38,18 @@ class ScopedThreadStateChange {
Runtime* runtime = Runtime::Current();
CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDown(self_));
} else {
- bool runnable_transition;
DCHECK_EQ(self, Thread::Current());
// Read state without locks, ok as state is effectively thread local and we're not interested
// in the suspend count (this will be handled in the runnable transitions).
old_thread_state_ = self->GetState();
- runnable_transition = old_thread_state_ == kRunnable || new_thread_state == kRunnable;
- if (!runnable_transition) {
- // A suspended transition to another effectively suspended transition, ok to use Unsafe.
- self_->SetState(new_thread_state);
- }
-
- if (runnable_transition && old_thread_state_ != new_thread_state) {
+ if (old_thread_state_ != new_thread_state) {
if (new_thread_state == kRunnable) {
self_->TransitionFromSuspendedToRunnable();
- } else {
- DCHECK_EQ(old_thread_state_, kRunnable);
+ } else if (old_thread_state_ == kRunnable) {
self_->TransitionFromRunnableToSuspended(new_thread_state);
+ } else {
+ // A suspended transition to another effectively suspended transition, ok to use Unsafe.
+ self_->SetState(new_thread_state);
}
}
}
@@ -171,7 +166,7 @@ class ScopedObjectAccessUnchecked : public ScopedThreadStateChange {
DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
- return Env()->AddLocalReference<T>(obj, Vm()->work_around_app_jni_bugs);
+ return Env()->AddLocalReference<T>(obj);
}
template<typename T>
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b3d14f0599..0fafbfa94d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,7 +32,6 @@
#include "arch/context.h"
#include "base/mutex.h"
-#include "catch_finder.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "cutils/atomic.h"
@@ -54,6 +53,7 @@
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "object_utils.h"
+#include "quick_exception_handler.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -876,7 +876,7 @@ struct StackDumpVisitor : public StackVisitor {
if (o == nullptr) {
os << "an unknown object";
} else {
- if ((o->GetLockWord().GetState() == LockWord::kThinLocked) &&
+ if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
// Getting the identity hashcode here would result in lock inflation and suspension of the
// current thread, which isn't safe if this is the only runnable thread.
@@ -939,7 +939,7 @@ void Thread::DumpStack(std::ostream& os) const {
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
SirtRef<mirror::ArtMethod> method_ref(Thread::Current(), GetCurrentMethod(nullptr));
- DumpNativeStack(os, GetTid(), " native: ", false, method_ref.get());
+ DumpNativeStack(os, GetTid(), " native: ", method_ref.get());
}
DumpJavaStack(os);
} else {
@@ -1018,7 +1018,8 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte
tls32_.state_and_flags.as_struct.flags = 0;
tls32_.state_and_flags.as_struct.state = kNative;
memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
- memset(tlsPtr_.rosalloc_runs, 0, sizeof(tlsPtr_.rosalloc_runs));
+ std::fill(tlsPtr_.rosalloc_runs, tlsPtr_.rosalloc_runs + kRosAllocNumOfSizeBrackets,
+ gc::allocator::RosAlloc::GetDedicatedFullRun());
for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
tlsPtr_.checkpoint_functions[i] = nullptr;
}
@@ -1248,10 +1249,6 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
// Read from SIRT.
result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
VerifyObject(result);
- } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
- // Assume an invalid local reference is actually a direct pointer.
- result = reinterpret_cast<mirror::Object*>(obj);
- VerifyObject(result);
} else {
result = kInvalidIndirectRefObject;
}
@@ -1845,7 +1842,7 @@ void Thread::QuickDeliverException() {
// Don't leave exception visible while we try to find the handler, which may cause class
// resolution.
ClearException();
- bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
+ bool is_deoptimization = (exception == GetDeoptimizationException());
if (kDebugExceptionDelivery) {
if (!is_deoptimization) {
mirror::String* msg = exception->GetDetailMessage();
@@ -1856,10 +1853,14 @@ void Thread::QuickDeliverException() {
DumpStack(LOG(INFO) << "Deoptimizing: ");
}
}
- CatchFinder catch_finder(this, throw_location, exception, is_deoptimization);
- catch_finder.FindCatch();
- catch_finder.UpdateInstrumentationStack();
- catch_finder.DoLongJump();
+ QuickExceptionHandler exception_handler(this, is_deoptimization);
+ if (is_deoptimization) {
+ exception_handler.DeoptimizeStack();
+ } else {
+ exception_handler.FindCatch(throw_location, exception);
+ }
+ exception_handler.UpdateInstrumentationStack();
+ exception_handler.DoLongJump();
LOG(FATAL) << "UNREACHABLE";
}
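
The dispatch above keys on a sentinel rather than a real exception object: deoptimization is requested with a reserved pointer value that no live Throwable can occupy. The pattern in isolation — an illustrative sketch; ART's actual sentinel is the reinterpret_cast<mirror::Throwable*>(-1) declared in thread.h below:

    struct Throwable;  // opaque stand-in for mirror::Throwable

    // A reserved, never-dereferenced address acts as a request flag:
    // "unwind for deoptimization" rather than "deliver this throwable".
    inline Throwable* DeoptimizationSentinel() {
      return reinterpret_cast<Throwable*>(-1);
    }

    inline bool IsDeoptimizationRequest(const Throwable* exception) {
      return exception == DeoptimizationSentinel();
    }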
@@ -1931,92 +1932,102 @@ class ReferenceMapVisitor : public StackVisitor {
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (false) {
LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
- << StringPrintf("@ PC:%04x", GetDexPc());
+ << StringPrintf("@ PC:%04x", GetDexPc());
}
ShadowFrame* shadow_frame = GetCurrentShadowFrame();
if (shadow_frame != nullptr) {
- mirror::ArtMethod* m = shadow_frame->GetMethod();
- size_t num_regs = shadow_frame->NumberOfVRegs();
- if (m->IsNative() || shadow_frame->HasReferenceArray()) {
- // SIRT for JNI or References for interpreter.
- for (size_t reg = 0; reg < num_regs; ++reg) {
+ VisitShadowFrame(shadow_frame);
+ } else {
+ VisitQuickFrame();
+ }
+ return true;
+ }
+
+ void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = shadow_frame->GetMethod();
+ size_t num_regs = shadow_frame->NumberOfVRegs();
+ if (m->IsNative() || shadow_frame->HasReferenceArray()) {
+ // SIRT for JNI or References for interpreter.
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
+ }
+ }
+ }
+ } else {
+ // Java method.
+ // Portable path use DexGcMap and store in Method.native_gc_map_.
+ const uint8_t* gc_map = m->GetNativeGcMap();
+ CHECK(gc_map != nullptr) << PrettyMethod(m);
+ verifier::DexPcToReferenceMap dex_gc_map(gc_map);
+ uint32_t dex_pc = shadow_frame->GetDexPC();
+ const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+ DCHECK(reg_bitmap != nullptr);
+ num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ if (TestBitmap(reg, reg_bitmap)) {
mirror::Object* ref = shadow_frame->GetVRegReference(reg);
if (ref != nullptr) {
mirror::Object* new_ref = ref;
visitor_(&new_ref, reg, this);
if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
+ shadow_frame->SetVRegReference(reg, new_ref);
}
}
}
- } else {
- // Java method.
- // Portable path use DexGcMap and store in Method.native_gc_map_.
- const uint8_t* gc_map = m->GetNativeGcMap();
- CHECK(gc_map != nullptr) << PrettyMethod(m);
- verifier::DexPcToReferenceMap dex_gc_map(gc_map);
- uint32_t dex_pc = GetDexPc();
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+ }
+ }
+ }
+
+ private:
+ void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ // Process register map (which native and runtime methods don't have)
+ if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
+ const uint8_t* native_gc_map = m->GetNativeGcMap();
+ CHECK(native_gc_map != nullptr) << PrettyMethod(m);
+ mh_.ChangeMethod(m);
+ const DexFile::CodeItem* code_item = mh_.GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be nullptr or how would we compile its instructions?
+ NativePcOffsetToReferenceMap map(native_gc_map);
+ size_t num_regs = std::min(map.RegWidth() * 8,
+ static_cast<size_t>(code_item->registers_size_));
+ if (num_regs > 0) {
+ const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
DCHECK(reg_bitmap != nullptr);
- num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
+ const VmapTable vmap_table(m->GetVmapTable());
+ uint32_t core_spills = m->GetCoreSpillMask();
+ uint32_t fp_spills = m->GetFpSpillMask();
+ size_t frame_size = m->GetFrameSizeInBytes();
+ // For all dex registers in the bitmap
+ mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+ DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
+ // Does this register hold a reference?
if (TestBitmap(reg, reg_bitmap)) {
- mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
+ uint32_t vmap_offset;
+ if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
+ int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
+ // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
+ mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
+ if (*ref_addr != nullptr) {
+ visitor_(ref_addr, reg, this);
}
- }
- }
- }
- }
- } else {
- mirror::ArtMethod* m = GetMethod();
- // Process register map (which native and runtime methods don't have)
- if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- const uint8_t* native_gc_map = m->GetNativeGcMap();
- CHECK(native_gc_map != nullptr) << PrettyMethod(m);
- mh_.ChangeMethod(m);
- const DexFile::CodeItem* code_item = mh_.GetCodeItem();
- DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be nullptr or how would we compile its instructions?
- NativePcOffsetToReferenceMap map(native_gc_map);
- size_t num_regs = std::min(map.RegWidth() * 8,
- static_cast<size_t>(code_item->registers_size_));
- if (num_regs > 0) {
- const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
- DCHECK(reg_bitmap != nullptr);
- const VmapTable vmap_table(m->GetVmapTable());
- uint32_t core_spills = m->GetCoreSpillMask();
- uint32_t fp_spills = m->GetFpSpillMask();
- size_t frame_size = m->GetFrameSizeInBytes();
- // For all dex registers in the bitmap
- mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
- DCHECK(cur_quick_frame != nullptr);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- // Does this register hold a reference?
- if (TestBitmap(reg, reg_bitmap)) {
- uint32_t vmap_offset;
- if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
- int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
- // This is sound as spilled GPRs will be word sized (ie 32 or 64bit).
- mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
- if (*ref_addr != nullptr) {
- visitor_(ref_addr, reg, this);
- }
- } else {
- StackReference<mirror::Object>* ref_addr =
- reinterpret_cast<StackReference<mirror::Object>*>(
- GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
- reg));
- mirror::Object* ref = ref_addr->AsMirrorPtr();
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (ref != new_ref) {
- ref_addr->Assign(new_ref);
- }
+ } else {
+ StackReference<mirror::Object>* ref_addr =
+ reinterpret_cast<StackReference<mirror::Object>*>(
+ GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
+ reg));
+ mirror::Object* ref = ref_addr->AsMirrorPtr();
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, reg, this);
+ if (ref != new_ref) {
+ ref_addr->Assign(new_ref);
}
}
}
@@ -2024,10 +2035,8 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
}
- return true;
}
- private:
static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
}
@@ -2064,7 +2073,7 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
if (tlsPtr_.opeer != nullptr) {
visitor(&tlsPtr_.opeer, arg, thread_id, kRootThreadObject);
}
- if (tlsPtr_.exception != nullptr) {
+ if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg, thread_id, kRootNativeStack);
}
tlsPtr_.throw_location.VisitRoots(visitor, arg);
@@ -2084,6 +2093,14 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
if (tlsPtr_.single_step_control != nullptr) {
tlsPtr_.single_step_control->VisitRoots(visitor, arg, thread_id, kRootDebugger);
}
+ if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
+ RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
+ ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitorToCallback);
+ for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
+ shadow_frame = shadow_frame->GetLink()) {
+ mapper.VisitShadowFrame(shadow_frame);
+ }
+ }
// Visit roots on this thread's stack
Context* context = GetLongJumpContext();
RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
diff --git a/runtime/thread.h b/runtime/thread.h
index d25bbe997d..f8692855cf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -686,11 +686,20 @@ class Thread {
return tlsPtr_.single_step_control;
}
+ // Returns the fake exception used to activate deoptimization.
+ static mirror::Throwable* GetDeoptimizationException() {
+ return reinterpret_cast<mirror::Throwable*>(-1);
+ }
+
void SetDeoptimizationShadowFrame(ShadowFrame* sf);
void SetDeoptimizationReturnValue(const JValue& ret_val);
ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
+ bool HasDeoptimizationShadowFrame() const {
+ return tlsPtr_.deoptimization_shadow_frame != nullptr;
+ }
+
std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
return tlsPtr_.instrumentation_stack;
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 270deb0a9e..6f93566a57 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -78,7 +78,7 @@ void ThreadList::DumpNativeStacks(std::ostream& os) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
for (const auto& thread : list_) {
os << "DUMPING THREAD " << thread->GetTid() << "\n";
- DumpNativeStack(os, thread->GetTid(), "\t", true);
+ DumpNativeStack(os, thread->GetTid(), "\t");
os << "\n";
}
}
@@ -99,7 +99,7 @@ static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_A
// TODO: Reenable this when the native code in system_server can handle it.
// Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
if (false) {
- DumpNativeStack(os, tid, " native: ", false);
+ DumpNativeStack(os, tid, " native: ");
}
os << "\n";
}
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 76b6f270d9..1dc2da0c8b 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -55,18 +55,18 @@ TEST_F(TransactionTest, Object_monitor) {
// Lock object's monitor outside the transaction.
sirt_obj->MonitorEnter(soa.Self());
- uint32_t old_lock_word = sirt_obj->GetLockWord().GetValue();
+ uint32_t old_lock_word = sirt_obj->GetLockWord(false).GetValue();
Transaction transaction;
Runtime::Current()->EnterTransactionMode(&transaction);
// Unlock object's monitor inside the transaction.
sirt_obj->MonitorExit(soa.Self());
- uint32_t new_lock_word = sirt_obj->GetLockWord().GetValue();
+ uint32_t new_lock_word = sirt_obj->GetLockWord(false).GetValue();
Runtime::Current()->ExitTransactionMode();
// Aborting transaction must not clear the Object::class field.
transaction.Abort();
- uint32_t aborted_lock_word = sirt_obj->GetLockWord().GetValue();
+ uint32_t aborted_lock_word = sirt_obj->GetLockWord(false).GetValue();
EXPECT_NE(old_lock_word, new_lock_word);
EXPECT_EQ(aborted_lock_word, new_lock_word);
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index afbcbb7582..c4d1a7864c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1041,20 +1041,7 @@ std::string GetSchedulerGroupName(pid_t tid) {
return "";
}
-static std::string CleanMapName(const backtrace_map_t* map) {
- if (map == NULL || map->name.empty()) {
- return "???";
- }
- // Turn "/usr/local/google/home/enh/clean-dalvik-dev/out/host/linux-x86/lib/libartd.so"
- // into "libartd.so".
- size_t last_slash = map->name.rfind('/');
- if (last_slash == std::string::npos) {
- return map->name;
- }
- return map->name.substr(last_slash + 1);
-}
-
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix, bool include_count,
+void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
mirror::ArtMethod* current_method) {
// We may be called from contexts where current_method is not null, so we must assert this.
if (current_method != nullptr) {
@@ -1072,27 +1059,34 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
for (Backtrace::const_iterator it = backtrace->begin();
it != backtrace->end(); ++it) {
// We produce output like this:
- // ] #00 unwind_backtrace_thread+536 [0x55d75bb8] (libbacktrace.so)
- os << prefix;
- if (include_count) {
- os << StringPrintf("#%02zu ", it->num);
- }
- if (!it->func_name.empty()) {
- os << it->func_name;
+ // ] #00 pc 000075bb8  /system/lib/libc.so (unwind_backtrace_thread+536)
+ // In order for parsing tools to continue to function, the stack dump
+ // format must at least adhere to this format:
+ // #XX pc <RELATIVE_ADDR> <FULL_PATH_TO_SHARED_LIBRARY> ...
+ // The parsers require a single space before and after pc, and two spaces
+ // after the <RELATIVE_ADDR>. There can be any prefix data before the
+ // #XX. <RELATIVE_ADDR> has to be a hex number but with no 0x prefix.
+ os << prefix << StringPrintf("#%02zu pc ", it->num);
+ if (!it->map) {
+ os << StringPrintf("%08" PRIxPTR " ???", it->pc);
} else {
- if (current_method != nullptr && current_method->IsWithinQuickCode(it->pc)) {
+ os << StringPrintf("%08" PRIxPTR " ", it->pc - it->map->start)
+ << it->map->name << " (";
+ if (!it->func_name.empty()) {
+ os << it->func_name;
+ if (it->func_offset != 0) {
+ os << "+" << it->func_offset;
+ }
+ } else if (current_method != nullptr && current_method->IsWithinQuickCode(it->pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
} else {
os << "???";
}
+ os << ")";
}
- if (it->func_offset != 0) {
- os << "+" << it->func_offset;
- }
- os << StringPrintf(" [%p]", reinterpret_cast<void*>(it->pc));
- os << " (" << CleanMapName(it->map) << ")\n";
+ os << "\n";
}
}
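
Given the format contract spelled out in the comment above, a consumer might match frames along these lines (an illustrative parser, not any specific tool's implementation):

    #include <regex>
    #include <string>

    // Matches: <any prefix>#XX pc <hex, no 0x prefix>  <path> ...
    // Single spaces around "pc", two spaces after the relative address,
    // exactly as the contract documented above requires.
    bool ParseFrame(const std::string& line, std::string* addr, std::string* path) {
      static const std::regex kFrame(R"(#\d+ pc ([0-9a-fA-F]+)  (\S+))");
      std::smatch m;
      if (!std::regex_search(line, m, kFrame)) {
        return false;
      }
      *addr = m[1].str();
      *path = m[2].str();
      return true;
    }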
diff --git a/runtime/utils.h b/runtime/utils.h
index 5def66b56c..6ab10137b1 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -382,7 +382,7 @@ void SetThreadName(const char* thread_name);
// Dumps the native stack for thread 'tid' to 'os'.
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
- bool include_count = true, mirror::ArtMethod* current_method = nullptr)
+ mirror::ArtMethod* current_method = nullptr)
NO_THREAD_SAFETY_ANALYSIS;
// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index dbde7c7632..91170f0e83 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -361,7 +361,7 @@ mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader, &mh.GetClassDef(),
- mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
+ mh.GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
true);
return verifier.FindAccessedFieldAtDexPc(dex_pc);
}
@@ -375,11 +375,11 @@ mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
// got what we wanted.
bool success = Verify();
if (!success) {
- return NULL;
+ return nullptr;
}
RegisterLine* register_line = reg_table_.GetLine(dex_pc);
if (register_line == NULL) {
- return NULL;
+ return nullptr;
}
const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
return GetQuickFieldAccess(inst, register_line);
@@ -1176,8 +1176,11 @@ bool MethodVerifier::SetTypesFromSignature() {
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->get(), descriptor,
- false);
+ const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+ if (!reg_type.IsNonZeroReferenceTypes()) {
+ DCHECK(HasFailures());
+ return false;
+ }
reg_line->SetRegisterType(arg_start + cur_arg, reg_type);
}
break;
@@ -2865,11 +2868,7 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
common_super = &reg_types_.JavaLangThrowable(false);
} else {
const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
- if (common_super == NULL) {
- // Unconditionally assign for the first handler. We don't assert this is a Throwable
- // as that is caught at runtime
- common_super = &exception;
- } else if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
+ if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
if (exception.IsUnresolvedTypes()) {
// We don't know enough about the type. Fail here and let runtime handle it.
Fail(VERIFY_ERROR_NO_CLASS) << "unresolved exception class " << exception;
@@ -2878,6 +2877,8 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unexpected non-exception class " << exception;
return reg_types_.Conflict();
}
+ } else if (common_super == nullptr) {
+ common_super = &exception;
} else if (common_super->Equals(exception)) {
// odd case, but nothing to do
} else {
@@ -3118,37 +3119,14 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
- if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
- return nullptr;
- } else if (actual_arg_type.IsZero()) { // Invoke on "null" instance: we can't go further.
+ if (!actual_arg_type.HasClass()) {
+ VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
}
- mirror::Class* this_class = NULL;
- if (!actual_arg_type.IsUnresolvedTypes()) {
- this_class = actual_arg_type.GetClass();
- } else {
- const std::string& descriptor(actual_arg_type.GetDescriptor());
- // Try to resolve type.
- const RegType& resolved_arg_type = reg_types_.FromDescriptor(class_loader_->get(),
- descriptor.c_str(), false);
- if (!resolved_arg_type.HasClass()) {
- return nullptr; // Resolution failed.
- }
- this_class = resolved_arg_type.GetClass();
- if (this_class == NULL) {
- Thread* self = Thread::Current();
- self->ClearException();
- // Look for a system class
- this_class = reg_types_.FromDescriptor(nullptr, descriptor.c_str(), false).GetClass();
- }
- }
- if (this_class == NULL) {
- return NULL;
- }
- mirror::ObjectArray<mirror::ArtMethod>* vtable = this_class->GetVTable();
- CHECK(vtable != NULL);
+ mirror::ObjectArray<mirror::ArtMethod>* vtable = actual_arg_type.GetClass()->GetVTable();
+ CHECK(vtable != nullptr);
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK(vtable_index < vtable->GetLength());
+ CHECK_LT(static_cast<int32_t>(vtable_index), vtable->GetLength());
mirror::ArtMethod* res_method = vtable->Get(vtable_index);
CHECK(!Thread::Current()->IsExceptionPending());
return res_method;
@@ -3619,29 +3597,6 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_ty
}
}
-// Look for an instance field with this offset.
-// TODO: we may speed up the search if offsets are sorted by doing a quick search.
-static mirror::ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ObjectArray<mirror::ArtField>* instance_fields = klass->GetIFields();
- if (instance_fields != NULL) {
- for (int32_t i = 0, e = instance_fields->GetLength(); i < e; ++i) {
- mirror::ArtField* field = instance_fields->Get(i);
- if (field->GetOffset().Uint32Value() == field_offset) {
- return field;
- }
- }
- }
- // We did not find field in class: look into superclass.
- if (klass->GetSuperClass() != NULL) {
- return FindInstanceFieldWithOffset(klass->GetSuperClass(), field_offset);
- } else {
- return NULL;
- }
-}
-
-// Returns the access field of a quick field access (iget/iput-quick) or NULL
-// if it cannot be found.
mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
RegisterLine* reg_line) {
DCHECK(inst->Opcode() == Instruction::IGET_QUICK ||
@@ -3651,29 +3606,18 @@ mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
- mirror::Class* object_class = NULL;
- if (!object_type.IsUnresolvedTypes()) {
- object_class = object_type.GetClass();
- } else {
- // We need to resolve the class from its descriptor.
- const std::string& descriptor(object_type.GetDescriptor());
- Thread* self = Thread::Current();
- object_class = reg_types_.FromDescriptor(class_loader_->get(), descriptor.c_str(),
- false).GetClass();
- if (object_class == NULL) {
- self->ClearException();
- // Look for a system class
- object_class = reg_types_.FromDescriptor(nullptr, descriptor.c_str(),
- false).GetClass();
- }
- }
- if (object_class == NULL) {
- // Failed to get the Class* from reg type.
- LOG(WARNING) << "Failed to get Class* from " << object_type;
- return NULL;
+ if (!object_type.HasClass()) {
+ VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
+ return nullptr;
}
uint32_t field_offset = static_cast<uint32_t>(inst->VRegC_22c());
- return FindInstanceFieldWithOffset(object_class, field_offset);
+ mirror::ArtField* f = mirror::ArtField::FindInstanceFieldWithOffset(object_type.GetClass(),
+ field_offset);
+ if (f == nullptr) {
+ VLOG(verifier) << "Failed to find instance field at offset '" << field_offset
+ << "' from '" << PrettyDescriptor(object_type.GetClass()) << "'";
+ }
+ return f;
}
void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
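
(GetQuickFieldAccess now delegates the offset search to mirror::ArtField::FindInstanceFieldWithOffset; the deleted static helper above shows the algorithm it kept — a linear scan of the class's instance fields, recursing into the superclass on a miss. A hedged Java analog, using sun.misc.Unsafe only to expose offsets; obtaining an Unsafe instance is JVM-specific, so it is passed in, and the class/method names are illustrative:)

    import java.lang.reflect.Field;
    import java.lang.reflect.Modifier;

    class FieldByOffset {
        // Reverse-map a raw field offset to the Field placed there, scanning
        // the class and then its superclasses, like the deleted helper.
        static Field find(sun.misc.Unsafe unsafe, Class<?> klass, long offset) {
            for (Class<?> c = klass; c != null; c = c.getSuperclass()) {
                for (Field f : c.getDeclaredFields()) {
                    if (!Modifier.isStatic(f.getModifiers())
                            && unsafe.objectFieldOffset(f) == offset) {
                        return f;
                    }
                }
            }
            return null;  // no instance field at this offset
        }
    }
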
diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt
index 55b0dbe8f4..ecb3599482 100644
--- a/test/046-reflect/expected.txt
+++ b/test/046-reflect/expected.txt
@@ -92,6 +92,8 @@ SuperTarget constructor ()V
Target constructor (IF)V : ii=7 ff=3.3333
myMethod (I)I
arg=17 anInt=7
+got expected exception for Class.newInstance
+got expected exception for Constructor.newInstance
ReflectTest done!
public method
static java.lang.Object java.util.Collections.checkType(java.lang.Object,java.lang.Class) accessible=false
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index d60fcb485b..3e6d7007f9 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -362,6 +362,27 @@ public class Main {
targ = cons.newInstance(args);
targ.myMethod(17);
+ try {
+ Thrower thrower = Thrower.class.newInstance();
+ System.out.println("ERROR: Class.newInstance did not throw exception");
+ } catch (UnsupportedOperationException uoe) {
+ System.out.println("got expected exception for Class.newInstance");
+ } catch (Exception e) {
+ System.out.println("ERROR: Class.newInstance got unexpected exception: " +
+ e.getClass().getName());
+ }
+
+ try {
+ Constructor<Thrower> constructor = Thrower.class.getDeclaredConstructor();
+ Thrower thrower = constructor.newInstance();
+ System.out.println("ERROR: Constructor.newInstance did not throw exception");
+ } catch (InvocationTargetException ite) {
+ System.out.println("got expected exception for Constructor.newInstance");
+ } catch (Exception e) {
+ System.out.println("ERROR: Constructor.newInstance got unexpected exception: " +
+ e.getClass().getName());
+ }
+
} catch (Exception ex) {
System.out.println("----- unexpected exception -----");
ex.printStackTrace();
@@ -669,3 +690,9 @@ class MethodNoisyInitUser {
public static void staticMethod() {}
public void createMethodNoisyInit(MethodNoisyInit ni) {}
}
+
+class Thrower {
+ public Thrower() throws UnsupportedOperationException {
+ throw new UnsupportedOperationException();
+ }
+}
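
(The new test pins down a contract difference worth stating explicitly: Class.newInstance() rethrows whatever the no-arg constructor throws, unwrapped, while Constructor.newInstance() wraps it in InvocationTargetException. A standalone sketch of the same check outside the test harness; `NewInstanceDemo`/`Boom` are made-up names:)

    import java.lang.reflect.InvocationTargetException;

    class NewInstanceDemo {
        static class Boom {
            public Boom() { throw new UnsupportedOperationException("boom"); }
        }

        public static void main(String[] args) throws Exception {
            try {
                Boom.class.newInstance();  // propagates the raw exception
            } catch (UnsupportedOperationException expected) {
                System.out.println("Class.newInstance: unwrapped");
            }
            try {
                Boom.class.getDeclaredConstructor().newInstance();  // wraps it
            } catch (InvocationTargetException expected) {
                System.out.println("Constructor.newInstance: cause = "
                        + expected.getCause().getClass().getSimpleName());
            }
        }
    }
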
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index db5030022b..7576b02752 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -16,6 +16,7 @@ b13679511Test finishing
largeFrame passes
largeFrameFloat passes
mulBy1Test passes
+constantPropagationTest passes
getterSetterTest passes
identityTest passes
wideGetterSetterTest passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index d32c037afd..6a12ca93f7 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -38,6 +38,7 @@ public class Main {
largeFrameTest();
largeFrameTestFloat();
mulBy1Test();
+ constantPropagationTest();
getterSetterTest();
identityTest();
wideGetterSetterTest();
@@ -766,6 +767,32 @@ public class Main {
}
}
+ static void constantPropagationTest() {
+ int i = 1;
+ int t = 1;
+ float z = 1F;
+ long h = 1L;
+ int g[] = new int[1];
+ int w = 1;
+ long f = 0;
+
+ for (int a = 1; a < 100; a++) {
+ try {
+ i = (int)(z);
+ h >>= (0 % t);
+ }
+ finally {
+ w = (int)(2 * (f * 6));
+ }
+ }
+
+ if (w == 0 && h == 1 && g[0] == 0) {
+ System.out.println("constantPropagationTest passes");
+ } else {
+ System.out.println("constantPropagationTest fails");
+ }
+ }
+
static void b2296099Test() throws Exception {
int x = -1190771042;
int dist = 360530809;
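
(For reference, the expected values in the new regression test above follow directly from the constants: t == 1, so (0 % t) == 0 and the shift leaves h == 1; f == 0, so w == (int)(2 * (0 * 6)) == 0; and g[0] is never written. A hand-folded sketch of what a correct constant-propagation pass may legitimately reduce the loop to — not actual compiler output:)

    class FoldedLoop {
        static void constantPropagationFolded() {
            // Every loop-carried value is provably constant, so the loop
            // folds away; a pass that mis-folds the shift or the multiply
            // makes the original test print "fails".
            int w = 0;        // (int)(2 * (0L * 6))
            long h = 1L;      // 1L >> (0 % 1)
            int[] g = new int[1];
            System.out.println((w == 0 && h == 1 && g[0] == 0)
                    ? "constantPropagationTest passes"
                    : "constantPropagationTest fails");
        }
    }
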
diff --git a/test/Android.mk b/test/Android.mk
index 334df1f72a..08a925c7a4 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -57,6 +57,8 @@ TEST_OAT_DIRECTORIES := \
# StackWalk2 \
ART_TEST_TARGET_DEX_FILES :=
+ART_TEST_TARGET_DEX_FILES$(ART_PHONY_TEST_TARGET_SUFFIX) :=
+ART_TEST_TARGET_DEX_FILES$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) :=
ART_TEST_HOST_DEX_FILES :=
# $(1): module prefix
@@ -76,13 +78,17 @@ define build-art-test-dex
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
include $(BUILD_JAVA_LIBRARY)
+
ART_TEST_TARGET_DEX_FILES += $$(LOCAL_INSTALLED_MODULE)
+ ART_TEST_TARGET_DEX_FILES$(ART_PHONY_TEST_TARGET_SUFFIX) += $$(LOCAL_INSTALLED_MODULE)
ifdef TARGET_2ND_ARCH
+ ART_TEST_TARGET_DEX_FILES$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += $(4)/$(1)-$(2).jar
+
# TODO: make this a simple copy
-$(4)/$(1)-$(2).jar: $(3)/$(1)-$(2).jar
+$(4)/$(1)-$(2).jar: $(3)/$(1)-$(2).jar $(4)
cp $$< $(4)/
- endif
+ endif
endif
ifeq ($(ART_BUILD_HOST),true)
@@ -102,9 +108,22 @@ endef
$(foreach dir,$(TEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-test-dex,$(dir),$(ART_NATIVETEST_OUT),$(2ND_ART_NATIVETEST_OUT))))
$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval $(call build-art-test-dex,oat-test-dex,$(dir),$(ART_TEST_OUT),$(2ND_ART_TEST_OUT))))
+# Rules to explicitly create 2nd-arch test directories, as we use a "cp" for them
+# instead of BUILD_JAVA_LIBRARY
+ifneq ($(2ND_ART_NATIVETEST_OUT),)
+$(2ND_ART_NATIVETEST_OUT):
+ $(hide) mkdir -p $@
+endif
+
+ifneq ($(2ND_ART_TEST_OUT),)
+$(2ND_ART_TEST_OUT):
+ $(hide) mkdir -p $@
+endif
+
########################################################################
-ART_TEST_TARGET_OAT_TARGETS :=
+ART_TEST_TARGET_OAT_TARGETS$(ART_PHONY_TEST_TARGET_SUFFIX) :=
+ART_TEST_TARGET_OAT_TARGETS$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) :=
ART_TEST_HOST_OAT_DEFAULT_TARGETS :=
ART_TEST_HOST_OAT_INTERPRETER_TARGETS :=
@@ -160,7 +179,10 @@ test-art-host-oat-$(1): test-art-host-oat-default-$(1) test-art-host-oat-interpr
.PHONY: test-art-oat-$(1)
test-art-oat-$(1): test-art-host-oat-$(1) test-art-target-oat-$(1)
-ART_TEST_TARGET_OAT_TARGETS += test-art-target-oat-$(1)
+ART_TEST_TARGET_OAT_TARGETS$(ART_PHONY_TEST_TARGET_SUFFIX) += test-art-target-oat-$(1)$(ART_PHONY_TEST_TARGET_SUFFIX)
+ifdef TARGET_2ND_ARCH
+ ART_TEST_TARGET_OAT_TARGETS$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) += test-art-target-oat-$(1)$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+endif
ART_TEST_HOST_OAT_DEFAULT_TARGETS += test-art-host-oat-default-$(1)
ART_TEST_HOST_OAT_INTERPRETER_TARGETS += test-art-host-oat-interpreter-$(1)
endef
diff --git a/test/ThreadStress/ThreadStress.java b/test/ThreadStress/ThreadStress.java
index 795c790212..5dccc689c2 100644
--- a/test/ThreadStress/ThreadStress.java
+++ b/test/ThreadStress/ThreadStress.java
@@ -14,13 +14,15 @@
* limitations under the License.
*/
+import android.system.ErrnoException;
+import android.system.Os;
+import android.system.OsConstants;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import libcore.io.*;
// Run on host with:
// javac ThreadTest.java && java ThreadStress && rm *.class
@@ -202,7 +204,7 @@ class ThreadStress implements Runnable {
}
case SIGQUIT: {
try {
- Libcore.os.kill(Libcore.os.getpid(), OsConstants.SIGQUIT);
+ Os.kill(Os.getpid(), OsConstants.SIGQUIT);
} catch (ErrnoException ex) {
}
}
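
(The import swap above moves the test off the internal libcore.io.Libcore entry points onto the public android.system.Os API. For context, a minimal standalone use of the same call — runs only on Android, not a host JVM; SIGQUIT asks ART to dump all thread stacks rather than terminate the process. `SigquitSelf` is an illustrative name:)

    import android.system.ErrnoException;
    import android.system.Os;
    import android.system.OsConstants;

    class SigquitSelf {
        // Send SIGQUIT to the current process to trigger a thread-stack dump.
        static void requestStackDump() {
            try {
                Os.kill(Os.getpid(), OsConstants.SIGQUIT);
            } catch (ErrnoException ignored) {
                // kill(2) on one's own pid should not fail; ignored, as in the test.
            }
        }
    }
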
diff --git a/test/run-test b/test/run-test
index 58de9809df..6e59641eba 100755
--- a/test/run-test
+++ b/test/run-test
@@ -68,6 +68,7 @@ debug_mode="no"
runtime="art"
usage="no"
build_only="no"
+suffix64=""
while true; do
if [ "x$1" = "x--host" ]; then
@@ -154,6 +155,7 @@ while true; do
shift
elif [ "x$1" = "x--64" ]; then
run_args="${run_args} --64"
+ suffix64="64"
shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
@@ -187,7 +189,7 @@ elif [ "$runtime" = "art" ]; then
fi
run_args="${run_args} --boot -Ximage:${ANDROID_HOST_OUT}/framework/core.art"
else
- run_args="${run_args} --boot -Ximage:/data/art-test/core.art"
+ run_args="${run_args} --boot -Ximage:/data/art-test${suffix64}/core.art"
fi
fi