-rw-r--r--  build/Android.executable.mk | 10
-rw-r--r--  build/Android.gtest.mk | 4
-rw-r--r--  compiler/common_compiler_test.cc | 1
-rw-r--r--  compiler/dex/bb_optimizations.h | 7
-rw-r--r--  compiler/dex/compiler_enums.h | 3
-rw-r--r--  compiler/dex/mir_dataflow.cc | 96
-rw-r--r--  compiler/dex/mir_field_info.h | 1
-rw-r--r--  compiler/dex/mir_graph.cc | 75
-rw-r--r--  compiler/dex/mir_graph.h | 49
-rw-r--r--  compiler/dex/mir_optimization.cc | 372
-rw-r--r--  compiler/dex/mir_optimization_test.cc | 559
-rw-r--r--  compiler/dex/pass_driver_me_opts.cc | 9
-rw-r--r--  compiler/dex/pass_driver_me_post_opt.cc | 1
-rw-r--r--  compiler/dex/post_opt_passes.h | 7
-rw-r--r--  compiler/dex/ssa_transformation.cc | 8
-rw-r--r--  compiler/image_test.cc | 20
-rw-r--r--  dalvikvm/Android.mk | 11
-rw-r--r--  dex2oat/dex2oat.cc | 1
-rw-r--r--  oatdump/oatdump.cc | 2
-rw-r--r--  patchoat/patchoat.cc | 1
-rw-r--r--  runtime/base/allocator.cc | 1
-rw-r--r--  runtime/class_linker.cc | 23
-rw-r--r--  runtime/common_runtime_test.cc | 2
-rw-r--r--  runtime/mem_map.cc | 29
-rw-r--r--  runtime/mem_map.h | 5
-rw-r--r--  runtime/mem_map_test.cc | 14
-rw-r--r--  runtime/runtime.cc | 3
-rw-r--r--  runtime/thread_list.cc | 4
-rw-r--r--  runtime/trace.cc | 12
-rw-r--r--  runtime/trace.h | 5
-rw-r--r--  sigchainlib/Android.mk | 6
-rw-r--r--  sigchainlib/sigchain.cc | 17
-rw-r--r--  sigchainlib/sigchain.h | 12
-rw-r--r--  sigchainlib/sigchain_dummy.cc | 59
-rw-r--r--  sigchainlib/version-script.txt | 12
-rw-r--r--  test/004-UnsafeTest/src/Main.java | 4
-rw-r--r--  test/036-finalizer/src/Main.java | 26
-rw-r--r--  test/122-missing-classes/build | 28
-rw-r--r--  test/122-missing-classes/expected.txt | 6
-rw-r--r--  test/122-missing-classes/info.txt | 1
-rw-r--r--  test/122-missing-classes/src/Main.java | 80
-rw-r--r--  test/122-missing-classes/src/MissingClass.java | 20
-rwxr-xr-x  test/etc/run-test-jar | 7
43 files changed, 1164 insertions, 449 deletions
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index d887acd1cd..02252ab02b 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -54,7 +54,7 @@ define build-art-executable
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
- LOCAL_SRC_FILES := $$(art_source)
+ LOCAL_SRC_FILES := $$(art_source) ../sigchainlib/sigchain.cc
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime $$(art_c_includes)
LOCAL_SHARED_LIBRARIES += $$(art_shared_libraries)
@@ -65,9 +65,15 @@ define build-art-executable
endif
LOCAL_CFLAGS := $(ART_EXECUTABLES_CFLAGS)
+ # Mac OS linker doesn't understand --export-dynamic/--version-script.
+ ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host)
+ LOCAL_LDFLAGS := -Wl,--version-script,art/sigchainlib/version-script.txt -Wl,--export-dynamic
+ endif
+
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
$(call set-target-local-cflags-vars,$(6))
+ LOCAL_SHARED_LIBRARIES += libdl
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
@@ -76,7 +82,7 @@ define build-art-executable
else
LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
endif
- LOCAL_LDLIBS += -lpthread
+ LOCAL_LDLIBS += -lpthread -ldl
endif
ifeq ($$(art_ndebug_or_debug),ndebug)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index af43a3c6d6..3e100e95ca 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -287,7 +287,7 @@ define define-art-gtest-rule-host
.PHONY: $$(gtest_rule)
$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && LD_PRELOAD=libsigchain$$(ART_HOST_SHLIB_EXTENSION) $$< && $$(call ART_TEST_PASSED,$$@)) \
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
@@ -335,7 +335,7 @@ define define-art-gtest
LOCAL_MODULE_TAGS := tests
endif
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
- LOCAL_SRC_FILES := $$(art_gtest_filename)
+ LOCAL_SRC_FILES := $$(art_gtest_filename) sigchainlib/sigchain.cc
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES) art/runtime $$(art_gtest_extra_c_includes)
LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index e3eb9e9915..d1d47fb361 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -397,6 +397,7 @@ void CommonCompilerTest::ReserveImageSpace() {
// Reserve where the image will be loaded up front so that other parts of test set up don't
// accidentally end up colliding with the fixed memory address when we need to load the image.
std::string error_msg;
+ MemMap::Init();
image_reservation_.reset(MemMap::MapAnonymous("image reservation",
reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
(size_t)100 * 1024 * 1024, // 100MB
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index fce23bc2fb..fba0863697 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -143,7 +143,7 @@ class CodeLayout : public PassME {
class NullCheckElimination : public PassME {
public:
NullCheckElimination()
- : PassME("NCE", kRepeatingTopologicalSortTraversal, "3_post_nce_cfg") {
+ : PassME("NCE", kRepeatingPreOrderDFSTraversal, "3_post_nce_cfg") {
}
bool Gate(const PassDataHolder* data) const {
@@ -195,7 +195,7 @@ class TypeInference : public PassME {
class ClassInitCheckElimination : public PassME {
public:
ClassInitCheckElimination()
- : PassME("ClInitCheckElimination", kLoopRepeatingTopologicalSortTraversal) {
+ : PassME("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
}
bool Gate(const PassDataHolder* data) const {
@@ -271,7 +271,8 @@ class BBCombine : public PassME {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- return ((c_unit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
+ return c_unit->mir_graph->HasTryCatchBlocks() ||
+ ((c_unit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
}
bool Worker(PassDataHolder* data) const;
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index e4003bf088..78da420339 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -311,7 +311,8 @@ enum MIROptimizationFlagPositions {
kMIRCallee, // Instruction is inlined from callee.
kMIRIgnoreSuspendCheck,
kMIRDup,
- kMIRMark, // Temporary node mark.
+ kMIRMark, // Temporary node mark can be used by
+ // opt passes for their private needs.
kMIRStoreNonTemporal,
kMIRLastMIRFlag,
};
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 51b6709533..0a6924cbca 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -118,10 +118,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_DA | DF_REF_A | DF_NON_NULL_DST,
// 1D MONITOR_ENTER vAA
- DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+ DF_UA | DF_NULL_CHK_A | DF_REF_A,
// 1E MONITOR_EXIT vAA
- DF_UA | DF_NULL_CHK_0 | DF_REF_A,
+ DF_UA | DF_NULL_CHK_A | DF_REF_A,
// 1F CHK_CAST vAA, type@BBBB
DF_UA | DF_REF_A | DF_UMS,
@@ -130,7 +130,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
// 21 ARRAY_LENGTH vA, vB
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_CORE_A | DF_REF_B,
// 22 NEW_INSTANCE vAA, type@BBBB
DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
@@ -235,88 +235,88 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_NOP,
// 44 AGET vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 45 AGET_WIDE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 46 AGET_OBJECT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
// 47 AGET_BOOLEAN vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 48 AGET_BYTE vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 49 AGET_CHAR vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 4A AGET_SHORT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_DA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 4B APUT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 4C APUT_WIDE vAA, vBB, vCC
- DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 4D APUT_OBJECT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_A | DF_REF_B | DF_CORE_C | DF_LVN,
// 4E APUT_BOOLEAN vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 4F APUT_BYTE vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 50 APUT_CHAR vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 51 APUT_SHORT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UA | DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 52 IGET vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 53 IGET_WIDE vA, vB, field@CCCC
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 54 IGET_OBJECT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
// 55 IGET_BOOLEAN vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 56 IGET_BYTE vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 57 IGET_CHAR vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 58 IGET_SHORT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 59 IPUT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 5A IPUT_WIDE vA, vB, field@CCCC
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 5B IPUT_OBJECT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
// 5C IPUT_BOOLEAN vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 5D IPUT_BYTE vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 5E IPUT_CHAR vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 5F IPUT_SHORT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// 60 SGET vAA, field@BBBB
DF_DA | DF_SFIELD | DF_UMS,
@@ -712,10 +712,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
// E3 IGET_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// E4 IPUT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// E5 SGET_VOLATILE
DF_DA | DF_SFIELD | DF_UMS,
@@ -724,13 +724,13 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_UA | DF_SFIELD | DF_UMS,
// E7 IGET_OBJECT_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
// E8 IGET_WIDE_VOLATILE
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// E9 IPUT_WIDE_VOLATILE
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_REF_B | DF_IFIELD | DF_LVN,
// EA SGET_WIDE_VOLATILE
DF_DA | DF_A_WIDE | DF_SFIELD | DF_UMS,
@@ -751,28 +751,28 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_FORMAT_3RC,
// F0 INVOKE_OBJECT_INIT_RANGE
- DF_NOP | DF_NULL_CHK_0,
+ DF_NOP,
// F1 RETURN_VOID_BARRIER
DF_NOP,
// F2 IGET_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
// F3 IGET_WIDE_QUICK
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_IFIELD | DF_LVN,
+ DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
// F4 IGET_OBJECT_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_IFIELD | DF_LVN,
+ DF_DA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
// F5 IPUT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
// F6 IPUT_WIDE_QUICK
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_IFIELD | DF_LVN,
+ DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
// F7 IPUT_OBJECT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_IFIELD | DF_LVN,
// F8 INVOKE_VIRTUAL_QUICK
DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
@@ -787,7 +787,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
// FC IPUT_OBJECT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
+ DF_UA | DF_UB | DF_NULL_CHK_B | DF_REF_A | DF_REF_B | DF_IFIELD | DF_LVN,
// FD SGET_OBJECT_VOLATILE
DF_DA | DF_REF_A | DF_SFIELD | DF_UMS,
@@ -824,7 +824,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
DF_NOP,
// 108 MIR_NULL_CHECK
- DF_UA | DF_REF_A | DF_NULL_CHK_0 | DF_LVN,
+ DF_UA | DF_REF_A | DF_NULL_CHK_A | DF_LVN,
// 109 MIR_RANGE_CHECK
0,
@@ -893,10 +893,10 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
0,
// 11F MirOpPackedArrayGet
- DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
// 120 MirOpPackedArrayPut
- DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C | DF_LVN,
+ DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
};
/* Return the base virtual register for a SSA name */
@@ -1403,7 +1403,7 @@ bool MIRGraph::VerifyPredInfo(BasicBlock* bb) {
GetBlockName(bb, block_name1);
GetBlockName(pred_bb, block_name2);
DumpCFG("/sdcard/cfg/", false);
- LOG(FATAL) << "Successor " << block_name1 << "not found from "
+ LOG(FATAL) << "Successor " << block_name1 << " not found from "
<< block_name2;
}
}
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 9745c412c9..1842a16840 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -137,6 +137,7 @@ class MirIFieldLoweringInfo : public MirFieldInfo {
// The member offset of the field, 0u if unresolved.
MemberOffset field_offset_;
+ friend class NullCheckEliminationTest;
friend class GlobalValueNumberingTest;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index f0c9858627..8dded79aa2 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -86,6 +86,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
raw_use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
max_num_reachable_blocks_(0),
+ dfs_orders_up_to_date_(false),
dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)),
dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)),
dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)),
@@ -2224,7 +2225,7 @@ void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
}
}
-void BasicBlock::Hide(CompilationUnit* c_unit) {
+void BasicBlock::Hide(MIRGraph* mir_graph) {
// First lets make it a dalvik bytecode block so it doesn't have any special meaning.
block_type = kDalvikByteCode;
@@ -2239,7 +2240,6 @@ void BasicBlock::Hide(CompilationUnit* c_unit) {
first_mir_insn = nullptr;
last_mir_insn = nullptr;
- MIRGraph* mir_graph = c_unit->mir_graph.get();
for (BasicBlockId pred_id : predecessors) {
BasicBlock* pred_bb = mir_graph->GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
@@ -2262,6 +2262,48 @@ void BasicBlock::Hide(CompilationUnit* c_unit) {
successor_block_list_type = kNotUsed;
}
+/*
+ * Kill an unreachable block and all blocks that become unreachable by killing this one.
+ */
+void BasicBlock::KillUnreachable(MIRGraph* mir_graph) {
+ DCHECK(predecessors.empty()); // Unreachable.
+
+ // Mark as dead and hidden.
+ block_type = kDead;
+ hidden = true;
+
+ // Detach it from its MIRs so we don't generate code for them. Also detached MIRs
+ // are updated to know that they no longer have a parent.
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
+ mir->bb = NullBasicBlockId;
+ }
+ first_mir_insn = nullptr;
+ last_mir_insn = nullptr;
+
+ data_flow_info = nullptr;
+
+ // Erase this bb from all children's predecessors and kill unreachable children.
+ ChildBlockIterator iter(this, mir_graph);
+ for (BasicBlock* succ_bb = iter.Next(); succ_bb != nullptr; succ_bb = iter.Next()) {
+ succ_bb->ErasePredecessor(id);
+ if (succ_bb->predecessors.empty()) {
+ succ_bb->KillUnreachable(mir_graph);
+ }
+ }
+
+ // Remove links to children.
+ fall_through = NullBasicBlockId;
+ taken = NullBasicBlockId;
+ successor_block_list_type = kNotUsed;
+
+ if (kIsDebugBuild) {
+ if (catch_entry) {
+ DCHECK_EQ(mir_graph->catches_.count(start_offset), 1u);
+ mir_graph->catches_.erase(start_offset);
+ }
+ }
+}
+
bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
// In order to determine if the ssa reg is live out, we scan all the MIRs. We remember
// the last SSA number of the same dalvik register. At the end, if it is different than ssa_reg,
@@ -2333,17 +2375,34 @@ bool BasicBlock::ReplaceChild(BasicBlockId old_bb, BasicBlockId new_bb) {
void BasicBlock::ErasePredecessor(BasicBlockId old_pred) {
auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
DCHECK(pos != predecessors.end());
- predecessors.erase(pos);
+ // It's faster to move the back() to *pos than erase(pos).
+ *pos = predecessors.back();
+ predecessors.pop_back();
+ size_t idx = std::distance(predecessors.begin(), pos);
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) {
+ break;
+ }
+ DCHECK_EQ(mir->ssa_rep->num_uses - 1u, predecessors.size());
+ DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred);
+ mir->meta.phi_incoming[idx] = mir->meta.phi_incoming[predecessors.size()];
+ mir->ssa_rep->uses[idx] = mir->ssa_rep->uses[predecessors.size()];
+ mir->ssa_rep->num_uses = predecessors.size();
+ }
}
void BasicBlock::UpdatePredecessor(BasicBlockId old_pred, BasicBlockId new_pred) {
DCHECK_NE(new_pred, NullBasicBlockId);
auto pos = std::find(predecessors.begin(), predecessors.end(), old_pred);
- if (pos != predecessors.end()) {
- *pos = new_pred;
- } else {
- // If not found, add it.
- predecessors.push_back(new_pred);
+ DCHECK(pos != predecessors.end());
+ *pos = new_pred;
+ size_t idx = std::distance(predecessors.begin(), pos);
+ for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (static_cast<int>(mir->dalvikInsn.opcode) != kMirOpPhi) {
+ break;
+ }
+ DCHECK_EQ(mir->meta.phi_incoming[idx], old_pred);
+ mir->meta.phi_incoming[idx] = new_pred;
}
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index cc215bde06..80303f6752 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -49,17 +49,14 @@ enum DataFlowAttributePos {
kFormat35c,
kFormat3rc,
kFormatExtended, // Extended format for extended MIRs.
- kNullCheckSrc0, // Null check of uses[0].
- kNullCheckSrc1, // Null check of uses[1].
- kNullCheckSrc2, // Null check of uses[2].
+ kNullCheckA, // Null check of A.
+ kNullCheckB, // Null check of B.
kNullCheckOut0, // Null check out outgoing arg0.
kDstNonNull, // May assume dst is non-null.
kRetNonNull, // May assume retval is non-null.
kNullTransferSrc0, // Object copy src[0] -> dst.
kNullTransferSrcN, // Phi null check state transfer.
- kRangeCheckSrc1, // Range check of uses[1].
- kRangeCheckSrc2, // Range check of uses[2].
- kRangeCheckSrc3, // Range check of uses[3].
+ kRangeCheckC, // Range check of C.
kFPA,
kFPB,
kFPC,
@@ -88,17 +85,14 @@ enum DataFlowAttributePos {
#define DF_FORMAT_35C (UINT64_C(1) << kFormat35c)
#define DF_FORMAT_3RC (UINT64_C(1) << kFormat3rc)
#define DF_FORMAT_EXTENDED (UINT64_C(1) << kFormatExtended)
-#define DF_NULL_CHK_0 (UINT64_C(1) << kNullCheckSrc0)
-#define DF_NULL_CHK_1 (UINT64_C(1) << kNullCheckSrc1)
-#define DF_NULL_CHK_2 (UINT64_C(1) << kNullCheckSrc2)
+#define DF_NULL_CHK_A (UINT64_C(1) << kNullCheckA)
+#define DF_NULL_CHK_B (UINT64_C(1) << kNullCheckB)
#define DF_NULL_CHK_OUT0 (UINT64_C(1) << kNullCheckOut0)
#define DF_NON_NULL_DST (UINT64_C(1) << kDstNonNull)
#define DF_NON_NULL_RET (UINT64_C(1) << kRetNonNull)
#define DF_NULL_TRANSFER_0 (UINT64_C(1) << kNullTransferSrc0)
#define DF_NULL_TRANSFER_N (UINT64_C(1) << kNullTransferSrcN)
-#define DF_RANGE_CHK_1 (UINT64_C(1) << kRangeCheckSrc1)
-#define DF_RANGE_CHK_2 (UINT64_C(1) << kRangeCheckSrc2)
-#define DF_RANGE_CHK_3 (UINT64_C(1) << kRangeCheckSrc3)
+#define DF_RANGE_CHK_C (UINT64_C(1) << kRangeCheckC)
#define DF_FP_A (UINT64_C(1) << kFPA)
#define DF_FP_B (UINT64_C(1) << kFPB)
#define DF_FP_C (UINT64_C(1) << kFPC)
@@ -117,14 +111,11 @@ enum DataFlowAttributePos {
#define DF_HAS_DEFS (DF_DA)
-#define DF_HAS_NULL_CHKS (DF_NULL_CHK_0 | \
- DF_NULL_CHK_1 | \
- DF_NULL_CHK_2 | \
+#define DF_HAS_NULL_CHKS (DF_NULL_CHK_A | \
+ DF_NULL_CHK_B | \
DF_NULL_CHK_OUT0)
-#define DF_HAS_RANGE_CHKS (DF_RANGE_CHK_1 | \
- DF_RANGE_CHK_2 | \
- DF_RANGE_CHK_3)
+#define DF_HAS_RANGE_CHKS (DF_RANGE_CHK_C)
#define DF_HAS_NR_CHKS (DF_HAS_NULL_CHKS | \
DF_HAS_RANGE_CHKS)
@@ -132,9 +123,10 @@ enum DataFlowAttributePos {
#define DF_A_IS_REG (DF_UA | DF_DA)
#define DF_B_IS_REG (DF_UB)
#define DF_C_IS_REG (DF_UC)
-#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)
#define DF_NULL_TRANSFER (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)
+#define DF_IS_INVOKE (DF_FORMAT_35C | DF_FORMAT_3RC)
+
enum OatMethodAttributes {
kIsLeaf, // Method is leaf.
kHasLoop, // Method contains simple loop.
@@ -160,6 +152,7 @@ enum OatMethodAttributes {
#define MIR_CALLEE (1 << kMIRCallee)
#define MIR_IGNORE_SUSPEND_CHECK (1 << kMIRIgnoreSuspendCheck)
#define MIR_DUP (1 << kMIRDup)
+#define MIR_MARK (1 << kMIRMark)
#define MIR_STORE_NON_TEMPORAL (1 << kMIRStoreNonTemporal)
#define BLOCK_NAME_LEN 80
@@ -422,7 +415,12 @@ struct BasicBlock {
* remove itself from any predecessor edges, remove itself from any
* child's predecessor array.
*/
- void Hide(CompilationUnit* c_unit);
+ void Hide(MIRGraph* mir_graph);
+
+ /**
+ * @brief Kill the unreachable block and all blocks that become unreachable by killing this one.
+ */
+ void KillUnreachable(MIRGraph* mir_graph);
/**
* @brief Is ssa_reg the last SSA definition of that VR in the block?
@@ -1015,6 +1013,10 @@ class MIRGraph {
return GetFirstSpecialTempVR() + max_available_special_compiler_temps_;
}
+ bool HasTryCatchBlocks() const {
+ return current_code_item_->tries_size_ != 0;
+ }
+
void DumpCheckStats();
MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
int SRegToVReg(int ssa_reg) const;
@@ -1150,6 +1152,10 @@ class MIRGraph {
void InsertPhiNodes();
void DoDFSPreOrderSSARename(BasicBlock* block);
+ bool DfsOrdersUpToDate() const {
+ return dfs_orders_up_to_date_;
+ }
+
/*
* IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
* we can verify that all catch entries have native PC entries.
@@ -1246,6 +1252,7 @@ class MIRGraph {
ArenaVector<uint32_t> raw_use_counts_; // Not weighted
unsigned int num_reachable_blocks_;
unsigned int max_num_reachable_blocks_;
+ bool dfs_orders_up_to_date_;
ArenaVector<BasicBlockId> dfs_order_;
ArenaVector<BasicBlockId> dfs_post_order_;
ArenaVector<BasicBlockId> dom_post_order_traversal_;
@@ -1306,7 +1313,9 @@ class MIRGraph {
static const uint64_t oat_data_flow_attributes_[kMirOpLast];
ArenaVector<BasicBlock*> gen_suspend_test_list_; // List of blocks containing suspend tests
+ friend class MirOptimizationTest;
friend class ClassInitCheckEliminationTest;
+ friend class NullCheckEliminationTest;
friend class GlobalValueNumberingTest;
friend class LocalValueNumberingTest;
friend class TopologicalSortOrderTest;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 84c056daf3..00528e5f4b 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -752,51 +752,101 @@ bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
/* Combine any basic blocks terminated by instructions that we now know can't throw */
void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
// Loop here to allow combining a sequence of blocks
- while (true) {
- // Check termination conditions
- if ((bb->first_mir_insn == NULL)
- || (bb->data_flow_info == NULL)
- || (bb->block_type == kExceptionHandling)
- || (bb->block_type == kExitBlock)
- || (bb->block_type == kDead)
- || (bb->taken == NullBasicBlockId)
- || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
- || (bb->successor_block_list_type != kNotUsed)
- || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
- break;
- }
-
- // Test the kMirOpCheck instruction
+ while ((bb->block_type == kDalvikByteCode) &&
+ (bb->last_mir_insn != nullptr) &&
+ (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) == kMirOpCheck)) {
MIR* mir = bb->last_mir_insn;
- // Grab the attributes from the paired opcode
+ DCHECK(bb->first_mir_insn != nullptr);
+
+ // Grab the attributes from the paired opcode.
MIR* throw_insn = mir->meta.throw_insn;
uint64_t df_attributes = GetDataFlowAttributes(throw_insn);
- bool can_combine = true;
- if (df_attributes & DF_HAS_NULL_CHKS) {
- can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
+
+ // Don't combine if the throw_insn can still throw NPE.
+ if ((df_attributes & DF_HAS_NULL_CHKS) != 0 &&
+ (throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0) {
+ break;
}
- if (df_attributes & DF_HAS_RANGE_CHKS) {
- can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
+ // Now whitelist specific instructions.
+ bool ok = false;
+ if ((df_attributes & DF_IFIELD) != 0) {
+ // Combine only if fast, otherwise weird things can happen.
+ const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(throw_insn);
+ ok = (df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet();
+ } else if ((df_attributes & DF_SFIELD) != 0) {
+ // Combine only if fast, otherwise weird things can happen.
+ const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(throw_insn);
+ bool fast = ((df_attributes & DF_DA) ? field_info.FastPut() : field_info.FastGet());
+ // Don't combine if the SGET/SPUT can call <clinit>().
+ bool clinit = !field_info.IsInitialized() &&
+ (throw_insn->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0;
+ ok = fast && !clinit;
+ } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
+ // Only AGET/APUT have range checks. We have processed the AGET/APUT null check above.
+ DCHECK_NE(throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK, 0);
+ ok = ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
+ } else if ((throw_insn->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
+ // We can encounter a non-throwing insn here thanks to inlining or other optimizations.
+ ok = true;
+ } else if (throw_insn->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
+ throw_insn->dalvikInsn.opcode == Instruction::FILL_ARRAY_DATA ||
+ static_cast<int>(throw_insn->dalvikInsn.opcode) == kMirOpNullCheck) {
+ // No more checks for these (null check was processed above).
+ ok = true;
}
- if (!can_combine) {
+ if (!ok) {
break;
}
+
// OK - got one. Combine
BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
DCHECK(!bb_next->catch_entry);
- DCHECK_EQ(Predecessors(bb_next), 1U);
- // Overwrite the kOpCheck insn with the paired opcode
+ DCHECK_EQ(bb_next->predecessors.size(), 1u);
+ // Overwrite the kMirOpCheck insn with the paired opcode.
DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
*bb->last_mir_insn = *throw_insn;
+ // And grab the rest of the instructions from bb_next.
+ bb->last_mir_insn = bb_next->last_mir_insn;
+ throw_insn->next = nullptr;
+ bb_next->last_mir_insn = throw_insn;
+ // Mark acquired instructions as belonging to bb.
+ for (MIR* insn = mir; insn != nullptr; insn = insn->next) {
+ insn->bb = bb->id;
+ }
+ // Before we overwrite successors, remove their predecessor links to bb.
+ bb_next->ErasePredecessor(bb->id);
+ if (bb->taken != NullBasicBlockId) {
+ DCHECK_EQ(bb->successor_block_list_type, kNotUsed);
+ BasicBlock* bb_taken = GetBasicBlock(bb->taken);
+ // bb->taken will be overwritten below.
+ DCHECK_EQ(bb_taken->block_type, kExceptionHandling);
+ DCHECK_EQ(bb_taken->predecessors.size(), 1u);
+ DCHECK_EQ(bb_taken->predecessors[0], bb->id);
+ bb_taken->predecessors.clear();
+ bb_taken->block_type = kDead;
+ DCHECK(bb_taken->data_flow_info == nullptr);
+ } else {
+ DCHECK_EQ(bb->successor_block_list_type, kCatch);
+ for (SuccessorBlockInfo* succ_info : bb->successor_blocks) {
+ if (succ_info->block != NullBasicBlockId) {
+ BasicBlock* succ_bb = GetBasicBlock(succ_info->block);
+ DCHECK(succ_bb->catch_entry);
+ succ_bb->ErasePredecessor(bb->id);
+ if (succ_bb->predecessors.empty()) {
+ succ_bb->KillUnreachable(this);
+ }
+ }
+ }
+ }
// Use the successor info from the next block
bb->successor_block_list_type = bb_next->successor_block_list_type;
bb->successor_blocks.swap(bb_next->successor_blocks); // Swap instead of copying.
+ bb_next->successor_block_list_type = kNotUsed;
// Use the ending block linkage from the next block
bb->fall_through = bb_next->fall_through;
- GetBasicBlock(bb->taken)->block_type = kDead; // Kill the unused exception block
+ bb_next->fall_through = NullBasicBlockId;
bb->taken = bb_next->taken;
- // Include the rest of the instructions
- bb->last_mir_insn = bb_next->last_mir_insn;
+ bb_next->taken = NullBasicBlockId;
/*
* If lower-half of pair of blocks to combine contained
* a return or a conditional branch or an explicit throw,
@@ -805,15 +855,30 @@ void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
bb->terminated_by_return = bb_next->terminated_by_return;
bb->conditional_branch = bb_next->conditional_branch;
bb->explicit_throw = bb_next->explicit_throw;
+ // Merge the use_lvn flag.
+ bb->use_lvn |= bb_next->use_lvn;
+
+ // Kill the unused block.
+ bb_next->data_flow_info = nullptr;
/*
* NOTE: we aren't updating all dataflow info here. Should either make sure this pass
* happens after uses of i_dominated, dom_frontier or update the dataflow info here.
+ * NOTE: GVN uses bb->data_flow_info->live_in_v which is unaffected by the block merge.
*/
- // Kill bb_next and remap now-dead id to parent
+ // Kill bb_next and remap now-dead id to parent.
bb_next->block_type = kDead;
+ bb_next->data_flow_info = nullptr; // Must be null for dead blocks. (Relied on by the GVN.)
block_id_map_.Overwrite(bb_next->id, bb->id);
+ // Update predecessors in children.
+ ChildBlockIterator iter(bb, this);
+ for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
+ child->UpdatePredecessor(bb_next->id, bb->id);
+ }
+
+ // DFS orders are not up to date anymore.
+ dfs_orders_up_to_date_ = false;
// Now, loop back and see if we can keep going
}
@@ -827,12 +892,21 @@ bool MIRGraph::EliminateNullChecksGate() {
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumSSARegs();
+ temp_bit_vector_size_ = GetNumOfCodeVRs();
temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
temp_bit_matrix_ = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
+
+ // reset MIR_MARK
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ mir->optimization_flags &= ~MIR_MARK;
+ }
+ }
+
return true;
}
@@ -840,100 +914,96 @@ bool MIRGraph::EliminateNullChecksGate() {
* Eliminate unnecessary null checks for a basic block.
*/
bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
- if (bb->data_flow_info == nullptr) return false;
+ if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
+ // Ignore the kExitBlock as well.
+ DCHECK(bb->first_mir_insn == nullptr);
+ return false;
+ }
- ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
+ ArenaBitVector* vregs_to_check = temp_bit_vector_;
/*
* Set initial state. Catch blocks don't need any special treatment.
*/
if (bb->block_type == kEntryBlock) {
- ssa_regs_to_check->ClearAllBits();
+ vregs_to_check->ClearAllBits();
// Assume all ins are objects.
for (uint16_t in_reg = GetFirstInVR();
in_reg < GetNumOfCodeVRs(); in_reg++) {
- ssa_regs_to_check->SetBit(in_reg);
+ vregs_to_check->SetBit(in_reg);
}
if ((cu_->access_flags & kAccStatic) == 0) {
- // If non-static method, mark "this" as non-null
+ // If non-static method, mark "this" as non-null.
int this_reg = GetFirstInVR();
- ssa_regs_to_check->ClearBit(this_reg);
- }
- } else if (bb->predecessors.size() == 1) {
- BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
- // pred_bb must have already been processed at least once.
- DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
- ssa_regs_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
- if (pred_bb->block_type == kDalvikByteCode) {
- // Check to see if predecessor had an explicit null-check.
- MIR* last_insn = pred_bb->last_mir_insn;
- if (last_insn != nullptr) {
- Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
- if (last_opcode == Instruction::IF_EQZ) {
- if (pred_bb->fall_through == bb->id) {
- // The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
- // it can't be null.
- ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
- }
- } else if (last_opcode == Instruction::IF_NEZ) {
- if (pred_bb->taken == bb->id) {
- // The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
- // null.
- ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
- }
- }
- }
+ vregs_to_check->ClearBit(this_reg);
}
} else {
- // Starting state is union of all incoming arcs
+ DCHECK_EQ(bb->block_type, kDalvikByteCode);
+ // Starting state is union of all incoming arcs.
bool copied_first = false;
for (BasicBlockId pred_id : bb->predecessors) {
+ if (temp_bit_matrix_[pred_id] == nullptr) {
+ continue;
+ }
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->data_flow_info != nullptr);
- if (temp_bit_matrix_[pred_bb->id] == nullptr) {
- continue;
+ MIR* null_check_insn = nullptr;
+ if (pred_bb->block_type == kDalvikByteCode) {
+ // Check to see if predecessor had an explicit null-check.
+ MIR* last_insn = pred_bb->last_mir_insn;
+ if (last_insn != nullptr) {
+ Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
+ if ((last_opcode == Instruction::IF_EQZ && pred_bb->fall_through == bb->id) ||
+ (last_opcode == Instruction::IF_NEZ && pred_bb->taken == bb->id)) {
+ // Remember the null check insn if there's no other predecessor requiring null check.
+ if (!copied_first || !vregs_to_check->IsBitSet(last_insn->dalvikInsn.vA)) {
+ null_check_insn = last_insn;
+ }
+ }
+ }
}
if (!copied_first) {
copied_first = true;
- ssa_regs_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ vregs_to_check->Copy(temp_bit_matrix_[pred_id]);
} else {
- ssa_regs_to_check->Union(temp_bit_matrix_[pred_bb->id]);
+ vregs_to_check->Union(temp_bit_matrix_[pred_id]);
+ }
+ if (null_check_insn != nullptr) {
+ vregs_to_check->ClearBit(null_check_insn->dalvikInsn.vA);
}
}
DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
- // At this point, ssa_regs_to_check shows which sregs have an object definition with
+ // At this point, vregs_to_check shows which sregs have an object definition with
// no intervening uses.
// Walk through the instruction in the block, updating as necessary
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- if (mir->ssa_rep == NULL) {
- continue;
- }
-
uint64_t df_attributes = GetDataFlowAttributes(mir);
+ DCHECK_EQ(df_attributes & DF_NULL_TRANSFER_N, 0u); // No Phis yet.
+
// Might need a null check?
if (df_attributes & DF_HAS_NULL_CHKS) {
- int src_idx;
- if (df_attributes & DF_NULL_CHK_1) {
- src_idx = 1;
- } else if (df_attributes & DF_NULL_CHK_2) {
- src_idx = 2;
+ int src_vreg;
+ if (df_attributes & DF_NULL_CHK_OUT0) {
+ DCHECK_NE(df_attributes & DF_IS_INVOKE, 0u);
+ src_vreg = mir->dalvikInsn.vC;
+ } else if (df_attributes & DF_NULL_CHK_B) {
+ DCHECK_NE(df_attributes & DF_REF_B, 0u);
+ src_vreg = mir->dalvikInsn.vB;
} else {
- src_idx = 0;
+ DCHECK_NE(df_attributes & DF_NULL_CHK_A, 0u);
+ DCHECK_NE(df_attributes & DF_REF_A, 0u);
+ src_vreg = mir->dalvikInsn.vA;
}
- int src_sreg = mir->ssa_rep->uses[src_idx];
- if (!ssa_regs_to_check->IsBitSet(src_sreg)) {
+ if (!vregs_to_check->IsBitSet(src_vreg)) {
// Eliminate the null check.
- mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
+ mir->optimization_flags |= MIR_MARK;
} else {
// Do the null check.
- // TODO: Rewrite the pass to converge first before doing any modifications so that
- // we don't lose the MIR_IGNORE_NULL_CHECK here if previously set by some other pass.
- mir->optimization_flags &= ~MIR_IGNORE_NULL_CHECK;
- // Mark s_reg as null-checked
- ssa_regs_to_check->ClearBit(src_sreg);
+ mir->optimization_flags &= ~MIR_MARK;
+ // Mark src_vreg as null-checked.
+ vregs_to_check->ClearBit(src_vreg);
}
}
@@ -947,66 +1017,41 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
* Note: we can't tell if a CONST definition might be used as an object, so treat
* them all as object definitions.
*/
- if (((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A)) ||
+ if ((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A) ||
(df_attributes & DF_SETS_CONST)) {
- ssa_regs_to_check->SetBit(mir->ssa_rep->defs[0]);
+ vregs_to_check->SetBit(mir->dalvikInsn.vA);
}
- // Now, remove mark from all object definitions we know are non-null.
+ // Then, remove mark from all object definitions we know are non-null.
if (df_attributes & DF_NON_NULL_DST) {
// Mark target of NEW* as non-null
- ssa_regs_to_check->ClearBit(mir->ssa_rep->defs[0]);
+ DCHECK_NE(df_attributes & DF_REF_A, 0u);
+ vregs_to_check->ClearBit(mir->dalvikInsn.vA);
}
// Mark non-null returns from invoke-style NEW*
if (df_attributes & DF_NON_NULL_RET) {
MIR* next_mir = mir->next;
// Next should be an MOVE_RESULT_OBJECT
- if (next_mir &&
- next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
- // Mark as null checked
- ssa_regs_to_check->ClearBit(next_mir->ssa_rep->defs[0]);
+ if (UNLIKELY(next_mir == nullptr)) {
+ // The MethodVerifier makes sure there's no MOVE_RESULT at the catch entry or branch
+ // target, so the MOVE_RESULT cannot be broken away into another block.
+ LOG(WARNING) << "Unexpected end of block following new";
+ } else if (UNLIKELY(next_mir->dalvikInsn.opcode != Instruction::MOVE_RESULT_OBJECT)) {
+ LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
} else {
- if (next_mir) {
- LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
- } else if (bb->fall_through != NullBasicBlockId) {
- // Look in next basic block
- struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
- for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
- tmir =tmir->next) {
- if (MIR::DecodedInstruction::IsPseudoMirOp(tmir->dalvikInsn.opcode)) {
- continue;
- }
- // First non-pseudo should be MOVE_RESULT_OBJECT
- if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
- // Mark as null checked
- ssa_regs_to_check->ClearBit(tmir->ssa_rep->defs[0]);
- } else {
- LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
- }
- break;
- }
- }
+ // Mark as null checked.
+ vregs_to_check->ClearBit(next_mir->dalvikInsn.vA);
}
}
- /*
- * Propagate nullcheck state on register copies (including
- * Phi pseudo copies. For the latter, nullcheck state is
- * the "or" of all the Phi's operands.
- */
- if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
- int tgt_sreg = mir->ssa_rep->defs[0];
- int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
- mir->ssa_rep->num_uses;
- bool needs_null_check = false;
- for (int i = 0; i < operands; i++) {
- needs_null_check |= ssa_regs_to_check->IsBitSet(mir->ssa_rep->uses[i]);
- }
- if (needs_null_check) {
- ssa_regs_to_check->SetBit(tgt_sreg);
+ // Propagate null check state on register copies.
+ if (df_attributes & DF_NULL_TRANSFER_0) {
+ DCHECK_EQ(df_attributes | ~(DF_DA | DF_REF_A | DF_UB | DF_REF_B), static_cast<uint64_t>(-1));
+ if (vregs_to_check->IsBitSet(mir->dalvikInsn.vB)) {
+ vregs_to_check->SetBit(mir->dalvikInsn.vA);
} else {
- ssa_regs_to_check->ClearBit(tgt_sreg);
+ vregs_to_check->ClearBit(mir->dalvikInsn.vA);
}
}
}
@@ -1016,15 +1061,15 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
ArenaBitVector* old_ending_ssa_regs_to_check = temp_bit_matrix_[bb->id];
if (old_ending_ssa_regs_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
- nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
- temp_bit_matrix_[bb->id] = ssa_regs_to_check;
- // Create a new ssa_regs_to_check for next BB.
+ nce_changed = vregs_to_check->GetHighestBitSet() != -1;
+ temp_bit_matrix_[bb->id] = vregs_to_check;
+ // Create a new vregs_to_check for next BB.
temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
- } else if (!ssa_regs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
+ } else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
nce_changed = true;
- temp_bit_matrix_[bb->id] = ssa_regs_to_check;
- temp_bit_vector_ = old_ending_ssa_regs_to_check; // Reuse for ssa_regs_to_check for next BB.
+ temp_bit_matrix_[bb->id] = vregs_to_check;
+ temp_bit_vector_ = old_ending_ssa_regs_to_check; // Reuse for vregs_to_check for next BB.
}
return nce_changed;
}
@@ -1036,6 +1081,18 @@ void MIRGraph::EliminateNullChecksEnd() {
temp_bit_matrix_ = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
+
+ // converge MIR_MARK with MIR_IGNORE_NULL_CHECK
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
+ COMPILE_ASSERT(kMarkToIgnoreNullCheckShift > 0, check_valid_shift_right);
+ uint16_t mirMarkAdjustedToIgnoreNullCheck =
+ (mir->optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
+ mir->optimization_flags |= mirMarkAdjustedToIgnoreNullCheck;
+ }
+ }
}
/*
@@ -1100,26 +1157,27 @@ bool MIRGraph::EliminateClassInitChecksGate() {
// First, find all SGET/SPUTs that may need class initialization checks, record INVOKE_STATICs.
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- DCHECK(bb->data_flow_info != nullptr);
- if (mir->dalvikInsn.opcode >= Instruction::SGET &&
- mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
- const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
- uint16_t index = 0xffffu;
- if (!field_info.IsInitialized()) {
- DCHECK_LT(class_to_index_map.size(), 0xffffu);
- MapEntry entry = {
- // Treat unresolved fields as if each had its own class.
- field_info.IsResolved() ? field_info.DeclaringDexFile()
- : nullptr,
- field_info.IsResolved() ? field_info.DeclaringClassIndex()
- : field_info.FieldIndex(),
- static_cast<uint16_t>(class_to_index_map.size())
- };
- index = class_to_index_map.insert(entry).first->index;
+ if (bb->block_type == kDalvikByteCode) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode >= Instruction::SGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
+ uint16_t index = 0xffffu;
+ if (!field_info.IsInitialized()) {
+ DCHECK_LT(class_to_index_map.size(), 0xffffu);
+ MapEntry entry = {
+ // Treat unresolved fields as if each had its own class.
+ field_info.IsResolved() ? field_info.DeclaringDexFile()
+ : nullptr,
+ field_info.IsResolved() ? field_info.DeclaringClassIndex()
+ : field_info.FieldIndex(),
+ static_cast<uint16_t>(class_to_index_map.size())
+ };
+ index = class_to_index_map.insert(entry).first->index;
+ }
+ // Using offset/2 for index into temp_insn_data_.
+ temp_insn_data_[mir->offset / 2u] = index;
}
- // Using offset/2 for index into temp_insn_data_.
- temp_insn_data_[mir->offset / 2u] = index;
}
}
}
@@ -1148,7 +1206,9 @@ bool MIRGraph::EliminateClassInitChecksGate() {
*/
bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
- if (bb->data_flow_info == nullptr) {
+ if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
+ // Ignore the kExitBlock as well.
+ DCHECK(bb->first_mir_insn == nullptr);
return false;
}
@@ -1163,7 +1223,6 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
// pred_bb must have already been processed at least once.
DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->data_flow_info != nullptr);
DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
} else {
@@ -1172,7 +1231,6 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
for (BasicBlockId pred_id : bb->predecessors) {
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->data_flow_info != nullptr);
if (temp_bit_matrix_[pred_bb->id] == nullptr) {
continue;
}
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 55e547e56f..337d4efda3 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -23,15 +23,8 @@
namespace art {
-class ClassInitCheckEliminationTest : public testing::Test {
+class MirOptimizationTest : public testing::Test {
protected:
- struct SFieldDef {
- uint16_t field_idx;
- uintptr_t declaring_dex_file;
- uint16_t declaring_class_idx;
- uint16_t declaring_field_idx;
- };
-
struct BBDef {
static constexpr size_t kMaxSuccessors = 4;
static constexpr size_t kMaxPredecessors = 4;
@@ -44,9 +37,12 @@ class ClassInitCheckEliminationTest : public testing::Test {
};
struct MIRDef {
- Instruction::Code opcode;
BasicBlockId bbid;
- uint32_t field_or_method_info;
+ Instruction::Code opcode;
+ uint32_t field_info;
+ uint32_t vA;
+ uint32_t vB;
+ uint32_t vC;
};
#define DEF_SUCC0() \
@@ -72,32 +68,6 @@ class ClassInitCheckEliminationTest : public testing::Test {
#define DEF_BB(type, succ, pred) \
{ type, succ, pred }
-#define DEF_MIR(opcode, bb, field_info) \
- { opcode, bb, field_info }
-
- void DoPrepareSFields(const SFieldDef* defs, size_t count) {
- cu_.mir_graph->sfield_lowering_infos_.clear();
- cu_.mir_graph->sfield_lowering_infos_.reserve(count);
- for (size_t i = 0u; i != count; ++i) {
- const SFieldDef* def = &defs[i];
- MirSFieldLoweringInfo field_info(def->field_idx);
- if (def->declaring_dex_file != 0u) {
- field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
- field_info.declaring_class_idx_ = def->declaring_class_idx;
- field_info.declaring_field_idx_ = def->declaring_field_idx;
- field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic;
- }
- ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
- ASSERT_FALSE(field_info.IsInitialized());
- cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
- }
- }
-
- template <size_t count>
- void PrepareSFields(const SFieldDef (&defs)[count]) {
- DoPrepareSFields(defs, count);
- }
-
void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
cu_.mir_graph->block_id_map_.clear();
cu_.mir_graph->block_list_.clear();
@@ -145,6 +115,63 @@ class ClassInitCheckEliminationTest : public testing::Test {
DoPrepareBasicBlocks(defs, count);
}
+ void PrepareSingleBlock() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareDiamond() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareLoop() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareCatch() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)), // The top.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // The throwing insn.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // Catch handler.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)), // The merged block.
+ };
+ PrepareBasicBlocks(bbs);
+ BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
+ catch_handler->catch_entry = true;
+ // Add successor block info to the check block.
+ BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
+ check_bb->successor_block_list_type = kCatch;
+ SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
+ (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
+ successor_block_info->block = catch_handler->id;
+ check_bb->successor_blocks.push_back(successor_block_info);
+ }
+
void DoPrepareMIRs(const MIRDef* defs, size_t count) {
mir_count_ = count;
mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
@@ -157,9 +184,15 @@ class ClassInitCheckEliminationTest : public testing::Test {
BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
bb->AppendMIR(mir);
if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
- ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.size());
- mir->meta.sfield_lowering_info = def->field_or_method_info;
+ ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
+ mir->meta.sfield_lowering_info = def->field_info;
+ } else if (def->opcode >= Instruction::IGET && def->opcode <= Instruction::IPUT_SHORT) {
+ ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
+ mir->meta.ifield_lowering_info = def->field_info;
}
+ mir->dalvikInsn.vA = def->vA;
+ mir->dalvikInsn.vB = def->vB;
+ mir->dalvikInsn.vC = def->vC;
mir->ssa_rep = nullptr;
mir->offset = 2 * i; // All insns need to be at least 2 code units long.
mir->optimization_flags = 0u;
@@ -179,15 +212,60 @@ class ClassInitCheckEliminationTest : public testing::Test {
DoPrepareMIRs(defs, count);
}
+ MirOptimizationTest()
+ : pool_(),
+ cu_(&pool_),
+ mir_count_(0u),
+ mirs_(nullptr),
+ code_item_(nullptr) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ cu_.access_flags = kAccStatic; // Don't let "this" interfere with this test.
+ }
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+ size_t mir_count_;
+ MIR* mirs_;
+ DexFile::CodeItem* code_item_;
+};
+
+class ClassInitCheckEliminationTest : public MirOptimizationTest {
+ protected:
+ struct SFieldDef {
+ uint16_t field_idx;
+ uintptr_t declaring_dex_file;
+ uint16_t declaring_class_idx;
+ uint16_t declaring_field_idx;
+ };
+
+ void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+ cu_.mir_graph->sfield_lowering_infos_.clear();
+ cu_.mir_graph->sfield_lowering_infos_.reserve(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const SFieldDef* def = &defs[i];
+ MirSFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_class_idx_ = def->declaring_class_idx;
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic;
+ }
+ ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
+ ASSERT_FALSE(field_info.IsInitialized());
+ cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+ }
+ }
+
+ template <size_t count>
+ void PrepareSFields(const SFieldDef (&defs)[count]) {
+ DoPrepareSFields(defs, count);
+ }
+
void PerformClassInitCheckElimination() {
- cu_.mir_graph->SSATransformationStart();
cu_.mir_graph->ComputeDFSOrders();
- cu_.mir_graph->ComputeDominators();
- cu_.mir_graph->ComputeTopologicalSortOrder();
- cu_.mir_graph->SSATransformationEnd();
bool gate_result = cu_.mir_graph->EliminateClassInitChecksGate();
ASSERT_TRUE(gate_result);
- LoopRepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
+ RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
bool change = false;
for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
change = cu_.mir_graph->EliminateClassInitChecks(bb);
@@ -196,21 +274,64 @@ class ClassInitCheckEliminationTest : public testing::Test {
}
ClassInitCheckEliminationTest()
- : pool_(),
- cu_(&pool_),
- mir_count_(0u),
- mirs_(nullptr),
- code_item_(nullptr) {
- cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ : MirOptimizationTest() {
}
+};
- ArenaPool pool_;
- CompilationUnit cu_;
- size_t mir_count_;
- MIR* mirs_;
- DexFile::CodeItem* code_item_;
+class NullCheckEliminationTest : public MirOptimizationTest {
+ protected:
+ struct IFieldDef {
+ uint16_t field_idx;
+ uintptr_t declaring_dex_file;
+ uint16_t declaring_class_idx;
+ uint16_t declaring_field_idx;
+ };
+
+ void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+ cu_.mir_graph->ifield_lowering_infos_.clear();
+ cu_.mir_graph->ifield_lowering_infos_.reserve(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const IFieldDef* def = &defs[i];
+ MirIFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_class_idx_ = def->declaring_class_idx;
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ }
+ ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
+ cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+ }
+ }
+
+ template <size_t count>
+ void PrepareIFields(const IFieldDef (&defs)[count]) {
+ DoPrepareIFields(defs, count);
+ }
+
+ void PerformNullCheckElimination() {
+ // Make vregs in range [100, 1000) input registers, i.e. requiring a null check.
+ code_item_->registers_size_ = 1000;
+ code_item_->ins_size_ = 900;
+
+ cu_.mir_graph->ComputeDFSOrders();
+ bool gate_result = cu_.mir_graph->EliminateNullChecksGate();
+ ASSERT_TRUE(gate_result);
+ RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
+ bool change = false;
+ for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
+ change = cu_.mir_graph->EliminateNullChecks(bb);
+ }
+ cu_.mir_graph->EliminateNullChecksEnd();
+ }
+
+ NullCheckEliminationTest()
+ : MirOptimizationTest() {
+ }
};
+#define DEF_SGET_SPUT_V0(bb, opcode, field_info) \
+ { bb, opcode, field_info, 0u, 0u, 0u }
+
TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
static const SFieldDef sfields[] = {
{ 0u, 1u, 0u, 0u },
@@ -220,31 +341,25 @@ TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
{ 4u, 1u, 3u, 4u }, // Same declaring class as sfield[3].
{ 5u, 0u, 0u, 0u }, // Unresolved.
};
- static const BBDef bbs[] = {
- DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
- DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
- DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
- };
static const MIRDef mirs[] = {
- DEF_MIR(Instruction::SPUT, 3u, 5u), // Unresolved.
- DEF_MIR(Instruction::SPUT, 3u, 0u),
- DEF_MIR(Instruction::SGET, 3u, 1u),
- DEF_MIR(Instruction::SGET, 3u, 2u),
- DEF_MIR(Instruction::SGET, 3u, 5u), // Unresolved.
- DEF_MIR(Instruction::SGET, 3u, 0u),
- DEF_MIR(Instruction::SGET, 3u, 1u),
- DEF_MIR(Instruction::SGET, 3u, 2u),
- DEF_MIR(Instruction::SGET, 3u, 5u), // Unresolved.
- DEF_MIR(Instruction::SGET, 3u, 3u),
- DEF_MIR(Instruction::SGET, 3u, 4u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 5u), // Unresolved.
+ DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 0u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 1u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 2u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 5u), // Unresolved.
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 0u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 1u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 2u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 5u), // Unresolved.
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 3u),
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 4u),
};
static const bool expected_ignore_clinit_check[] = {
false, false, false, false, true, true, true, true, true, false, true
};
PrepareSFields(sfields);
- PrepareBasicBlocks(bbs);
+ PrepareSingleBlock();
PrepareMIRs(mirs);
PerformClassInitCheckElimination();
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
@@ -268,40 +383,31 @@ TEST_F(ClassInitCheckEliminationTest, Diamond) {
{ 9u, 1u, 8u, 9u }, // Same declaring class as sfield[8].
{ 10u, 0u, 0u, 0u }, // Unresolved.
};
- static const BBDef bbs[] = {
- DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
- DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
- DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
- DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
- };
static const MIRDef mirs[] = {
// NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
- DEF_MIR(Instruction::SGET, 3u, 10u), // Unresolved.
- DEF_MIR(Instruction::SPUT, 3u, 10u), // Unresolved.
- DEF_MIR(Instruction::SPUT, 3u, 0u),
- DEF_MIR(Instruction::SGET, 6u, 0u), // Eliminated (block #3 dominates #6).
- DEF_MIR(Instruction::SPUT, 4u, 1u),
- DEF_MIR(Instruction::SGET, 6u, 1u), // Not eliminated (block #4 doesn't dominate #6).
- DEF_MIR(Instruction::SGET, 3u, 2u),
- DEF_MIR(Instruction::SGET, 4u, 2u), // Eliminated (block #3 dominates #4).
- DEF_MIR(Instruction::SGET, 3u, 3u),
- DEF_MIR(Instruction::SGET, 5u, 3u), // Eliminated (block #3 dominates #5).
- DEF_MIR(Instruction::SGET, 3u, 4u),
- DEF_MIR(Instruction::SGET, 6u, 4u), // Eliminated (block #3 dominates #6).
- DEF_MIR(Instruction::SGET, 4u, 5u),
- DEF_MIR(Instruction::SGET, 6u, 5u), // Not eliminated (block #4 doesn't dominate #6).
- DEF_MIR(Instruction::SGET, 5u, 6u),
- DEF_MIR(Instruction::SGET, 6u, 6u), // Not eliminated (block #5 doesn't dominate #6).
- DEF_MIR(Instruction::SGET, 4u, 7u),
- DEF_MIR(Instruction::SGET, 5u, 7u),
- DEF_MIR(Instruction::SGET, 6u, 7u), // Eliminated (initialized in both blocks #3 and #4).
- DEF_MIR(Instruction::SGET, 4u, 8u),
- DEF_MIR(Instruction::SGET, 5u, 9u),
- DEF_MIR(Instruction::SGET, 6u, 8u), // Eliminated (with sfield[9] in block #5).
- DEF_MIR(Instruction::SPUT, 6u, 9u), // Eliminated (with sfield[8] in block #4).
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 10u), // Unresolved.
+ DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 10u), // Unresolved.
+ DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 0u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 0u), // Eliminated (BB #3 dominates #6).
+ DEF_SGET_SPUT_V0(4u, Instruction::SPUT, 1u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 1u), // Not eliminated (BB #4 doesn't dominate #6).
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 2u),
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 2u), // Eliminated (BB #3 dominates #4).
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 3u),
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 3u), // Eliminated (BB #3 dominates #5).
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 4u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 4u), // Eliminated (BB #3 dominates #6).
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 5u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 5u), // Not eliminated (BB #4 doesn't dominate #6).
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 6u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 6u), // Not eliminated (BB #5 doesn't dominate #6).
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 7u),
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 7u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 7u), // Eliminated (initialized in both #3 and #4).
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 8u),
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 9u),
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 8u), // Eliminated (with sfield[9] in BB #5).
+ DEF_SGET_SPUT_V0(6u, Instruction::SPUT, 9u), // Eliminated (with sfield[8] in BB #4).
};
static const bool expected_ignore_clinit_check[] = {
false, true, // Unresolved: sfield[10], method[2]
@@ -317,7 +423,7 @@ TEST_F(ClassInitCheckEliminationTest, Diamond) {
};
PrepareSFields(sfields);
- PrepareBasicBlocks(bbs);
+ PrepareDiamond();
PrepareMIRs(mirs);
PerformClassInitCheckElimination();
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
@@ -332,26 +438,18 @@ TEST_F(ClassInitCheckEliminationTest, Loop) {
{ 0u, 1u, 0u, 0u },
{ 1u, 1u, 1u, 1u },
};
- static const BBDef bbs[] = {
- DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
- DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
- DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
- DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
- DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
- };
static const MIRDef mirs[] = {
- DEF_MIR(Instruction::SGET, 3u, 0u),
- DEF_MIR(Instruction::SGET, 4u, 1u),
- DEF_MIR(Instruction::SGET, 5u, 0u), // Eliminated.
- DEF_MIR(Instruction::SGET, 5u, 1u), // Eliminated.
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 0u),
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 1u),
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 0u), // Eliminated.
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 1u), // Eliminated.
};
static const bool expected_ignore_clinit_check[] = {
false, false, true, true
};
PrepareSFields(sfields);
- PrepareBasicBlocks(bbs);
+ PrepareLoop();
PrepareMIRs(mirs);
PerformClassInitCheckElimination();
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
@@ -368,42 +466,24 @@ TEST_F(ClassInitCheckEliminationTest, Catch) {
{ 2u, 1u, 2u, 2u },
{ 3u, 1u, 3u, 3u },
};
- static const BBDef bbs[] = {
- DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
- DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
- DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
- DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)), // The top.
- DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // The throwing insn.
- DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)), // Catch handler.
- DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)), // The merged block.
- };
static const MIRDef mirs[] = {
- DEF_MIR(Instruction::SGET, 3u, 0u), // Before the exception edge.
- DEF_MIR(Instruction::SGET, 3u, 1u), // Before the exception edge.
- DEF_MIR(Instruction::SGET, 4u, 2u), // After the exception edge.
- DEF_MIR(Instruction::SGET, 4u, 3u), // After the exception edge.
- DEF_MIR(Instruction::SGET, 5u, 0u), // In catch handler; class init check eliminated.
- DEF_MIR(Instruction::SGET, 5u, 2u), // In catch handler; class init check not eliminated.
- DEF_MIR(Instruction::SGET, 6u, 0u), // Class init check eliminated.
- DEF_MIR(Instruction::SGET, 6u, 1u), // Class init check eliminated.
- DEF_MIR(Instruction::SGET, 6u, 2u), // Class init check eliminated.
- DEF_MIR(Instruction::SGET, 6u, 3u), // Class init check not eliminated.
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 0u), // Before the exception edge.
+ DEF_SGET_SPUT_V0(3u, Instruction::SGET, 1u), // Before the exception edge.
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 2u), // After the exception edge.
+ DEF_SGET_SPUT_V0(4u, Instruction::SGET, 3u), // After the exception edge.
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 0u), // In catch handler; clinit check eliminated.
+ DEF_SGET_SPUT_V0(5u, Instruction::SGET, 2u), // In catch handler; clinit check not eliminated.
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 0u), // Class init check eliminated.
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 1u), // Class init check eliminated.
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 2u), // Class init check eliminated.
+ DEF_SGET_SPUT_V0(6u, Instruction::SGET, 3u), // Class init check not eliminated.
};
static const bool expected_ignore_clinit_check[] = {
false, false, false, false, true, false, true, true, true, false
};
PrepareSFields(sfields);
- PrepareBasicBlocks(bbs);
- BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(5u);
- catch_handler->catch_entry = true;
- // Add successor block info to the check block.
- BasicBlock* check_bb = cu_.mir_graph->GetBasicBlock(3u);
- check_bb->successor_block_list_type = kCatch;
- SuccessorBlockInfo* successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
- (cu_.arena.Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
- successor_block_info->block = catch_handler->id;
- check_bb->successor_blocks.push_back(successor_block_info);
+ PrepareCatch();
PrepareMIRs(mirs);
PerformClassInitCheckElimination();
ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
@@ -413,4 +493,189 @@ TEST_F(ClassInitCheckEliminationTest, Catch) {
}
}
+#define DEF_IGET_IPUT(bb, opcode, vA, vB, field_info) \
+ { bb, opcode, field_info, vA, vB, 0u }
+#define DEF_AGET_APUT(bb, opcode, vA, vB, vC) \
+ { bb, opcode, 0u, vA, vB, vC }
+#define DEF_INVOKE(bb, opcode, vC) \
+ { bb, opcode, 0u, 0u, 0u, vC }
+#define DEF_OTHER1(bb, opcode, vA) \
+ { bb, opcode, 0u, vA, 0u, 0u }
+#define DEF_OTHER2(bb, opcode, vA, vB) \
+ { bb, opcode, 0u, vA, vB, 0u }
+
+TEST_F(NullCheckEliminationTest, SingleBlock) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 0u, 1u },
+ { 2u, 1u, 0u, 2u }, // Object.
+ };
+ static const MIRDef mirs[] = {
+ DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 0u, 100u, 2u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 1u, 0u, 1u),
+ DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 2u, 100u, 2u), // Differs from 0u (no LVN here).
+ DEF_IGET_IPUT(3u, Instruction::IGET, 3u, 2u, 1u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 4u, 101u, 0u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 5u, 102u, 0u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 6u, 103u, 0u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 7u, 103u, 1u),
+ DEF_IGET_IPUT(3u, Instruction::IPUT, 8u, 104u, 0u),
+ DEF_IGET_IPUT(3u, Instruction::IPUT, 9u, 104u, 1u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 10u, 105u, 0u),
+ DEF_IGET_IPUT(3u, Instruction::IPUT, 11u, 105u, 1u),
+ DEF_IGET_IPUT(3u, Instruction::IPUT, 12u, 106u, 0u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 13u, 106u, 1u),
+ DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 107),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 15u, 107u, 1u),
+ DEF_IGET_IPUT(3u, Instruction::IGET, 16u, 108u, 0u),
+ DEF_INVOKE(3u, Instruction::INVOKE_DIRECT, 108),
+ DEF_AGET_APUT(3u, Instruction::AGET, 18u, 109u, 110u),
+ DEF_AGET_APUT(3u, Instruction::APUT, 19u, 109u, 111u),
+ DEF_OTHER2(3u, Instruction::ARRAY_LENGTH, 20u, 112u),
+ DEF_AGET_APUT(3u, Instruction::AGET, 21u, 112u, 113u),
+ DEF_OTHER1(3u, Instruction::MONITOR_ENTER, 114u),
+ DEF_OTHER1(3u, Instruction::MONITOR_EXIT, 114u),
+ };
+ static const bool expected_ignore_null_check[] = {
+ false, false, true, false /* Not doing LVN. */,
+ false, true /* Set before running NCE. */,
+ false, true, // IGET, IGET
+ false, true, // IPUT, IPUT
+ false, true, // IGET, IPUT
+ false, true, // IPUT, IGET
+ false, true, // INVOKE, IGET
+ false, true, // IGET, INVOKE
+ false, true, // AGET, APUT
+ false, true, // ARRAY_LENGTH, AGET
+ false, true, // MONITOR_ENTER, MONITOR_EXIT
+ };
+
+ PrepareIFields(ifields);
+ PrepareSingleBlock();
+ PrepareMIRs(mirs);
+
+ // Mark IGET 5u as null-checked to test that NCE doesn't clear this flag.
+ mirs_[5u].optimization_flags |= MIR_IGNORE_NULL_CHECK;
+
+ PerformNullCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_null_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
+ }
+}
+
+TEST_F(NullCheckEliminationTest, Diamond) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 0u, 1u },
+ { 2u, 1u, 0u, 2u }, // int[].
+ };
+ static const MIRDef mirs[] = {
+ // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
+ DEF_IGET_IPUT(3u, Instruction::IPUT, 0u, 100u, 0u),
+ DEF_IGET_IPUT(6u, Instruction::IGET, 1u, 100u, 1u), // Eliminated (BB #3 dominates #6).
+ DEF_IGET_IPUT(3u, Instruction::IGET, 2u, 101u, 0u),
+ DEF_IGET_IPUT(4u, Instruction::IPUT, 3u, 101u, 0u), // Eliminated (BB #3 dominates #4).
+ DEF_IGET_IPUT(3u, Instruction::IGET, 4u, 102u, 0u),
+ DEF_IGET_IPUT(5u, Instruction::IPUT, 5u, 102u, 1u), // Eliminated (BB #3 dominates #5).
+ DEF_IGET_IPUT(4u, Instruction::IPUT, 6u, 103u, 0u),
+ DEF_IGET_IPUT(6u, Instruction::IPUT, 7u, 103u, 1u), // Not eliminated (going through BB #5).
+ DEF_IGET_IPUT(5u, Instruction::IGET, 8u, 104u, 1u),
+ DEF_IGET_IPUT(6u, Instruction::IGET, 9u, 104u, 0u), // Not eliminated (going through BB #4).
+ DEF_INVOKE(4u, Instruction::INVOKE_DIRECT, 105u),
+ DEF_IGET_IPUT(5u, Instruction::IGET, 11u, 105u, 1u),
+ DEF_IGET_IPUT(6u, Instruction::IPUT, 12u, 105u, 0u), // Eliminated.
+ DEF_IGET_IPUT(3u, Instruction::IGET_OBJECT, 13u, 106u, 2u),
+ DEF_OTHER1(3u, Instruction::IF_EQZ, 13u), // Last insn in the BB #3.
+ DEF_OTHER2(5u, Instruction::NEW_ARRAY, 13u, 107u),
+ DEF_AGET_APUT(6u, Instruction::AGET, 16u, 13u, 108u), // Eliminated.
+ };
+ static const bool expected_ignore_null_check[] = {
+ false, true, // BB #3 IPUT, BB #6 IGET
+ false, true, // BB #3 IGET, BB #4 IPUT
+ false, true, // BB #3 IGET, BB #5 IPUT
+ false, false, // BB #4 IPUT, BB #6 IPUT
+ false, false, // BB #5 IGET, BB #6 IGET
+ false, false, true, // BB #4 INVOKE, BB #5 IGET, BB #6 IPUT
+ false, false, // BB #3 IGET_OBJECT & IF_EQZ
+ false, true, // BB #5 NEW_ARRAY, BB #6 AGET
+ };
+
+ PrepareIFields(ifields);
+ PrepareDiamond();
+ PrepareMIRs(mirs);
+ PerformNullCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_null_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
+ }
+}
+
+TEST_F(NullCheckEliminationTest, Loop) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ };
+ static const MIRDef mirs[] = {
+ DEF_IGET_IPUT(3u, Instruction::IGET, 0u, 100u, 0u),
+ DEF_IGET_IPUT(4u, Instruction::IGET, 1u, 101u, 0u),
+ DEF_IGET_IPUT(5u, Instruction::IGET, 2u, 100u, 1u), // Eliminated.
+ DEF_IGET_IPUT(5u, Instruction::IGET, 3u, 101u, 1u), // Eliminated.
+ DEF_IGET_IPUT(3u, Instruction::IGET, 4u, 102u, 0u),
+ DEF_IGET_IPUT(4u, Instruction::IGET, 5u, 102u, 1u), // Not eliminated (MOVE_OBJECT_16).
+ DEF_OTHER2(4u, Instruction::MOVE_OBJECT_16, 102u, 103u),
+ };
+ static const bool expected_ignore_null_check[] = {
+ false, false, true, true,
+ false, false, false,
+ };
+
+ PrepareIFields(ifields);
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformNullCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_null_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
+ }
+}
+
+TEST_F(NullCheckEliminationTest, Catch) {
+ static const IFieldDef ifields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ };
+ static const MIRDef mirs[] = {
+ DEF_IGET_IPUT(3u, Instruction::IGET, 0u, 100u, 0u), // Before the exception edge.
+ DEF_IGET_IPUT(3u, Instruction::IGET, 1u, 101u, 0u), // Before the exception edge.
+ DEF_IGET_IPUT(4u, Instruction::IGET, 2u, 102u, 0u), // After the exception edge.
+ DEF_IGET_IPUT(4u, Instruction::IGET, 3u, 103u, 0u), // After the exception edge.
+ DEF_IGET_IPUT(5u, Instruction::IGET, 4u, 100u, 1u), // In catch handler; eliminated.
+ DEF_IGET_IPUT(5u, Instruction::IGET, 5u, 102u, 1u), // In catch handler; not eliminated.
+ DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 100u, 0u), // Null check eliminated.
+ DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 101u, 1u), // Null check eliminated.
+ DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 102u, 0u), // Null check eliminated.
+ DEF_IGET_IPUT(6u, Instruction::IGET, 6u, 103u, 1u), // Null check not eliminated.
+ };
+ static const bool expected_ignore_null_check[] = {
+ false, false, false, false, true, false, true, true, true, false
+ };
+
+ PrepareIFields(ifields);
+ PrepareCatch();
+ PrepareMIRs(mirs);
+ PerformNullCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_null_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_null_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) << i;
+ }
+}
+
+// Undefine MIR_DEF for null check elimination.
+#undef MIR_DEF
+
} // namespace art
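For orientation: the DEF_* helpers above are plain aggregate initializers over the test's MIRDef entries, filled in the order { bbid, opcode, field_info, vA, vB, vC }. A rough, self-contained sketch with simplified types (the real MIRDef is defined earlier in mir_optimization_test.cc and is not part of this hunk):

#include <cstdint>

// Rough stand-in for the test's MIRDef; field names and types are assumed from
// the initializer order used by DEF_SGET_SPUT_V0 / DEF_IGET_IPUT above.
struct MIRDefSketch {
  uint32_t bbid;        // Block to append the MIR to (3u is the single bytecode block).
  int opcode;           // Instruction::Code in the real test (SGET, IGET, AGET, ...).
  uint32_t field_info;  // Index into the sfield/ifield lowering infos; 0u when unused.
  uint32_t vA, vB, vC;  // Dalvik operands; vB is the object register for IGET/IPUT.
};

// DEF_IGET_IPUT(3u, Instruction::IGET, 1u, 0u, 1u)
//   expands to { 3u, IGET, /*field_info*/ 1u, /*vA*/ 1u, /*vB*/ 0u, /*vC*/ 0u }.
// DEF_SGET_SPUT_V0(3u, Instruction::SPUT, 5u)
//   expands to { 3u, SPUT, /*field_info*/ 5u, /*vA*/ 0u, /*vB*/ 0u, /*vC*/ 0u }.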
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index cd3ffd4cc8..a2bf8b4aab 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -20,6 +20,7 @@
#include "dataflow_iterator.h"
#include "dataflow_iterator-inl.h"
#include "pass_driver_me_opts.h"
+#include "post_opt_passes.h"
namespace art {
@@ -35,13 +36,15 @@ template<>
const Pass* const PassDriver<PassDriverMEOpts>::g_passes[] = {
GetPassInstance<CacheFieldLoweringInfo>(),
GetPassInstance<CacheMethodLoweringInfo>(),
+ GetPassInstance<CalculatePredecessors>(),
+ GetPassInstance<DFSOrders>(),
+ GetPassInstance<ClassInitCheckElimination>(),
GetPassInstance<SpecialMethodInliner>(),
- GetPassInstance<CodeLayout>(),
GetPassInstance<NullCheckElimination>(),
+ GetPassInstance<BBCombine>(),
+ GetPassInstance<CodeLayout>(),
GetPassInstance<TypeInference>(),
- GetPassInstance<ClassInitCheckElimination>(),
GetPassInstance<GlobalValueNumberingPass>(),
- GetPassInstance<BBCombine>(),
GetPassInstance<BBOptimizations>(),
};
diff --git a/compiler/dex/pass_driver_me_post_opt.cc b/compiler/dex/pass_driver_me_post_opt.cc
index 4acab6c6e5..e6238e9f25 100644
--- a/compiler/dex/pass_driver_me_post_opt.cc
+++ b/compiler/dex/pass_driver_me_post_opt.cc
@@ -33,7 +33,6 @@ template<>
const Pass* const PassDriver<PassDriverMEPostOpt>::g_passes[] = {
GetPassInstance<InitializeData>(),
GetPassInstance<ClearPhiInstructions>(),
- GetPassInstance<CalculatePredecessors>(),
GetPassInstance<DFSOrders>(),
GetPassInstance<BuildDomination>(),
GetPassInstance<TopologicalSortOrders>(),
diff --git a/compiler/dex/post_opt_passes.h b/compiler/dex/post_opt_passes.h
index e7805bae68..02833be2c5 100644
--- a/compiler/dex/post_opt_passes.h
+++ b/compiler/dex/post_opt_passes.h
@@ -91,6 +91,13 @@ class DFSOrders : public PassME {
DFSOrders() : PassME("DFSOrders") {
}
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return !c_unit->mir_graph->DfsOrdersUpToDate();
+ }
+
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
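The Gate() added here pairs with the dfs_orders_up_to_date_ flag set at the end of MIRGraph::ComputeDFSOrders() in the next hunk: the post-opt DFSOrders pass now runs only when the cached orders are stale. A reduced sketch of that gate/run split, with made-up names standing in for the PassME machinery:

// Hypothetical stand-ins, not the real MIRGraph/PassME classes.
struct Graph {
  bool dfs_orders_up_to_date = false;
  void ComputeDFSOrders() { /* ... recompute orders ... */ dfs_orders_up_to_date = true; }
};

struct DFSOrdersPass {
  // Mirrors the Gate() above: skip the pass while the cached orders are still valid.
  bool Gate(const Graph& g) const { return !g.dfs_orders_up_to_date; }
  void Run(Graph& g) const { g.ComputeDFSOrders(); }
};

inline void RunIfNeeded(const DFSOrdersPass& pass, Graph& g) {
  if (pass.Gate(g)) {
    pass.Run(g);
  }
}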
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 4388041fac..412f85d5dd 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -66,8 +66,9 @@ void MIRGraph::MarkPreOrder(BasicBlock* block) {
}
void MIRGraph::RecordDFSOrders(BasicBlock* block) {
- DCHECK(temp_scoped_alloc_.get() != nullptr);
- ScopedArenaVector<BasicBlock*> succ(temp_scoped_alloc_->Adapter());
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
+ ScopedArenaVector<BasicBlock*> succ(allocator.Adapter());
+ succ.reserve(GetNumBlocks());
MarkPreOrder(block);
succ.push_back(block);
while (!succ.empty()) {
@@ -107,10 +108,11 @@ void MIRGraph::ComputeDFSOrders() {
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
if (!bb->visited) {
- bb->Hide(cu_);
+ bb->Hide(this);
}
}
}
+ dfs_orders_up_to_date_ = true;
}
/*
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index cf4259f790..d5d487f03c 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -63,7 +63,7 @@ TEST_F(ImageTest, WriteRead) {
ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
const uintptr_t requested_image_base = ART_BASE_ADDRESS;
- ImageWriter writer(*compiler_driver_, requested_image_base);
+ std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_, requested_image_base));
{
{
jobject class_loader = NULL;
@@ -83,8 +83,8 @@ TEST_F(ImageTest, WriteRead) {
t.NewTiming("WriteElf");
SafeMap<std::string, std::string> key_value_store;
OatWriter oat_writer(class_linker->GetBootClassPath(), 0, 0, 0, compiler_driver_.get(),
- &writer, &timings, &key_value_store);
- bool success = writer.PrepareImageAddressSpace() &&
+ writer.get(), &timings, &key_value_store);
+ bool success = writer->PrepareImageAddressSpace() &&
compiler_driver_->WriteElf(GetTestAndroidRoot(),
!kIsTargetBuild,
class_linker->GetBootClassPath(),
@@ -99,9 +99,9 @@ TEST_F(ImageTest, WriteRead) {
{
bool success_image =
- writer.Write(image_file.GetFilename(), dup_oat->GetPath(), dup_oat->GetPath());
+ writer->Write(image_file.GetFilename(), dup_oat->GetPath(), dup_oat->GetPath());
ASSERT_TRUE(success_image);
- bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer.GetOatDataBegin());
+ bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
ASSERT_TRUE(success_fixup);
}
@@ -130,14 +130,18 @@ TEST_F(ImageTest, WriteRead) {
compiler_driver_.reset();
// Tear down old runtime before making a new one, clearing out misc state.
+
+ // Remove the reservation of the memory for use to load the image.
+ // Need to do this before we reset the runtime.
+ UnreserveImageSpace();
+ writer.reset(nullptr);
+
runtime_.reset();
java_lang_dex_file_ = NULL;
+ MemMap::Init();
std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
- // Remove the reservation of the memory for use to load the image.
- UnreserveImageSpace();
-
RuntimeOptions options;
std::string image("-Ximage:");
image.append(image_location.GetFilename());
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index a06b5c5254..0bab429e83 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -24,10 +24,11 @@ include $(CLEAR_VARS)
LOCAL_MODULE := dalvikvm
LOCAL_MODULE_TAGS := optional
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc
+LOCAL_SRC_FILES := dalvikvm.cc ../sigchainlib/sigchain.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
LOCAL_C_INCLUDES := art/runtime
-LOCAL_SHARED_LIBRARIES := libdl libnativehelper
+LOCAL_SHARED_LIBRARIES := libdl liblog libnativehelper
+LOCAL_LDFLAGS := -Wl,--version-script,art/sigchainlib/version-script.txt -Wl,--export-dynamic
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
LOCAL_MULTILIB := both
@@ -50,11 +51,15 @@ LOCAL_MODULE := dalvikvm
LOCAL_MODULE_TAGS := optional
LOCAL_CLANG := true
LOCAL_CPP_EXTENSION := cc
-LOCAL_SRC_FILES := dalvikvm.cc
+LOCAL_SRC_FILES := dalvikvm.cc ../sigchainlib/sigchain.cc
LOCAL_CFLAGS := $(dalvikvm_cflags)
LOCAL_C_INCLUDES := art/runtime
LOCAL_SHARED_LIBRARIES := libnativehelper
LOCAL_LDFLAGS := -ldl -lpthread
+# Mac OS linker doesn't understand --export-dynamic.
+ifneq ($(HOST_OS),darwin)
+ LOCAL_LDFLAGS += -Wl,--export-dynamic
+endif
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common.mk
LOCAL_IS_HOST_MODULE := true
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 7be4349067..2a7d9983fa 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1262,6 +1262,7 @@ static int dex2oat(int argc, char** argv) {
RuntimeOptions runtime_options;
std::vector<const DexFile*> boot_class_path;
+ art::MemMap::Init(); // For ZipEntry::ExtractToMemMap.
if (boot_image_option.empty()) {
size_t failure_count = OpenDexFiles(dex_filenames, dex_locations, boot_class_path);
if (failure_count > 0) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 1f2c0aa1cc..2649ab767e 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2225,6 +2225,8 @@ static int oatdump(int argc, char** argv) {
runtime.reset(StartRuntime(args.boot_image_location_,
args.image_location_,
args.instruction_set_));
+ } else {
+ MemMap::Init();
}
if (args.oat_filename_ != nullptr) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 4ed428c200..504addc054 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -758,6 +758,7 @@ static File* CreateOrOpen(const char* name, bool* created) {
static int patchoat(int argc, char **argv) {
InitLogging(argv);
+ MemMap::Init();
const bool debug = kIsDebugBuild;
orig_argc = argc;
orig_argv = argv;
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index f67616ef0d..994e2357af 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -74,6 +74,7 @@ Allocator* Allocator::GetNoopAllocator() {
namespace TrackedAllocators {
+// These globals are safe since they don't have any non-trivial destructors.
Atomic<size_t> g_bytes_used[kAllocatorTagCount];
volatile size_t g_max_bytes_used[kAllocatorTagCount];
Atomic<uint64_t> g_total_bytes_used[kAllocatorTagCount];
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index cf3a581668..5718e44f9c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -115,7 +115,17 @@ static void ThrowEarlierClassFailure(mirror::Class* c)
}
}
-static void WrapExceptionInInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static void VlogClassInitializationFailure(Handle<mirror::Class> klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (VLOG_IS_ON(class_linker)) {
+ std::string temp;
+ LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
+ << klass->GetLocation() << "\n" << Thread::Current()->GetException(nullptr)->Dump();
+ }
+}
+
+static void WrapExceptionInInitializer(Handle<mirror::Class> klass)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
JNIEnv* env = self->GetJniEnv();
@@ -132,6 +142,7 @@ static void WrapExceptionInInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lo
self->ThrowNewWrappedException(throw_location, "Ljava/lang/ExceptionInInitializerError;",
nullptr);
}
+ VlogClassInitializationFailure(klass);
}
static size_t Hash(const char* s) {
@@ -4151,6 +4162,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
// Was the class already found to be erroneous? Done under the lock to match the JLS.
if (klass->IsErroneous()) {
ThrowEarlierClassFailure(klass.Get());
+ VlogClassInitializationFailure(klass);
return false;
}
@@ -4163,6 +4175,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
// compile time.
if (klass->IsErroneous()) {
CHECK(self->IsExceptionPending());
+ VlogClassInitializationFailure(klass);
} else {
CHECK(Runtime::Current()->IsCompiler());
CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime);
@@ -4181,6 +4194,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
if (klass->GetStatus() == mirror::Class::kStatusInitializing) {
// Could have got an exception during verification.
if (self->IsExceptionPending()) {
+ VlogClassInitializationFailure(klass);
return false;
}
// We caught somebody else in the act; was it us?
@@ -4277,7 +4291,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
ObjectLock<mirror::Class> lock(self, klass);
if (self->IsExceptionPending()) {
- WrapExceptionInInitializer();
+ WrapExceptionInInitializer(klass);
klass->SetStatus(mirror::Class::kStatusError, self);
success = false;
} else {
@@ -4311,9 +4325,9 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
// When we wake up, repeat the test for init-in-progress. If
// there's an exception pending (only possible if
- // "interruptShouldThrow" was set), bail out.
+ // we were not using WaitIgnoringInterrupts), bail out.
if (self->IsExceptionPending()) {
- WrapExceptionInInitializer();
+ WrapExceptionInInitializer(klass);
klass->SetStatus(mirror::Class::kStatusError, self);
return false;
}
@@ -4330,6 +4344,7 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
// different thread. Synthesize one here.
ThrowNoClassDefFoundError("<clinit> failed for class %s; see exception in other thread",
PrettyDescriptor(klass.Get()).c_str());
+ VlogClassInitializationFailure(klass);
return false;
}
if (klass->IsInitialized()) {
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index eed6f7184c..ea3da648fc 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -185,6 +185,8 @@ void CommonRuntimeTest::SetUp() {
int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
ASSERT_EQ(mkdir_result, 0);
+ MemMap::Init(); // For LoadExpectSingleDexFile
+
std::string error_msg;
java_lang_dex_file_ = LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str());
boot_class_path_.push_back(java_lang_dex_file_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 231e9e56b0..3144ce1643 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -70,7 +70,7 @@ std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps) {
return os;
}
-MemMap::Maps MemMap::maps_;
+MemMap::Maps* MemMap::maps_ = nullptr;
#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
@@ -457,11 +457,12 @@ MemMap::~MemMap() {
// Remove it from maps_.
MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
bool found = false;
- for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
+ DCHECK(maps_ != nullptr);
+ for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
it != end && it->first == base_begin_; ++it) {
if (it->second == this) {
found = true;
- maps_.erase(it);
+ maps_->erase(it);
break;
}
}
@@ -483,7 +484,8 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
// Add it to maps_.
MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
- maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
+ DCHECK(maps_ != nullptr);
+ maps_->insert(std::make_pair(base_begin_, this));
}
}
@@ -614,7 +616,7 @@ void MemMap::DumpMapsLocked(std::ostream& os) {
bool MemMap::HasMemMap(MemMap* map) {
void* base_begin = map->BaseBegin();
- for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
+ for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
it != end && it->first == base_begin; ++it) {
if (it->second == map) {
return true;
@@ -626,7 +628,8 @@ bool MemMap::HasMemMap(MemMap* map) {
MemMap* MemMap::GetLargestMemMapAt(void* address) {
size_t largest_size = 0;
MemMap* largest_map = nullptr;
- for (auto it = maps_.lower_bound(address), end = maps_.end();
+ DCHECK(maps_ != nullptr);
+ for (auto it = maps_->lower_bound(address), end = maps_->end();
it != end && it->first == address; ++it) {
MemMap* map = it->second;
CHECK(map != nullptr);
@@ -638,6 +641,20 @@ MemMap* MemMap::GetLargestMemMapAt(void* address) {
return largest_map;
}
+void MemMap::Init() {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ if (maps_ == nullptr) {
+ // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
+ maps_ = new Maps;
+ }
+}
+
+void MemMap::Shutdown() {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ delete maps_;
+ maps_ = nullptr;
+}
+
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
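The switch from a static Maps object to a pointer managed by Init()/Shutdown() avoids a non-trivial static destructor and lets the tools that touch MemMap before a Runtime exists (dex2oat, oatdump, patchoat, the tests) set it up explicitly. A reduced sketch of the pattern with simplified names (std::mutex standing in for Locks::mem_maps_lock_, Registry for MemMap):

#include <map>
#include <mutex>

class Registry {
 public:
  static void Init() {
    std::lock_guard<std::mutex> lock(mu_);
    if (maps_ == nullptr) {  // Idempotent: dex2oat calls Init() both before and during Runtime::Init.
      maps_ = new std::multimap<void*, Registry*>();
    }
  }
  static void Shutdown() {
    std::lock_guard<std::mutex> lock(mu_);
    delete maps_;            // Explicit teardown instead of a static destructor.
    maps_ = nullptr;
  }

 private:
  static std::mutex mu_;
  static std::multimap<void*, Registry*>* maps_;
};

std::mutex Registry::mu_;
std::multimap<void*, Registry*>* Registry::maps_ = nullptr;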
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 314bf8d800..df1222c39d 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -138,6 +138,9 @@ class MemMap {
typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
+ static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+
private:
MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -167,7 +170,7 @@ class MemMap {
#endif
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
- static Maps maps_ GUARDED_BY(Locks::mem_maps_lock_);
+ static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
};
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a78f4631f7..14a72b9b1b 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -87,6 +87,10 @@ class MemMapTest : public testing::Test {
delete m1;
}
+ void CommonInit() {
+ MemMap::Init();
+ }
+
#if defined(__LP64__) && !defined(__x86_64__)
static uintptr_t GetLinearScanPos() {
return MemMap::next_mem_pos_;
@@ -101,10 +105,10 @@ extern uintptr_t CreateStartPos(uint64_t input);
#endif
TEST_F(MemMapTest, Start) {
+ CommonInit();
uintptr_t start = GetLinearScanPos();
EXPECT_LE(64 * KB, start);
EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
-
#ifdef __BIONIC__
// Test a couple of values. Make sure they are different.
uintptr_t last = 0;
@@ -122,6 +126,7 @@ TEST_F(MemMapTest, Start) {
#endif
TEST_F(MemMapTest, MapAnonymousEmpty) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
nullptr,
@@ -143,6 +148,7 @@ TEST_F(MemMapTest, MapAnonymousEmpty) {
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
nullptr,
@@ -157,6 +163,7 @@ TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
#endif
TEST_F(MemMapTest, MapAnonymousExactAddr) {
+ CommonInit();
std::string error_msg;
// Map at an address that should work, which should succeed.
std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
@@ -200,6 +207,7 @@ TEST_F(MemMapTest, RemapAtEnd32bit) {
#endif
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
+ CommonInit();
// This test may not work under valgrind.
if (RUNNING_ON_VALGRIND == 0) {
uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
@@ -217,6 +225,7 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
+ CommonInit();
std::string error_msg;
uintptr_t ptr = 0;
ptr -= kPageSize; // Now it's close to the top.
@@ -232,6 +241,7 @@ TEST_F(MemMapTest, MapAnonymousOverflow) {
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
@@ -244,6 +254,7 @@ TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
}
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
+ CommonInit();
std::string error_msg;
std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
reinterpret_cast<uint8_t*>(0xF0000000),
@@ -257,6 +268,7 @@ TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
#endif
TEST_F(MemMapTest, CheckNoGaps) {
+ CommonInit();
std::string error_msg;
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 42d05a9307..3bd825b640 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -193,6 +193,7 @@ Runtime::~Runtime() {
Thread::Shutdown();
QuasiAtomic::Shutdown();
verifier::MethodVerifier::Shutdown();
+ MemMap::Shutdown();
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == nullptr || instance_ == this);
instance_ = nullptr;
@@ -645,6 +646,8 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
+ MemMap::Init();
+
std::unique_ptr<ParsedOptions> options(ParsedOptions::Create(raw_options, ignore_unrecognized));
if (options.get() == nullptr) {
LOG(ERROR) << "Failed to parse options";
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 35411e2660..646830acc6 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -34,6 +34,7 @@
#include "monitor.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
+#include "trace.h"
#include "utils.h"
#include "well_known_classes.h"
@@ -877,6 +878,9 @@ void ThreadList::Unregister(Thread* self) {
// suspend and so on, must happen at this point, and not in ~Thread.
self->Destroy();
+ // If tracing, remember thread id and name before thread exits.
+ Trace::StoreExitingThreadInfo(self);
+
uint32_t thin_lock_id = self->GetThreadId();
while (self != nullptr) {
// Remove and delete the Thread* while holding the thread_list_lock_ and
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 027f62d880..91a37fddaf 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -706,9 +706,21 @@ static void DumpThread(Thread* t, void* arg) {
void Trace::DumpThreadList(std::ostream& os) {
Thread* self = Thread::Current();
+ for (auto it : exited_threads_) {
+ os << it.first << "\t" << it.second << "\n";
+ }
Locks::thread_list_lock_->AssertNotHeld(self);
MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(DumpThread, &os);
}
+void Trace::StoreExitingThreadInfo(Thread* thread) {
+ MutexLock mu(thread, *Locks::trace_lock_);
+ if (the_trace_ != nullptr) {
+ std::string name;
+ thread->GetThreadName(name);
+ the_trace_->exited_threads_.Put(thread->GetTid(), name);
+ }
+}
+
} // namespace art
diff --git a/runtime/trace.h b/runtime/trace.h
index 45a02dab3c..ead1c29c72 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -104,6 +104,8 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
static std::vector<mirror::ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
static void FreeStackTrace(std::vector<mirror::ArtMethod*>* stack_trace);
+ // Save id and name of a thread before it exits.
+ static void StoreExitingThreadInfo(Thread* thread);
private:
explicit Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled);
@@ -166,6 +168,9 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
// Did we overflow the buffer recording traces?
bool overflow_;
+ // Map of thread ids and names that have already exited.
+ SafeMap<pid_t, std::string> exited_threads_;
+
DISALLOW_COPY_AND_ASSIGN(Trace);
};
diff --git a/sigchainlib/Android.mk b/sigchainlib/Android.mk
index d86735d120..e52adfc651 100644
--- a/sigchainlib/Android.mk
+++ b/sigchainlib/Android.mk
@@ -22,10 +22,10 @@ include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
-LOCAL_SRC_FILES := sigchain.cc
+LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_CLANG = $(ART_TARGET_CLANG)
LOCAL_MODULE:= libsigchain
-LOCAL_SHARED_LIBRARIES := liblog libdl
+LOCAL_SHARED_LIBRARIES := liblog
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_build.mk
include $(BUILD_SHARED_LIBRARY)
@@ -37,7 +37,7 @@ LOCAL_MODULE_TAGS := optional
LOCAL_IS_HOST_MODULE := true
LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
LOCAL_CLANG = $(ART_HOST_CLANG)
-LOCAL_SRC_FILES := sigchain.cc
+LOCAL_SRC_FILES := sigchain_dummy.cc
LOCAL_MODULE:= libsigchain
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
LOCAL_LDLIBS = -ldl
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 74bfb7e9c6..c5015e8d0f 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -107,21 +107,20 @@ static void CheckSignalValid(int signal) {
}
}
-
// Claim a signal chain for a particular signal.
-void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
CheckSignalValid(signal);
user_sigactions[signal].Claim(*oldaction);
}
-void UnclaimSignalChain(int signal) {
+extern "C" void UnclaimSignalChain(int signal) {
CheckSignalValid(signal);
user_sigactions[signal].Unclaim(signal);
}
// Invoke the user's signal handler.
-void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
// Check the arguments.
CheckSignalValid(sig);
@@ -148,10 +147,9 @@ void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
}
}
-extern "C" {
// These functions are C linkage since they replace the functions in libc.
-int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
+extern "C" int sigaction(int signal, const struct sigaction* new_action, struct sigaction* old_action) {
// If this signal has been claimed as a signal chain, record the user's
// action but don't pass it on to the kernel.
// Note that we check that the signal number is in range here. An out of range signal
@@ -187,7 +185,7 @@ int sigaction(int signal, const struct sigaction* new_action, struct sigaction*
return linked_sigaction(signal, new_action, old_action);
}
-sighandler_t signal(int signal, sighandler_t handler) {
+extern "C" sighandler_t signal(int signal, sighandler_t handler) {
struct sigaction sa;
sigemptyset(&sa.sa_mask);
sa.sa_handler = handler;
@@ -226,7 +224,7 @@ sighandler_t signal(int signal, sighandler_t handler) {
return reinterpret_cast<sighandler_t>(sa.sa_handler);
}
-int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
+extern "C" int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
const sigset_t* new_set_ptr = bionic_new_set;
sigset_t tmpset;
if (bionic_new_set != NULL) {
@@ -258,9 +256,8 @@ int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_se
SigProcMask linked_sigprocmask= reinterpret_cast<SigProcMask>(linked_sigprocmask_sym);
return linked_sigprocmask(how, new_set_ptr, bionic_old_set);
}
-} // extern "C"
-void InitializeSignalChain() {
+extern "C" void InitializeSignalChain() {
// Warning.
// Don't call this from within a signal context as it makes calls to
// dlsym. Calling into the dynamic linker will result in locks being
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
index 5bc4026850..0de0d080e8 100644
--- a/sigchainlib/sigchain.h
+++ b/sigchainlib/sigchain.h
@@ -19,16 +19,12 @@
#include <signal.h>
-namespace art {
+extern "C" void InitializeSignalChain();
-void InitializeSignalChain();
+extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction);
-void ClaimSignalChain(int signal, struct sigaction* oldaction);
+extern "C" void UnclaimSignalChain(int signal);
-void UnclaimSignalChain(int signal);
-
-void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
-
-} // namespace art
+extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context);
#endif // ART_SIGCHAINLIB_SIGCHAIN_H_
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
new file mode 100644
index 0000000000..7176f052cb
--- /dev/null
+++ b/sigchainlib/sigchain_dummy.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef HAVE_ANDROID_OS
+#include <android/log.h>
+#else
+#include <stdarg.h>
+#include <iostream>
+#endif
+
+#include <stdlib.h>
+
+#include "sigchain.h"
+
+static void log(const char* format, ...) {
+ char buf[256];
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(buf, sizeof(buf), format, ap);
+#ifdef HAVE_ANDROID_OS
+ __android_log_write(ANDROID_LOG_ERROR, "libsigchain", buf);
+#else
+ std::cout << buf << "\n";
+#endif
+ va_end(ap);
+}
+
+extern "C" void ClaimSignalChain(int signal, struct sigaction* oldaction) {
+ log("ClaimSignalChain is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void UnclaimSignalChain(int signal) {
+ log("UnclaimSignalChain is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
+ log("InvokeUserSignalHandler is not exported by the main executable.");
+ abort();
+}
+
+extern "C" void InitializeSignalChain() {
+ log("InitializeSignalChain is not exported by the main executable.");
+ abort();
+}
diff --git a/sigchainlib/version-script.txt b/sigchainlib/version-script.txt
new file mode 100644
index 0000000000..8030da4d65
--- /dev/null
+++ b/sigchainlib/version-script.txt
@@ -0,0 +1,12 @@
+{
+global:
+ ClaimSignalChain;
+ UnclaimSignalChain;
+ InvokeUserSignalHandler;
+ InitializeSignalChain;
+ sigaction;
+ signal;
+ sigprocmask;
+local:
+ *;
+};
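With sigchain.cc now compiled into the executables and its entry points exported via this version script and -Wl,--export-dynamic, libart's calls bind to the real chain inside the binary, while the standalone libsigchain only carries the aborting stubs above. The interception itself is ordinary RTLD_NEXT interposition; a reduced sketch of the idea (not the real sigchain.cc — that one defines the libc name sigaction directly and also handles claiming, signal() and sigprocmask()):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1  // For RTLD_NEXT on glibc; bionic defines it unconditionally.
#endif
#include <dlfcn.h>
#include <signal.h>

static struct sigaction g_user_actions[_NSIG];
static bool g_claimed[_NSIG];

// Sketch only: claimed signals keep the user's handler locally, everything else
// is forwarded to the next (libc) sigaction found via RTLD_NEXT.
extern "C" int my_sigaction(int sig, const struct sigaction* act, struct sigaction* oldact) {
  if (sig > 0 && sig < _NSIG && g_claimed[sig]) {
    if (oldact != nullptr) {
      *oldact = g_user_actions[sig];
    }
    if (act != nullptr) {
      g_user_actions[sig] = *act;  // Record, but do not pass on to the kernel.
    }
    return 0;
  }
  using SigactionFn = int (*)(int, const struct sigaction*, struct sigaction*);
  SigactionFn next = reinterpret_cast<SigactionFn>(dlsym(RTLD_NEXT, "sigaction"));
  return next(sig, act, oldact);
}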
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index 8c8ad163c5..743d62ca69 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -25,14 +25,14 @@ public class Main {
private static void check(int actual, int expected, String msg) {
if (actual != expected) {
System.out.println(msg + " : " + actual + " != " + expected);
- System.exit(-1);
+ System.exit(1);
}
}
private static void check(long actual, long expected, String msg) {
if (actual != expected) {
System.out.println(msg + " : " + actual + " != " + expected);
- System.exit(-1);
+ System.exit(1);
}
}
diff --git a/test/036-finalizer/src/Main.java b/test/036-finalizer/src/Main.java
index 4ebbdc57f3..e3cf4eedbc 100644
--- a/test/036-finalizer/src/Main.java
+++ b/test/036-finalizer/src/Main.java
@@ -125,18 +125,28 @@ public class Main {
}
static class FinalizeCounter {
+ public static final int maxCount = 1024;
+ public static boolean finalized[] = new boolean[maxCount];
private static Object finalizeLock = new Object();
private static volatile int finalizeCount = 0;
private int index;
static int getCount() {
return finalizeCount;
}
+ static void printNonFinalized() {
+ for (int i = 0; i < maxCount; ++i) {
+ if (!FinalizeCounter.finalized[i]) {
+ System.err.println("Element " + i + " was not finalized");
+ }
+ }
+ }
FinalizeCounter(int index) {
this.index = index;
}
protected void finalize() {
synchronized(finalizeLock) {
++finalizeCount;
+ finalized[index] = true;
}
}
}
@@ -149,11 +159,21 @@ public class Main {
}
private static void runFinalizationTest() {
- int count = 1024;
- allocFinalizableObjects(count);
+ allocFinalizableObjects(FinalizeCounter.maxCount);
Runtime.getRuntime().gc();
System.runFinalization();
- System.out.println("Finalized " + FinalizeCounter.getCount() + " / " + count);
+ System.out.println("Finalized " + FinalizeCounter.getCount() + " / " + FinalizeCounter.maxCount);
+ if (FinalizeCounter.getCount() != FinalizeCounter.maxCount) {
+ // Print out the elements that were not finalized.
+ FinalizeCounter.printNonFinalized();
+ // Try to sleep for a couple seconds to see if the objects became finalized after.
+ try {
+ java.lang.Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ }
+ System.out.println("After sleep finalized " + FinalizeCounter.getCount() + " / " + FinalizeCounter.maxCount);
+ FinalizeCounter.printNonFinalized();
+ }
}
public static class FinalizerTest {
diff --git a/test/122-missing-classes/build b/test/122-missing-classes/build
new file mode 100644
index 0000000000..62e57c86da
--- /dev/null
+++ b/test/122-missing-classes/build
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+mkdir classes
+
+# Some classes are available at compile time...
+${JAVAC} -d classes `find src -name '*.java'`
+
+# ...but not at run time.
+rm 'classes/MissingClass.class'
+rm 'classes/Main$MissingInnerClass.class'
+${DX} -JXmx256m --debug --dex --output=$TEST_NAME.jar classes
diff --git a/test/122-missing-classes/expected.txt b/test/122-missing-classes/expected.txt
new file mode 100644
index 0000000000..ce761c3761
--- /dev/null
+++ b/test/122-missing-classes/expected.txt
@@ -0,0 +1,6 @@
+Test Started
+testMissingFieldType caught NoClassDefFoundError
+testMissingMethodReturnType caught NoClassDefFoundError
+testMissingMethodParameterType caught NoClassDefFoundError
+testMissingInnerClass caught NoClassDefFoundError
+Test Finished
diff --git a/test/122-missing-classes/info.txt b/test/122-missing-classes/info.txt
new file mode 100644
index 0000000000..a734f990d5
--- /dev/null
+++ b/test/122-missing-classes/info.txt
@@ -0,0 +1 @@
+Tests the effects of missing classes.
diff --git a/test/122-missing-classes/src/Main.java b/test/122-missing-classes/src/Main.java
new file mode 100644
index 0000000000..1667d2d55b
--- /dev/null
+++ b/test/122-missing-classes/src/Main.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class Main {
+
+ public static void main(String[] args) throws Exception {
+ System.out.println("Test Started");
+ testMissingFieldType();
+ testMissingMethodReturnType();
+ testMissingMethodParameterType();
+ testMissingInnerClass();
+ System.out.println("Test Finished");
+ }
+
+ private static class ClassWithMissingFieldType {
+ MissingClass field;
+ }
+
+ private static void testMissingFieldType() throws Exception {
+ try {
+ ClassWithMissingFieldType.class.getDeclaredFields();
+ throw new AssertionError();
+ } catch (NoClassDefFoundError e) {
+ System.out.println("testMissingFieldType caught NoClassDefFoundError");
+ }
+ }
+
+ private static class ClassWithMissingMethodReturnType {
+ MissingClass method() {
+ return null;
+ }
+ }
+
+ private static void testMissingMethodReturnType() throws Exception {
+ try {
+ ClassWithMissingMethodReturnType.class.getDeclaredMethods();
+ throw new AssertionError();
+ } catch (NoClassDefFoundError e) {
+ System.out.println("testMissingMethodReturnType caught NoClassDefFoundError");
+ }
+ }
+
+ private static class ClassWithMissingMethodParameterType {
+ void method(MissingClass arg) {}
+ }
+
+ private static void testMissingMethodParameterType() throws Exception {
+ try {
+ ClassWithMissingMethodParameterType.class.getDeclaredMethods();
+ throw new AssertionError();
+ } catch (NoClassDefFoundError e) {
+ System.out.println("testMissingMethodParameterType caught NoClassDefFoundError");
+ }
+ }
+
+ private static final class MissingInnerClass {
+ }
+
+ private static void testMissingInnerClass() throws Exception {
+ try {
+ Main.class.getDeclaredClasses();
+ throw new AssertionError();
+ } catch (NoClassDefFoundError e) {
+ System.out.println("testMissingInnerClass caught NoClassDefFoundError");
+ }
+ }
+}
diff --git a/test/122-missing-classes/src/MissingClass.java b/test/122-missing-classes/src/MissingClass.java
new file mode 100644
index 0000000000..33aaa5625f
--- /dev/null
+++ b/test/122-missing-classes/src/MissingClass.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class MissingClass {
+ public static final class MissingInnerClass {
+ }
+}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 9fed1581c0..82d47d7451 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -354,12 +354,9 @@ else
if [ "$USE_GDB" = "y" ]; then
# When running under gdb, we cannot do piping and grepping...
- LD_PRELOAD=libsigchain.so $cmdline "$@"
+ $cmdline "$@"
else
- # If we are execing /bin/false we might not be on the same ISA as libsigchain.so
- # ld.so will helpfully warn us of this. Unfortunately this messes up our error
- # checking so we will just filter out the error with a grep.
- LD_PRELOAD=libsigchain.so $cmdline "$@" 2>&1 | grep -v -E "^ERROR: ld\.so: object '.+\.so' from LD_PRELOAD cannot be preloaded.*: ignored\.$"
+ $cmdline "$@" 2>&1
# Add extra detail if time out is enabled.
if [ ${PIPESTATUS[0]} = 124 ] && [ "$TIME_OUT" = "y" ]; then
echo -e "\e[91mTEST TIMED OUT!\e[0m" >&2