Merge "MIPS32: improvements in code generation (mostly 64-bit ALU ops)"
diff --git a/Android.mk b/Android.mk
index 97a82e2..34022ae 100644
--- a/Android.mk
+++ b/Android.mk
@@ -98,16 +98,26 @@
ART_HOST_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(HOST_OUT_JAVA_LIBRARIES)/core-libart-hostdex.jar \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
+ $(HOST_OUT_JAVA_LIBRARIES)/core-oj-hostdex.jar \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvm$(ART_HOST_SHLIB_EXTENSION)
ART_TARGET_DEPENDENCIES := \
$(ART_TARGET_EXECUTABLES) \
$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \
- $(TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
+ $(TARGET_OUT_JAVA_LIBRARIES)/core-oj.jar \
+ $(TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
+ $(TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \
+ $(TARGET_OUT_SHARED_LIBRARIES)/libopenjdkjvm.so
ifdef TARGET_2ND_ARCH
ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
+ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so
+ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libopenjdkjvm.so
endif
ifdef HOST_2ND_ARCH
ART_HOST_DEPENDENCIES += $(2ND_HOST_OUT_SHARED_LIBRARIES)/libjavacore.so
+ART_HOST_DEPENDENCIES += $(2ND_HOST_OUT_SHARED_LIBRARIES)/libopenjdk.so
+ART_HOST_DEPENDENCIES += $(2ND_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvm.so
endif
########################################################################
@@ -405,10 +415,10 @@
# Rules for building all dependencies for tests.
.PHONY: build-art-host-tests
-build-art-host-tests: build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES)
+build-art-host-tests: build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
.PHONY: build-art-target-tests
-build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TEST_ART_TARGET_SYNC_DEPS)
+build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TEST_ART_TARGET_SYNC_DEPS) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
########################################################################
# targets to switch back and forth from libdvm to libart
diff --git a/NOTICE b/NOTICE
index d27f6a6..d79b004 100644
--- a/NOTICE
+++ b/NOTICE
@@ -262,5 +262,3 @@
pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT}
ARM contributions llvm/lib/Target/ARM/LICENSE.TXT
md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h
-
--------------------------------------------------------------------
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 43e1457..eec471e 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -359,12 +359,6 @@
ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default $(art_host_cflags)
ART_HOST_ASFLAGS += $(art_asflags)
-# The latest clang update trips over many of the files in art and never finishes
-# compiling for aarch64 with -O3 (or -O2). Drop back to -O1 while we investigate
-# to stop punishing the build server.
-# Bug: http://b/23256622
-ART_TARGET_CLANG_CFLAGS_arm64 += -O1
-
ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index c53479c..81cd6ef 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -80,20 +80,24 @@
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
# Jar files for core.art.
-TARGET_CORE_JARS := core-libart conscrypt okhttp bouncycastle
+TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle
HOST_CORE_JARS := $(addsuffix -hostdex,$(TARGET_CORE_JARS))
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
+endif
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
# Classpath for Jack compilation: we only need core-libart.
-HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
-HOST_JACK_CLASSPATH := $(foreach dep,$(HOST_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
-TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
-TARGET_JACK_CLASSPATH := $(foreach dep,$(TARGET_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
+HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
+TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
+TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
endif
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index edf107e..c9af1c6 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -114,6 +114,9 @@
# Do you want run-tests with the --debuggable flag
ART_TEST_RUN_TEST_DEBUGGABLE ?= $(ART_TEST_FULL)
+# Do you want to test multi-part boot-image functionality?
+ART_TEST_RUN_TEST_MULTI_IMAGE ?= $(ART_TEST_FULL)
+
# Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
define ART_TEST_FAILED
( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 291a69d..99f7a2a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -441,7 +441,9 @@
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
$$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar
+ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj.jar
.PHONY: $$(gtest_rule)
$$(gtest_rule): test-art-target-sync
@@ -483,6 +485,7 @@
# Dependencies for all host gtests.
gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
$$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \
+ $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$$(ART_HOST_SHLIB_EXTENSION) \
$$(gtest_exe) \
$$(ART_GTEST_$(1)_HOST_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
@@ -564,6 +567,9 @@
LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+ # clang fails to compile art/runtime/arch/stub_test.cc for arm64 without -O1
+ # b/26275713
+ LOCAL_CLANG_CFLAGS_arm64 += -O1
include $$(BUILD_EXECUTABLE)
library_path :=
2nd_library_path :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 592843e..884f698 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -42,6 +42,7 @@
# $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
# $(4): wrapper, e.g., valgrind.
# $(5): dex2oat suffix, e.g, valgrind requires 32 right now.
+# $(6): multi-image.
# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
# run-test --no-image
define create-core-oat-host-rules
@@ -92,14 +93,25 @@
$$(error found $(2) expected pic or no-pic)
endif
- core_image_name := $($(3)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_IMG_SUFFIX)
- core_oat_name := $($(3)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_OAT_SUFFIX)
+ # If $(6) is true, generate a multi-image.
+ ifeq ($(6),true)
+ core_multi_infix := -multi
+ core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar
+ core_multi_group := _multi
+ else
+ core_multi_infix :=
+ core_multi_param :=
+ core_multi_group :=
+ endif
+
+ core_image_name := $($(3)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$$(core_multi_infix)$(4)$(CORE_IMG_SUFFIX)
+ core_oat_name := $($(3)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$$(core_multi_infix)$(4)$(CORE_OAT_SUFFIX)
# Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
ifeq ($(3),)
- $(4)HOST_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name)
+ $(4)HOST_CORE_IMAGE_$(1)_$(2)$$(core_multi_group)_64 := $$(core_image_name)
else
- $(4)HOST_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name)
+ $(4)HOST_CORE_IMAGE_$(1)_$(2)$$(core_multi_group)_32 := $$(core_image_name)
endif
$(4)HOST_CORE_IMG_OUTS += $$(core_image_name)
$(4)HOST_CORE_OAT_OUTS += $$(core_oat_name)
@@ -111,8 +123,9 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
+$$(core_image_name): PRIVATE_CORE_MULTI_PARAM := $$(core_multi_param)
$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
- @echo "host dex2oat: $$@ ($$?)"
+ @echo "host dex2oat: $$@"
@mkdir -p $$(dir $$@)
$$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
@@ -122,7 +135,7 @@
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(3)ART_HOST_ARCH) \
$$(LOCAL_$(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
--host --android-root=$$(HOST_OUT) --include-patch-information --generate-debug-info \
- $$(PRIVATE_CORE_COMPILE_OPTIONS)
+ $$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS)
$$(core_oat_name): $$(core_image_name)
@@ -138,32 +151,40 @@
# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
# $(2): wrapper.
# $(3): dex2oat suffix.
+# $(4): multi-image.
define create-core-oat-host-rule-combination
- $(call create-core-oat-host-rules,$(1),no-pic,,$(2),$(3))
- $(call create-core-oat-host-rules,$(1),pic,,$(2),$(3))
+ $(call create-core-oat-host-rules,$(1),no-pic,,$(2),$(3),$(4))
+ $(call create-core-oat-host-rules,$(1),pic,,$(2),$(3),$(4))
ifneq ($(HOST_PREFER_32_BIT),true)
- $(call create-core-oat-host-rules,$(1),no-pic,2ND_,$(2),$(3))
- $(call create-core-oat-host-rules,$(1),pic,2ND_,$(2),$(3))
+ $(call create-core-oat-host-rules,$(1),no-pic,2ND_,$(2),$(3),$(4))
+ $(call create-core-oat-host-rules,$(1),pic,2ND_,$(2),$(3),$(4))
endif
endef
-$(eval $(call create-core-oat-host-rule-combination,default,,))
-$(eval $(call create-core-oat-host-rule-combination,optimizing,,))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,,))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,,))
-$(eval $(call create-core-oat-host-rule-combination,jit,,))
+$(eval $(call create-core-oat-host-rule-combination,default,,,false))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,,,false))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,,,false))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false))
+$(eval $(call create-core-oat-host-rule-combination,jit,,,false))
+$(eval $(call create-core-oat-host-rule-combination,default,,,true))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,,,true))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,,,true))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true))
+$(eval $(call create-core-oat-host-rule-combination,jit,,,true))
valgrindHOST_CORE_IMG_OUTS :=
valgrindHOST_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32))
-$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32))
-$(eval $(call create-core-oat-host-rule-combination,jit,valgrind,32))
+$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32,false))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false))
+$(eval $(call create-core-oat-host-rule-combination,jit,valgrind,32,false))
valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
+test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS)
+
define create-core-oat-target-rules
core_compile_options :=
core_image_name :=
@@ -238,7 +259,7 @@
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
- @echo "target dex2oat: $$@ ($$?)"
+ @echo "target dex2oat: $$@"
@mkdir -p $$(dir $$@)
$$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 4aced5b..4dcaf80 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -80,8 +80,7 @@
}
}
-static Runtime* StartRuntime(const char* boot_image_location,
- InstructionSet instruction_set) {
+static Runtime* StartRuntime(const char* boot_image_location, InstructionSet instruction_set) {
CHECK(boot_image_location != nullptr);
RuntimeOptions options;
diff --git a/compiler/Android.mk b/compiler/Android.mk
index bdd9a84..f0bf499 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -92,7 +92,6 @@
optimizing/parallel_move_resolver.cc \
optimizing/pc_relative_fixups_x86.cc \
optimizing/prepare_for_register_allocation.cc \
- optimizing/primitive_type_propagation.cc \
optimizing/reference_type_propagation.cc \
optimizing/register_allocator.cc \
optimizing/sharpening.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 638b897..278c490 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -142,10 +142,7 @@
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
size_t pointer_size = class_linker_->GetImagePointerSize();
- for (auto& m : klass->GetDirectMethods(pointer_size)) {
- MakeExecutable(&m);
- }
- for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ for (auto& m : klass->GetMethods(pointer_size)) {
MakeExecutable(&m);
}
}
@@ -211,7 +208,8 @@
false,
timer_.get(),
-1,
- ""));
+ /* profile_file */ "",
+ /* dex_to_oat_map */ nullptr));
// We typically don't generate an image in unit tests, disable this optimization by default.
compiler_driver_->SetSupportBootImageFixup(false);
}
@@ -259,10 +257,7 @@
mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader);
CHECK(klass != nullptr) << "Class not found " << class_name;
auto pointer_size = class_linker_->GetImagePointerSize();
- for (auto& m : klass->GetDirectMethods(pointer_size)) {
- CompileMethod(&m);
- }
- for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+ for (auto& m : klass->GetMethods(pointer_size)) {
CompileMethod(&m);
}
}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 15a4ba0..7a93613 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -23,7 +23,7 @@
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
-#include "length_prefixed_array.h"
+#include "base/length_prefixed_array.h"
#include "method_reference.h"
#include "utils/array_ref.h"
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index 3b88021..0130ef4 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -448,6 +448,10 @@
}
bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ // b/26327751.
+ if ((true)) {
+ return false;
+ }
int32_t encoded_imm = EncodeImmSingle(bit_cast<uint32_t, float>(0.5f));
A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
RegLocation rl_src = info->args[0];
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index f48947d..32d7518 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -22,6 +22,7 @@
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "dex/compiler_ir.h"
+#include "driver/compiler_driver.h"
#include "thread-inl.h"
#include "dex/mir_graph.h"
#include "dex/quick/mir_to_lir.h"
@@ -777,6 +778,17 @@
bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
uint32_t method_idx) {
+ // Check that we're allowed to inline.
+ {
+ CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
+ if (!cu->compiler_driver->MayInline(dex_file_, cu->dex_file)) {
+ VLOG(compiler) << "Won't inline " << method_idx << " in "
+ << cu->dex_file->GetLocation() << " from "
+ << dex_file_->GetLocation();
+ return false;
+ }
+ }
+
InlineMethod method;
{
ReaderMutexLock mu(Thread::Current(), lock_);
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 24daf2f..bcf20c7 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -58,6 +58,7 @@
CompilerOptions::kDefaultNumDexMethodsThreshold,
CompilerOptions::kDefaultInlineDepthLimit,
CompilerOptions::kDefaultInlineMaxCodeUnits,
+ nullptr,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
@@ -74,9 +75,25 @@
std::unique_ptr<const InstructionSetFeatures> isa_features;
std::string error;
isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
- CompilerDriver driver(&compiler_options, &verification_results, &method_inliner_map,
- Compiler::kQuick, isa, isa_features.get(),
- false, nullptr, nullptr, nullptr, 0, false, false, "", false, 0, -1, "");
+ CompilerDriver driver(&compiler_options,
+ &verification_results,
+ &method_inliner_map,
+ Compiler::kQuick,
+ isa,
+ isa_features.get(),
+ false,
+ nullptr,
+ nullptr,
+ nullptr,
+ 0,
+ false,
+ false,
+ "",
+ false,
+ 0,
+ -1,
+ "",
+ nullptr);
ClassLinker* linker = nullptr;
CompilationUnit cu(&pool, isa, &driver, linker);
DexFile::CodeItem code_item { 0, 0, 0, 0, 0, 0, { 0 } }; // NOLINT
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index e5d3841..1c2a619 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -508,6 +508,7 @@
{ kX86Lfence, kReg, NO_OPERAND, { 0, 0, 0x0F, 0xAE, 0, 5, 0, 0, false }, "Lfence", "" },
{ kX86Mfence, kReg, NO_OPERAND, { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0, false }, "Mfence", "" },
{ kX86Sfence, kReg, NO_OPERAND, { 0, 0, 0x0F, 0xAE, 0, 7, 0, 0, false }, "Sfence", "" },
+ { kX86LockAdd32MI8, kMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0xF0, 0, 0x83, 0x0, 0x0, 0, 0, 1, false }, "LockAdd32MI8", "[!0r+!1d],!2d" },
EXT_0F_ENCODING_MAP(Imul16, 0x66, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
EXT_0F_ENCODING_MAP(Imul32, 0x00, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index e977ebf..9deabc0 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -41,6 +41,7 @@
CompilerOptions::kDefaultNumDexMethodsThreshold,
CompilerOptions::kDefaultInlineDepthLimit,
CompilerOptions::kDefaultInlineMaxCodeUnits,
+ nullptr,
false,
CompilerOptions::kDefaultTopKProfileThreshold,
false,
@@ -72,7 +73,8 @@
false,
0,
-1,
- ""));
+ "",
+ nullptr));
cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr));
DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
cu_->arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 75f3fef..4ff7993 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -20,7 +20,7 @@
#include <inttypes.h>
#include <string>
-#include "arch/instruction_set_features.h"
+#include "arch/x86/instruction_set_features_x86.h"
#include "art_method.h"
#include "backend_x86.h"
#include "base/logging.h"
@@ -585,6 +585,8 @@
case kX86LockCmpxchgAR:
case kX86LockCmpxchg64M:
case kX86LockCmpxchg64A:
+ case kX86LockCmpxchg64AR:
+ case kX86LockAdd32MI8:
case kX86XchgMR:
case kX86Mfence:
// Atomic memory instructions provide full barrier.
@@ -598,7 +600,9 @@
}
bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
- if (!cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
+ const X86InstructionSetFeatures* features =
+ cu_->compiler_driver->GetInstructionSetFeatures()->AsX86InstructionSetFeatures();
+ if (!features->IsSmp()) {
return false;
}
// Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
@@ -610,20 +614,34 @@
* All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86 memory model.
* For those cases, all we need to ensure is that there is a scheduling barrier in place.
*/
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ bool use_locked_add = features->PrefersLockedAddSynchronization();
if (barrier_kind == kAnyAny) {
- // If no LIR exists already that can be used a barrier, then generate an mfence.
+ // If no LIR exists already that can be used a barrier, then generate a barrier.
if (mem_barrier == nullptr) {
- mem_barrier = NewLIR0(kX86Mfence);
+ if (use_locked_add) {
+ mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
+ } else {
+ mem_barrier = NewLIR0(kX86Mfence);
+ }
ret = true;
}
- // If last instruction does not provide full barrier, then insert an mfence.
+ // If last instruction does not provide full barrier, then insert a barrier.
if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
- mem_barrier = NewLIR0(kX86Mfence);
+ if (use_locked_add) {
+ mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
+ } else {
+ mem_barrier = NewLIR0(kX86Mfence);
+ }
ret = true;
}
} else if (barrier_kind == kNTStoreStore) {
- mem_barrier = NewLIR0(kX86Sfence);
+ if (use_locked_add) {
+ mem_barrier = NewLIR3(kX86LockAdd32MI8, rs_rSP.GetReg(), 0, 0);
+ } else {
+ mem_barrier = NewLIR0(kX86Sfence);
+ }
ret = true;
}
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index d6a6a60..8cd6574 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -606,6 +606,7 @@
// load-from-memory and store-to-memory instructions
kX86Sfence, // memory barrier to serialize all previous
// store-to-memory instructions
+ kX86LockAdd32MI8, // locked add used to serialize memory instructions
Binary0fOpCode(kX86Imul16), // 16bit multiply
Binary0fOpCode(kX86Imul32), // 32bit multiply
Binary0fOpCode(kX86Imul64), // 64bit multiply
diff --git a/compiler/dex/type_inference_test.cc b/compiler/dex/type_inference_test.cc
index 528a18c..e2c0d32 100644
--- a/compiler/dex/type_inference_test.cc
+++ b/compiler/dex/type_inference_test.cc
@@ -253,7 +253,7 @@
&cu_, cu_.class_loader, cu_.class_linker, *cu_.dex_file, nullptr /* code_item not used */,
0u /* class_def_idx not used */, 0u /* method_index not used */,
cu_.access_flags, nullptr /* verified_method not used */,
- NullHandle<mirror::DexCache>()));
+ ScopedNullHandle<mirror::DexCache>()));
cu_.mir_graph->current_method_ = 0u;
code_item_ = static_cast<DexFile::CodeItem*>(
cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index ef10b67..d6961a0 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -20,8 +20,8 @@
#include <iosfwd>
#include <memory>
+#include "base/length_prefixed_array.h"
#include "base/macros.h"
-#include "length_prefixed_array.h"
#include "utils/array_ref.h"
#include "utils/dedupe_set.h"
#include "utils/swap_space.h"
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index c6dbd24..84fb432 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -45,7 +45,8 @@
false,
nullptr,
-1,
- "");
+ "",
+ nullptr);
CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
ASSERT_TRUE(storage->DedupeEnabled()); // The default.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index ba8f1d0..afb4b71 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -334,19 +334,21 @@
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
-CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
- VerificationResults* verification_results,
- DexFileToMethodInlinerMap* method_inliner_map,
- Compiler::Kind compiler_kind,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- bool boot_image, std::unordered_set<std::string>* image_classes,
- std::unordered_set<std::string>* compiled_classes,
- std::unordered_set<std::string>* compiled_methods,
- size_t thread_count, bool dump_stats, bool dump_passes,
- const std::string& dump_cfg_file_name, bool dump_cfg_append,
- CumulativeLogger* timer, int swap_fd,
- const std::string& profile_file)
+CompilerDriver::CompilerDriver(
+ const CompilerOptions* compiler_options,
+ VerificationResults* verification_results,
+ DexFileToMethodInlinerMap* method_inliner_map,
+ Compiler::Kind compiler_kind,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features,
+ bool boot_image, std::unordered_set<std::string>* image_classes,
+ std::unordered_set<std::string>* compiled_classes,
+ std::unordered_set<std::string>* compiled_methods,
+ size_t thread_count, bool dump_stats, bool dump_passes,
+ const std::string& dump_cfg_file_name, bool dump_cfg_append,
+ CumulativeLogger* timer, int swap_fd,
+ const std::string& profile_file,
+ const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map)
: compiler_options_(compiler_options),
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
@@ -374,6 +376,7 @@
compiler_context_(nullptr),
support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
dex_files_for_oat_file_(nullptr),
+ dex_file_oat_filename_map_(dex_to_oat_map),
compiled_method_storage_(swap_fd) {
DCHECK(compiler_options_ != nullptr);
DCHECK(verification_results_ != nullptr);
@@ -790,10 +793,7 @@
virtual bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetVirtualMethods(pointer_size)) {
- ResolveExceptionsForMethod(&m, pointer_size);
- }
- for (auto& m : c->GetDirectMethods(pointer_size)) {
+ for (auto& m : c->GetMethods(pointer_size)) {
ResolveExceptionsForMethod(&m, pointer_size);
}
return true;
@@ -901,8 +901,10 @@
*dex_file,
Runtime::Current()->GetLinearAlloc())));
Handle<mirror::Class> klass(hs2.NewHandle(
- class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
- NullHandle<mirror::ClassLoader>())));
+ class_linker->ResolveType(*dex_file,
+ exception_type_idx,
+ dex_cache,
+ ScopedNullHandle<mirror::ClassLoader>())));
if (klass.Get() == nullptr) {
const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
@@ -1539,6 +1541,12 @@
use_dex_cache = true;
}
}
+ if (!use_dex_cache && IsBootImage()) {
+ if (!AreInSameOatFile(&(const_cast<mirror::Class*>(referrer_class)->GetDexFile()),
+ &declaring_class->GetDexFile())) {
+ use_dex_cache = true;
+ }
+ }
// The method is defined not within this dex file. We need a dex cache slot within the current
// dex file or direct pointers.
bool must_use_direct_pointers = false;
@@ -1572,12 +1580,14 @@
*type = sharp_type;
}
} else {
- auto* image_space = heap->GetBootImageSpace();
bool method_in_image = false;
- if (image_space != nullptr) {
+ const std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
+ for (gc::space::ImageSpace* image_space : image_spaces) {
const auto& method_section = image_space->GetImageHeader().GetMethodsSection();
- method_in_image = method_section.Contains(
- reinterpret_cast<uint8_t*>(method) - image_space->Begin());
+ if (method_section.Contains(reinterpret_cast<uint8_t*>(method) - image_space->Begin())) {
+ method_in_image = true;
+ break;
+ }
}
if (method_in_image || compiling_boot || runtime->UseJit()) {
// We know we must be able to get to the method in the image, so use that pointer.
@@ -2573,4 +2583,15 @@
return inliner->IsStringInitMethodIndex(method_index);
}
+bool CompilerDriver::MayInlineInternal(const DexFile* inlined_from,
+ const DexFile* inlined_into) const {
+ // We're not allowed to inline across dex files if we're the no-inline-from dex file.
+ if (inlined_from != inlined_into &&
+ compiler_options_->GetNoInlineFromDexFile() == inlined_from) {
+ return false;
+ }
+
+ return true;
+}
+
} // namespace art
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index f0360ce..fa0cb9a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -97,7 +97,8 @@
size_t thread_count, bool dump_stats, bool dump_passes,
const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
- const std::string& profile_file);
+ const std::string& profile_file,
+ const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map);
~CompilerDriver();
@@ -113,6 +114,18 @@
: ArrayRef<const DexFile* const>();
}
+ // Are the given dex files compiled into the same oat file? Should only be called after
+ // GetDexFilesForOatFile, as the conservative answer (when we don't have a map) is true.
+ bool AreInSameOatFile(const DexFile* d1, const DexFile* d2) {
+ if (dex_file_oat_filename_map_ == nullptr) {
+ // TODO: Check for this wrt/ apps and boot image calls.
+ return true;
+ }
+ auto it1 = dex_file_oat_filename_map_->find(d1);
+ auto it2 = dex_file_oat_filename_map_->find(d2);
+ return it1 == it2;
+ }
+
void CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
@@ -471,6 +484,13 @@
bool CanAssumeClassIsLoaded(mirror::Class* klass)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const {
+ if (!kIsTargetBuild) {
+ return MayInlineInternal(inlined_from, inlined_into);
+ }
+ return true;
+ }
+
private:
// Return whether the declaring class of `resolved_member` is
// available to `referrer_class` for read or write access using two
@@ -587,6 +607,8 @@
ThreadPool* thread_pool, TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_);
+ bool MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const;
+
const CompilerOptions* const compiler_options_;
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
@@ -621,9 +643,8 @@
const bool boot_image_;
- // If image_ is true, specifies the classes that will be included in
- // the image. Note if image_classes_ is null, all classes are
- // included in the image.
+ // If image_ is true, specifies the classes that will be included in the image.
+ // Note if image_classes_ is null, all classes are included in the image.
std::unique_ptr<std::unordered_set<std::string>> image_classes_;
// Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
@@ -663,6 +684,9 @@
// List of dex files that will be stored in the oat file.
const std::vector<const DexFile*>* dex_files_for_oat_file_;
+ // Map from dex files to the oat file (name) they will be compiled into.
+ const std::unordered_map<const DexFile*, const char*>* dex_file_oat_filename_map_;
+
CompiledMethodStorage compiled_method_storage_;
friend class CompileClassVisitor;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index f8de9fa..82c0e86 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -86,10 +86,7 @@
mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
CHECK(c != nullptr);
const auto pointer_size = class_linker->GetImagePointerSize();
- for (auto& m : c->GetDirectMethods(pointer_size)) {
- MakeExecutable(&m);
- }
- for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ for (auto& m : c->GetMethods(pointer_size)) {
MakeExecutable(&m);
}
}
@@ -145,16 +142,21 @@
// TODO: check that all Method::GetCode() values are non-null
}
-TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
+TEST_F(CompilerDriverTest, DISABLED_AbstractMethodErrorStub) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING_WITH_QUICK();
TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK();
TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
jobject class_loader;
{
ScopedObjectAccess soa(Thread::Current());
- CompileVirtualMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Class", "isFinalizable",
+ CompileVirtualMethod(ScopedNullHandle<mirror::ClassLoader>(),
+ "java.lang.Class",
+ "isFinalizable",
"()Z");
- CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
+ CompileDirectMethod(ScopedNullHandle<mirror::ClassLoader>(),
+ "java.lang.Object",
+ "<init>",
+ "()V");
class_loader = LoadDex("AbstractMethod");
}
ASSERT_TRUE(class_loader != nullptr);
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 4d2d924..209bb5a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -31,9 +31,11 @@
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
inline_depth_limit_(kUnsetInlineDepthLimit),
inline_max_code_units_(kUnsetInlineMaxCodeUnits),
+ no_inline_from_(nullptr),
include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
+ native_debuggable_(kDefaultNativeDebuggable),
generate_debug_info_(kDefaultGenerateDebugInfo),
implicit_null_checks_(true),
implicit_so_checks_(true),
@@ -58,6 +60,7 @@
size_t num_dex_methods_threshold,
size_t inline_depth_limit,
size_t inline_max_code_units,
+ const DexFile* no_inline_from,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
@@ -78,9 +81,11 @@
num_dex_methods_threshold_(num_dex_methods_threshold),
inline_depth_limit_(inline_depth_limit),
inline_max_code_units_(inline_max_code_units),
+ no_inline_from_(no_inline_from),
include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
debuggable_(debuggable),
+ native_debuggable_(kDefaultNativeDebuggable),
generate_debug_info_(generate_debug_info),
implicit_null_checks_(implicit_null_checks),
implicit_so_checks_(implicit_so_checks),
@@ -207,6 +212,10 @@
} else if (option == "--debuggable") {
debuggable_ = true;
generate_debug_info_ = true;
+ } else if (option == "--native-debuggable") {
+ native_debuggable_ = true;
+ debuggable_ = true;
+ generate_debug_info_ = true;
} else if (option.starts_with("--top-k-profile-threshold=")) {
ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage);
} else if (option == "--include-patch-information") {
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index e6acab4..9ad1bee 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -49,6 +49,7 @@
static const size_t kDefaultTinyMethodThreshold = 20;
static const size_t kDefaultNumDexMethodsThreshold = 900;
static constexpr double kDefaultTopKProfileThreshold = 90.0;
+ static const bool kDefaultNativeDebuggable = false;
static const bool kDefaultGenerateDebugInfo = kIsDebugBuild;
static const bool kDefaultIncludePatchInformation = false;
static const size_t kDefaultInlineDepthLimit = 3;
@@ -71,6 +72,7 @@
size_t num_dex_methods_threshold,
size_t inline_depth_limit,
size_t inline_max_code_units,
+ const DexFile* no_inline_from,
bool include_patch_information,
double top_k_profile_threshold,
bool debuggable,
@@ -162,6 +164,10 @@
return debuggable_;
}
+ bool GetNativeDebuggable() const {
+ return native_debuggable_;
+ }
+
bool GetGenerateDebugInfo() const {
return generate_debug_info_;
}
@@ -212,6 +218,10 @@
return abort_on_hard_verifier_failure_;
}
+ const DexFile* GetNoInlineFromDexFile() const {
+ return no_inline_from_;
+ }
+
bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
private:
@@ -236,10 +246,15 @@
size_t num_dex_methods_threshold_;
size_t inline_depth_limit_;
size_t inline_max_code_units_;
+
+ // A dex file from which we should not inline code.
+ const DexFile* no_inline_from_;
+
bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
bool debuggable_;
+ bool native_debuggable_;
bool generate_debug_info_;
bool implicit_null_checks_;
bool implicit_so_checks_;
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index 06553a6..2bc8c89 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -36,6 +36,15 @@
namespace art {
namespace dwarf {
+// The ARM specification defines three special mapping symbols
+// $a, $t and $d which mark ARM, Thumb and data ranges respectively.
+// These symbols can be used by tools, for example, to pretty
+// print instructions correctly. Objdump will use them if they
+// exist, but it will still work well without them.
+// However, these extra symbols take space, so let's just generate
+// one symbol which marks the whole .text section as code.
+constexpr bool kGenerateSingleArmMappingSymbol = true;
+
static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) {
switch (isa) {
case kArm:
@@ -207,8 +216,7 @@
void WriteCFISection(ElfBuilder<ElfTypes>* builder,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat format) {
- CHECK(format == dwarf::DW_DEBUG_FRAME_FORMAT ||
- format == dwarf::DW_EH_FRAME_FORMAT);
+ CHECK(format == DW_DEBUG_FRAME_FORMAT || format == DW_EH_FRAME_FORMAT);
typedef typename ElfTypes::Addr Elf_Addr;
std::vector<uint32_t> binary_search_table;
@@ -220,7 +228,7 @@
}
// Write .eh_frame/.debug_frame section.
- auto* cfi_section = (format == dwarf::DW_DEBUG_FRAME_FORMAT
+ auto* cfi_section = (format == DW_DEBUG_FRAME_FORMAT
? builder->GetDebugFrame()
: builder->GetEhFrame());
{
@@ -1134,21 +1142,87 @@
}
}
+template <typename ElfTypes>
+void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
+ const ArrayRef<const MethodDebugInfo>& method_infos) {
+ bool generated_mapping_symbol = false;
+ auto* strtab = builder->GetStrTab();
+ auto* symtab = builder->GetSymTab();
+
+ if (method_infos.empty()) {
+ return;
+ }
+
+ // Find all addresses (low_pc) which contain deduped methods.
+ // The first instance of method is not marked deduped_, but the rest is.
+ std::unordered_set<uint32_t> deduped_addresses;
+ for (const MethodDebugInfo& info : method_infos) {
+ if (info.deduped_) {
+ deduped_addresses.insert(info.low_pc_);
+ }
+ }
+
+ strtab->Start();
+ strtab->Write(""); // strtab should start with empty string.
+ for (const MethodDebugInfo& info : method_infos) {
+ if (info.deduped_) {
+ continue; // Add symbol only for the first instance.
+ }
+ std::string name = PrettyMethod(info.dex_method_index_, *info.dex_file_, true);
+ if (deduped_addresses.find(info.low_pc_) != deduped_addresses.end()) {
+ name += " [DEDUPED]";
+ }
+
+ uint32_t low_pc = info.low_pc_;
+ // Add in code delta, e.g., thumb bit 0 for Thumb2 code.
+ low_pc += info.compiled_method_->CodeDelta();
+ symtab->Add(strtab->Write(name), builder->GetText(), low_pc,
+ true, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
+
+ // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
+ // instructions, so that disassembler tools can correctly disassemble.
+ // Note that even if we generate just a single mapping symbol, ARM's Streamline
+ // requires it to match function symbol. Just address 0 does not work.
+ if (info.compiled_method_->GetInstructionSet() == kThumb2) {
+ if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
+ symtab->Add(strtab->Write("$t"), builder->GetText(), info.low_pc_ & ~1,
+ true, 0, STB_LOCAL, STT_NOTYPE);
+ generated_mapping_symbol = true;
+ }
+ }
+ }
+ strtab->End();
+
+ // Symbols are buffered and written after names (because they are smaller).
+ // We could also do two passes in this function to avoid the buffering.
+ symtab->Start();
+ symtab->Write();
+ symtab->End();
+}
+
+template <typename ElfTypes>
+void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+ const ArrayRef<const MethodDebugInfo>& method_infos,
+ CFIFormat cfi_format) {
+ if (!method_infos.empty()) {
+ // Add methods to .symtab.
+ WriteDebugSymbols(builder, method_infos);
+ // Generate CFI (stack unwinding information).
+ WriteCFISection(builder, method_infos, cfi_format);
+ // Write DWARF .debug_* sections.
+ WriteDebugSections(builder, method_infos);
+ }
+}
+
// Explicit instantiations
-template void WriteCFISection<ElfTypes32>(
+template void WriteDebugInfo<ElfTypes32>(
ElfBuilder<ElfTypes32>* builder,
const ArrayRef<const MethodDebugInfo>& method_infos,
- CFIFormat format);
-template void WriteCFISection<ElfTypes64>(
+ CFIFormat cfi_format);
+template void WriteDebugInfo<ElfTypes64>(
ElfBuilder<ElfTypes64>* builder,
const ArrayRef<const MethodDebugInfo>& method_infos,
- CFIFormat format);
-template void WriteDebugSections<ElfTypes32>(
- ElfBuilder<ElfTypes32>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos);
-template void WriteDebugSections<ElfTypes64>(
- ElfBuilder<ElfTypes64>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos);
+ CFIFormat cfi_format);
} // namespace dwarf
} // namespace art
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 9ed102f..7ec0be1 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -25,14 +25,10 @@
namespace art {
namespace dwarf {
-template<typename ElfTypes>
-void WriteCFISection(ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
- CFIFormat format);
-
-template<typename ElfTypes>
-void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos);
+template <typename ElfTypes>
+void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+ const ArrayRef<const MethodDebugInfo>& method_infos,
+ CFIFormat cfi_format);
} // namespace dwarf
} // namespace art
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e411496..7b1bdd7 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -46,15 +46,6 @@
// Let's use .debug_frame because it is easier to strip or compress.
constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT;
-// The ARM specification defines three special mapping symbols
-// $a, $t and $d which mark ARM, Thumb and data ranges respectively.
-// These symbols can be used by tools, for example, to pretty
-// print instructions correctly. Objdump will use them if they
-// exist, but it will still work well without them.
-// However, these extra symbols take space, so let's just generate
-// one symbol which marks the whole .text section as code.
-constexpr bool kGenerateSingleArmMappingSymbol = true;
-
template <typename ElfTypes>
class ElfWriterQuick FINAL : public ElfWriter {
public:
@@ -99,10 +90,6 @@
}
template <typename ElfTypes>
-static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const dwarf::MethodDebugInfo>& method_infos);
-
-template <typename ElfTypes>
ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
const CompilerOptions* compiler_options,
File* elf_file)
@@ -165,14 +152,7 @@
void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
if (compiler_options_->GetGenerateDebugInfo()) {
- if (!method_infos.empty()) {
- // Add methods to .symtab.
- WriteDebugSymbols(builder_.get(), method_infos);
- // Generate CFI (stack unwinding information).
- dwarf::WriteCFISection(builder_.get(), method_infos, kCFIFormat);
- // Write DWARF .debug_* sections.
- dwarf::WriteDebugSections(builder_.get(), method_infos);
- }
+ dwarf::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat);
}
}
@@ -199,64 +179,6 @@
return builder_->GetStream();
}
-template <typename ElfTypes>
-static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
- bool generated_mapping_symbol = false;
- auto* strtab = builder->GetStrTab();
- auto* symtab = builder->GetSymTab();
-
- if (method_infos.empty()) {
- return;
- }
-
- // Find all addresses (low_pc) which contain deduped methods.
- // The first instance of method is not marked deduped_, but the rest is.
- std::unordered_set<uint32_t> deduped_addresses;
- for (const dwarf::MethodDebugInfo& info : method_infos) {
- if (info.deduped_) {
- deduped_addresses.insert(info.low_pc_);
- }
- }
-
- strtab->Start();
- strtab->Write(""); // strtab should start with empty string.
- for (const dwarf::MethodDebugInfo& info : method_infos) {
- if (info.deduped_) {
- continue; // Add symbol only for the first instance.
- }
- std::string name = PrettyMethod(info.dex_method_index_, *info.dex_file_, true);
- if (deduped_addresses.find(info.low_pc_) != deduped_addresses.end()) {
- name += " [DEDUPED]";
- }
-
- uint32_t low_pc = info.low_pc_;
- // Add in code delta, e.g., thumb bit 0 for Thumb2 code.
- low_pc += info.compiled_method_->CodeDelta();
- symtab->Add(strtab->Write(name), builder->GetText(), low_pc,
- true, info.high_pc_ - info.low_pc_, STB_GLOBAL, STT_FUNC);
-
- // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2
- // instructions, so that disassembler tools can correctly disassemble.
- // Note that even if we generate just a single mapping symbol, ARM's Streamline
- // requires it to match function symbol. Just address 0 does not work.
- if (info.compiled_method_->GetInstructionSet() == kThumb2) {
- if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) {
- symtab->Add(strtab->Write("$t"), builder->GetText(), info.low_pc_ & ~1,
- true, 0, STB_LOCAL, STT_NOTYPE);
- generated_mapping_symbol = true;
- }
- }
- }
- strtab->End();
-
- // Symbols are buffered and written after names (because they are smaller).
- // We could also do two passes in this function to avoid the buffering.
- symtab->Start();
- symtab->Write();
- symtab->End();
-}
-
// Explicit instantiations
template class ElfWriterQuick<ElfTypes32>;
template class ElfWriterQuick<ElfTypes64>;
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 15812dc..6859605 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -72,11 +72,18 @@
ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
const uintptr_t requested_image_base = ART_BASE_ADDRESS;
+ std::unordered_map<const DexFile*, const char*> dex_file_to_oat_filename_map;
+ std::vector<const char*> oat_filename_vector(1, oat_filename.c_str());
+ for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
+ dex_file_to_oat_filename_map.emplace(dex_file, oat_filename.c_str());
+ }
std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_,
requested_image_base,
/*compile_pic*/false,
/*compile_app_image*/false,
- storage_mode));
+ storage_mode,
+ oat_filename_vector,
+ dex_file_to_oat_filename_map));
// TODO: compile_pic should be a test argument.
{
{
@@ -131,12 +138,12 @@
ASSERT_TRUE(dup_oat.get() != nullptr);
{
- bool success_image = writer->Write(kInvalidImageFd,
- image_file.GetFilename(),
- dup_oat->GetPath(),
- dup_oat->GetPath());
+ std::vector<const char*> dup_oat_filename(1, dup_oat->GetPath().c_str());
+ std::vector<const char*> dup_image_filename(1, image_file.GetFilename().c_str());
+ bool success_image = writer->Write(kInvalidImageFd, dup_image_filename, dup_oat_filename);
ASSERT_TRUE(success_image);
- bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
+ bool success_fixup = ElfWriter::Fixup(dup_oat.get(),
+ writer->GetOatDataBegin(dup_oat_filename[0]));
ASSERT_TRUE(success_fixup);
ASSERT_EQ(dup_oat->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
@@ -181,7 +188,7 @@
java_lang_dex_file_ = nullptr;
MemMap::Init();
- std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
+ std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileNames()[0].c_str()));
RuntimeOptions options;
std::string image("-Ximage:");
@@ -203,10 +210,11 @@
class_linker_ = runtime_->GetClassLinker();
gc::Heap* heap = Runtime::Current()->GetHeap();
- ASSERT_TRUE(heap->HasImageSpace());
+ ASSERT_TRUE(heap->HasBootImageSpace());
ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
- gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
+ // We loaded the runtime with an explicit image, so it must exist.
+ gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[0];
ASSERT_TRUE(image_space != nullptr);
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
// Uncompressed, image should be smaller than file.
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index fce08ea..17d0f61 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -154,147 +154,174 @@
}
bool ImageWriter::Write(int image_fd,
- const std::string& image_filename,
- const std::string& oat_filename,
- const std::string& oat_location) {
- CHECK(!image_filename.empty());
+ const std::vector<const char*>& image_filenames,
+ const std::vector<const char*>& oat_filenames) {
+ CHECK(!image_filenames.empty());
+ CHECK(!oat_filenames.empty());
+ CHECK_EQ(image_filenames.size(), oat_filenames.size());
- std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
- return false;
- }
- std::string error_msg;
- oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, nullptr, &error_msg);
- if (oat_file_ == nullptr) {
- PLOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
- << ": " << error_msg;
- oat_file->Erase();
- return false;
- }
- Runtime::Current()->GetOatFileManager().RegisterOatFile(
+ size_t oat_file_offset = 0;
+
+ for (size_t i = 0; i < oat_filenames.size(); ++i) {
+ const char* oat_filename = oat_filenames[i];
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to open oat file " << oat_filename;
+ return false;
+ }
+ std::string error_msg;
+ oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_filename, nullptr, &error_msg);
+ if (oat_file_ == nullptr) {
+ PLOG(ERROR) << "Failed to open writable oat file " << oat_filename;
+ oat_file->Erase();
+ return false;
+ }
+ Runtime::Current()->GetOatFileManager().RegisterOatFile(
std::unique_ptr<const OatFile>(oat_file_));
- const OatHeader& oat_header = oat_file_->GetOatHeader();
- oat_address_offsets_[kOatAddressInterpreterToInterpreterBridge] =
- oat_header.GetInterpreterToInterpreterBridgeOffset();
- oat_address_offsets_[kOatAddressInterpreterToCompiledCodeBridge] =
- oat_header.GetInterpreterToCompiledCodeBridgeOffset();
- oat_address_offsets_[kOatAddressJNIDlsymLookup] =
- oat_header.GetJniDlsymLookupOffset();
- oat_address_offsets_[kOatAddressQuickGenericJNITrampoline] =
- oat_header.GetQuickGenericJniTrampolineOffset();
- oat_address_offsets_[kOatAddressQuickIMTConflictTrampoline] =
- oat_header.GetQuickImtConflictTrampolineOffset();
- oat_address_offsets_[kOatAddressQuickResolutionTrampoline] =
- oat_header.GetQuickResolutionTrampolineOffset();
- oat_address_offsets_[kOatAddressQuickToInterpreterBridge] =
- oat_header.GetQuickToInterpreterBridgeOffset();
+ const OatHeader& oat_header = oat_file_->GetOatHeader();
+ ImageInfo& image_info = GetImageInfo(oat_filename);
- size_t oat_loaded_size = 0;
- size_t oat_data_offset = 0;
- ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
+ size_t oat_loaded_size = 0;
+ size_t oat_data_offset = 0;
+ ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
+
+ DCHECK_EQ(image_info.oat_offset_, oat_file_offset);
+ oat_file_offset += oat_loaded_size;
+
+ if (i == 0) {
+ // Primary oat file, read the trampolines.
+ image_info.oat_address_offsets_[kOatAddressInterpreterToInterpreterBridge] =
+ oat_header.GetInterpreterToInterpreterBridgeOffset();
+ image_info.oat_address_offsets_[kOatAddressInterpreterToCompiledCodeBridge] =
+ oat_header.GetInterpreterToCompiledCodeBridgeOffset();
+ image_info.oat_address_offsets_[kOatAddressJNIDlsymLookup] =
+ oat_header.GetJniDlsymLookupOffset();
+ image_info.oat_address_offsets_[kOatAddressQuickGenericJNITrampoline] =
+ oat_header.GetQuickGenericJniTrampolineOffset();
+ image_info.oat_address_offsets_[kOatAddressQuickIMTConflictTrampoline] =
+ oat_header.GetQuickImtConflictTrampolineOffset();
+ image_info.oat_address_offsets_[kOatAddressQuickResolutionTrampoline] =
+ oat_header.GetQuickResolutionTrampolineOffset();
+ image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] =
+ oat_header.GetQuickToInterpreterBridgeOffset();
+ } else {
+ // Other oat files use the primary trampolines.
+ // TODO: Dummy values to protect usage? b/26317072
+ }
+
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ CreateHeader(oat_loaded_size, oat_data_offset);
+ CopyAndFixupNativeData();
+ }
+
+ SetOatChecksumFromElfFile(oat_file.get());
+
+ if (oat_file->FlushCloseOrErase() != 0) {
+ LOG(ERROR) << "Failed to flush and close oat file " << oat_filename;
+ return false;
+ }
+ }
{
- ScopedObjectAccess soa(Thread::Current());
- CreateHeader(oat_loaded_size, oat_data_offset);
- CopyAndFixupNativeData();
// TODO: heap validation can't handle these fix up passes.
+ ScopedObjectAccess soa(Thread::Current());
Runtime::Current()->GetHeap()->DisableObjectValidation();
CopyAndFixupObjects();
}
- SetOatChecksumFromElfFile(oat_file.get());
-
- if (oat_file->FlushCloseOrErase() != 0) {
- LOG(ERROR) << "Failed to flush and close oat file " << oat_filename << " for " << oat_location;
- return false;
- }
- std::unique_ptr<File> image_file;
- if (image_fd != kInvalidImageFd) {
- image_file.reset(new File(image_fd, image_filename, unix_file::kCheckSafeUsage));
- } else {
- image_file.reset(OS::CreateEmptyFile(image_filename.c_str()));
- }
- if (image_file == nullptr) {
- LOG(ERROR) << "Failed to open image file " << image_filename;
- return false;
- }
- if (fchmod(image_file->Fd(), 0644) != 0) {
- PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
- image_file->Erase();
- return EXIT_FAILURE;
- }
-
- std::unique_ptr<char[]> compressed_data;
- // Image data size excludes the bitmap and the header.
- ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- const size_t image_data_size = image_header->GetImageSize() - sizeof(ImageHeader);
- char* image_data = reinterpret_cast<char*>(image_->Begin()) + sizeof(ImageHeader);
- size_t data_size;
- const char* image_data_to_write;
-
- CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
- switch (image_storage_mode_) {
- case ImageHeader::kStorageModeLZ4: {
- size_t compressed_max_size = LZ4_compressBound(image_data_size);
- compressed_data.reset(new char[compressed_max_size]);
- data_size = LZ4_compress(
- reinterpret_cast<char*>(image_->Begin()) + sizeof(ImageHeader),
- &compressed_data[0],
- image_data_size);
- image_data_to_write = &compressed_data[0];
- VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size;
- break;
+ for (size_t i = 0; i < image_filenames.size(); ++i) {
+ const char* image_filename = image_filenames[i];
+ const char* oat_filename = oat_filenames[i];
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ std::unique_ptr<File> image_file;
+ if (image_fd != kInvalidImageFd) {
+ image_file.reset(new File(image_fd, image_filename, unix_file::kCheckSafeUsage));
+ } else {
+ image_file.reset(OS::CreateEmptyFile(image_filename));
}
- case ImageHeader::kStorageModeUncompressed: {
- data_size = image_data_size;
- image_data_to_write = image_data;
- break;
+ if (image_file == nullptr) {
+ LOG(ERROR) << "Failed to open image file " << image_filename;
+ return false;
}
- default: {
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ if (fchmod(image_file->Fd(), 0644) != 0) {
+ PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
+ image_file->Erase();
+ return EXIT_FAILURE;
}
- }
- // Write header first, as uncompressed.
- image_header->data_size_ = data_size;
- if (!image_file->WriteFully(image_->Begin(), sizeof(ImageHeader))) {
- PLOG(ERROR) << "Failed to write image file header " << image_filename;
- image_file->Erase();
- return false;
- }
+ std::unique_ptr<char[]> compressed_data;
+ // Image data size excludes the bitmap and the header.
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
+ const size_t image_data_size = image_header->GetImageSize() - sizeof(ImageHeader);
+ char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
+ size_t data_size;
+ const char* image_data_to_write;
- // Write out the image + fields + methods.
- const bool is_compressed = compressed_data != nullptr;
- if (!image_file->WriteFully(image_data_to_write, data_size)) {
- PLOG(ERROR) << "Failed to write image file data " << image_filename;
- image_file->Erase();
- return false;
- }
+ CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
+ switch (image_storage_mode_) {
+ case ImageHeader::kStorageModeLZ4: {
+ size_t compressed_max_size = LZ4_compressBound(image_data_size);
+ compressed_data.reset(new char[compressed_max_size]);
+ data_size = LZ4_compress(
+ reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
+ &compressed_data[0],
+ image_data_size);
+ image_data_to_write = &compressed_data[0];
+ VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size;
+ break;
+ }
+ case ImageHeader::kStorageModeUncompressed: {
+ data_size = image_data_size;
+ image_data_to_write = image_data;
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
+ }
+ }
- // Write out the image bitmap at the page aligned start of the image end, also uncompressed for
- // convenience.
- const ImageSection& bitmap_section = image_header->GetImageSection(
- ImageHeader::kSectionImageBitmap);
- // Align up since data size may be unaligned if the image is compressed.
- size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + data_size, kPageSize);
- if (!is_compressed) {
- CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
- }
- if (!image_file->Write(reinterpret_cast<char*>(image_bitmap_->Begin()),
- bitmap_section.Size(),
- bitmap_position_in_file)) {
- PLOG(ERROR) << "Failed to write image file " << image_filename;
- image_file->Erase();
- return false;
- }
- CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
- static_cast<size_t>(image_file->GetLength()));
- if (image_file->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
- return false;
+ // Write header first, as uncompressed.
+ image_header->data_size_ = data_size;
+ if (!image_file->WriteFully(image_info.image_->Begin(), sizeof(ImageHeader))) {
+ PLOG(ERROR) << "Failed to write image file header " << image_filename;
+ image_file->Erase();
+ return false;
+ }
+
+ // Write out the image + fields + methods.
+ const bool is_compressed = compressed_data != nullptr;
+ if (!image_file->WriteFully(image_data_to_write, data_size)) {
+ PLOG(ERROR) << "Failed to write image file data " << image_filename;
+ image_file->Erase();
+ return false;
+ }
+
+ // Write out the image bitmap at the page aligned start of the image end, also uncompressed for
+ // convenience.
+ const ImageSection& bitmap_section = image_header->GetImageSection(
+ ImageHeader::kSectionImageBitmap);
+ // Align up since data size may be unaligned if the image is compressed.
+ size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + data_size, kPageSize);
+ if (!is_compressed) {
+ CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
+ }
+ if (!image_file->Write(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()),
+ bitmap_section.Size(),
+ bitmap_position_in_file)) {
+ PLOG(ERROR) << "Failed to write image file " << image_filename;
+ image_file->Erase();
+ return false;
+ }
+ CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
+ static_cast<size_t>(image_file->GetLength()));
+ if (image_file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
+ return false;
+ }
}
return true;
}
@@ -319,12 +346,14 @@
DCHECK(object != nullptr);
DCHECK_NE(image_objects_offset_begin_, 0u);
- size_t bin_slot_offset = bin_slot_offsets_[bin_slot.GetBin()];
+ const char* oat_filename = GetOatFilename(object);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ size_t bin_slot_offset = image_info.bin_slot_offsets_[bin_slot.GetBin()];
size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
DCHECK_ALIGNED(new_offset, kObjectAlignment);
SetImageOffset(object, new_offset);
- DCHECK_LT(new_offset, image_end_);
+ DCHECK_LT(new_offset, image_info.image_end_);
}
bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
@@ -338,7 +367,9 @@
DCHECK(IsImageOffsetAssigned(object));
LockWord lock_word = object->GetLockWord(false);
size_t offset = lock_word.ForwardingAddress();
- DCHECK_LT(offset, image_end_);
+ const char* oat_filename = GetOatFilename(object);
+ const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+ DCHECK_LT(offset, image_info.image_end_);
return offset;
}
@@ -377,15 +408,16 @@
void ImageWriter::PrepareDexCacheArraySlots() {
// Prepare dex cache array starts based on the ordering specified in the CompilerDriver.
- uint32_t size = 0u;
- for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
- dex_cache_array_starts_.Put(dex_file, size);
- DexCacheArraysLayout layout(target_ptr_size_, dex_file);
- size += layout.Size();
- }
// Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned()
// when AssignImageBinSlot() assigns their indexes out or order.
- bin_slot_sizes_[kBinDexCacheArray] = size;
+ for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
+ auto it = dex_file_oat_filename_map_.find(dex_file);
+ DCHECK(it != dex_file_oat_filename_map_.end()) << dex_file->GetLocation();
+ ImageInfo& image_info = GetImageInfo(it->second);
+ image_info.dex_cache_array_starts_.Put(dex_file, image_info.bin_slot_sizes_[kBinDexCacheArray]);
+ DexCacheArraysLayout layout(target_ptr_size_, dex_file);
+ image_info.bin_slot_sizes_[kBinDexCacheArray] += layout.Size();
+ }
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Thread* const self = Thread::Current();
@@ -399,24 +431,32 @@
const DexFile* dex_file = dex_cache->GetDexFile();
DexCacheArraysLayout layout(target_ptr_size_, dex_file);
DCHECK(layout.Valid());
- uint32_t start = dex_cache_array_starts_.Get(dex_file);
+ const char* oat_filename = GetOatFilenameForDexCache(dex_cache);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ uint32_t start = image_info.dex_cache_array_starts_.Get(dex_file);
DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr);
- AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(), start + layout.TypesOffset());
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(),
+ start + layout.TypesOffset(),
+ dex_cache);
DCHECK_EQ(dex_file->NumMethodIds() != 0u, dex_cache->GetResolvedMethods() != nullptr);
- AddDexCacheArrayRelocation(dex_cache->GetResolvedMethods(), start + layout.MethodsOffset());
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedMethods(),
+ start + layout.MethodsOffset(),
+ dex_cache);
DCHECK_EQ(dex_file->NumFieldIds() != 0u, dex_cache->GetResolvedFields() != nullptr);
- AddDexCacheArrayRelocation(dex_cache->GetResolvedFields(), start + layout.FieldsOffset());
+ AddDexCacheArrayRelocation(dex_cache->GetResolvedFields(),
+ start + layout.FieldsOffset(),
+ dex_cache);
DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
- AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset());
+ AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), dex_cache);
}
}
-void ImageWriter::AddDexCacheArrayRelocation(void* array, size_t offset) {
+void ImageWriter::AddDexCacheArrayRelocation(void* array, size_t offset, DexCache* dex_cache) {
if (array != nullptr) {
DCHECK(!IsInBootImage(array));
- native_object_relocations_.emplace(
- array,
- NativeObjectRelocation { offset, kNativeObjectRelocationTypeDexCacheArray });
+ const char* oat_filename = GetOatFilenameForDexCache(dex_cache);
+ native_object_relocations_.emplace(array,
+ NativeObjectRelocation { oat_filename, offset, kNativeObjectRelocationTypeDexCacheArray });
}
}
@@ -531,18 +571,21 @@
} // else bin = kBinRegular
}
+ const char* oat_filename = GetOatFilename(object);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+
size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment
- current_offset = bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned).
- // Move the current bin size up to accomodate the object we just assigned a bin slot.
- bin_slot_sizes_[bin] += offset_delta;
+ current_offset = image_info.bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned).
+ // Move the current bin size up to accommodate the object we just assigned a bin slot.
+ image_info.bin_slot_sizes_[bin] += offset_delta;
BinSlot new_bin_slot(bin, current_offset);
SetImageBinSlot(object, new_bin_slot);
- ++bin_slot_count_[bin];
+ ++image_info.bin_slot_count_[bin];
// Grow the image closer to the end by the object we just assigned.
- image_end_ += offset_delta;
+ image_info.image_end_ += offset_delta;
}
bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
@@ -565,7 +608,9 @@
LockWord lock_word = object->GetLockWord(false);
size_t offset = lock_word.ForwardingAddress();
BinSlot bin_slot(offset);
- DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()])
+ const char* oat_filename = GetOatFilename(object);
+ const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+ DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()])
<< "bin slot offset should not exceed the size of that bin";
}
return true;
@@ -580,39 +625,42 @@
DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());
BinSlot bin_slot(static_cast<uint32_t>(offset));
- DCHECK_LT(bin_slot.GetIndex(), bin_slot_sizes_[bin_slot.GetBin()]);
+ const char* oat_filename = GetOatFilename(object);
+ const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+ DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()]);
return bin_slot;
}
bool ImageWriter::AllocMemory() {
- const size_t length = RoundUp(image_objects_offset_begin_ +
- GetBinSizeSum() +
- intern_table_bytes_ +
- class_table_bytes_,
- kPageSize);
- std::string error_msg;
- image_.reset(MemMap::MapAnonymous("image writer image",
- nullptr,
- length,
- PROT_READ | PROT_WRITE,
- false,
- false,
- &error_msg));
- if (UNLIKELY(image_.get() == nullptr)) {
- LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
- return false;
- }
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ const size_t length = RoundUp(image_objects_offset_begin_ +
+ GetBinSizeSum(image_info) +
+ intern_table_bytes_ +
+ class_table_bytes_,
+ kPageSize);
+ std::string error_msg;
+ image_info.image_.reset(MemMap::MapAnonymous("image writer image",
+ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ if (UNLIKELY(image_info.image_.get() == nullptr)) {
+ LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
+ return false;
+ }
- // Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
- CHECK_LE(image_end_, length);
- image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
- "image bitmap",
- image_->Begin(),
- RoundUp(image_end_, kPageSize)));
- if (image_bitmap_.get() == nullptr) {
- LOG(ERROR) << "Failed to allocate memory for image bitmap";
- return false;
+ // Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
+ CHECK_LE(image_info.image_end_, length);
+ image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
+ "image bitmap", image_info.image_->Begin(), RoundUp(image_info.image_end_, kPageSize)));
+ if (image_info.image_bitmap_.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate memory for image bitmap";
+ return false;
+ }
}
return true;
}
@@ -885,7 +933,7 @@
AssignImageBinSlot(obj);
}
-ObjectArray<Object>* ImageWriter::CreateImageRoots() const {
+ObjectArray<Object>* ImageWriter::CreateImageRoots(const char* oat_filename) const {
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
Thread* self = Thread::Current();
@@ -893,6 +941,15 @@
Handle<Class> object_array_class(hs.NewHandle(
class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
+ std::unordered_set<const DexFile*> image_dex_files;
+ for (auto& pair : dex_file_oat_filename_map_) {
+ const DexFile* image_dex_file = pair.first;
+ const char* image_oat_filename = pair.second;
+ if (strcmp(oat_filename, image_oat_filename) == 0) {
+ image_dex_files.insert(image_dex_file);
+ }
+ }
+
// build an Object[] of all the DexCaches used in the source_space_.
// Since we can't hold the dex lock when allocating the dex_caches
// ObjectArray, we lock the dex lock twice, first to get the number
@@ -905,7 +962,10 @@
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
mirror::DexCache* dex_cache =
down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
- dex_cache_count += IsInBootImage(dex_cache) ? 0u : 1u;
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ if (!IsInBootImage(dex_cache)) {
+ dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
+ }
}
}
Handle<ObjectArray<Object>> dex_caches(
@@ -918,7 +978,10 @@
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
mirror::DexCache* dex_cache =
down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
- non_image_dex_caches += IsInBootImage(dex_cache) ? 0u : 1u;
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ if (!IsInBootImage(dex_cache)) {
+ non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
+ }
}
CHECK_EQ(dex_cache_count, non_image_dex_caches)
<< "The number of non-image dex caches changed.";
@@ -926,7 +989,8 @@
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
mirror::DexCache* dex_cache =
down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
- if (!IsInBootImage(dex_cache)) {
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ if (!IsInBootImage(dex_cache) && image_dex_files.find(dex_file) != image_dex_files.end()) {
dex_caches->Set<false>(i, dex_cache);
++i;
}
@@ -997,9 +1061,12 @@
}
// Visit and assign offsets for fields and field arrays.
auto* as_klass = h_obj->AsClass();
+ mirror::DexCache* dex_cache = as_klass->GetDexCache();
LengthPrefixedArray<ArtField>* fields[] = {
as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
};
+ const char* oat_file = GetOatFilenameForDexCache(dex_cache);
+ ImageInfo& image_info = GetImageInfo(oat_file);
for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
// Total array length including header.
if (cur_fields != nullptr) {
@@ -1008,11 +1075,10 @@
auto it = native_object_relocations_.find(cur_fields);
CHECK(it == native_object_relocations_.end()) << "Field array " << cur_fields
<< " already forwarded";
- size_t& offset = bin_slot_sizes_[kBinArtField];
+ size_t& offset = image_info.bin_slot_sizes_[kBinArtField];
DCHECK(!IsInBootImage(cur_fields));
- native_object_relocations_.emplace(
- cur_fields,
- NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtFieldArray });
+ native_object_relocations_.emplace(cur_fields,
+ NativeObjectRelocation {oat_file, offset, kNativeObjectRelocationTypeArtFieldArray });
offset += header_size;
// Forward individual fields so that we can quickly find where they belong.
for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
@@ -1022,52 +1088,49 @@
CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i
<< " already assigned " << PrettyField(field) << " static=" << field->IsStatic();
DCHECK(!IsInBootImage(field));
- native_object_relocations_.emplace(
- field,
- NativeObjectRelocation {offset, kNativeObjectRelocationTypeArtField });
+ native_object_relocations_.emplace(field,
+ NativeObjectRelocation {oat_file, offset, kNativeObjectRelocationTypeArtField });
offset += sizeof(ArtField);
}
}
}
// Visit and assign offsets for methods.
- LengthPrefixedArray<ArtMethod>* method_arrays[] = {
- as_klass->GetDirectMethodsPtr(), as_klass->GetVirtualMethodsPtr(),
- };
- for (LengthPrefixedArray<ArtMethod>* array : method_arrays) {
- if (array == nullptr) {
- continue;
- }
+ size_t num_methods = as_klass->NumMethods();
+ if (num_methods != 0) {
bool any_dirty = false;
- size_t count = 0;
- const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
- const size_t method_size = ArtMethod::Size(target_ptr_size_);
- auto iteration_range =
- MakeIterationRangeFromLengthPrefixedArray(array, method_size, method_alignment);
- for (auto& m : iteration_range) {
- any_dirty = any_dirty || WillMethodBeDirty(&m);
- ++count;
+ for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
+ if (WillMethodBeDirty(&m)) {
+ any_dirty = true;
+ break;
+ }
}
NativeObjectRelocationType type = any_dirty
? kNativeObjectRelocationTypeArtMethodDirty
: kNativeObjectRelocationTypeArtMethodClean;
Bin bin_type = BinTypeForNativeRelocationType(type);
// Forward the entire array at once, but header first.
+ const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+ const size_t method_size = ArtMethod::Size(target_ptr_size_);
const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
method_size,
method_alignment);
+ LengthPrefixedArray<ArtMethod>* array = as_klass->GetMethodsPtr();
auto it = native_object_relocations_.find(array);
- CHECK(it == native_object_relocations_.end()) << "Method array " << array
- << " already forwarded";
- size_t& offset = bin_slot_sizes_[bin_type];
+ CHECK(it == native_object_relocations_.end())
+ << "Method array " << array << " already forwarded";
+ size_t& offset = image_info.bin_slot_sizes_[bin_type];
DCHECK(!IsInBootImage(array));
- native_object_relocations_.emplace(array, NativeObjectRelocation { offset,
- any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty :
- kNativeObjectRelocationTypeArtMethodArrayClean });
+ native_object_relocations_.emplace(array,
+ NativeObjectRelocation {
+ oat_file,
+ offset,
+ any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty
+ : kNativeObjectRelocationTypeArtMethodArrayClean });
offset += header_size;
- for (auto& m : iteration_range) {
- AssignMethodOffset(&m, type);
+ for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
+ AssignMethodOffset(&m, type, oat_file);
}
- (any_dirty ? dirty_methods_ : clean_methods_) += count;
+ (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
}
} else if (h_obj->IsObjectArray()) {
// Walk elements of an object array.
@@ -1091,13 +1154,16 @@
}
}
-void ImageWriter::AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type) {
+void ImageWriter::AssignMethodOffset(ArtMethod* method,
+ NativeObjectRelocationType type,
+ const char* oat_filename) {
DCHECK(!IsInBootImage(method));
auto it = native_object_relocations_.find(method);
CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned "
<< PrettyMethod(method);
- size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
- native_object_relocations_.emplace(method, NativeObjectRelocation { offset, type });
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
+ native_object_relocations_.emplace(method, NativeObjectRelocation { oat_filename, offset, type });
offset += ArtMethod::Size(target_ptr_size_);
}
@@ -1130,18 +1196,20 @@
void ImageWriter::CalculateNewObjectOffsets() {
Thread* const self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<ObjectArray<Object>> image_roots(hs.NewHandle(CreateImageRoots()));
+ StackHandleScopeCollection handles(self);
+ std::vector<Handle<ObjectArray<Object>>> image_roots;
+ for (const char* oat_filename : oat_filenames_) {
+ std::string image_filename = oat_filename;
+ image_roots.push_back(handles.NewHandle(CreateImageRoots(image_filename.c_str())));
+ }
auto* runtime = Runtime::Current();
auto* heap = runtime->GetHeap();
- DCHECK_EQ(0U, image_end_);
// Leave space for the header, but do not write it yet, we need to
// know where image_roots is going to end up
- image_end_ += RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
+ image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment
- image_objects_offset_begin_ = image_end_;
// Clear any pre-existing monitors which may have been in the monitor words, assign bin slots.
heap->VisitObjects(WalkFieldsCallback, this);
// Write the image runtime methods.
@@ -1158,10 +1226,12 @@
const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean;
auto it = native_object_relocations_.find(&image_method_array_);
CHECK(it == native_object_relocations_.end());
- size_t& offset = bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
+ ImageInfo& default_image_info = GetImageInfo(default_oat_filename_);
+ size_t& offset =
+ default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)];
if (!compile_app_image_) {
native_object_relocations_.emplace(&image_method_array_,
- NativeObjectRelocation { offset, image_method_type });
+ NativeObjectRelocation { default_oat_filename_, offset, image_method_type });
}
size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize(
@@ -1173,43 +1243,74 @@
CHECK(m->IsRuntimeMethod());
DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
if (!IsInBootImage(m)) {
- AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean);
+ AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, default_oat_filename_);
}
}
// Calculate size of the dex cache arrays slot and prepare offsets.
PrepareDexCacheArraySlots();
// Calculate bin slot offsets.
- size_t bin_offset = image_objects_offset_begin_;
- for (size_t i = 0; i != kBinSize; ++i) {
- bin_slot_offsets_[i] = bin_offset;
- bin_offset += bin_slot_sizes_[i];
- if (i == kBinArtField) {
- static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
- static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
- DCHECK_ALIGNED(bin_offset, 4u);
- DCHECK(method_alignment == 4u || method_alignment == 8u);
- bin_offset = RoundUp(bin_offset, method_alignment);
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ size_t bin_offset = image_objects_offset_begin_;
+ for (size_t i = 0; i != kBinSize; ++i) {
+ image_info.bin_slot_offsets_[i] = bin_offset;
+ bin_offset += image_info.bin_slot_sizes_[i];
+ if (i == kBinArtField) {
+ static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields.");
+ static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4.");
+ DCHECK_ALIGNED(bin_offset, 4u);
+ DCHECK(method_alignment == 4u || method_alignment == 8u);
+ bin_offset = RoundUp(bin_offset, method_alignment);
+ }
}
+ // NOTE: There may be additional padding between the bin slots and the intern table.
+ DCHECK_EQ(image_info.image_end_,
+ GetBinSizeSum(image_info, kBinMirrorCount) + image_objects_offset_begin_);
}
- // NOTE: There may be additional padding between the bin slots and the intern table.
- DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
+ // Calculate image offsets.
+ size_t image_offset = 0;
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ image_info.image_begin_ = global_image_begin_ + image_offset;
+ image_info.image_offset_ = image_offset;
+ size_t native_sections_size = image_info.bin_slot_sizes_[kBinArtField] +
+ image_info.bin_slot_sizes_[kBinArtMethodDirty] +
+ image_info.bin_slot_sizes_[kBinArtMethodClean] +
+ image_info.bin_slot_sizes_[kBinDexCacheArray] +
+ intern_table_bytes_ +
+ class_table_bytes_;
+ size_t image_objects = RoundUp(image_info.image_end_, kPageSize);
+ size_t bitmap_size =
+ RoundUp(gc::accounting::ContinuousSpaceBitmap::ComputeBitmapSize(image_objects), kPageSize);
+ size_t heap_size = gc::accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_size);
+ size_t max = std::max(heap_size, image_info.image_end_ + native_sections_size + bitmap_size);
+ image_info.image_size_ = RoundUp(max, kPageSize);
+ image_offset += image_info.image_size_;
+ }
// Transform each object's bin slot into an offset which will be used to do the final copy.
heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this);
- DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
+ // DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_);
- image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots.Get()));
+ size_t i = 0;
+ for (const char* oat_filename : oat_filenames_) {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ image_info.image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots[i].Get()));
+ i++;
+ }
// Update the native relocations by adding their bin sums.
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
Bin bin_type = BinTypeForNativeRelocationType(relocation.type);
- relocation.offset += bin_slot_offsets_[bin_type];
+ ImageInfo& image_info = GetImageInfo(relocation.oat_filename);
+ relocation.offset += image_info.bin_slot_offsets_[bin_type];
}
+ /* TODO: Reenable the intern table and class table. b/26317072
// Calculate how big the intern table will be after being serialized.
InternTable* const intern_table = runtime->GetInternTable();
CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
@@ -1233,41 +1334,45 @@
class_table_bytes_ += table->WriteToMemory(nullptr);
}
}
+ */
- // Note that image_end_ is left at end of used mirror object section.
+ // Note that image_info.image_end_ is left at end of used mirror object section.
}
void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) {
CHECK_NE(0U, oat_loaded_size);
- const uint8_t* oat_file_begin = GetOatFileBegin();
+ const char* oat_filename = oat_file_->GetLocation().c_str();
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ const uint8_t* oat_file_begin = GetOatFileBegin(oat_filename);
const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size;
- oat_data_begin_ = oat_file_begin + oat_data_offset;
- const uint8_t* oat_data_end = oat_data_begin_ + oat_file_->Size();
+ image_info.oat_data_begin_ = const_cast<uint8_t*>(oat_file_begin) + oat_data_offset;
+ const uint8_t* oat_data_end = image_info.oat_data_begin_ + oat_file_->Size();
+ image_info.oat_size_ = oat_file_->Size();
// Create the image sections.
ImageSection sections[ImageHeader::kSectionCount];
// Objects section
auto* objects_section = §ions[ImageHeader::kSectionObjects];
- *objects_section = ImageSection(0u, image_end_);
+ *objects_section = ImageSection(0u, image_info.image_end_);
size_t cur_pos = objects_section->End();
// Add field section.
auto* field_section = §ions[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]);
- CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset());
+ *field_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinArtField]);
+ CHECK_EQ(image_info.bin_slot_offsets_[kBinArtField], field_section->Offset());
cur_pos = field_section->End();
// Round up to the alignment the required by the method section.
cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_));
// Add method section.
auto* methods_section = §ions[ImageHeader::kSectionArtMethods];
*methods_section = ImageSection(cur_pos,
- bin_slot_sizes_[kBinArtMethodClean] +
- bin_slot_sizes_[kBinArtMethodDirty]);
- CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
+ image_info.bin_slot_sizes_[kBinArtMethodClean] +
+ image_info.bin_slot_sizes_[kBinArtMethodDirty]);
+ CHECK_EQ(image_info.bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset());
cur_pos = methods_section->End();
// Add dex cache arrays section.
auto* dex_cache_arrays_section = §ions[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]);
- CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
+ *dex_cache_arrays_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinDexCacheArray]);
+ CHECK_EQ(image_info.bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset());
cur_pos = dex_cache_arrays_section->End();
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
@@ -1275,6 +1380,8 @@
auto* interned_strings_section = §ions[ImageHeader::kSectionInternedStrings];
*interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
cur_pos = interned_strings_section->End();
+ // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
+ cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
// Calculate the size of the class table section.
auto* class_table_section = §ions[ImageHeader::kSectionClassTable];
*class_table_section = ImageSection(cur_pos, class_table_bytes_);
@@ -1282,42 +1389,51 @@
// Image end goes right before the start of the image bitmap.
const size_t image_end = static_cast<uint32_t>(cur_pos);
// Finally bitmap section.
- const size_t bitmap_bytes = image_bitmap_->Size();
+ const size_t bitmap_bytes = image_info.image_bitmap_->Size();
auto* bitmap_section = §ions[ImageHeader::kSectionImageBitmap];
*bitmap_section = ImageSection(RoundUp(cur_pos, kPageSize), RoundUp(bitmap_bytes, kPageSize));
cur_pos = bitmap_section->End();
- if (kIsDebugBuild) {
+ if (VLOG_IS_ON(compiler)) {
+ LOG(INFO) << "Creating header for " << oat_filename;
size_t idx = 0;
for (const ImageSection& section : sections) {
LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
++idx;
}
LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_;
+ LOG(INFO) << "Image roots address=" << std::hex << image_info.image_roots_address_ << std::dec;
+ LOG(INFO) << "Image begin=" << std::hex << reinterpret_cast<uintptr_t>(global_image_begin_)
+ << " Image offset=" << image_info.image_offset_ << std::dec;
+ LOG(INFO) << "Oat file begin=" << std::hex << reinterpret_cast<uintptr_t>(oat_file_begin)
+ << " Oat data begin=" << reinterpret_cast<uintptr_t>(image_info.oat_data_begin_)
+ << " Oat data end=" << reinterpret_cast<uintptr_t>(oat_data_end)
+ << " Oat file end=" << reinterpret_cast<uintptr_t>(oat_file_end);
}
- CHECK_EQ(AlignUp(image_begin_ + image_end, kPageSize), oat_file_begin) <<
- "Oat file should be right after the image.";
+
// Create the header, leave 0 for data size since we will fill this in as we are writing the
// image.
- new (image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_begin_),
- image_end,
- sections,
- image_roots_address_,
- oat_file_->GetOatHeader().GetChecksum(),
- PointerToLowMemUInt32(oat_file_begin),
- PointerToLowMemUInt32(oat_data_begin_),
- PointerToLowMemUInt32(oat_data_end),
- PointerToLowMemUInt32(oat_file_end),
- target_ptr_size_,
- compile_pic_,
- image_storage_mode_,
- /*data_size*/0u);
+ new (image_info.image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_info.image_begin_),
+ image_end,
+ sections,
+ image_info.image_roots_address_,
+ oat_file_->GetOatHeader().GetChecksum(),
+ PointerToLowMemUInt32(oat_file_begin),
+ PointerToLowMemUInt32(image_info.oat_data_begin_),
+ PointerToLowMemUInt32(oat_data_end),
+ PointerToLowMemUInt32(oat_file_end),
+ target_ptr_size_,
+ compile_pic_,
+ image_storage_mode_,
+ /*data_size*/0u);
}
ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
auto it = native_object_relocations_.find(method);
CHECK(it != native_object_relocations_.end()) << PrettyMethod(method) << " @ " << method;
- CHECK_GE(it->second.offset, image_end_) << "ArtMethods should be after Objects";
- return reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
+ const char* oat_filename = GetOatFilename(method->GetDexCache());
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ CHECK_GE(it->second.offset, image_info.image_end_) << "ArtMethods should be after Objects";
+ return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + it->second.offset);
}
class FixupRootVisitor : public RootVisitor {
@@ -1345,18 +1461,24 @@
mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
const size_t offset = image_writer_->GetImageOffset(obj);
- auto* const dest = reinterpret_cast<Object*>(image_writer_->image_begin_ + offset);
+ auto* const dest = reinterpret_cast<Object*>(image_writer_->global_image_begin_ + offset);
VLOG(compiler) << "Update root from " << obj << " to " << dest;
return dest;
}
};
void ImageWriter::CopyAndFixupNativeData() {
+ const char* oat_filename = oat_file_->GetLocation().c_str();
+ ImageInfo& image_info = GetImageInfo(oat_filename);
// Copy ArtFields and methods to their locations and update the array for convenience.
for (auto& pair : native_object_relocations_) {
NativeObjectRelocation& relocation = pair.second;
- auto* dest = image_->Begin() + relocation.offset;
- DCHECK_GE(dest, image_->Begin() + image_end_);
+ // Only work with fields and methods that are in the current oat file.
+ if (strcmp(relocation.oat_filename, oat_filename) != 0) {
+ continue;
+ }
+ auto* dest = image_info.image_->Begin() + relocation.offset;
+ DCHECK_GE(dest, image_info.image_->Begin() + image_info.image_end_);
DCHECK(!IsInBootImage(pair.first));
switch (relocation.type) {
case kNativeObjectRelocationTypeArtField: {
@@ -1368,7 +1490,8 @@
case kNativeObjectRelocationTypeArtMethodClean:
case kNativeObjectRelocationTypeArtMethodDirty: {
CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
- reinterpret_cast<ArtMethod*>(dest));
+ reinterpret_cast<ArtMethod*>(dest),
+ image_info);
break;
}
// For arrays, copy just the header since the elements will get copied by their corresponding
@@ -1391,30 +1514,36 @@
}
}
// Fixup the image method roots.
- auto* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
+ auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
const ImageSection& methods_section = image_header->GetMethodsSection();
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
+ // Only place runtime methods in the image of the default oat file.
+ if (method->IsRuntimeMethod() && strcmp(default_oat_filename_, oat_filename) != 0) {
+ continue;
+ }
if (!IsInBootImage(method)) {
auto it = native_object_relocations_.find(method);
- CHECK(it != native_object_relocations_.end()) << "No fowarding for " << PrettyMethod(method);
+ CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method);
NativeObjectRelocation& relocation = it->second;
CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in "
<< methods_section;
CHECK(relocation.IsArtMethodRelocation()) << relocation.type;
- method = reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset);
+ method = reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset);
}
image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
}
FixupRootVisitor root_visitor(this);
+ /* TODO: Reenable the intern table and class table
// Write the intern table into the image.
const ImageSection& intern_table_section = image_header->GetImageSection(
ImageHeader::kSectionInternedStrings);
Runtime* const runtime = Runtime::Current();
InternTable* const intern_table = runtime->GetInternTable();
- uint8_t* const intern_table_memory_ptr = image_->Begin() + intern_table_section.Offset();
+ uint8_t* const intern_table_memory_ptr =
+ image_info.image_->Begin() + intern_table_section.Offset();
const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
CHECK_EQ(intern_table_bytes, intern_table_bytes_);
// Fixup the pointers in the newly written intern table to contain image addresses.
@@ -1430,10 +1559,11 @@
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
if (class_table_bytes_ > 0u) {
- ClassLinker* const class_linker = runtime->GetClassLinker();
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
const ImageSection& class_table_section = image_header->GetImageSection(
ImageHeader::kSectionClassTable);
- uint8_t* const class_table_memory_ptr = image_->Begin() + class_table_section.Offset();
+ uint8_t* const class_table_memory_ptr =
+ image_info.image_->Begin() + class_table_section.Offset();
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
size_t class_table_bytes = 0;
for (mirror::ClassLoader* loader : class_loaders_) {
@@ -1453,6 +1583,7 @@
}
CHECK_EQ(class_table_bytes, class_table_bytes_);
}
+ */
}
void ImageWriter::CopyAndFixupObjects() {
@@ -1500,7 +1631,8 @@
}
UNREACHABLE();
} else {
- elem = image_begin_ + it->second.offset;
+ ImageInfo& image_info = GetImageInfo(it->second.oat_filename);
+ elem = image_info.image_begin_ + it->second.offset;
}
}
dest_array->SetElementPtrSize<false, true>(i, elem, target_ptr_size_);
@@ -1512,14 +1644,16 @@
return;
}
size_t offset = GetImageOffset(obj);
- auto* dst = reinterpret_cast<Object*>(image_->Begin() + offset);
- DCHECK_LT(offset, image_end_);
+ const char* oat_filename = GetOatFilename(obj);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
+ DCHECK_LT(offset, image_info.image_end_);
const auto* src = reinterpret_cast<const uint8_t*>(obj);
- image_bitmap_->Set(dst); // Mark the obj as live.
+ image_info.image_bitmap_->Set(dst); // Mark the obj as live.
const size_t n = obj->SizeOf();
- DCHECK_LE(offset + n, image_->Size());
+ DCHECK_LE(offset + n, image_info.image_->Size());
memcpy(dst, src, n);
// Write in a hash code of objects which have inflated monitors or a hash code in their monitor
@@ -1595,34 +1729,55 @@
}
template <typename T>
-T* ImageWriter::NativeLocationInImage(T* obj) {
- return (obj == nullptr || IsInBootImage(obj))
- ? obj
- : reinterpret_cast<T*>(image_begin_ + NativeOffsetInImage(obj));
+T* ImageWriter::NativeLocationInImage(T* obj, const char* oat_filename) {
+ if (obj == nullptr || IsInBootImage(obj)) {
+ return obj;
+ } else {
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ return reinterpret_cast<T*>(image_info.image_begin_ + NativeOffsetInImage(obj));
+ }
}
template <typename T>
-T* ImageWriter::NativeCopyLocation(T* obj) {
- return (obj == nullptr || IsInBootImage(obj))
- ? obj
- : reinterpret_cast<T*>(image_->Begin() + NativeOffsetInImage(obj));
+T* ImageWriter::NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) {
+ if (obj == nullptr || IsInBootImage(obj)) {
+ return obj;
+ } else {
+ const char* oat_filename = GetOatFilenameForDexCache(dex_cache);
+ ImageInfo& image_info = GetImageInfo(oat_filename);
+ return reinterpret_cast<T*>(image_info.image_->Begin() + NativeOffsetInImage(obj));
+ }
}
class NativeLocationVisitor {
public:
- explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+ explicit NativeLocationVisitor(ImageWriter* image_writer, const char* oat_filename)
+ : image_writer_(image_writer), oat_filename_(oat_filename) {}
template <typename T>
- T* operator()(T* ptr) const {
- return image_writer_->NativeLocationInImage(ptr);
+ T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return image_writer_->NativeLocationInImage(ptr, oat_filename_);
+ }
+
+ ArtMethod* operator()(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ const char* oat_filename = method->IsRuntimeMethod() ? image_writer_->GetDefaultOatFilename() :
+ image_writer_->GetOatFilenameForDexCache(method->GetDexCache());
+ return image_writer_->NativeLocationInImage(method, oat_filename);
+ }
+
+ ArtField* operator()(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ const char* oat_filename = image_writer_->GetOatFilenameForDexCache(field->GetDexCache());
+ return image_writer_->NativeLocationInImage(field, oat_filename);
}
private:
ImageWriter* const image_writer_;
+ const char* oat_filename_;
};
void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
- orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
+ const char* oat_filename = GetOatFilename(orig);
+ orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this, oat_filename));
FixupClassVisitor visitor(this, copy);
static_cast<mirror::Object*>(orig)->VisitReferences(visitor, visitor);
}
@@ -1661,7 +1816,7 @@
CHECK(it != native_object_relocations_.end())
<< "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
dest->SetArtMethod(
- reinterpret_cast<ArtMethod*>(image_begin_ + it->second.offset));
+ reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset));
} else if (!klass->IsArrayClass()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) {
@@ -1702,41 +1857,52 @@
// 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
// done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
// static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))).
+ const char* oat_filename = GetOatFilenameForDexCache(orig_dex_cache);
GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings();
if (orig_strings != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
- NativeLocationInImage(orig_strings),
+ NativeLocationInImage(orig_strings, oat_filename),
/*pointer size*/8u);
- orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings), ImageAddressVisitor(this));
+ orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
+ ImageAddressVisitor(this));
}
GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
if (orig_types != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
- NativeLocationInImage(orig_types),
+ NativeLocationInImage(orig_types, oat_filename),
/*pointer size*/8u);
- orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types), ImageAddressVisitor(this));
+ orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
+ ImageAddressVisitor(this));
}
ArtMethod** orig_methods = orig_dex_cache->GetResolvedMethods();
if (orig_methods != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
- NativeLocationInImage(orig_methods),
+ NativeLocationInImage(orig_methods, oat_filename),
/*pointer size*/8u);
- ArtMethod** copy_methods = NativeCopyLocation(orig_methods);
+ ArtMethod** copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
- ArtMethod* copy = NativeLocationInImage(orig);
+ const char* method_oat_filename;
+ if (orig == nullptr || orig->IsRuntimeMethod()) {
+ method_oat_filename = default_oat_filename_;
+ } else {
+ method_oat_filename = GetOatFilenameForDexCache(orig->GetDexCache());
+ }
+ ArtMethod* copy = NativeLocationInImage(orig, method_oat_filename);
mirror::DexCache::SetElementPtrSize(copy_methods, i, copy, target_ptr_size_);
}
}
ArtField** orig_fields = orig_dex_cache->GetResolvedFields();
if (orig_fields != nullptr) {
copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
- NativeLocationInImage(orig_fields),
+ NativeLocationInImage(orig_fields, oat_filename),
/*pointer size*/8u);
- ArtField** copy_fields = NativeCopyLocation(orig_fields);
+ ArtField** copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
- ArtField* copy = NativeLocationInImage(orig);
+ const char* field_oat_filename =
+ orig == nullptr ? default_oat_filename_ : GetOatFilenameForDexCache(orig->GetDexCache());
+ ArtField* copy = NativeLocationInImage(orig, field_oat_filename);
mirror::DexCache::SetElementPtrSize(copy_fields, i, copy, target_ptr_size_);
}
}
@@ -1747,9 +1913,10 @@
// If we are compiling an app image, we need to use the stubs of the boot image.
if (compile_app_image_) {
// Use the current image pointers.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- DCHECK(image_space != nullptr);
- const OatFile* oat_file = image_space->GetOatFile();
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ DCHECK(!image_spaces.empty());
+ const OatFile* oat_file = image_spaces[0]->GetOatFile();
CHECK(oat_file != nullptr);
const OatHeader& header = oat_file->GetOatHeader();
switch (type) {
@@ -1772,10 +1939,13 @@
UNREACHABLE();
}
}
- return GetOatAddressForOffset(oat_address_offsets_[type]);
+ const ImageInfo& primary_image_info = GetImageInfo(0);
+ return GetOatAddressForOffset(primary_image_info.oat_address_offsets_[type], primary_image_info);
}
-const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, bool* quick_is_interpreted) {
+const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method,
+ const ImageInfo& image_info,
+ bool* quick_is_interpreted) {
DCHECK(!method->IsResolutionMethod()) << PrettyMethod(method);
DCHECK(!method->IsImtConflictMethod()) << PrettyMethod(method);
DCHECK(!method->IsImtUnimplementedMethod()) << PrettyMethod(method);
@@ -1788,7 +1958,7 @@
// Quick entrypoint:
uint32_t quick_oat_code_offset = PointerToLowMemUInt32(
method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_));
- const uint8_t* quick_code = GetOatAddressForOffset(quick_oat_code_offset);
+ const uint8_t* quick_code = GetOatAddressForOffset(quick_oat_code_offset, image_info);
*quick_is_interpreted = false;
if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
method->GetDeclaringClass()->IsInitialized())) {
@@ -1808,42 +1978,32 @@
quick_code = GetOatAddress(kOatAddressQuickResolutionTrampoline);
}
if (!IsInBootOatFile(quick_code)) {
- DCHECK_GE(quick_code, oat_data_begin_);
+ // DCHECK_GE(quick_code, oat_data_begin_);
}
return quick_code;
}
-const uint8_t* ImageWriter::GetQuickEntryPoint(ArtMethod* method) {
- // Calculate the quick entry point following the same logic as FixupMethod() below.
- // The resolution method has a special trampoline to call.
- Runtime* runtime = Runtime::Current();
- if (UNLIKELY(method == runtime->GetResolutionMethod())) {
- return GetOatAddress(kOatAddressQuickResolutionTrampoline);
- } else if (UNLIKELY(method == runtime->GetImtConflictMethod() ||
- method == runtime->GetImtUnimplementedMethod())) {
- return GetOatAddress(kOatAddressQuickIMTConflictTrampoline);
- } else {
- // We assume all methods have code. If they don't currently then we set them to the use the
- // resolution trampoline. Abstract methods never have code and so we need to make sure their
- // use results in an AbstractMethodError. We use the interpreter to achieve this.
- if (UNLIKELY(!method->IsInvokable())) {
- return GetOatAddress(kOatAddressQuickToInterpreterBridge);
- } else {
- bool quick_is_interpreted;
- return GetQuickCode(method, &quick_is_interpreted);
- }
- }
-}
-
-void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy) {
+void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
+ ArtMethod* copy,
+ const ImageInfo& image_info) {
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
+ const char* oat_filename;
+ if (orig->IsRuntimeMethod()) {
+ oat_filename = default_oat_filename_;
+ } else {
+ auto it = dex_file_oat_filename_map_.find(orig->GetDexFile());
+ DCHECK(it != dex_file_oat_filename_map_.end()) << orig->GetDexFile()->GetLocation();
+ oat_filename = it->second;
+ }
ArtMethod** orig_resolved_methods = orig->GetDexCacheResolvedMethods(target_ptr_size_);
- copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods), target_ptr_size_);
+ copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods, oat_filename),
+ target_ptr_size_);
GcRoot<mirror::Class>* orig_resolved_types = orig->GetDexCacheResolvedTypes(target_ptr_size_);
- copy->SetDexCacheResolvedTypes(NativeLocationInImage(orig_resolved_types), target_ptr_size_);
+ copy->SetDexCacheResolvedTypes(NativeLocationInImage(orig_resolved_types, oat_filename),
+ target_ptr_size_);
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
@@ -1877,7 +2037,7 @@
GetOatAddress(kOatAddressQuickToInterpreterBridge), target_ptr_size_);
} else {
bool quick_is_interpreted;
- const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
+ const uint8_t* quick_code = GetQuickCode(orig, image_info, &quick_is_interpreted);
copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
// JNI entrypoint:
@@ -1914,13 +2074,16 @@
CHECK(oat_header != nullptr);
CHECK(oat_header->IsValid());
- ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
+ ImageInfo& image_info = GetImageInfo(oat_file_->GetLocation().c_str());
+ ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
image_header->SetOatChecksum(oat_header->GetChecksum());
}
-size_t ImageWriter::GetBinSizeSum(ImageWriter::Bin up_to) const {
+size_t ImageWriter::GetBinSizeSum(ImageWriter::ImageInfo& image_info, ImageWriter::Bin up_to) const {
DCHECK_LE(up_to, kBinSize);
- return std::accumulate(&bin_slot_sizes_[0], &bin_slot_sizes_[up_to], /*init*/0);
+ return std::accumulate(&image_info.bin_slot_sizes_[0],
+ &image_info.bin_slot_sizes_[up_to],
+ /*init*/0);
}
ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) {
@@ -1946,15 +2109,18 @@
return lockword_ & ~kBinMask;
}
-uint8_t* ImageWriter::GetOatFileBegin() const {
- DCHECK_GT(intern_table_bytes_, 0u);
- size_t native_sections_size = bin_slot_sizes_[kBinArtField] +
- bin_slot_sizes_[kBinArtMethodDirty] +
- bin_slot_sizes_[kBinArtMethodClean] +
- bin_slot_sizes_[kBinDexCacheArray] +
- intern_table_bytes_ +
- class_table_bytes_;
- return image_begin_ + RoundUp(image_end_ + native_sections_size, kPageSize);
+uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const {
+ // DCHECK_GT(intern_table_bytes_, 0u); TODO: Reenable intern table and class table.
+ uintptr_t last_image_end = 0;
+ for (const char* oat_fn : oat_filenames_) {
+ const ImageInfo& image_info = GetConstImageInfo(oat_fn);
+ DCHECK(image_info.image_begin_ != nullptr);
+ uintptr_t this_end = reinterpret_cast<uintptr_t>(image_info.image_begin_) +
+ image_info.image_size_;
+ last_image_end = std::max(this_end, last_image_end);
+ }
+ const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+ return reinterpret_cast<uint8_t*>(last_image_end) + image_info.oat_offset_;
}
ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) {
@@ -1974,4 +2140,61 @@
UNREACHABLE();
}
+const char* ImageWriter::GetOatFilename(mirror::Object* obj) const {
+ if (compile_app_image_) {
+ return default_oat_filename_;
+ } else {
+ return GetOatFilenameForDexCache(obj->IsDexCache() ? obj->AsDexCache() :
+ obj->IsClass() ? obj->AsClass()->GetDexCache() : obj->GetClass()->GetDexCache());
+ }
+}
+
+const char* ImageWriter::GetOatFilenameForDexCache(mirror::DexCache* dex_cache) const {
+ if (compile_app_image_ || dex_cache == nullptr) {
+ return default_oat_filename_;
+ } else {
+ auto it = dex_file_oat_filename_map_.find(dex_cache->GetDexFile());
+ DCHECK(it != dex_file_oat_filename_map_.end()) << dex_cache->GetDexFile()->GetLocation();
+ return it->second;
+ }
+}
+
+ImageWriter::ImageInfo& ImageWriter::GetImageInfo(const char* oat_filename) {
+ auto it = image_info_map_.find(oat_filename);
+ DCHECK(it != image_info_map_.end());
+ return it->second;
+}
+
+const ImageWriter::ImageInfo& ImageWriter::GetConstImageInfo(const char* oat_filename) const {
+ auto it = image_info_map_.find(oat_filename);
+ DCHECK(it != image_info_map_.end());
+ return it->second;
+}
+
+const ImageWriter::ImageInfo& ImageWriter::GetImageInfo(size_t index) const {
+ DCHECK_LT(index, oat_filenames_.size());
+ return GetConstImageInfo(oat_filenames_[index]);
+}
+
+void ImageWriter::UpdateOatFile(const char* oat_filename) {
+ std::unique_ptr<File> oat_file(OS::OpenFileForReading(oat_filename));
+ DCHECK(oat_file != nullptr);
+ size_t oat_loaded_size = 0;
+ size_t oat_data_offset = 0;
+ ElfWriter::GetOatElfInformation(oat_file.get(), &oat_loaded_size, &oat_data_offset);
+
+ ImageInfo& cur_image_info = GetImageInfo(oat_filename);
+
+ // Update the oat_offset of the next image info.
+ auto it = std::find(oat_filenames_.begin(), oat_filenames_.end(), oat_filename);
+ DCHECK(it != oat_filenames_.end());
+
+ it++;
+ if (it != oat_filenames_.end()) {
+ // There is a following one.
+ ImageInfo& next_image_info = GetImageInfo(*it);
+ next_image_info.oat_offset_ = cur_image_info.oat_offset_ + oat_loaded_size;
+ }
+}
+
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 8e930f0..78297ae 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -27,11 +27,11 @@
#include <ostream>
#include "base/bit_utils.h"
+#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "driver/compiler_driver.h"
#include "gc/space/space.h"
#include "image.h"
-#include "length_prefixed_array.h"
#include "lock_word.h"
#include "mem_map.h"
#include "oat_file.h"
@@ -56,30 +56,31 @@
uintptr_t image_begin,
bool compile_pic,
bool compile_app_image,
- ImageHeader::StorageMode image_storage_mode)
+ ImageHeader::StorageMode image_storage_mode,
+ const std::vector<const char*> oat_filenames,
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map)
: compiler_driver_(compiler_driver),
- image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
- image_end_(0),
+ global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
image_objects_offset_begin_(0),
- image_roots_address_(0),
oat_file_(nullptr),
- oat_data_begin_(nullptr),
compile_pic_(compile_pic),
compile_app_image_(compile_app_image),
boot_image_space_(nullptr),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
- bin_slot_sizes_(),
- bin_slot_offsets_(),
- bin_slot_count_(),
intern_table_bytes_(0u),
image_method_array_(ImageHeader::kImageMethodsCount),
dirty_methods_(0u),
clean_methods_(0u),
class_table_bytes_(0u),
- image_storage_mode_(image_storage_mode) {
+ image_storage_mode_(image_storage_mode),
+ dex_file_oat_filename_map_(dex_file_oat_filename_map),
+ oat_filenames_(oat_filenames),
+ default_oat_filename_(oat_filenames[0]) {
CHECK_NE(image_begin, 0U);
+ for (const char* oat_filename : oat_filenames) {
+ image_info_map_.emplace(oat_filename, ImageInfo());
+ }
std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
- std::fill_n(oat_address_offsets_, arraysize(oat_address_offsets_), 0);
}
~ImageWriter() {
@@ -88,14 +89,25 @@
bool PrepareImageAddressSpace();
bool IsImageAddressSpaceReady() const {
- return image_roots_address_ != 0u;
+ bool ready = !image_info_map_.empty();
+ for (auto& pair : image_info_map_) {
+ const ImageInfo& image_info = pair.second;
+ if (image_info.image_roots_address_ == 0u) {
+ return false;
+ }
+ }
+ return ready;
}
template <typename T>
T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
- return (object == nullptr || IsInBootImage(object))
- ? object
- : reinterpret_cast<T*>(image_begin_ + GetImageOffset(object));
+ if (object == nullptr || IsInBootImage(object)) {
+ return object;
+ } else {
+ const char* oat_filename = GetOatFilename(object);
+ const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+ return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
+ }
}
ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -103,26 +115,36 @@
template <typename PtrType>
PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
const SHARED_REQUIRES(Locks::mutator_lock_) {
- auto it = dex_cache_array_starts_.find(dex_file);
- DCHECK(it != dex_cache_array_starts_.end());
+ auto oat_it = dex_file_oat_filename_map_.find(dex_file);
+ DCHECK(oat_it != dex_file_oat_filename_map_.end());
+ const ImageInfo& image_info = GetConstImageInfo(oat_it->second);
+ auto it = image_info.dex_cache_array_starts_.find(dex_file);
+ DCHECK(it != image_info.dex_cache_array_starts_.end());
return reinterpret_cast<PtrType>(
- image_begin_ + bin_slot_offsets_[kBinDexCacheArray] + it->second + offset);
+ image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
+ it->second + offset);
}
- uint8_t* GetOatFileBegin() const;
+ uint8_t* GetOatFileBegin(const char* oat_filename) const;
// If image_fd is not kInvalidImageFd, then we use that for the file. Otherwise we open
- // image_filename.
+ // the names in image_filenames.
bool Write(int image_fd,
- const std::string& image_filename,
- const std::string& oat_filename,
- const std::string& oat_location)
+ const std::vector<const char*>& image_filenames,
+ const std::vector<const char*>& oat_filenames)
REQUIRES(!Locks::mutator_lock_);
- uintptr_t GetOatDataBegin() {
- return reinterpret_cast<uintptr_t>(oat_data_begin_);
+ uintptr_t GetOatDataBegin(const char* oat_filename) {
+ return reinterpret_cast<uintptr_t>(GetImageInfo(oat_filename).oat_data_begin_);
}
+ const char* GetOatFilenameForDexCache(mirror::DexCache* dex_cache) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Update the oat size for the given oat file. This will make the oat_offset for the next oat
+ // file valid.
+ void UpdateOatFile(const char* oat_filename);
+
private:
bool AllocMemory();
@@ -214,6 +236,58 @@
const uint32_t lockword_;
};
+ struct ImageInfo {
+ explicit ImageInfo()
+ : image_begin_(nullptr),
+ image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)),
+ image_roots_address_(0),
+ image_offset_(0),
+ image_size_(0),
+ oat_offset_(0),
+ bin_slot_sizes_(),
+ bin_slot_offsets_(),
+ bin_slot_count_() {}
+
+ std::unique_ptr<MemMap> image_; // Memory mapped for generating the image.
+
+ // Target begin of this image. Notes: It is not valid to write here, this is the address
+ // of the target image, not necessarily where image_ is mapped. The address is only valid
+ // after layouting (otherwise null).
+ uint8_t* image_begin_;
+
+ size_t image_end_; // Offset to the free space in image_, initially size of image header.
+ uint32_t image_roots_address_; // The image roots address in the image.
+ size_t image_offset_; // Offset of this image from the start of the first image.
+
+ // Image size is the *address space* covered by this image. As the live bitmap is aligned
+ // to the page size, the live bitmap will cover more address space than necessary. But live
+ // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
+ // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
+ // page-aligned).
+ size_t image_size_;
+
+ // Oat data.
+ size_t oat_offset_; // Offset of the oat file for this image from start of oat files. This is
+ // valid when the previous oat file has been written.
+ uint8_t* oat_data_begin_; // Start of oatdata in the corresponding oat file. This is
+ // valid when the images have been layed out.
+ size_t oat_size_; // Size of the corresponding oat data.
+
+ // Image bitmap which lets us know where the objects inside of the image reside.
+ std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
+
+ // The start offsets of the dex cache arrays.
+ SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
+
+ // Offset from oat_data_begin_ to the stubs.
+ uint32_t oat_address_offsets_[kOatAddressCount];
+
+ // Bin slot tracking for dirty object packing.
+ size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin.
+ size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins.
+ size_t bin_slot_count_[kBinSize]; // Number of objects in a bin.
+ };
+
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -233,7 +307,8 @@
SHARED_REQUIRES(Locks::mutator_lock_);
BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
- void AddDexCacheArrayRelocation(void* array, size_t offset) SHARED_REQUIRES(Locks::mutator_lock_);
+ void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
@@ -244,19 +319,21 @@
mirror::Object* GetLocalAddress(mirror::Object* object) const
SHARED_REQUIRES(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
- uint8_t* dst = image_->Begin() + offset;
+ const char* oat_filename = GetOatFilename(object);
+ const ImageInfo& image_info = GetConstImageInfo(oat_filename);
+ uint8_t* dst = image_info.image_->Begin() + offset;
return reinterpret_cast<mirror::Object*>(dst);
}
// Returns the address in the boot image if we are compiling the app image.
const uint8_t* GetOatAddress(OatAddress type) const;
- const uint8_t* GetOatAddressForOffset(uint32_t offset) const {
+ const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
// With Quick, code is within the OatFile, as there are all in one
- // .o ELF object.
- DCHECK_LE(offset, oat_file_->Size());
- DCHECK(oat_data_begin_ != nullptr);
- return offset == 0u ? nullptr : oat_data_begin_ + offset;
+ // .o ELF object. But interpret it as signed.
+ DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
+ DCHECK(image_info.oat_data_begin_ != nullptr);
+ return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
}
// Returns true if the class was in the original requested image classes list.
@@ -282,7 +359,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
void CreateHeader(size_t oat_loaded_size, size_t oat_data_offset)
SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Object>* CreateImageRoots() const
+ mirror::ObjectArray<mirror::Object>* CreateImageRoots(const char* oat_filename) const
SHARED_REQUIRES(Locks::mutator_lock_);
void CalculateObjectBinSlots(mirror::Object* obj)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -304,7 +381,7 @@
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
SHARED_REQUIRES(Locks::mutator_lock_);
void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy)
+ void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
SHARED_REQUIRES(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -319,23 +396,24 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
- const uint8_t* GetQuickCode(ArtMethod* method, bool* quick_is_interpreted)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetQuickEntryPoint(ArtMethod* method)
+ const uint8_t* GetQuickCode(ArtMethod* method,
+ const ImageInfo& image_info,
+ bool* quick_is_interpreted)
SHARED_REQUIRES(Locks::mutator_lock_);
// Patches references in OatFile to expect runtime addresses.
void SetOatChecksumFromElfFile(File* elf_file);
// Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
- size_t GetBinSizeSum(Bin up_to = kBinSize) const;
+ size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
// Return true if a method is likely to be dirtied at runtime.
bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
// Assign the offset for an ArtMethod.
- void AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type)
+ void AssignMethodOffset(ArtMethod* method,
+ NativeObjectRelocationType type,
+ const char* oat_filename)
SHARED_REQUIRES(Locks::mutator_lock_);
// Return true if klass is loaded by the boot class loader but not in the boot image.
@@ -359,11 +437,11 @@
// Location of where the object will be when the image is loaded at runtime.
template <typename T>
- T* NativeLocationInImage(T* obj);
+ T* NativeLocationInImage(T* obj, const char* oat_filename) SHARED_REQUIRES(Locks::mutator_lock_);
// Location of where the temporary copy of the object currently is.
template <typename T>
- T* NativeCopyLocation(T* obj);
+ T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
// Return true of obj is inside of the boot image space. This may only return true if we are
// compiling an app image.
@@ -372,46 +450,35 @@
// Return true if ptr is within the boot oat file.
bool IsInBootOatFile(const void* ptr) const;
+ const char* GetOatFilename(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const char* GetDefaultOatFilename() const {
+ return default_oat_filename_;
+ }
+
+ ImageInfo& GetImageInfo(const char* oat_filename);
+ const ImageInfo& GetConstImageInfo(const char* oat_filename) const;
+ const ImageInfo& GetImageInfo(size_t index) const;
+
const CompilerDriver& compiler_driver_;
- // Beginning target image address for the output image.
- uint8_t* image_begin_;
-
- // Offset to the free space in image_.
- size_t image_end_;
+ // Beginning target image address for the first image.
+ uint8_t* global_image_begin_;
// Offset from image_begin_ to where the first object is in image_.
size_t image_objects_offset_begin_;
- // The image roots address in the image.
- uint32_t image_roots_address_;
-
// oat file with code for this image
OatFile* oat_file_;
- // Memory mapped for generating the image.
- std::unique_ptr<MemMap> image_;
-
// Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
// to keep track. These include vtable arrays, iftable arrays, and dex caches.
std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
- // The start offsets of the dex cache arrays.
- SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
-
// Saved hash codes. We use these to restore lockwords which were temporarily used to have
// forwarding addresses as well as copying over hash codes.
std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
- // Beginning target oat address for the pointers from the output image to its oat file.
- const uint8_t* oat_data_begin_;
-
- // Image bitmap which lets us know where the objects inside of the image reside.
- std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
-
- // Offset from oat_data_begin_ to the stubs.
- uint32_t oat_address_offsets_[kOatAddressCount];
-
// Boolean flags.
const bool compile_pic_;
const bool compile_app_image_;
@@ -422,10 +489,8 @@
// Size of pointers on the target architecture.
size_t target_ptr_size_;
- // Bin slot tracking for dirty object packing
- size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin
- size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins.
- size_t bin_slot_count_[kBinSize]; // Number of objects in a bin
+ // Mapping of oat filename to image data.
+ std::unordered_map<std::string, ImageInfo> image_info_map_;
// Cached size of the intern table for when we allocate memory.
size_t intern_table_bytes_;
@@ -434,6 +499,7 @@
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
struct NativeObjectRelocation {
+ const char* oat_filename;
uintptr_t offset;
NativeObjectRelocationType type;
@@ -468,6 +534,11 @@
// Which mode the image is stored as, see image.h
const ImageHeader::StorageMode image_storage_mode_;
+ // Map of dex files to the oat filenames that they were compiled into.
+ const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map_;
+ const std::vector<const char*> oat_filenames_;
+ const char* default_oat_filename_;
+
friend class ContainsBootClassLoaderNonImageClassVisitor;
friend class FixupClassVisitor;
friend class FixupRootVisitor;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index d001495..b323d24 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -84,6 +84,7 @@
CompilerOptions::kDefaultNumDexMethodsThreshold,
CompilerOptions::kDefaultInlineDepthLimit,
CompilerOptions::kDefaultInlineMaxCodeUnits,
+ /* no_inline_from */ nullptr,
/* include_patch_information */ false,
CompilerOptions::kDefaultTopKProfileThreshold,
Runtime::Current()->IsDebuggable(),
@@ -154,7 +155,8 @@
/* dump_cfg_append */ false,
cumulative_logger_.get(),
/* swap_fd */ -1,
- /* profile_file */ ""));
+ /* profile_file */ "",
+ /* dex to oat map */ nullptr));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
compiler_driver_->SetSupportBootImageFixup(false);
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 5ab55e0..8d60be2 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -220,7 +220,8 @@
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, nullptr, nullptr, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, /* is_shared_namespace */ false,
+ nullptr, nullptr, &reason))
<< reason;
jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
@@ -235,7 +236,8 @@
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, nullptr, nullptr, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, /* is_shared_namespace */ false,
+ nullptr, nullptr, &reason))
<< reason;
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 92cf8ca..877a674 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -47,7 +47,7 @@
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
false, nullptr, nullptr, nullptr, 1u,
- false, false, "", false, nullptr, -1, ""),
+ false, false, "", false, nullptr, -1, "", nullptr),
error_msg_(),
instruction_set_(instruction_set),
features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index eea5204..58f46d6 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -121,7 +121,8 @@
false,
timer_.get(),
-1,
- ""));
+ "",
+ nullptr));
}
bool WriteElf(File* file,
@@ -199,7 +200,7 @@
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
- ASSERT_EQ(1U, oat_header.GetDexFileCount()); // core
+ ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount()); // core
ASSERT_EQ(42U, oat_header.GetImageFileLocationOatChecksum());
ASSERT_EQ(4096U, oat_header.GetImageFileLocationOatDataBegin());
ASSERT_EQ("lue.art", std::string(oat_header.GetStoreValueByKey(OatHeader::kImageLocationKey)));
@@ -224,8 +225,9 @@
}
const char* descriptor = dex_file.GetClassDescriptor(class_def);
- mirror::Class* klass = class_linker->FindClass(soa.Self(), descriptor,
- NullHandle<mirror::ClassLoader>());
+ mirror::Class* klass = class_linker->FindClass(soa.Self(),
+ descriptor,
+ ScopedNullHandle<mirror::ClassLoader>());
const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(i);
CHECK_EQ(mirror::Class::Status::kStatusNotReady, oat_class.GetStatus()) << descriptor;
@@ -238,12 +240,12 @@
++method_index;
}
size_t visited_virtuals = 0;
- for (auto& m : klass->GetVirtualMethods(pointer_size)) {
- if (!m.IsMiranda()) {
- CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file);
- ++method_index;
- ++visited_virtuals;
- }
+ // TODO We should also check copied methods in this test.
+ for (auto& m : klass->GetDeclaredVirtualMethods(pointer_size)) {
+ EXPECT_FALSE(m.IsMiranda());
+ CheckMethod(&m, oat_class.GetOatMethod(method_index), dex_file);
+ ++method_index;
+ ++visited_virtuals;
}
EXPECT_EQ(visited_virtuals, num_virtual_methods);
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 2b2f0e8..025e35e 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -716,6 +716,14 @@
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DexFile::TypeId& type_id =
+ dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
+ const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
+ // Skip methods that are not in the image.
+ if (!writer_->GetCompilerDriver()->IsImageClass(class_descriptor)) {
+ return true;
+ }
+
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -737,7 +745,7 @@
*dex_file_,
it.GetMemberIndex(),
dex_cache,
- NullHandle<mirror::ClassLoader>(),
+ ScopedNullHandle<mirror::ClassLoader>(),
nullptr,
invoke_type);
if (method == nullptr) {
@@ -958,7 +966,9 @@
if (writer_->HasBootImage()) {
auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<const uint8_t*>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
- const uint8_t* oat_data = writer_->image_writer_->GetOatFileBegin() + file_offset_;
+ const char* oat_filename = writer_->image_writer_->GetOatFilenameForDexCache(dex_cache_);
+ const uint8_t* oat_data =
+ writer_->image_writer_->GetOatFileBegin(oat_filename) + file_offset_;
return element - oat_data;
} else {
size_t start = writer_->dex_cache_arrays_offsets_.Get(patch.TargetDexCacheDexFile());
@@ -994,9 +1004,15 @@
// NOTE: We're using linker patches for app->boot references when the image can
// be relocated and therefore we need to emit .oat_patches. We're not using this
// for app->app references, so check that the method is an image method.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
- CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ bool contains_method = false;
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
+ contains_method |=
+ image_space->GetImageHeader().GetMethodsSection().Contains(method_offset);
+ }
+ CHECK(contains_method);
}
// Note: We only patch targeting ArtMethods in image which is in the low 4gb.
uint32_t address = PointerToLowMemUInt32(method);
@@ -1012,7 +1028,8 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t address = target_offset;
if (writer_->HasBootImage()) {
- address = PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
+ const char* oat_filename = writer_->image_writer_->GetOatFilenameForDexCache(dex_cache_);
+ address = PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin(oat_filename) +
writer_->oat_data_offset_ + target_offset);
}
DCHECK_LE(offset + 4, code->size());
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 4c3f66a..dc75ff1 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1590,15 +1590,18 @@
HGraph* graph = GetGraph();
HInstruction* zero;
switch (type) {
- case Primitive::Type::kPrimNot: zero = graph->GetNullConstant(); break;
- case Primitive::Type::kPrimFloat: zero = graph->GetFloatConstant(0); break;
- case Primitive::Type::kPrimDouble: zero = graph->GetDoubleConstant(0); break;
+ case Primitive::kPrimNot: zero = graph->GetNullConstant(); break;
+ case Primitive::kPrimFloat: zero = graph->GetFloatConstant(0); break;
+ case Primitive::kPrimDouble: zero = graph->GetDoubleConstant(0); break;
default: zero = graph->GetConstant(type, 0); break;
}
HPhi* phi = new (graph->GetArena())
HPhi(graph->GetArena(), kNoRegNumber, /*number_of_inputs*/ 2, HPhi::ToPhiType(type));
phi->SetRawInputAt(0, instruction);
phi->SetRawInputAt(1, zero);
+ if (type == Primitive::kPrimNot) {
+ phi->SetReferenceTypeInfo(instruction->GetReferenceTypeInfo());
+ }
new_preheader->AddPhi(phi);
return phi;
}
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 1178d0f..4dd0d26 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -17,6 +17,8 @@
#include "builder.h"
#include "art_field-inl.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "class_linker.h"
#include "dex/verified_method.h"
@@ -458,6 +460,19 @@
return false;
}
+ // Find locations where we want to generate extra stackmaps for native debugging.
+ // This allows us to generate the info only at interesting points (for example,
+ // at start of java statement) rather than before every dex instruction.
+ const bool native_debuggable = compiler_driver_ != nullptr &&
+ compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
+ ArenaBitVector* native_debug_info_locations;
+ if (native_debuggable) {
+ const uint32_t num_instructions = code_item.insns_size_in_code_units_;
+ native_debug_info_locations = new (arena_) ArenaBitVector (arena_, num_instructions, false);
+ native_debug_info_locations->ClearAllBits();
+ FindNativeDebugInfoLocations(code_item, native_debug_info_locations);
+ }
+
CreateBlocksForTryCatch(code_item);
InitializeParameters(code_item.ins_size_);
@@ -467,6 +482,11 @@
// Update the current block if dex_pc starts a new block.
MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
+ if (native_debuggable && native_debug_info_locations->IsBitSet(dex_pc)) {
+ if (current_block_ != nullptr) {
+ current_block_->AddInstruction(new (arena_) HNativeDebugInfo(dex_pc));
+ }
+ }
if (!AnalyzeDexInstruction(instruction, dex_pc)) {
return false;
}
@@ -507,6 +527,47 @@
current_block_ = block;
}
+void HGraphBuilder::FindNativeDebugInfoLocations(const DexFile::CodeItem& code_item,
+ ArenaBitVector* locations) {
+ // The callback gets called when the line number changes.
+ // In other words, it marks the start of new java statement.
+ struct Callback {
+ static bool Position(void* ctx, const DexFile::PositionInfo& entry) {
+ static_cast<ArenaBitVector*>(ctx)->SetBit(entry.address_);
+ return false;
+ }
+ };
+ dex_file_->DecodeDebugPositionInfo(&code_item, Callback::Position, locations);
+ // Add native debug info at the start of every basic block.
+ for (uint32_t pc = 0; pc < code_item.insns_size_in_code_units_; pc++) {
+ if (FindBlockStartingAt(pc) != nullptr) {
+ locations->SetBit(pc);
+ }
+ }
+ // Instruction-specific tweaks.
+ const Instruction* const begin = Instruction::At(code_item.insns_);
+ const Instruction* const end = begin->RelativeAt(code_item.insns_size_in_code_units_);
+ for (const Instruction* inst = begin; inst < end; inst = inst->Next()) {
+ switch (inst->Opcode()) {
+ case Instruction::MOVE_EXCEPTION:
+ case Instruction::MOVE_RESULT:
+ case Instruction::MOVE_RESULT_WIDE:
+ case Instruction::MOVE_RESULT_OBJECT: {
+ // The compiler checks that there are no instructions before those.
+ // So generate HNativeDebugInfo after them instead.
+ locations->ClearBit(inst->GetDexPc(code_item.insns_));
+ const Instruction* next = inst->Next();
+ if (next < end) {
+ locations->SetBit(next->GetDexPc(code_item.insns_));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
const uint16_t* code_end,
size_t* number_of_branches) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index c3979f3..26bf1cb 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -80,7 +80,8 @@
can_use_baseline_for_string_init_(true),
compilation_stats_(nullptr),
interpreter_metadata_(nullptr),
- dex_cache_(NullHandle<mirror::DexCache>()) {}
+ null_dex_cache_(),
+ dex_cache_(null_dex_cache_) {}
bool BuildGraph(const DexFile::CodeItem& code);
@@ -90,8 +91,9 @@
static constexpr const char* kBuilderPassName = "builder";
- // The number of entries in a packed switch before we use a jump table.
- static constexpr uint16_t kSmallSwitchThreshold = 5;
+ // The number of entries in a packed switch before we use a jump table or specified
+ // compare/jump series.
+ static constexpr uint16_t kSmallSwitchThreshold = 3;
private:
// Analyzes the dex instruction and adds HInstruction to the graph
@@ -110,6 +112,7 @@
const uint16_t* end,
size_t* number_of_branches);
void MaybeUpdateCurrentBlock(size_t dex_pc);
+ void FindNativeDebugInfoLocations(const DexFile::CodeItem& code_item, ArenaBitVector* locations);
HBasicBlock* FindBlockStartingAt(int32_t dex_pc) const;
HBasicBlock* FindOrCreateBlockStartingAt(int32_t dex_pc);
@@ -370,6 +373,7 @@
const uint8_t* interpreter_metadata_;
// Dex cache for dex_file_.
+ ScopedNullHandle<mirror::DexCache> null_dex_cache_;
Handle<mirror::DexCache> dex_cache_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3630dbe..58feb67 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -59,7 +59,7 @@
// S registers. Therefore there is no need to block it.
static constexpr DRegister DTMP = D31;
-static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmWordSize, x).Int32Value()
@@ -1618,7 +1618,15 @@
/* false_target */ nullptr);
}
-void LocationsBuilderARM::VisitCondition(HCondition* cond) {
+void LocationsBuilderARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ codegen_->RecordPcInfo(info, info->GetDexPc());
+}
+
+void LocationsBuilderARM::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
@@ -1649,7 +1657,7 @@
}
}
-void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
+void InstructionCodeGeneratorARM::HandleCondition(HCondition* cond) {
if (!cond->NeedsMaterialization()) {
return;
}
@@ -1706,83 +1714,83 @@
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderARM::VisitLocal(HLocal* local) {
@@ -6250,7 +6258,7 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- if (switch_instr->GetNumEntries() >= kPackedSwitchJumpTableThreshold &&
+ if (switch_instr->GetNumEntries() > kPackedSwitchCompareJumpThreshold &&
codegen_->GetAssembler()->IsThumb()) {
locations->AddTemp(Location::RequiresRegister()); // We need a temp for the table base.
if (switch_instr->GetStartValue() != 0) {
@@ -6266,12 +6274,30 @@
Register value_reg = locations->InAt(0).AsRegister<Register>();
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
- if (num_entries < kPackedSwitchJumpTableThreshold || !codegen_->GetAssembler()->IsThumb()) {
+ if (num_entries <= kPackedSwitchCompareJumpThreshold || !codegen_->GetAssembler()->IsThumb()) {
// Create a series of compare/jumps.
+ Register temp_reg = IP;
+ // Note: It is fine for the below AddConstantSetFlags() using IP register to temporarily store
+ // the immediate, because IP is used as the destination register. For the other
+ // AddConstantSetFlags() and GenerateCompareWithImmediate(), the immediate values are constant,
+ // and they can be encoded in the instruction without making use of IP register.
+ __ AddConstantSetFlags(temp_reg, value_reg, -lower_bound);
+
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (uint32_t i = 0; i < num_entries; i++) {
- GenerateCompareWithImmediate(value_reg, lower_bound + i);
- __ b(codegen_->GetLabelOf(successors[i]), EQ);
+ // Jump to successors[0] if value == lower_bound.
+ __ b(codegen_->GetLabelOf(successors[0]), EQ);
+ int32_t last_index = 0;
+ for (; num_entries - last_index > 2; last_index += 2) {
+ __ AddConstantSetFlags(temp_reg, temp_reg, -2);
+ // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+ __ b(codegen_->GetLabelOf(successors[last_index + 1]), LO);
+ // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+ __ b(codegen_->GetLabelOf(successors[last_index + 2]), EQ);
+ }
+ if (num_entries - last_index == 2) {
+ // The last missing case_value.
+ GenerateCompareWithImmediate(temp_reg, 1);
+ __ b(codegen_->GetLabelOf(successors[last_index + 1]), EQ);
}
// And the default for any other value.
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 8193c28..b7c58e1 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -170,6 +170,7 @@
private:
void HandleInvoke(HInvoke* invoke);
void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
+ void HandleCondition(HCondition* condition);
void HandleIntegerRotate(LocationSummary* locations);
void HandleLongRotate(LocationSummary* locations);
void HandleRotate(HRor* ror);
@@ -216,6 +217,7 @@
void GenerateOrrConst(Register out, Register first, uint32_t value);
void GenerateEorConst(Register out, Register first, uint32_t value);
void HandleBitwiseOperation(HBinaryOperation* operation);
+ void HandleCondition(HCondition* condition);
void HandleIntegerRotate(LocationSummary* locations);
void HandleLongRotate(LocationSummary* locations);
void HandleRotate(HRor* ror);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 451470f..b49f42b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -71,10 +71,10 @@
using helpers::ArtVixlRegCodeCoherentForRegSet;
static constexpr int kCurrentMethodStackOffset = 0;
-// The compare/jump sequence will generate about (2 * num_entries + 1) instructions. While jump
+// The compare/jump sequence will generate about (1.5 * num_entries + 3) instructions. While jump
// table version generates 7 instructions and num_entries literals. Compare/jump sequence will
// generates less code/data with a small num_entries.
-static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
inline Condition ARM64Condition(IfCondition cond) {
switch (cond) {
@@ -546,7 +546,7 @@
void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
uint32_t num_entries = switch_instr_->GetNumEntries();
- DCHECK_GE(num_entries, kPackedSwitchJumpTableThreshold);
+ DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
// We are about to use the assembler to place literals directly. Make sure we have enough
// underlying code buffer and we have generated the jump table with right size.
@@ -2427,7 +2427,7 @@
}
}
-void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
+void LocationsBuilderARM64::HandleCondition(HCondition* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
@@ -2447,7 +2447,7 @@
}
}
-void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
+void InstructionCodeGeneratorARM64::HandleCondition(HCondition* instruction) {
if (!instruction->NeedsMaterialization()) {
return;
}
@@ -2495,8 +2495,8 @@
M(Above) \
M(AboveOrEqual)
#define DEFINE_CONDITION_VISITORS(Name) \
-void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
-void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
+void LocationsBuilderARM64::Visit##Name(H##Name* comp) { HandleCondition(comp); } \
+void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { HandleCondition(comp); }
FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION
@@ -2949,6 +2949,14 @@
/* false_target */ nullptr);
}
+void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ codegen_->RecordPcInfo(info, info->GetDexPc());
+}
+
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
HandleFieldGet(instruction);
}
@@ -4582,20 +4590,29 @@
// ranges and emit the tables only as required.
static constexpr int32_t kJumpTableInstructionThreshold = 1* MB / kMaxExpectedSizePerHInstruction;
- if (num_entries < kPackedSwitchJumpTableThreshold ||
+ if (num_entries <= kPackedSwitchCompareJumpThreshold ||
// Current instruction id is an upper bound of the number of HIRs in the graph.
GetGraph()->GetCurrentInstructionId() > kJumpTableInstructionThreshold) {
// Create a series of compare/jumps.
+ UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
+ Register temp = temps.AcquireW();
+ __ Subs(temp, value_reg, Operand(lower_bound));
+
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (uint32_t i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- vixl::Label* succ = codegen_->GetLabelOf(successors[i]);
- if (case_value == 0) {
- __ Cbz(value_reg, succ);
- } else {
- __ Cmp(value_reg, Operand(case_value));
- __ B(eq, succ);
- }
+ // Jump to successors[0] if value == lower_bound.
+ __ B(eq, codegen_->GetLabelOf(successors[0]));
+ int32_t last_index = 0;
+ for (; num_entries - last_index > 2; last_index += 2) {
+ __ Subs(temp, temp, Operand(2));
+ // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+ __ B(lo, codegen_->GetLabelOf(successors[last_index + 1]));
+ // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+ __ B(eq, codegen_->GetLabelOf(successors[last_index + 2]));
+ }
+ if (num_entries - last_index == 2) {
+ // The last missing case_value.
+ __ Cmp(temp, Operand(1));
+ __ B(eq, codegen_->GetLabelOf(successors[last_index + 1]));
}
// And the default for any other value.
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7950f07..0e90ac6 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -215,6 +215,7 @@
const FieldInfo& field_info,
bool value_can_be_null);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* instr);
void GenerateImplicitNullCheck(HNullCheck* instruction);
void GenerateExplicitNullCheck(HNullCheck* instruction);
@@ -257,6 +258,7 @@
void HandleFieldSet(HInstruction* instruction);
void HandleFieldGet(HInstruction* instruction);
void HandleInvoke(HInvoke* instr);
+ void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* instr);
CodeGeneratorARM64* const codegen_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 84fa0e6..4648606 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2214,7 +2214,7 @@
}
}
-void LocationsBuilderMIPS::VisitCondition(HCondition* instruction) {
+void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
switch (instruction->InputAt(0)->GetType()) {
default:
@@ -2234,7 +2234,7 @@
}
}
-void InstructionCodeGeneratorMIPS::VisitCondition(HCondition* instruction) {
+void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) {
if (!instruction->NeedsMaterialization()) {
return;
}
@@ -3366,6 +3366,14 @@
/* false_target */ nullptr);
}
+void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ codegen_->RecordPcInfo(info, info->GetDexPc());
+}
+
void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
Primitive::Type field_type = field_info.GetFieldType();
bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
@@ -4914,83 +4922,83 @@
}
void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS::VisitFakeString(HFakeString* instruction) {
@@ -5019,19 +5027,31 @@
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
// Create a set of compare/jumps.
+ Register temp_reg = TMP;
+ __ Addiu32(temp_reg, value_reg, -lower_bound);
+ // Jump to default if index is negative
+ // Note: We don't check the case that index is positive while value < lower_bound, because in
+ // this case, index >= num_entries must be true. So that we can save one branch instruction.
+ __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));
+
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int32_t i = 0; i < num_entries; ++i) {
- int32_t case_value = lower_bound + i;
- MipsLabel* successor_label = codegen_->GetLabelOf(successors[i]);
- if (case_value == 0) {
- __ Beqz(value_reg, successor_label);
- } else {
- __ LoadConst32(TMP, case_value);
- __ Beq(value_reg, TMP, successor_label);
- }
+ // Jump to successors[0] if value == lower_bound.
+ __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
+ int32_t last_index = 0;
+ for (; num_entries - last_index > 2; last_index += 2) {
+ __ Addiu(temp_reg, temp_reg, -2);
+ // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+ __ Bltz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
+ // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+ __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
+ }
+ if (num_entries - last_index == 2) {
+ // The last missing case_value.
+ __ Addiu(temp_reg, temp_reg, -1);
+ __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
}
- // Insert the default branch for every other value.
+ // And the default for any other value.
if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
__ B(codegen_->GetLabelOf(default_block));
}
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 1ee6bde..38302ad 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -185,6 +185,7 @@
private:
void HandleInvoke(HInvoke* invoke);
void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
@@ -220,6 +221,7 @@
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 99f58dd..05834ff 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1752,11 +1752,7 @@
void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
Primitive::Type in_type = compare->InputAt(0)->GetType();
- LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
- ? LocationSummary::kCall
- : LocationSummary::kNoCall;
-
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare);
switch (in_type) {
case Primitive::kPrimLong:
@@ -1766,13 +1762,11 @@
break;
case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
- locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
- locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
- }
default:
LOG(FATAL) << "Unexpected type for compare operation " << in_type;
@@ -1781,14 +1775,15 @@
void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
LocationSummary* locations = instruction->GetLocations();
+ GpuRegister res = locations->Out().AsRegister<GpuRegister>();
Primitive::Type in_type = instruction->InputAt(0)->GetType();
+ bool gt_bias = instruction->IsGtBias();
// 0 if: left == right
// 1 if: left > right
// -1 if: left < right
switch (in_type) {
case Primitive::kPrimLong: {
- GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
Location rhs_location = locations->InAt(1);
bool use_imm = rhs_location.IsConstant();
@@ -1803,35 +1798,52 @@
rhs = rhs_location.AsRegister<GpuRegister>();
}
__ Slt(TMP, lhs, rhs);
- __ Slt(dst, rhs, lhs);
- __ Subu(dst, dst, TMP);
+ __ Slt(res, rhs, lhs);
+ __ Subu(res, res, TMP);
break;
}
- case Primitive::kPrimFloat:
+ case Primitive::kPrimFloat: {
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ Mips64Label done;
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ LoadConst32(res, 0);
+ __ Bc1nez(FTMP, &done);
+ if (gt_bias) {
+ __ CmpLtS(FTMP, lhs, rhs);
+ __ LoadConst32(res, -1);
+ __ Bc1nez(FTMP, &done);
+ __ LoadConst32(res, 1);
+ } else {
+ __ CmpLtS(FTMP, rhs, lhs);
+ __ LoadConst32(res, 1);
+ __ Bc1nez(FTMP, &done);
+ __ LoadConst32(res, -1);
+ }
+ __ Bind(&done);
+ break;
+ }
+
case Primitive::kPrimDouble: {
- int32_t entry_point_offset;
- if (in_type == Primitive::kPrimFloat) {
- entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
- : QUICK_ENTRY_POINT(pCmplFloat);
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ Mips64Label done;
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ LoadConst32(res, 0);
+ __ Bc1nez(FTMP, &done);
+ if (gt_bias) {
+ __ CmpLtD(FTMP, lhs, rhs);
+ __ LoadConst32(res, -1);
+ __ Bc1nez(FTMP, &done);
+ __ LoadConst32(res, 1);
} else {
- entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
- : QUICK_ENTRY_POINT(pCmplDouble);
+ __ CmpLtD(FTMP, rhs, lhs);
+ __ LoadConst32(res, 1);
+ __ Bc1nez(FTMP, &done);
+ __ LoadConst32(res, -1);
}
- codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
- if (in_type == Primitive::kPrimFloat) {
- if (instruction->IsGtBias()) {
- CheckEntrypointTypes<kQuickCmpgFloat, int32_t, float, float>();
- } else {
- CheckEntrypointTypes<kQuickCmplFloat, int32_t, float, float>();
- }
- } else {
- if (instruction->IsGtBias()) {
- CheckEntrypointTypes<kQuickCmpgDouble, int32_t, double, double>();
- } else {
- CheckEntrypointTypes<kQuickCmplDouble, int32_t, double, double>();
- }
- }
+ __ Bind(&done);
break;
}
@@ -1840,143 +1852,67 @@
}
}
-void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
+void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ switch (instruction->InputAt(0)->GetType()) {
+ default:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ break;
+ }
if (instruction->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
-void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
+void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
if (!instruction->NeedsMaterialization()) {
return;
}
- // TODO: generalize to long
- DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
-
+ Primitive::Type type = instruction->InputAt(0)->GetType();
LocationSummary* locations = instruction->GetLocations();
-
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
- GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
- Location rhs_location = locations->InAt(1);
+ Mips64Label true_label;
- GpuRegister rhs_reg = ZERO;
- int64_t rhs_imm = 0;
- bool use_imm = rhs_location.IsConstant();
- if (use_imm) {
- rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
- } else {
- rhs_reg = rhs_location.AsRegister<GpuRegister>();
- }
+ switch (type) {
+ default:
+ // Integer case.
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
+ return;
+ case Primitive::kPrimLong:
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
+ return;
- IfCondition if_cond = instruction->GetCondition();
-
- switch (if_cond) {
- case kCondEQ:
- case kCondNE:
- if (use_imm && IsUint<16>(rhs_imm)) {
- __ Xori(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- __ Xor(dst, lhs, rhs_reg);
- }
- if (if_cond == kCondEQ) {
- __ Sltiu(dst, dst, 1);
- } else {
- __ Sltu(dst, ZERO, dst);
- }
- break;
-
- case kCondLT:
- case kCondGE:
- if (use_imm && IsInt<16>(rhs_imm)) {
- __ Slti(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- __ Slt(dst, lhs, rhs_reg);
- }
- if (if_cond == kCondGE) {
- // Simulate lhs >= rhs via !(lhs < rhs) since there's
- // only the slt instruction but no sge.
- __ Xori(dst, dst, 1);
- }
- break;
-
- case kCondLE:
- case kCondGT:
- if (use_imm && IsInt<16>(rhs_imm + 1)) {
- // Simulate lhs <= rhs via lhs < rhs + 1.
- __ Slti(dst, lhs, rhs_imm + 1);
- if (if_cond == kCondGT) {
- // Simulate lhs > rhs via !(lhs <= rhs) since there's
- // only the slti instruction but no sgti.
- __ Xori(dst, dst, 1);
- }
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- __ Slt(dst, rhs_reg, lhs);
- if (if_cond == kCondLE) {
- // Simulate lhs <= rhs via !(rhs < lhs) since there's
- // only the slt instruction but no sle.
- __ Xori(dst, dst, 1);
- }
- }
- break;
-
- case kCondB:
- case kCondAE:
- if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7fff) {
- __ Sltiu(dst, lhs, rhs_imm);
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- __ Sltu(dst, lhs, rhs_reg);
- }
- if (if_cond == kCondAE) {
- // Simulate lhs >= rhs via !(lhs < rhs) since there's
- // only the sltu instruction but no sgeu.
- __ Xori(dst, dst, 1);
- }
- break;
-
- case kCondBE:
- case kCondA:
- if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7ffe) {
- // Simulate lhs <= rhs via lhs < rhs + 1.
- __ Sltiu(dst, lhs, rhs_imm + 1);
- if (if_cond == kCondA) {
- // Simulate lhs > rhs via !(lhs <= rhs) since there's
- // only the sltiu instruction but no sgtiu.
- __ Xori(dst, dst, 1);
- }
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- __ Sltu(dst, rhs_reg, lhs);
- if (if_cond == kCondBE) {
- // Simulate lhs <= rhs via !(rhs < lhs) since there's
- // only the sltu instruction but no sleu.
- __ Xori(dst, dst, 1);
- }
- }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ // TODO: don't use branches.
+ GenerateFpCompareAndBranch(instruction->GetCondition(),
+ instruction->IsGtBias(),
+ type,
+ locations,
+ &true_label);
break;
}
+
+ // Convert the branches into the result.
+ Mips64Label done;
+
+ // False case: result = 0.
+ __ LoadConst32(dst, 0);
+ __ Bc(&done);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ LoadConst32(dst, 1);
+ __ Bind(&done);
}
void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
@@ -2375,6 +2311,329 @@
}
}
+void InstructionCodeGeneratorMIPS64::GenerateIntLongCompare(IfCondition cond,
+ bool is64bit,
+ LocationSummary* locations) {
+ GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ Location rhs_location = locations->InAt(1);
+ GpuRegister rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ if (is64bit) {
+ rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ }
+ } else {
+ rhs_reg = rhs_location.AsRegister<GpuRegister>();
+ }
+ int64_t rhs_imm_plus_one = rhs_imm + UINT64_C(1);
+
+ switch (cond) {
+ case kCondEQ:
+ case kCondNE:
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
+ break;
+
+ case kCondLT:
+ case kCondGE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ __ Slti(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondGE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the slt instruction but no sge.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondLE:
+ case kCondGT:
+ if (use_imm && IsInt<16>(rhs_imm_plus_one)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(dst, lhs, rhs_imm_plus_one);
+ if (cond == kCondGT) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the slti instruction but no sgti.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, rhs_reg, lhs);
+ if (cond == kCondLE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the slt instruction but no sle.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+
+ case kCondB:
+ case kCondAE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ // Sltiu sign-extends its 16-bit immediate operand before
+ // the comparison and thus lets us compare directly with
+ // unsigned values in the ranges [0, 0x7fff] and
+ // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
+ if (use_imm && (rhs_imm_plus_one != 0) && IsInt<16>(rhs_imm_plus_one)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ // Note that this only works if rhs + 1 does not overflow
+ // to 0, hence the check above.
+ // Sltiu sign-extends its 16-bit immediate operand before
+ // the comparison and thus lets us compare directly with
+ // unsigned values in the ranges [0, 0x7fff] and
+ // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
+ __ Sltiu(dst, lhs, rhs_imm_plus_one);
+ if (cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateIntLongCompareAndBranch(IfCondition cond,
+ bool is64bit,
+ LocationSummary* locations,
+ Mips64Label* label) {
+ GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
+ Location rhs_location = locations->InAt(1);
+ GpuRegister rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ if (is64bit) {
+ rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ }
+ } else {
+ rhs_reg = rhs_location.AsRegister<GpuRegister>();
+ }
+
+ if (use_imm && rhs_imm == 0) {
+ switch (cond) {
+ case kCondEQ:
+ case kCondBE: // <= 0 if zero
+ __ Beqzc(lhs, label);
+ break;
+ case kCondNE:
+ case kCondA: // > 0 if non-zero
+ __ Bnezc(lhs, label);
+ break;
+ case kCondLT:
+ __ Bltzc(lhs, label);
+ break;
+ case kCondGE:
+ __ Bgezc(lhs, label);
+ break;
+ case kCondLE:
+ __ Blezc(lhs, label);
+ break;
+ case kCondGT:
+ __ Bgtzc(lhs, label);
+ break;
+ case kCondB: // always false
+ break;
+ case kCondAE: // always true
+ __ Bc(label);
+ break;
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst64(rhs_reg, rhs_imm);
+ }
+ switch (cond) {
+ case kCondEQ:
+ __ Beqc(lhs, rhs_reg, label);
+ break;
+ case kCondNE:
+ __ Bnec(lhs, rhs_reg, label);
+ break;
+ case kCondLT:
+ __ Bltc(lhs, rhs_reg, label);
+ break;
+ case kCondGE:
+ __ Bgec(lhs, rhs_reg, label);
+ break;
+ case kCondLE:
+ __ Bgec(rhs_reg, lhs, label);
+ break;
+ case kCondGT:
+ __ Bltc(rhs_reg, lhs, label);
+ break;
+ case kCondB:
+ __ Bltuc(lhs, rhs_reg, label);
+ break;
+ case kCondAE:
+ __ Bgeuc(lhs, rhs_reg, label);
+ break;
+ case kCondBE:
+ __ Bgeuc(rhs_reg, lhs, label);
+ break;
+ case kCondA:
+ __ Bltuc(rhs_reg, lhs, label);
+ break;
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* locations,
+ Mips64Label* label) {
+ FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
+ FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
+ if (type == Primitive::kPrimFloat) {
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondNE:
+ __ CmpEqS(FTMP, lhs, rhs);
+ __ Bc1eqz(FTMP, label);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtS(FTMP, lhs, rhs);
+ } else {
+ __ CmpUltS(FTMP, lhs, rhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeS(FTMP, lhs, rhs);
+ } else {
+ __ CmpUleS(FTMP, lhs, rhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltS(FTMP, rhs, lhs);
+ } else {
+ __ CmpLtS(FTMP, rhs, lhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleS(FTMP, rhs, lhs);
+ } else {
+ __ CmpLeS(FTMP, rhs, lhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition";
+ }
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ switch (cond) {
+ case kCondEQ:
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondNE:
+ __ CmpEqD(FTMP, lhs, rhs);
+ __ Bc1eqz(FTMP, label);
+ break;
+ case kCondLT:
+ if (gt_bias) {
+ __ CmpLtD(FTMP, lhs, rhs);
+ } else {
+ __ CmpUltD(FTMP, lhs, rhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondLE:
+ if (gt_bias) {
+ __ CmpLeD(FTMP, lhs, rhs);
+ } else {
+ __ CmpUleD(FTMP, lhs, rhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondGT:
+ if (gt_bias) {
+ __ CmpUltD(FTMP, rhs, lhs);
+ } else {
+ __ CmpLtD(FTMP, rhs, lhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ case kCondGE:
+ if (gt_bias) {
+ __ CmpUleD(FTMP, rhs, lhs);
+ } else {
+ __ CmpLeD(FTMP, rhs, lhs);
+ }
+ __ Bc1nez(FTMP, label);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected non-floating-point condition";
+ }
+ }
+}
+
void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
size_t condition_input_index,
Mips64Label* true_target,
@@ -2420,97 +2679,27 @@
// The condition instruction has not been materialized, use its inputs as
// the comparison and its condition as the branch condition.
HCondition* condition = cond->AsCondition();
+ Primitive::Type type = condition->InputAt(0)->GetType();
+ LocationSummary* locations = cond->GetLocations();
+ IfCondition if_cond = condition->GetCondition();
+ Mips64Label* branch_target = true_target;
- GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
- Location rhs_location = condition->GetLocations()->InAt(1);
- GpuRegister rhs_reg = ZERO;
- int32_t rhs_imm = 0;
- bool use_imm = rhs_location.IsConstant();
- if (use_imm) {
- rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
- } else {
- rhs_reg = rhs_location.AsRegister<GpuRegister>();
- }
-
- IfCondition if_cond;
- Mips64Label* non_fallthrough_target;
if (true_target == nullptr) {
if_cond = condition->GetOppositeCondition();
- non_fallthrough_target = false_target;
- } else {
- if_cond = condition->GetCondition();
- non_fallthrough_target = true_target;
+ branch_target = false_target;
}
- if (use_imm && rhs_imm == 0) {
- switch (if_cond) {
- case kCondEQ:
- __ Beqzc(lhs, non_fallthrough_target);
- break;
- case kCondNE:
- __ Bnezc(lhs, non_fallthrough_target);
- break;
- case kCondLT:
- __ Bltzc(lhs, non_fallthrough_target);
- break;
- case kCondGE:
- __ Bgezc(lhs, non_fallthrough_target);
- break;
- case kCondLE:
- __ Blezc(lhs, non_fallthrough_target);
- break;
- case kCondGT:
- __ Bgtzc(lhs, non_fallthrough_target);
- break;
- case kCondB:
- break; // always false
- case kCondBE:
- __ Beqzc(lhs, non_fallthrough_target); // <= 0 if zero
- break;
- case kCondA:
- __ Bnezc(lhs, non_fallthrough_target); // > 0 if non-zero
- break;
- case kCondAE:
- __ Bc(non_fallthrough_target); // always true
- break;
- }
- } else {
- if (use_imm) {
- rhs_reg = TMP;
- __ LoadConst32(rhs_reg, rhs_imm);
- }
- switch (if_cond) {
- case kCondEQ:
- __ Beqc(lhs, rhs_reg, non_fallthrough_target);
- break;
- case kCondNE:
- __ Bnec(lhs, rhs_reg, non_fallthrough_target);
- break;
- case kCondLT:
- __ Bltc(lhs, rhs_reg, non_fallthrough_target);
- break;
- case kCondGE:
- __ Bgec(lhs, rhs_reg, non_fallthrough_target);
- break;
- case kCondLE:
- __ Bgec(rhs_reg, lhs, non_fallthrough_target);
- break;
- case kCondGT:
- __ Bltc(rhs_reg, lhs, non_fallthrough_target);
- break;
- case kCondB:
- __ Bltuc(lhs, rhs_reg, non_fallthrough_target);
- break;
- case kCondAE:
- __ Bgeuc(lhs, rhs_reg, non_fallthrough_target);
- break;
- case kCondBE:
- __ Bgeuc(rhs_reg, lhs, non_fallthrough_target);
- break;
- case kCondA:
- __ Bltuc(rhs_reg, lhs, non_fallthrough_target);
- break;
- }
+ switch (type) {
+ default:
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
+ break;
+ case Primitive::kPrimLong:
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
+ break;
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
+ break;
}
}
@@ -2556,6 +2745,14 @@
/* false_target */ nullptr);
}
+void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ codegen_->RecordPcInfo(info, info->GetDexPc());
+}
+
void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info ATTRIBUTE_UNUSED) {
LocationSummary* locations =
@@ -3886,83 +4083,83 @@
}
void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
@@ -3991,17 +4188,34 @@
GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
- // Create a series of compare/jumps.
+ // Create a set of compare/jumps.
+ GpuRegister temp_reg = TMP;
+ if (IsInt<16>(-lower_bound)) {
+ __ Addiu(temp_reg, value_reg, -lower_bound);
+ } else {
+ __ LoadConst32(AT, -lower_bound);
+ __ Addu(temp_reg, value_reg, AT);
+ }
+ // Jump to the default block if the index is negative.
+ // Note: We don't check the case where the index is positive while value < lower_bound, because
+ // in that case index >= num_entries must be true, so we can save one branch instruction.
+ __ Bltzc(temp_reg, codegen_->GetLabelOf(default_block));
+
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int32_t i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- Mips64Label* succ = codegen_->GetLabelOf(successors[i]);
- if (case_value == 0) {
- __ Beqzc(value_reg, succ);
- } else {
- __ LoadConst32(TMP, case_value);
- __ Beqc(value_reg, TMP, succ);
- }
+ // Jump to successors[0] if value == lower_bound.
+ __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[0]));
+ int32_t last_index = 0;
+ for (; num_entries - last_index > 2; last_index += 2) {
+ __ Addiu(temp_reg, temp_reg, -2);
+ // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
+ __ Bltzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
+ // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
+ __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
+ }
+ if (num_entries - last_index == 2) {
+ // The last missing case_value.
+ __ Addiu(temp_reg, temp_reg, -1);
+ __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
}
// And the default for any other value.
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 85e3a4a..60ff96d 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -189,6 +189,7 @@
private:
void HandleInvoke(HInvoke* invoke);
void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
@@ -224,6 +225,7 @@
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleCondition(HCondition* instruction);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
@@ -237,6 +239,16 @@
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
+ void GenerateIntLongCompare(IfCondition cond, bool is64bit, LocationSummary* locations);
+ void GenerateIntLongCompareAndBranch(IfCondition cond,
+ bool is64bit,
+ LocationSummary* locations,
+ Mips64Label* label);
+ void GenerateFpCompareAndBranch(IfCondition cond,
+ bool gt_bias,
+ Primitive::Type type,
+ LocationSummary* locations,
+ Mips64Label* label);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
Mips64Assembler* const assembler_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index bc3256e..fd18917 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -42,7 +42,6 @@
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = EAX;
-
static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
static constexpr int kC2ConditionMask = 0x400;
@@ -1555,7 +1554,7 @@
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
- // LHS is guaranteed to be in a register (see LocationsBuilderX86::VisitCondition).
+ // LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition).
if (rhs.IsRegister()) {
__ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
} else if (rhs.IsConstant()) {
@@ -1617,6 +1616,14 @@
/* false_target */ nullptr);
}
+void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ codegen_->RecordPcInfo(info, info->GetDexPc());
+}
+
void LocationsBuilderX86::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
@@ -1660,7 +1667,7 @@
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
-void LocationsBuilderX86::VisitCondition(HCondition* cond) {
+void LocationsBuilderX86::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
@@ -1693,7 +1700,7 @@
}
}
-void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) {
+void InstructionCodeGeneratorX86::HandleCondition(HCondition* cond) {
if (!cond->NeedsMaterialization()) {
return;
}
@@ -1754,83 +1761,83 @@
}
void LocationsBuilderX86::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
@@ -4157,7 +4164,7 @@
*/
switch (kind) {
case MemBarrierKind::kAnyAny: {
- __ mfence();
+ MemoryFence();
break;
}
case MemBarrierKind::kAnyStore:
@@ -6752,31 +6759,67 @@
locations->SetInAt(0, Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
- int32_t lower_bound = switch_instr->GetStartValue();
- int32_t num_entries = switch_instr->GetNumEntries();
- LocationSummary* locations = switch_instr->GetLocations();
- Register value_reg = locations->InAt(0).AsRegister<Register>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+void InstructionCodeGeneratorX86::GenPackedSwitchWithCompares(Register value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block) {
+ // Figure out the correct compare values and jump conditions.
+ // Handle the first compare/branch as a special case because it might
+ // jump to the default case.
+ DCHECK_GT(num_entries, 2u);
+ Condition first_condition;
+ uint32_t index;
+ const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
+ if (lower_bound != 0) {
+ first_condition = kLess;
+ __ cmpl(value_reg, Immediate(lower_bound));
+ __ j(first_condition, codegen_->GetLabelOf(default_block));
+ __ j(kEqual, codegen_->GetLabelOf(successors[0]));
- // Create a series of compare/jumps.
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- if (case_value == 0) {
- __ testl(value_reg, value_reg);
- } else {
- __ cmpl(value_reg, Immediate(case_value));
- }
- __ j(kEqual, codegen_->GetLabelOf(successors[i]));
+ index = 1;
+ } else {
+ // Handle all the compare/jumps below.
+ first_condition = kBelow;
+ index = 0;
+ }
+
+ // Handle the rest of the compare/jumps.
+ for (; index + 1 < num_entries; index += 2) {
+ int32_t compare_to_value = lower_bound + index + 1;
+ __ cmpl(value_reg, Immediate(compare_to_value));
+ // Jump to successors[index] if value < case_value[index].
+ __ j(first_condition, codegen_->GetLabelOf(successors[index]));
+ // Jump to successors[index + 1] if value == case_value[index + 1].
+ __ j(kEqual, codegen_->GetLabelOf(successors[index + 1]));
+ }
+
+ if (index != num_entries) {
+ // There are an odd number of entries. Handle the last one.
+ DCHECK_EQ(index + 1, num_entries);
+ __ cmpl(value_reg, Immediate(lower_bound + index));
+ __ j(kEqual, codegen_->GetLabelOf(successors[index]));
}
// And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
- __ jmp(codegen_->GetLabelOf(default_block));
+ if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
+ __ jmp(codegen_->GetLabelOf(default_block));
}
}
+void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ uint32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+
+ GenPackedSwitchWithCompares(value_reg,
+ lower_bound,
+ num_entries,
+ switch_instr->GetBlock(),
+ switch_instr->GetDefaultBlock());
+}
+
void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
@@ -6791,11 +6834,20 @@
void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
- int32_t num_entries = switch_instr->GetNumEntries();
+ uint32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
Register value_reg = locations->InAt(0).AsRegister<Register>();
HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ if (num_entries <= kPackedSwitchJumpTableThreshold) {
+ GenPackedSwitchWithCompares(value_reg,
+ lower_bound,
+ num_entries,
+ switch_instr->GetBlock(),
+ default_block);
+ return;
+ }
+
// Optimizing has a jump area.
Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
Register constant_area = locations->InAt(1).AsRegister<Register>();
@@ -6807,7 +6859,7 @@
}
// Is the value in range?
- DCHECK_GE(num_entries, 1);
+ DCHECK_GE(num_entries, 1u);
__ cmpl(value_reg, Immediate(num_entries - 1));
__ j(kAbove, codegen_->GetLabelOf(default_block));
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c292fa..3d34317 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_H_
+#include "arch/x86/instruction_set_features_x86.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
@@ -166,6 +167,7 @@
private:
void HandleBitwiseOperation(HBinaryOperation* instruction);
void HandleInvoke(HInvoke* invoke);
+ void HandleCondition(HCondition* condition);
void HandleShift(HBinaryOperation* instruction);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
@@ -195,6 +197,11 @@
X86Assembler* GetAssembler() const { return assembler_; }
+ // The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump
+ // table version generates 7 instructions and num_entries literals. Compare/jump sequence will
+ // generate less code/data with a small num_entries.
+ static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5;
+
private:
// Generate code for the given suspend check. If not null, `successor`
// is the block to branch to if the suspend check is not needed, and after
@@ -207,6 +214,7 @@
void DivByPowerOfTwo(HDiv* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateRemFP(HRem* rem);
+ void HandleCondition(HCondition* condition);
void HandleShift(HBinaryOperation* instruction);
void GenerateShlLong(const Location& loc, Register shifter);
void GenerateShrLong(const Location& loc, Register shifter);
@@ -269,6 +277,11 @@
void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label);
void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
+ void GenPackedSwitchWithCompares(Register value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
@@ -496,6 +509,19 @@
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
+ // Ensure that prior stores complete to memory before subsequent loads.
+ // The locked add implementation will avoid serializing device memory, but will
+ // touch (but not change) the top of the stack.
+ // The 'non_temporal' parameter should be used to ensure ordering of non-temporal stores.
+ void MemoryFence(bool non_temporal = false) {
+ if (!non_temporal && isa_features_.PrefersLockedAddSynchronization()) {
+ assembler_.lock()->addl(Address(ESP, 0), Immediate(0));
+ } else {
+ assembler_.mfence();
+ }
+ }
+
+
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 92cef5f..7c94a8c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -41,6 +41,10 @@
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = RDI;
+// The compare/jump sequence will generate about (1.5 * num_entries) instructions. A jump
+// table version generates 7 instructions and num_entries literals. Compare/jump sequence will
+// generate less code/data with a small num_entries.
+static constexpr uint32_t kPackedSwitchJumpTableThreshold = 5;
static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 };
static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 };
@@ -782,7 +786,7 @@
switch (invoke->GetMethodLoadKind()) {
case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
// temp = thread->string_init_entrypoint
- __ gs()->movl(temp.AsRegister<CpuRegister>(),
+ __ gs()->movq(temp.AsRegister<CpuRegister>(),
Address::Absolute(invoke->GetStringInitOffset(), /* no_rip */ true));
break;
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
@@ -1596,6 +1600,14 @@
/* false_target */ nullptr);
}
+void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ new (GetGraph()->GetArena()) LocationSummary(info);
+}
+
+void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
+ codegen_->RecordPcInfo(info, info->GetDexPc());
+}
+
void LocationsBuilderX86_64::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
@@ -1639,7 +1651,7 @@
void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
-void LocationsBuilderX86_64::VisitCondition(HCondition* cond) {
+void LocationsBuilderX86_64::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
// Handle the long/FP comparisons made in instruction simplification.
@@ -1663,7 +1675,7 @@
}
}
-void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* cond) {
+void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) {
if (!cond->NeedsMaterialization()) {
return;
}
@@ -1761,83 +1773,83 @@
}
void LocationsBuilderX86_64::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitEqual(HEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitNotEqual(HNotEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitLessThan(HLessThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitGreaterThan(HGreaterThan* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitBelow(HBelow* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitAbove(HAbove* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
- VisitCondition(comp);
+ HandleCondition(comp);
}
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
@@ -4029,7 +4041,7 @@
*/
switch (kind) {
case MemBarrierKind::kAnyAny: {
- __ mfence();
+ MemoryFence();
break;
}
case MemBarrierKind::kAnyStore:
@@ -6331,11 +6343,58 @@
void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
- int32_t num_entries = switch_instr->GetNumEntries();
+ uint32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Should we generate smaller inline compare/jumps?
+ if (num_entries <= kPackedSwitchJumpTableThreshold) {
+ // Figure out the correct compare values and jump conditions.
+ // Handle the first compare/branch as a special case because it might
+ // jump to the default case.
+ DCHECK_GT(num_entries, 2u);
+ Condition first_condition;
+ uint32_t index;
+ const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+ if (lower_bound != 0) {
+ first_condition = kLess;
+ __ cmpl(value_reg_in, Immediate(lower_bound));
+ __ j(first_condition, codegen_->GetLabelOf(default_block));
+ __ j(kEqual, codegen_->GetLabelOf(successors[0]));
+
+ index = 1;
+ } else {
+ // Handle all the compare/jumps below.
+ first_condition = kBelow;
+ index = 0;
+ }
+
+ // Handle the rest of the compare/jumps.
+ for (; index + 1 < num_entries; index += 2) {
+ int32_t compare_to_value = lower_bound + index + 1;
+ __ cmpl(value_reg_in, Immediate(compare_to_value));
+ // Jump to successors[index] if value < case_value[index].
+ __ j(first_condition, codegen_->GetLabelOf(successors[index]));
+ // Jump to successors[index + 1] if value == case_value[index + 1].
+ __ j(kEqual, codegen_->GetLabelOf(successors[index + 1]));
+ }
+
+ if (index != num_entries) {
+ // There are an odd number of entries. Handle the last one.
+ DCHECK_EQ(index + 1, num_entries);
+ __ cmpl(value_reg_in, Immediate(static_cast<int32_t>(lower_bound + index)));
+ __ j(kEqual, codegen_->GetLabelOf(successors[index]));
+ }
+
+ // And the default for any other value.
+ if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+ __ jmp(codegen_->GetLabelOf(default_block));
+ }
+ return;
+ }
// Remove the bias, if needed.
Register value_reg_out = value_reg_in.AsRegister();
@@ -6346,7 +6405,6 @@
CpuRegister value_reg(value_reg_out);
// Is the value in range?
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
__ cmpl(value_reg, Immediate(num_entries - 1));
__ j(kAbove, codegen_->GetLabelOf(default_block));
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dda9ea2..9995416 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_X86_64_H_
+#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_generator.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
@@ -171,6 +172,7 @@
private:
void HandleInvoke(HInvoke* invoke);
void HandleBitwiseOperation(HBinaryOperation* operation);
+ void HandleCondition(HCondition* condition);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction);
@@ -212,6 +214,7 @@
void DivByPowerOfTwo(HDiv* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
+ void HandleCondition(HCondition* condition);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction,
@@ -479,6 +482,18 @@
int64_t v,
HInstruction* instruction);
+ // Ensure that prior stores complete to memory before subsequent loads.
+ // The locked add implementation will avoid serializing device memory, but will
+ // touch (but not change) the top of the stack. The locked add should not be used for
+ // ordering non-temporal stores.
+ void MemoryFence(bool force_mfence = false) {
+ if (!force_mfence && isa_features_.PrefersLockedAddSynchronization()) {
+ assembler_.lock()->addl(Address(CpuRegister(RSP), 0), Immediate(0));
+ } else {
+ assembler_.mfence();
+ }
+ }
+
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index e469c8d..a8f65bf 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -32,7 +32,7 @@
/**
* Fixture class for the constant folding and dce tests.
*/
-class ConstantFoldingTest : public testing::Test {
+class ConstantFoldingTest : public CommonCompilerTest {
public:
ConstantFoldingTest() : pool_(), allocator_(&pool_) {
graph_ = CreateGraph(&allocator_);
@@ -56,7 +56,7 @@
const std::string& expected_after_dce,
std::function<void(HGraph*)> check_after_cf) {
ASSERT_NE(graph_, nullptr);
- graph_->TryBuildingSsa();
+ TransformToSsa(graph_);
StringPrettyPrinter printer_before(graph_);
printer_before.VisitInsertionOrder();
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 02e5dab..67ff87a 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -165,6 +165,7 @@
if (!inst->HasSideEffects()
&& !inst->CanThrow()
&& !inst->IsSuspendCheck()
+ && !inst->IsNativeDebugInfo()
// If we added an explicit barrier then we should keep it.
&& !inst->IsMemoryBarrier()
&& !inst->IsParameterValue()
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 2c6a1ef..f0f98ef 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -26,6 +26,8 @@
namespace art {
+class DeadCodeEliminationTest : public CommonCompilerTest {};
+
static void TestCode(const uint16_t* data,
const std::string& expected_before,
const std::string& expected_after) {
@@ -34,7 +36,7 @@
HGraph* graph = CreateCFG(&allocator, data);
ASSERT_NE(graph, nullptr);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
StringPrettyPrinter printer_before(graph);
printer_before.VisitInsertionOrder();
@@ -55,7 +57,6 @@
ASSERT_EQ(actual_after, expected_after);
}
-
/**
* Small three-register program.
*
@@ -69,7 +70,7 @@
* L1: v2 <- v0 + v1 5. add-int v2, v0, v1
* return-void 7. return
*/
-TEST(DeadCodeElimination, AdditionAndConditionalJump) {
+TEST_F(DeadCodeEliminationTest, AdditionAndConditionalJump) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
@@ -131,7 +132,7 @@
* L3: v2 <- v1 + 4 11. add-int/lit16 v2, v1, #+4
* return 13. return-void
*/
-TEST(DeadCodeElimination, AdditionsAndInconditionalJumps) {
+TEST_F(DeadCodeEliminationTest, AdditionsAndInconditionalJumps) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index dfc363f..f3c1dbe 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -24,6 +24,7 @@
#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "base/stringprintf.h"
+#include "handle_scope-inl.h"
namespace art {
@@ -594,6 +595,17 @@
}
}
}
+
+ // Ensure that reference type instructions have reference type info.
+ if (instruction->GetType() == Primitive::kPrimNot) {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!instruction->GetReferenceTypeInfo().IsValid()) {
+ AddError(StringPrintf("Reference type instruction %s:%d does not have "
+ "valid reference type information.",
+ instruction->DebugName(),
+ instruction->GetId()));
+ }
+ }
}
static Primitive::Type PrimitiveKind(Primitive::Type type) {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index fee56c7..d10df4c 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -17,8 +17,6 @@
#include "graph_checker.h"
#include "optimizing_unit_test.h"
-#include "gtest/gtest.h"
-
namespace art {
/**
@@ -43,7 +41,6 @@
return graph;
}
-
static void TestCode(const uint16_t* data) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
@@ -61,8 +58,7 @@
HGraph* graph = CreateCFG(&allocator, data);
ASSERT_NE(graph, nullptr);
- graph->BuildDominatorTree();
- graph->TransformToSsa();
+ TransformToSsa(graph);
SSAChecker ssa_checker(graph);
ssa_checker.Run();
@@ -145,7 +141,9 @@
ASSERT_FALSE(graph_checker.IsValid());
}
-TEST(SSAChecker, SSAPhi) {
+class SSACheckerTest : public CommonCompilerTest {};
+
+TEST_F(SSACheckerTest, SSAPhi) {
// This code creates one Phi function during the conversion to SSA form.
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index e9fdb84..5f1328f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -30,6 +30,7 @@
#include "optimization.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
@@ -505,7 +506,7 @@
} else {
StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId();
}
- } else if ((IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName)
+ } else if ((IsPass(SsaBuilder::kSsaBuilderPassName)
|| IsPass(HInliner::kInlinerPassName))
&& (instruction->GetType() == Primitive::kPrimNot)) {
ReferenceTypeInfo info = instruction->IsLoadClass()
@@ -519,21 +520,15 @@
StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
} else if (instruction->IsLoadClass()) {
StartAttributeStream("klass") << "unresolved";
- } else if (instruction->IsNullConstant()) {
+ } else {
// The NullConstant may be added to the graph during other passes that happen between
// ReferenceTypePropagation and Inliner (e.g. InstructionSimplifier). If the inliner
// doesn't run or doesn't inline anything, the NullConstant remains untyped.
// So we should check NullConstants for validity only after reference type propagation.
- //
- // Note: The infrastructure to properly type NullConstants everywhere is to complex to add
- // for the benefits.
- StartAttributeStream("klass") << "not_set";
- DCHECK(!is_after_pass_
- || !IsPass(ReferenceTypePropagation::kReferenceTypePropagationPassName))
- << " Expected a valid rti after reference type propagation";
- } else {
- DCHECK(!is_after_pass_)
- << "Expected a valid rti after reference type propagation";
+ DCHECK(graph_in_bad_state_ ||
+ (!is_after_pass_ && IsPass(SsaBuilder::kSsaBuilderPassName)))
+ << instruction->DebugName() << instruction->GetId() << " has invalid rti "
+ << (is_after_pass_ ? "after" : "before") << " pass " << pass_name_;
}
}
if (disasm_info_ != nullptr) {
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index de60cf2..1f4eaf3 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -21,14 +21,14 @@
#include "optimizing_unit_test.h"
#include "side_effects_analysis.h"
-#include "gtest/gtest.h"
-
namespace art {
-TEST(GVNTest, LocalFieldElimination) {
+class GVNTest : public CommonCompilerTest {};
+
+TEST_F(GVNTest, LocalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -100,7 +100,7 @@
ASSERT_EQ(different_offset->GetBlock(), block);
ASSERT_EQ(use_after_kill->GetBlock(), block);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
GVNOptimization(graph, side_effects).Run();
@@ -110,10 +110,10 @@
ASSERT_EQ(use_after_kill->GetBlock(), block);
}
-TEST(GVNTest, GlobalFieldElimination) {
+TEST_F(GVNTest, GlobalFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -182,7 +182,7 @@
0));
join->AddInstruction(new (&allocator) HExit());
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
GVNOptimization(graph, side_effects).Run();
@@ -193,10 +193,10 @@
ASSERT_TRUE(join->GetFirstInstruction()->IsExit());
}
-TEST(GVNTest, LoopFieldElimination) {
+TEST_F(GVNTest, LoopFieldElimination) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
HGraph* graph = CreateGraph(&allocator);
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
@@ -288,7 +288,7 @@
ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
ASSERT_EQ(field_get_in_exit->GetBlock(), exit);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
{
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -316,10 +316,10 @@
}
// Test that inner loops affect the side effects of the outer loop.
-TEST(GVNTest, LoopSideEffects) {
+TEST_F(GVNTest, LoopSideEffects) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
static const SideEffects kCanTriggerGC = SideEffects::CanTriggerGC();
@@ -364,7 +364,7 @@
inner_loop_exit->AddInstruction(new (&allocator) HGoto());
outer_loop_exit->AddInstruction(new (&allocator) HExit());
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn(
*outer_loop_header->GetLoopInformation()));
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 19e6cbd..eef6cef 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -706,7 +706,6 @@
}
}
if (loop->IsDefinedOutOfTheLoop(instruction)) {
- DCHECK(instruction->GetBlock()->Dominates(loop->GetPreHeader()));
InductionInfo* info = CreateInvariantFetch(instruction);
AssignInfo(loop, instruction, info);
return info;
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 5de94f4..776c115 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -18,7 +18,6 @@
#include "base/arena_allocator.h"
#include "builder.h"
-#include "gtest/gtest.h"
#include "induction_var_analysis.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
@@ -28,7 +27,7 @@
/**
* Fixture class for the InductionVarAnalysis tests.
*/
-class InductionVarAnalysisTest : public testing::Test {
+class InductionVarAnalysisTest : public CommonCompilerTest {
public:
InductionVarAnalysisTest() : pool_(), allocator_(&pool_) {
graph_ = CreateGraph(&allocator_);
@@ -102,6 +101,7 @@
basic_[d] = new (&allocator_) HLocal(d);
entry_->AddInstruction(basic_[d]);
loop_preheader_[d]->AddInstruction(new (&allocator_) HStoreLocal(basic_[d], constant0_));
+ loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto());
HInstruction* load = new (&allocator_) HLoadLocal(basic_[d], Primitive::kPrimInt);
loop_header_[d]->AddInstruction(load);
HInstruction* compare = new (&allocator_) HLessThan(load, constant100_);
@@ -168,7 +168,7 @@
// Performs InductionVarAnalysis (after proper set up).
void PerformInductionVarAnalysis() {
- ASSERT_TRUE(graph_->TryBuildingSsa());
+ TransformToSsa(graph_);
iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
iva_->Run();
}
@@ -212,7 +212,7 @@
// ..
// }
BuildLoopNest(10);
- ASSERT_TRUE(graph_->TryBuildingSsa());
+ TransformToSsa(graph_);
ASSERT_EQ(entry_->GetLoopInformation(), nullptr);
for (int d = 0; d < 1; d++) {
ASSERT_EQ(loop_preheader_[d]->GetLoopInformation(),
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 5c0bdd7..eda9c01 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -16,7 +16,6 @@
#include "base/arena_allocator.h"
#include "builder.h"
-#include "gtest/gtest.h"
#include "induction_var_analysis.h"
#include "induction_var_range.h"
#include "nodes.h"
@@ -29,7 +28,7 @@
/**
* Fixture class for the InductionVarRange tests.
*/
-class InductionVarRangeTest : public testing::Test {
+class InductionVarRangeTest : public CommonCompilerTest {
public:
InductionVarRangeTest()
: pool_(),
@@ -113,7 +112,7 @@
/** Constructs SSA and performs induction variable analysis. */
void PerformInductionVarAnalysis() {
- ASSERT_TRUE(graph_->TryBuildingSsa());
+ TransformToSsa(graph_);
iva_->Run();
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a4dcb3a..0e50416 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -33,6 +33,7 @@
#include "reference_type_propagation.h"
#include "register_allocator.h"
#include "sharpening.h"
+#include "ssa_builder.h"
#include "ssa_phi_elimination.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
@@ -372,6 +373,18 @@
bool HInliner::TryInline(HInvoke* invoke_instruction, ArtMethod* method, bool do_rtp) {
const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile();
+
+ // Check whether we're allowed to inline. The outermost compilation unit is the relevant
+ // dex file here (though the transitivity of an inline chain would allow checking the caller).
+ if (!compiler_driver_->MayInline(method->GetDexFile(),
+ outer_compilation_unit_.GetDexFile())) {
+ VLOG(compiler) << "Won't inline " << PrettyMethod(method) << " in "
+ << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
+ << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
+ << method->GetDexFile()->GetLocation();
+ return false;
+ }
+
uint32_t method_index = FindMethodIndexIn(
method, caller_dex_file, invoke_instruction->GetDexMethodIndex());
if (method_index == DexFile::kDexNoIndex) {
@@ -514,7 +527,7 @@
return false;
}
- if (!callee_graph->TryBuildingSsa()) {
+ if (callee_graph->TryBuildingSsa(handles_) != kBuildSsaSuccess) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be transformed to SSA";
return false;
@@ -549,14 +562,12 @@
// Run simple optimizations on the graph.
HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
- ReferenceTypePropagation type_propagation(callee_graph, handles_);
HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
InstructionSimplifier simplify(callee_graph, stats_);
IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
HOptimization* optimizations[] = {
&intrinsics,
- &type_propagation,
&sharpening,
&simplify,
&fold,
@@ -677,42 +688,36 @@
DCHECK_EQ(graph_, return_replacement->GetBlock()->GetGraph());
}
- // When merging the graph we might create a new NullConstant in the caller graph which does
- // not have the chance to be typed. We assign the correct type here so that we can keep the
- // assertion that every reference has a valid type. This also simplifies checks along the way.
- HNullConstant* null_constant = graph_->GetNullConstant();
- if (!null_constant->GetReferenceTypeInfo().IsValid()) {
- ReferenceTypeInfo::TypeHandle obj_handle =
- handles_->NewHandle(class_linker->GetClassRoot(ClassLinker::kJavaLangObject));
- null_constant->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(obj_handle, false /* is_exact */));
- }
-
// Check the integrity of reference types and run another type propagation if needed.
- if ((return_replacement != nullptr)
- && (return_replacement->GetType() == Primitive::kPrimNot)) {
- if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
- // Make sure that we have a valid type for the return. We may get an invalid one when
- // we inline invokes with multiple branches and create a Phi for the result.
- // TODO: we could be more precise by merging the phi inputs but that requires
- // some functionality from the reference type propagation.
- DCHECK(return_replacement->IsPhi());
- size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- ReferenceTypeInfo::TypeHandle return_handle =
- handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
- return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
- return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
- }
+ if (return_replacement != nullptr) {
+ if (return_replacement->GetType() == Primitive::kPrimNot) {
+ if (!return_replacement->GetReferenceTypeInfo().IsValid()) {
+ // Make sure that we have a valid type for the return. We may get an invalid one when
+ // we inline invokes with multiple branches and create a Phi for the result.
+ // TODO: we could be more precise by merging the phi inputs but that requires
+ // some functionality from the reference type propagation.
+ DCHECK(return_replacement->IsPhi());
+ size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ ReferenceTypeInfo::TypeHandle return_handle =
+ handles_->NewHandle(resolved_method->GetReturnType(true /* resolve */, pointer_size));
+ return_replacement->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
+ return_handle, return_handle->CannotBeAssignedFromOtherTypes() /* is_exact */));
+ }
- if (do_rtp) {
- // If the return type is a refinement of the declared type run the type propagation again.
- ReferenceTypeInfo return_rti = return_replacement->GetReferenceTypeInfo();
- ReferenceTypeInfo invoke_rti = invoke_instruction->GetReferenceTypeInfo();
- if (invoke_rti.IsStrictSupertypeOf(return_rti)
- || (return_rti.IsExact() && !invoke_rti.IsExact())
- || !return_replacement->CanBeNull()) {
- ReferenceTypePropagation rtp_fixup(graph_, handles_);
- rtp_fixup.Run();
+ if (do_rtp) {
+ // If the return type is a refinement of the declared type run the type propagation again.
+ ReferenceTypeInfo return_rti = return_replacement->GetReferenceTypeInfo();
+ ReferenceTypeInfo invoke_rti = invoke_instruction->GetReferenceTypeInfo();
+ if (invoke_rti.IsStrictSupertypeOf(return_rti)
+ || (return_rti.IsExact() && !invoke_rti.IsExact())
+ || !return_replacement->CanBeNull()) {
+ ReferenceTypePropagation(graph_, handles_).Run();
+ }
+ }
+ } else if (return_replacement->IsInstanceOf()) {
+ if (do_rtp) {
+ // Inlining InstanceOf into an If may put a tighter bound on reference types.
+ ReferenceTypePropagation(graph_, handles_).Run();
}
}
}
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 67097de..c504ded 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -777,13 +777,6 @@
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// Try to fold an HCompare into this HCondition.
- // This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS64.
- InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- if (instruction_set == kMips64) {
- return;
- }
-
HInstruction* left = condition->GetLeft();
HInstruction* right = condition->GetRight();
// We can only replace an HCondition which compares a Compare to 0.
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 6a34b13..6bbc751 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -49,6 +49,7 @@
GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
HArm64IntermediateAddress* address =
new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
+ address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 0);
// Both instructions must depend on GC to prevent any instruction that can
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 7127215..c6da9a3 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -36,8 +36,8 @@
switch (i) {
case Intrinsics::kNone:
return kInterface; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
- case Intrinsics::k ## Name: \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
+ case Intrinsics::k ## Name: \
return IsStatic;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -52,8 +52,8 @@
switch (i) {
case Intrinsics::kNone:
return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
- case Intrinsics::k ## Name: \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
+ case Intrinsics::k ## Name: \
return NeedsEnvironmentOrCache;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -63,6 +63,38 @@
return kNeedsEnvironmentOrCache;
}
+// Function that returns whether an intrinsic has side effects.
+static inline IntrinsicSideEffects GetSideEffects(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kAllSideEffects;
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
+ case Intrinsics::k ## Name: \
+ return SideEffects;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kAllSideEffects;
+}
+
+// Function that returns whether an intrinsic can throw exceptions.
+static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kCanThrow;
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
+ case Intrinsics::k ## Name: \
+ return Exceptions;
+#include "intrinsics_list.h"
+INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kCanThrow;
+}
+
static Primitive::Type GetType(uint64_t data, bool is_op_size) {
if (is_op_size) {
switch (static_cast<OpSize>(data)) {
@@ -248,7 +280,7 @@
// Thread.currentThread.
case kIntrinsicCurrentThread:
- return Intrinsics::kThreadCurrentThread;
+ return Intrinsics::kThreadCurrentThread;
// Memory.peek.
case kIntrinsicPeek:
@@ -473,7 +505,10 @@
<< PrettyMethod(invoke->GetDexMethodIndex(), invoke->GetDexFile())
<< invoke->DebugName();
} else {
- invoke->SetIntrinsic(intrinsic, NeedsEnvironmentOrCache(intrinsic));
+ invoke->SetIntrinsic(intrinsic,
+ NeedsEnvironmentOrCache(intrinsic),
+ GetSideEffects(intrinsic),
+ GetExceptions(intrinsic));
}
}
}
@@ -487,7 +522,7 @@
case Intrinsics::kNone:
os << "None";
break;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
case Intrinsics::k ## Name: \
os << # Name; \
break;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index e459516..9f50d18 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -27,6 +27,9 @@
class CompilerDriver;
class DexFile;
+// Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751
+static constexpr bool kRoundIsPlusPointFive = false;
+
// Recognize intrinsics from HInvoke nodes.
class IntrinsicsRecognizer : public HOptimization {
public:
@@ -54,9 +57,9 @@
switch (invoke->GetIntrinsic()) {
case Intrinsics::kNone:
return;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \
- case Intrinsics::k ## Name: \
- Visit ## Name(invoke); \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment, SideEffects, Exceptions) \
+ case Intrinsics::k ## Name: \
+ Visit ## Name(invoke); \
return;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -69,7 +72,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironment, SideEffects, Exceptions) \
virtual void Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
}
#include "intrinsics_list.h"
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index e8181bb..4683aee 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -825,8 +825,15 @@
Label loop_head;
__ Bind(&loop_head);
+ // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
+ // the reference stored in the object before attempting the CAS,
+ // similar to the one in the art::Unsafe_compareAndSwapObject JNI
+ // implementation.
+ //
+ // Note that this code is not (yet) used when read barriers are
+ // enabled (see IntrinsicLocationsBuilderARM::VisitUnsafeCASObject).
+ DCHECK(!(type == Primitive::kPrimNot && kEmitCompilerReadBarrier));
__ ldrex(tmp_lo, tmp_ptr);
- // TODO: Do we need a read barrier here when `type == Primitive::kPrimNot`?
__ subs(tmp_lo, tmp_lo, ShifterOperand(expected_lo));
@@ -852,15 +859,17 @@
CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
}
void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
- // The UnsafeCASObject intrinsic does not always work when heap
+ // The UnsafeCASObject intrinsic is missing a read barrier, and
+ // therefore sometimes does not work as expected (b/25883050).
+ // Turn it off temporarily as a quick fix, until the read barrier is
+ // implemented (see TODO in GenCAS below).
+ //
+ // Also, the UnsafeCASObject intrinsic does not always work when heap
// poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
- // off temporarily as a quick fix.
+ // off temporarily as a quick fix (b/26204023).
//
- // TODO(rpl): Fix it and turn it back on.
- //
- // TODO(rpl): Also, we should investigate whether we need a read
- // barrier in the generated code.
- if (kPoisonHeapReferences) {
+ // TODO(rpl): Fix these two issues and re-enable this intrinsic.
+ if (kEmitCompilerReadBarrier || kPoisonHeapReferences) {
return;
}
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 127e9a4..e01b6ff 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -40,7 +40,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -67,7 +67,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 6b34daa..f723940 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -614,7 +614,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateFPToIntPlusTempLocations(arena_, invoke);
+ // See intrinsics.h.
+ if (kRoundIsPlusPointFive) {
+ CreateFPToIntPlusTempLocations(arena_, invoke);
+ }
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -622,7 +625,10 @@
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateFPToIntPlusTempLocations(arena_, invoke);
+ // See intrinsics.h.
+ if (kRoundIsPlusPointFive) {
+ CreateFPToIntPlusTempLocations(arena_, invoke);
+ }
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -1031,10 +1037,15 @@
} else {
__ Dmb(InnerShareable, BarrierWrites);
__ Bind(&loop_head);
- __ Ldxr(tmp_value, MemOperand(tmp_ptr));
- // TODO: Do we need a read barrier here when `type == Primitive::kPrimNot`?
+ // TODO: When `type == Primitive::kPrimNot`, add a read barrier for
+ // the reference stored in the object before attempting the CAS,
+ // similar to the one in the art::Unsafe_compareAndSwapObject JNI
+ // implementation.
+ //
// Note that this code is not (yet) used when read barriers are
// enabled (see IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject).
+ DCHECK(!(type == Primitive::kPrimNot && kEmitCompilerReadBarrier));
+ __ Ldxr(tmp_value, MemOperand(tmp_ptr));
__ Cmp(tmp_value, expected);
__ B(&exit_loop, ne);
__ Stxr(tmp_32, value, MemOperand(tmp_ptr));
@@ -1057,15 +1068,17 @@
CreateIntIntIntIntIntToInt(arena_, invoke);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
- // The UnsafeCASObject intrinsic does not always work when heap
+ // The UnsafeCASObject intrinsic is missing a read barrier, and
+ // therefore sometimes does not work as expected (b/25883050).
+ // Turn it off temporarily as a quick fix, until the read barrier is
+ // implemented (see TODO in GenCAS below).
+ //
+ // Also, the UnsafeCASObject intrinsic does not always work when heap
// poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
- // off temporarily as a quick fix.
+ // off temporarily as a quick fix (b/26204023).
//
- // TODO(rpl): Fix it and turn it back on.
- //
- // TODO(rpl): Also, we should investigate whether we need a read
- // barrier in the generated code.
- if (kPoisonHeapReferences) {
+ // TODO(rpl): Fix these two issues and re-enable this intrinsic.
+ if (kEmitCompilerReadBarrier || kPoisonHeapReferences) {
return;
}
diff --git a/compiler/optimizing/intrinsics_arm64.h b/compiler/optimizing/intrinsics_arm64.h
index 4250ecf..d47448a 100644
--- a/compiler/optimizing/intrinsics_arm64.h
+++ b/compiler/optimizing/intrinsics_arm64.h
@@ -41,7 +41,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -65,7 +65,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 96f43a0..2e87546 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -22,97 +22,97 @@
// environment.
#define INTRINSICS_LIST(V) \
- V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache) \
- V(DoubleLongBitsToDouble, kStatic, kNeedsEnvironmentOrCache) \
- V(FloatFloatToRawIntBits, kStatic, kNeedsEnvironmentOrCache) \
- V(FloatIntBitsToFloat, kStatic, kNeedsEnvironmentOrCache) \
- V(IntegerReverse, kStatic, kNeedsEnvironmentOrCache) \
- V(IntegerReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
- V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
- V(IntegerNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
- V(IntegerRotateRight, kStatic, kNeedsEnvironmentOrCache) \
- V(IntegerRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
- V(LongReverse, kStatic, kNeedsEnvironmentOrCache) \
- V(LongReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
- V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache) \
- V(LongNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache) \
- V(LongRotateRight, kStatic, kNeedsEnvironmentOrCache) \
- V(LongRotateLeft, kStatic, kNeedsEnvironmentOrCache) \
- V(ShortReverseBytes, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAbsDouble, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAbsFloat, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAbsLong, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAbsInt, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMinDoubleDouble, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMinFloatFloat, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMinLongLong, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMinIntInt, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMaxDoubleDouble, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMaxFloatFloat, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMaxLongLong, kStatic, kNeedsEnvironmentOrCache) \
- V(MathMaxIntInt, kStatic, kNeedsEnvironmentOrCache) \
- V(MathCos, kStatic, kNeedsEnvironmentOrCache) \
- V(MathSin, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAcos, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAsin, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAtan, kStatic, kNeedsEnvironmentOrCache) \
- V(MathAtan2, kStatic, kNeedsEnvironmentOrCache) \
- V(MathCbrt, kStatic, kNeedsEnvironmentOrCache) \
- V(MathCosh, kStatic, kNeedsEnvironmentOrCache) \
- V(MathExp, kStatic, kNeedsEnvironmentOrCache) \
- V(MathExpm1, kStatic, kNeedsEnvironmentOrCache) \
- V(MathHypot, kStatic, kNeedsEnvironmentOrCache) \
- V(MathLog, kStatic, kNeedsEnvironmentOrCache) \
- V(MathLog10, kStatic, kNeedsEnvironmentOrCache) \
- V(MathNextAfter, kStatic, kNeedsEnvironmentOrCache) \
- V(MathSinh, kStatic, kNeedsEnvironmentOrCache) \
- V(MathTan, kStatic, kNeedsEnvironmentOrCache) \
- V(MathTanh, kStatic, kNeedsEnvironmentOrCache) \
- V(MathSqrt, kStatic, kNeedsEnvironmentOrCache) \
- V(MathCeil, kStatic, kNeedsEnvironmentOrCache) \
- V(MathFloor, kStatic, kNeedsEnvironmentOrCache) \
- V(MathRint, kStatic, kNeedsEnvironmentOrCache) \
- V(MathRoundDouble, kStatic, kNeedsEnvironmentOrCache) \
- V(MathRoundFloat, kStatic, kNeedsEnvironmentOrCache) \
- V(SystemArrayCopyChar, kStatic, kNeedsEnvironmentOrCache) \
- V(SystemArrayCopy, kStatic, kNeedsEnvironmentOrCache) \
- V(ThreadCurrentThread, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPeekByte, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPeekIntNative, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPeekLongNative, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPeekShortNative, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPokeByte, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPokeIntNative, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPokeLongNative, kStatic, kNeedsEnvironmentOrCache) \
- V(MemoryPokeShortNative, kStatic, kNeedsEnvironmentOrCache) \
- V(StringCharAt, kDirect, kNeedsEnvironmentOrCache) \
- V(StringCompareTo, kDirect, kNeedsEnvironmentOrCache) \
- V(StringEquals, kDirect, kNeedsEnvironmentOrCache) \
- V(StringGetCharsNoCheck, kDirect, kNeedsEnvironmentOrCache) \
- V(StringIndexOf, kDirect, kNeedsEnvironmentOrCache) \
- V(StringIndexOfAfter, kDirect, kNeedsEnvironmentOrCache) \
- V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache) \
- V(StringNewStringFromChars, kStatic, kNeedsEnvironmentOrCache) \
- V(StringNewStringFromString, kStatic, kNeedsEnvironmentOrCache) \
- V(UnsafeCASInt, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeCASLong, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeCASObject, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeGet, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeGetVolatile, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeGetObject, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeGetObjectVolatile, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeGetLong, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafeGetLongVolatile, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePut, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutOrdered, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutVolatile, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutObject, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutObjectOrdered, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutObjectVolatile, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutLong, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutLongOrdered, kDirect, kNeedsEnvironmentOrCache) \
- V(UnsafePutLongVolatile, kDirect, kNeedsEnvironmentOrCache) \
- V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache)
+ V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(DoubleLongBitsToDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(FloatFloatToRawIntBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(FloatIntBitsToFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(IntegerReverse, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(IntegerReverseBytes, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(IntegerNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(IntegerRotateRight, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(IntegerRotateLeft, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(LongReverse, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(LongReverseBytes, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(LongNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(LongRotateRight, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(LongRotateLeft, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(ShortReverseBytes, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAbsDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAbsFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAbsLong, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAbsInt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMinDoubleDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMinFloatFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMinLongLong, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMinIntInt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMaxDoubleDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMaxFloatFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMaxLongLong, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathMaxIntInt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathCos, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathSin, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAcos, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAsin, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAtan, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathAtan2, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathCbrt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathCosh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathExp, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathExpm1, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathHypot, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathLog, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathLog10, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathNextAfter, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathSinh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathTan, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathTanh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathSqrt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathCeil, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathFloor, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathRint, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathRoundDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MathRoundFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(SystemArrayCopyChar, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(SystemArrayCopy, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(ThreadCurrentThread, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \
+ V(MemoryPeekByte, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(MemoryPeekIntNative, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(MemoryPeekLongNative, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(MemoryPeekShortNative, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(MemoryPokeByte, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow) \
+ V(MemoryPokeIntNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow) \
+ V(MemoryPokeLongNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow) \
+ V(MemoryPokeShortNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow) \
+ V(StringCharAt, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringCompareTo, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringEquals, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringGetCharsNoCheck, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringIndexOf, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringIndexOfAfter, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \
+ V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(StringNewStringFromChars, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(StringNewStringFromString, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeCASInt, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeCASLong, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeCASObject, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeGet, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeGetVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeGetObject, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeGetObjectVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeGetLong, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafeGetLongVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePut, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutOrdered, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutObject, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutObjectOrdered, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutObjectVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutLong, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutLongOrdered, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(UnsafePutLongVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \
+ V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow)
#endif // ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_
#undef ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_ // #define is only for lint.
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
index 19ad525..f86b0ef 100644
--- a/compiler/optimizing/intrinsics_mips.h
+++ b/compiler/optimizing/intrinsics_mips.h
@@ -36,7 +36,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -60,7 +60,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 8aa7d9f..8b45ea7 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1299,6 +1299,8 @@
if (type == Primitive::kPrimLong) {
__ Lld(out, TMP);
} else {
+ // Note: We will need a read barrier here, when read barrier
+ // support is added to the MIPS64 back end.
__ Ll(out, TMP);
}
__ Dsubu(out, out, expected); // If we didn't get the 'expected'
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
index 1481d24..4137fbd 100644
--- a/compiler/optimizing/intrinsics_mips64.h
+++ b/compiler/optimizing/intrinsics_mips64.h
@@ -36,7 +36,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -60,7 +60,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index fd454d8..677f2e9 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -720,6 +720,11 @@
// Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble,
// as it needs 64 bit instructions.
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
+ // See intrinsics.h.
+ if (!kRoundIsPlusPointFive) {
+ return;
+ }
+
// Do we have instruction support?
if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -2005,7 +2010,7 @@
}
if (is_volatile) {
- __ mfence();
+ codegen->MemoryFence();
}
if (type == Primitive::kPrimNot) {
@@ -2085,6 +2090,17 @@
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic is missing a read barrier, and
+ // therefore sometimes does not work as expected (b/25883050).
+ // Turn it off temporarily as a quick fix, until the read barrier is
+ // implemented.
+ //
+ // TODO(rpl): Implement a read barrier in GenCAS below and re-enable
+ // this intrinsic.
+ if (kEmitCompilerReadBarrier) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}
@@ -2136,6 +2152,13 @@
__ PoisonHeapReference(value);
}
+ // TODO: Add a read barrier for the reference stored in the object
+ // before attempting the CAS, similar to the one in the
+ // art::Unsafe_compareAndSwapObject JNI implementation.
+ //
+ // Note that this code is not (yet) used when read barriers are
+ // enabled (see IntrinsicLocationsBuilderX86::VisitUnsafeCASObject).
+ DCHECK(!kEmitCompilerReadBarrier);
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
// LOCK CMPXCHG has full barrier semantics, and we don't need
@@ -2145,11 +2168,8 @@
__ setb(kZero, out.AsRegister<Register>());
__ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
- // In the case of the `UnsafeCASObject` intrinsic, accessing an
- // object in the heap with LOCK CMPXCHG does not require a read
- // barrier, as we do not keep a reference to this heap location.
- // However, if heap poisoning is enabled, we need to unpoison the
- // values that were poisoned earlier.
+ // If heap poisoning is enabled, we need to unpoison the values
+ // that were poisoned earlier.
if (kPoisonHeapReferences) {
if (base_equals_value) {
// `value` has been moved to a temporary register, no need to
diff --git a/compiler/optimizing/intrinsics_x86.h b/compiler/optimizing/intrinsics_x86.h
index fefe9c6..08bd197 100644
--- a/compiler/optimizing/intrinsics_x86.h
+++ b/compiler/optimizing/intrinsics_x86.h
@@ -36,7 +36,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -61,7 +61,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ce737e3..690cf3d 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -610,7 +610,10 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundFloat(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ // See intrinsics.h.
+ if (kRoundIsPlusPointFive) {
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ }
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -657,7 +660,10 @@
}
void IntrinsicLocationsBuilderX86_64::VisitMathRoundDouble(HInvoke* invoke) {
- CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ // See intrinsics.h.
+ if (kRoundIsPlusPointFive) {
+ CreateSSE41FPToIntLocations(arena_, invoke, codegen_);
+ }
}
void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
@@ -2080,7 +2086,7 @@
}
if (is_volatile) {
- __ mfence();
+ codegen->MemoryFence();
}
if (type == Primitive::kPrimNot) {
@@ -2150,6 +2156,17 @@
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic is missing a read barrier, and
+ // therefore sometimes does not work as expected (b/25883050).
+ // Turn it off temporarily as a quick fix, until the read barrier is
+ // implemented.
+ //
+ // TODO(rpl): Implement a read barrier in GenCAS below and re-enable
+ // this intrinsic.
+ if (kEmitCompilerReadBarrier) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}
@@ -2200,6 +2217,13 @@
__ PoisonHeapReference(CpuRegister(value_reg));
}
+ // TODO: Add a read barrier for the reference stored in the object
+ // before attempting the CAS, similar to the one in the
+ // art::Unsafe_compareAndSwapObject JNI implementation.
+ //
+ // Note that this code is not (yet) used when read barriers are
+ // enabled (see IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject).
+ DCHECK(!kEmitCompilerReadBarrier);
__ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
// LOCK CMPXCHG has full barrier semantics, and we don't need
@@ -2209,11 +2233,8 @@
__ setcc(kZero, out);
__ movzxb(out, out);
- // In the case of the `UnsafeCASObject` intrinsic, accessing an
- // object in the heap with LOCK CMPXCHG does not require a read
- // barrier, as we do not keep a reference to this heap location.
- // However, if heap poisoning is enabled, we need to unpoison the
- // values that were poisoned earlier.
+ // If heap poisoning is enabled, we need to unpoison the values
+ // that were poisoned earlier.
if (kPoisonHeapReferences) {
if (base_equals_value) {
// `value_reg` has been moved to a temporary register, no need
diff --git a/compiler/optimizing/intrinsics_x86_64.h b/compiler/optimizing/intrinsics_x86_64.h
index 6894e1b..155ff65 100644
--- a/compiler/optimizing/intrinsics_x86_64.h
+++ b/compiler/optimizing/intrinsics_x86_64.h
@@ -36,7 +36,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -61,7 +61,7 @@
// Define visitor methods.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) \
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
void Visit ## Name(HInvoke* invoke) OVERRIDE;
#include "intrinsics_list.h"
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 2bb769a..aa60fd6 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -16,7 +16,6 @@
#include "base/arena_allocator.h"
#include "builder.h"
-#include "gtest/gtest.h"
#include "licm.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
@@ -27,7 +26,7 @@
/**
* Fixture class for the LICM tests.
*/
-class LICMTest : public testing::Test {
+class LICMTest : public CommonCompilerTest {
public:
LICMTest() : pool_(), allocator_(&pool_) {
graph_ = CreateGraph(&allocator_);
@@ -70,16 +69,16 @@
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
loop_header_->AddInstruction(new (&allocator_) HIf(parameter_));
loop_body_->AddInstruction(new (&allocator_) HGoto());
+ return_->AddInstruction(new (&allocator_) HReturnVoid());
exit_->AddInstruction(new (&allocator_) HExit());
}
// Performs LICM optimizations (after proper set up).
void PerformLICM() {
- ASSERT_TRUE(graph_->TryBuildingSsa());
+ TransformToSsa(graph_);
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
- LICM licm(graph_, side_effects);
- licm.Run();
+ LICM(graph_, side_effects).Run();
}
// General building fields.
@@ -107,7 +106,7 @@
BuildLoop();
// Populate the loop with instructions: set/get field with different types.
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
Primitive::kPrimLong,
MemberOffset(10),
@@ -134,7 +133,7 @@
BuildLoop();
// Populate the loop with instructions: set/get field with same types.
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
Primitive::kPrimLong,
MemberOffset(10),
@@ -169,10 +168,10 @@
// Populate the loop with instructions: set/get array with different types.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimLong, 0);
+ parameter_, constant_, Primitive::kPrimByte, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
- parameter_, constant_, constant_, Primitive::kPrimInt, 0);
+ parameter_, constant_, constant_, Primitive::kPrimShort, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
EXPECT_EQ(get_array->GetBlock(), loop_body_);
@@ -187,10 +186,10 @@
// Populate the loop with instructions: set/get array with same types.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimLong, 0);
+ parameter_, constant_, Primitive::kPrimByte, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
- parameter_, get_array, constant_, Primitive::kPrimLong, 0);
+ parameter_, get_array, constant_, Primitive::kPrimByte, 0);
loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction());
EXPECT_EQ(get_array->GetBlock(), loop_body_);
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index a059766..ed275b1 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -29,13 +29,12 @@
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "pretty_printer.h"
-#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
-#include "gtest/gtest.h"
-
namespace art {
+class LinearizeTest : public CommonCompilerTest {};
+
template <size_t number_of_blocks>
static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) {
ArenaPool pool;
@@ -46,7 +45,7 @@
bool graph_built = builder.BuildGraph(*item);
ASSERT_TRUE(graph_built);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -60,7 +59,7 @@
}
}
-TEST(LinearizeTest, CFG1) {
+TEST_F(LinearizeTest, CFG1) {
// Structure of this graph (+ are back edges)
// Block0
// |
@@ -85,7 +84,7 @@
TestCode(data, blocks);
}
-TEST(LinearizeTest, CFG2) {
+TEST_F(LinearizeTest, CFG2) {
// Structure of this graph (+ are back edges)
// Block0
// |
@@ -110,7 +109,7 @@
TestCode(data, blocks);
}
-TEST(LinearizeTest, CFG3) {
+TEST_F(LinearizeTest, CFG3) {
// Structure of this graph (+ are back edges)
// Block0
// |
@@ -137,7 +136,7 @@
TestCode(data, blocks);
}
-TEST(LinearizeTest, CFG4) {
+TEST_F(LinearizeTest, CFG4) {
/* Structure of this graph (+ are back edges)
// Block0
// |
@@ -167,7 +166,7 @@
TestCode(data, blocks);
}
-TEST(LinearizeTest, CFG5) {
+TEST_F(LinearizeTest, CFG5) {
/* Structure of this graph (+ are back edges)
// Block0
// |
@@ -197,7 +196,7 @@
TestCode(data, blocks);
}
-TEST(LinearizeTest, CFG6) {
+TEST_F(LinearizeTest, CFG6) {
// Block0
// |
// Block1
@@ -223,7 +222,7 @@
TestCode(data, blocks);
}
-TEST(LinearizeTest, CFG7) {
+TEST_F(LinearizeTest, CFG7) {
// Structure of this graph (+ are back edges)
// Block0
// |
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 7f67560..926f939 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -27,10 +27,10 @@
#include "prepare_for_register_allocation.h"
#include "ssa_liveness_analysis.h"
-#include "gtest/gtest.h"
-
namespace art {
+class LiveRangesTest : public CommonCompilerTest {};
+
static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
HGraph* graph = CreateGraph(allocator);
HGraphBuilder builder(graph);
@@ -39,13 +39,13 @@
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
return graph;
}
-TEST(LiveRangesTest, CFG1) {
+TEST_F(LiveRangesTest, CFG1) {
/*
* Test the following snippet:
* return 0;
@@ -83,7 +83,7 @@
ASSERT_TRUE(range->GetNext() == nullptr);
}
-TEST(LiveRangesTest, CFG2) {
+TEST_F(LiveRangesTest, CFG2) {
/*
* Test the following snippet:
* var a = 0;
@@ -131,7 +131,7 @@
ASSERT_TRUE(range->GetNext() == nullptr);
}
-TEST(LiveRangesTest, CFG3) {
+TEST_F(LiveRangesTest, CFG3) {
/*
* Test the following snippet:
* var a = 0;
@@ -204,7 +204,7 @@
ASSERT_TRUE(range->GetNext() == nullptr);
}
-TEST(LiveRangesTest, Loop1) {
+TEST_F(LiveRangesTest, Loop1) {
/*
* Test the following snippet:
* var a = 0;
@@ -284,7 +284,7 @@
ASSERT_TRUE(range->GetNext() == nullptr);
}
-TEST(LiveRangesTest, Loop2) {
+TEST_F(LiveRangesTest, Loop2) {
/*
* Test the following snippet:
* var a = 0;
@@ -360,7 +360,7 @@
ASSERT_TRUE(range->GetNext() == nullptr);
}
-TEST(LiveRangesTest, CFG4) {
+TEST_F(LiveRangesTest, CFG4) {
/*
* Test the following snippet:
* var a = 0;
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 9d7d0b6..7736eed 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -27,10 +27,10 @@
#include "prepare_for_register_allocation.h"
#include "ssa_liveness_analysis.h"
-#include "gtest/gtest.h"
-
namespace art {
+class LivenessTest : public CommonCompilerTest {};
+
static void DumpBitVector(BitVector* vector,
std::ostream& buffer,
size_t count,
@@ -51,7 +51,7 @@
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
bool graph_built = builder.BuildGraph(*item);
ASSERT_TRUE(graph_built);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
@@ -75,7 +75,7 @@
ASSERT_STREQ(expected, buffer.str().c_str());
}
-TEST(LivenessTest, CFG1) {
+TEST_F(LivenessTest, CFG1) {
const char* expected =
"Block 0\n"
" live in: (0)\n"
@@ -98,7 +98,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, CFG2) {
+TEST_F(LivenessTest, CFG2) {
const char* expected =
"Block 0\n"
" live in: (0)\n"
@@ -120,7 +120,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, CFG3) {
+TEST_F(LivenessTest, CFG3) {
const char* expected =
"Block 0\n" // entry block
" live in: (000)\n"
@@ -149,7 +149,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, CFG4) {
+TEST_F(LivenessTest, CFG4) {
// var a;
// if (0 == 0) {
// a = 5;
@@ -197,7 +197,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, CFG5) {
+TEST_F(LivenessTest, CFG5) {
// var a = 0;
// if (0 == 0) {
// } else {
@@ -242,7 +242,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, Loop1) {
+TEST_F(LivenessTest, Loop1) {
// Simple loop with one preheader and one back edge.
// var a = 0;
// while (a == a) {
@@ -288,7 +288,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, Loop3) {
+TEST_F(LivenessTest, Loop3) {
// Test that the returned value stays live in a preceding loop.
// var a = 0;
// while (a == a) {
@@ -335,7 +335,7 @@
}
-TEST(LivenessTest, Loop4) {
+TEST_F(LivenessTest, Loop4) {
// Make sure we support a preheader of a loop not being the first predecessor
// in the predecessor list of the header.
// var a = 0;
@@ -387,7 +387,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, Loop5) {
+TEST_F(LivenessTest, Loop5) {
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
// Bitsets are made of:
@@ -443,7 +443,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, Loop6) {
+TEST_F(LivenessTest, Loop6) {
// Bitsets are made of:
// (constant0, constant4, constant5, phi in block 2)
const char* expected =
@@ -494,7 +494,7 @@
}
-TEST(LivenessTest, Loop7) {
+TEST_F(LivenessTest, Loop7) {
// Bitsets are made of:
// (constant0, constant4, constant5, phi in block 2, phi in block 6)
const char* expected =
@@ -548,7 +548,7 @@
TestCode(data, expected);
}
-TEST(LivenessTest, Loop8) {
+TEST_F(LivenessTest, Loop8) {
// var a = 0;
// while (a == a) {
// a = a + a;
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index adde004..727f2bb 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -119,10 +119,16 @@
: ref_info_(ref_info),
offset_(offset),
index_(index),
- declaring_class_def_index_(declaring_class_def_index) {
+ declaring_class_def_index_(declaring_class_def_index),
+ value_killed_by_loop_side_effects_(true) {
DCHECK(ref_info != nullptr);
DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
(offset != kInvalidFieldOffset && index == nullptr));
+ if (ref_info->IsSingleton() && !IsArrayElement()) {
+ // Assume this location's value cannot be killed by loop side effects
+ // until proven otherwise.
+ value_killed_by_loop_side_effects_ = false;
+ }
}
ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
@@ -139,11 +145,22 @@
return index_ != nullptr;
}
+ bool IsValueKilledByLoopSideEffects() const {
+ return value_killed_by_loop_side_effects_;
+ }
+
+ void SetValueKilledByLoopSideEffects(bool val) {
+ value_killed_by_loop_side_effects_ = val;
+ }
+
private:
ReferenceInfo* const ref_info_; // reference for instance/static field or array access.
const size_t offset_; // offset of static/instance field.
HInstruction* const index_; // index of an array element.
const int16_t declaring_class_def_index_; // declaring class's def's dex index.
+ bool value_killed_by_loop_side_effects_; // value of this location may be killed by loop
+ // side effects because this location is stored
+ // into inside a loop.
DISALLOW_COPY_AND_ASSIGN(HeapLocation);
};
@@ -370,13 +387,13 @@
return heap_locations_[heap_location_idx];
}
- void VisitFieldAccess(HInstruction* ref, const FieldInfo& field_info) {
+ HeapLocation* VisitFieldAccess(HInstruction* ref, const FieldInfo& field_info) {
if (field_info.IsVolatile()) {
has_volatile_ = true;
}
const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
const size_t offset = field_info.GetFieldOffset().SizeValue();
- GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
+ return GetOrCreateHeapLocation(ref, offset, nullptr, declaring_class_def_index);
}
void VisitArrayAccess(HInstruction* array, HInstruction* index) {
@@ -390,8 +407,11 @@
}
void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
- VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
+ HeapLocation* location = VisitFieldAccess(instruction->InputAt(0), instruction->GetFieldInfo());
has_heap_stores_ = true;
+ if (instruction->GetBlock()->GetLoopInformation() != nullptr) {
+ location->SetValueKilledByLoopSideEffects(true);
+ }
}
void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
@@ -565,23 +585,26 @@
HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
ArenaVector<HInstruction*>& pre_header_heap_values =
heap_values_for_[pre_header->GetBlockId()];
+ // Inherit the values from pre-header.
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ heap_values[i] = pre_header_heap_values[i];
+ }
+
// We do a single pass in reverse post order. For loops, use the side effects as a hint
// to see if the heap values should be killed.
if (side_effects_.GetLoopEffects(block).DoesAnyWrite()) {
- for (size_t i = 0; i < pre_header_heap_values.size(); i++) {
- // heap value is killed by loop side effects, need to keep the last store.
- KeepIfIsStore(pre_header_heap_values[i]);
- }
- if (kIsDebugBuild) {
- // heap_values should all be kUnknownHeapValue that it is inited with.
- for (size_t i = 0; i < heap_values.size(); i++) {
- DCHECK_EQ(heap_values[i], kUnknownHeapValue);
- }
- }
- } else {
- // Inherit the values from pre-header.
for (size_t i = 0; i < heap_values.size(); i++) {
- heap_values[i] = pre_header_heap_values[i];
+ HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
+ ReferenceInfo* ref_info = location->GetReferenceInfo();
+ if (!ref_info->IsSingleton() || location->IsValueKilledByLoopSideEffects()) {
+ // heap value is killed by loop side effects (stored into directly, or due to
+ // aliasing).
+ KeepIfIsStore(pre_header_heap_values[i]);
+ heap_values[i] = kUnknownHeapValue;
+ } else {
+ // A singleton's field that's not stored into inside a loop is invariant throughout
+ // the loop.
+ }
}
}
}
@@ -655,6 +678,16 @@
}
}
+ static bool IsIntFloatAlias(Primitive::Type type1, Primitive::Type type2) {
+ return (type1 == Primitive::kPrimFloat && type2 == Primitive::kPrimInt) ||
+ (type2 == Primitive::kPrimFloat && type1 == Primitive::kPrimInt);
+ }
+
+ static bool IsLongDoubleAlias(Primitive::Type type1, Primitive::Type type2) {
+ return (type1 == Primitive::kPrimDouble && type2 == Primitive::kPrimLong) ||
+ (type2 == Primitive::kPrimDouble && type1 == Primitive::kPrimLong);
+ }
+
void VisitGetLocation(HInstruction* instruction,
HInstruction* ref,
size_t offset,
@@ -686,7 +719,8 @@
if ((heap_value != kUnknownHeapValue) &&
// Keep the load due to possible I/F, J/D array aliasing.
// See b/22538329 for details.
- (heap_value->GetType() == instruction->GetType())) {
+ !IsIntFloatAlias(heap_value->GetType(), instruction->GetType()) &&
+ !IsLongDoubleAlias(heap_value->GetType(), instruction->GetType())) {
removed_loads_.push_back(instruction);
substitute_instructions_for_loads_.push_back(heap_value);
TryRemovingNullCheck(instruction);
@@ -751,6 +785,9 @@
if (loop_info != nullptr) {
// instruction is a store in the loop so the loop must does write.
DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
+ // If it's a singleton, IsValueKilledByLoopSideEffects() must be true.
+ DCHECK(!ref_info->IsSingleton() ||
+ heap_location_collector_.GetHeapLocation(idx)->IsValueKilledByLoopSideEffects());
if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 926bc15..fc12224 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -198,10 +198,38 @@
}
}
-void HGraph::TransformToSsa() {
- DCHECK(!reverse_post_order_.empty());
- SsaBuilder ssa_builder(this);
- ssa_builder.BuildSsa();
+BuildSsaResult HGraph::TryBuildingSsa(StackHandleScopeCollection* handles) {
+ BuildDominatorTree();
+
+ // The SSA builder requires loops to all be natural. Specifically, the dead phi
+ // elimination phase checks the consistency of the graph when doing a post-order
+ // visit for eliminating dead phis: a dead phi can only have loop header phi
+ // users remaining when being visited.
+ BuildSsaResult result = AnalyzeNaturalLoops();
+ if (result != kBuildSsaSuccess) {
+ return result;
+ }
+
+ // Precompute per-block try membership before entering the SSA builder,
+ // which needs the information to build catch block phis from values of
+ // locals at throwing instructions inside try blocks.
+ ComputeTryBlockInformation();
+
+ // Create the inexact Object reference type and store it in the HGraph.
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ inexact_object_rti_ = ReferenceTypeInfo::Create(
+ handles->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject)),
+ /* is_exact */ false);
+
+ // Transforms graph to SSA form.
+ result = SsaBuilder(this, handles).BuildSsa();
+ if (result != kBuildSsaSuccess) {
+ return result;
+ }
+
+ in_ssa_form_ = true;
+ return kBuildSsaSuccess;
}
HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
@@ -410,7 +438,7 @@
}
}
-bool HGraph::AnalyzeNaturalLoops() const {
+BuildSsaResult HGraph::AnalyzeNaturalLoops() const {
// Order does not matter.
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
@@ -418,16 +446,16 @@
if (block->IsCatchBlock()) {
// TODO: Dealing with exceptional back edges could be tricky because
// they only approximate the real control flow. Bail out for now.
- return false;
+ return kBuildSsaFailThrowCatchLoop;
}
HLoopInformation* info = block->GetLoopInformation();
if (!info->Populate()) {
// Abort if the loop is non natural. We currently bailout in such cases.
- return false;
+ return kBuildSsaFailNonNaturalLoop;
}
}
}
- return true;
+ return kBuildSsaSuccess;
}
void HGraph::InsertConstant(HConstant* constant) {
@@ -446,8 +474,13 @@
// id and/or any invariants the graph is assuming when adding new instructions.
if ((cached_null_constant_ == nullptr) || (cached_null_constant_->GetBlock() == nullptr)) {
cached_null_constant_ = new (arena_) HNullConstant(dex_pc);
+ cached_null_constant_->SetReferenceTypeInfo(inexact_object_rti_);
InsertConstant(cached_null_constant_);
}
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(cached_null_constant_->GetReferenceTypeInfo().IsValid());
+ }
return cached_null_constant_;
}
@@ -777,6 +810,10 @@
user_record.GetInstruction()->RemoveEnvironmentUser(user_record.GetUseNode());
}
+HInstruction::InstructionKind HInstruction::GetKind() const {
+ return GetKindInternal();
+}
+
HInstruction* HInstruction::GetNextDisregardingMoves() const {
HInstruction* next = GetNext();
while (next != nullptr && next->IsParallelMove()) {
@@ -960,7 +997,7 @@
visitor->Visit##name(this); \
}
-FOR_EACH_INSTRUCTION(DEFINE_ACCEPT)
+FOR_EACH_CONCRETE_INSTRUCTION(DEFINE_ACCEPT)
#undef DEFINE_ACCEPT
@@ -2087,13 +2124,27 @@
}
void HInvoke::SetIntrinsic(Intrinsics intrinsic,
- IntrinsicNeedsEnvironmentOrCache needs_env_or_cache) {
+ IntrinsicNeedsEnvironmentOrCache needs_env_or_cache,
+ IntrinsicSideEffects side_effects,
+ IntrinsicExceptions exceptions) {
intrinsic_ = intrinsic;
IntrinsicOptimizations opt(this);
if (needs_env_or_cache == kNoEnvironmentOrCache) {
opt.SetDoesNotNeedDexCache();
opt.SetDoesNotNeedEnvironment();
}
+ // Adjust method's side effects from intrinsic table.
+ switch (side_effects) {
+ case kNoSideEffects: SetSideEffects(SideEffects::None()); break;
+ case kReadSideEffects: SetSideEffects(SideEffects::AllReads()); break;
+ case kWriteSideEffects: SetSideEffects(SideEffects::AllWrites()); break;
+ case kAllSideEffects: SetSideEffects(SideEffects::AllExceptGCDependency()); break;
+ }
+ // Adjust method's exception status from intrinsic table.
+ switch (exceptions) {
+ case kNoThrow: SetCanThrow(false); break;
+ case kCanThrow: SetCanThrow(true); break;
+ }
}
bool HInvoke::NeedsEnvironment() const {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1f8ef47..5b072cf 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -98,6 +98,13 @@
kCondAE, // >=
};
+enum BuildSsaResult {
+ kBuildSsaFailNonNaturalLoop,
+ kBuildSsaFailThrowCatchLoop,
+ kBuildSsaFailAmbiguousArrayGet,
+ kBuildSsaSuccess,
+};
+
class HInstructionList : public ValueObject {
public:
HInstructionList() : first_instruction_(nullptr), last_instruction_(nullptr) {}
@@ -143,6 +150,122 @@
DISALLOW_COPY_AND_ASSIGN(HInstructionList);
};
+class ReferenceTypeInfo : ValueObject {
+ public:
+ typedef Handle<mirror::Class> TypeHandle;
+
+ static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) {
+ // The constructor will check that the type_handle is valid.
+ return ReferenceTypeInfo(type_handle, is_exact);
+ }
+
+ static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); }
+
+ static bool IsValidHandle(TypeHandle handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ return handle.GetReference() != nullptr;
+ }
+
+ bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ return IsValidHandle(type_handle_);
+ }
+
+ bool IsExact() const { return is_exact_; }
+
+ bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsObjectClass();
+ }
+
+ bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsStringClass();
+ }
+
+ bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
+ }
+
+ bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsInterface();
+ }
+
+ bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsArrayClass();
+ }
+
+ bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsPrimitiveArray();
+ }
+
+ bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
+ }
+
+ bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ if (!IsExact()) return false;
+ if (!IsArrayClass()) return false;
+ return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
+ }
+
+ bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ if (!IsExact()) return false;
+ if (!IsArrayClass()) return false;
+ if (!rti.IsArrayClass()) return false;
+ return GetTypeHandle()->GetComponentType()->IsAssignableFrom(
+ rti.GetTypeHandle()->GetComponentType());
+ }
+
+ Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
+
+ bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ DCHECK(rti.IsValid());
+ return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
+ }
+
+ bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ DCHECK(rti.IsValid());
+ return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
+ GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
+ }
+
+ // Returns true if the type information provides the same amount of detail.
+ // Note that it does not mean that the instructions have the same actual type
+ // (because the type can be the result of a merge).
+ bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (!IsValid() && !rti.IsValid()) {
+ // Invalid types are equal.
+ return true;
+ }
+ if (!IsValid() || !rti.IsValid()) {
+ // One is valid, the other not.
+ return false;
+ }
+ return IsExact() == rti.IsExact()
+ && GetTypeHandle().Get() == rti.GetTypeHandle().Get();
+ }
+
+ private:
+ ReferenceTypeInfo();
+ ReferenceTypeInfo(TypeHandle type_handle, bool is_exact);
+
+ // The class of the object.
+ TypeHandle type_handle_;
+ // Whether or not the type is exact or a superclass of the actual type.
+ // Whether or not we have any information about this type.
+ bool is_exact_;
+};
+
+std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
+
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocGraph> {
public:
@@ -179,7 +302,8 @@
cached_float_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
- cached_current_method_(nullptr) {
+ cached_current_method_(nullptr),
+ inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -197,36 +321,23 @@
void AddBlock(HBasicBlock* block);
- // Try building the SSA form of this graph, with dominance computation and loop
- // recognition. Returns whether it was successful in doing all these steps.
- bool TryBuildingSsa() {
- BuildDominatorTree();
- // The SSA builder requires loops to all be natural. Specifically, the dead phi
- // elimination phase checks the consistency of the graph when doing a post-order
- // visit for eliminating dead phis: a dead phi can only have loop header phi
- // users remaining when being visited.
- if (!AnalyzeNaturalLoops()) return false;
- // Precompute per-block try membership before entering the SSA builder,
- // which needs the information to build catch block phis from values of
- // locals at throwing instructions inside try blocks.
- ComputeTryBlockInformation();
- TransformToSsa();
- in_ssa_form_ = true;
- return true;
- }
+ // Try building the SSA form of this graph, with dominance computation and
+ // loop recognition. Returns a code specifying that it was successful or the
+ // reason for failure.
+ BuildSsaResult TryBuildingSsa(StackHandleScopeCollection* handles);
void ComputeDominanceInformation();
void ClearDominanceInformation();
void BuildDominatorTree();
- void TransformToSsa();
void SimplifyCFG();
void SimplifyCatchBlocks();
- // Analyze all natural loops in this graph. Returns false if one
- // loop is not natural, that is the header does not dominate the
- // back edge.
- bool AnalyzeNaturalLoops() const;
+ // Analyze all natural loops in this graph. Returns a code specifying that it
+ // was successful or the reason for failure. The method will fail if a loop
+ // is not natural, that is the header does not dominate a back edge, or if it
+ // is a throw-catch loop, i.e. the header is a catch block.
+ BuildSsaResult AnalyzeNaturalLoops() const;
// Iterate over blocks to compute try block membership. Needs reverse post
// order and loop information.
@@ -487,6 +598,10 @@
// (such as when the superclass could not be found).
ArtMethod* art_method_;
+ // Keep the RTI of inexact Object to avoid having to pass stack handle
+ // collection pointer to passes which may create NullConstant.
+ ReferenceTypeInfo inexact_object_rti_;
+
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
@@ -1034,7 +1149,6 @@
M(ClearException, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
- M(Condition, BinaryOperation) \
M(CurrentMethod, Instruction) \
M(Deoptimize, Instruction) \
M(Div, BinaryOperation) \
@@ -1067,6 +1181,7 @@
M(MemoryBarrier, Instruction) \
M(MonitorOperation, Instruction) \
M(Mul, BinaryOperation) \
+ M(NativeDebugInfo, Instruction) \
M(Neg, UnaryOperation) \
M(NewArray, Instruction) \
M(NewInstance, Instruction) \
@@ -1141,27 +1256,34 @@
FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
-#define FOR_EACH_INSTRUCTION(M) \
- FOR_EACH_CONCRETE_INSTRUCTION(M) \
+#define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
+ M(Condition, BinaryOperation) \
M(Constant, Instruction) \
M(UnaryOperation, Instruction) \
M(BinaryOperation, Instruction) \
M(Invoke, Instruction)
+#define FOR_EACH_INSTRUCTION(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION(M) \
+ FOR_EACH_ABSTRACT_INSTRUCTION(M)
+
#define FORWARD_DECLARATION(type, super) class H##type;
FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
#define DECLARE_INSTRUCTION(type) \
- InstructionKind GetKind() const OVERRIDE { return k##type; } \
+ InstructionKind GetKindInternal() const OVERRIDE { return k##type; } \
const char* DebugName() const OVERRIDE { return #type; } \
- const H##type* As##type() const OVERRIDE { return this; } \
- H##type* As##type() OVERRIDE { return this; } \
bool InstructionTypeEquals(HInstruction* other) const OVERRIDE { \
return other->Is##type(); \
} \
void Accept(HGraphVisitor* visitor) OVERRIDE
+#define DECLARE_ABSTRACT_INSTRUCTION(type) \
+ bool Is##type() const { return As##type() != nullptr; } \
+ const H##type* As##type() const { return this; } \
+ H##type* As##type() { return this; }
+
template <typename T> class HUseList;
template <typename T>
@@ -1674,122 +1796,6 @@
DISALLOW_COPY_AND_ASSIGN(HEnvironment);
};
-class ReferenceTypeInfo : ValueObject {
- public:
- typedef Handle<mirror::Class> TypeHandle;
-
- static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact) {
- // The constructor will check that the type_handle is valid.
- return ReferenceTypeInfo(type_handle, is_exact);
- }
-
- static ReferenceTypeInfo CreateInvalid() { return ReferenceTypeInfo(); }
-
- static bool IsValidHandle(TypeHandle handle) SHARED_REQUIRES(Locks::mutator_lock_) {
- return handle.GetReference() != nullptr;
- }
-
- bool IsValid() const SHARED_REQUIRES(Locks::mutator_lock_) {
- return IsValidHandle(type_handle_);
- }
-
- bool IsExact() const { return is_exact_; }
-
- bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsObjectClass();
- }
-
- bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsStringClass();
- }
-
- bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
- }
-
- bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsInterface();
- }
-
- bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsArrayClass();
- }
-
- bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsPrimitiveArray();
- }
-
- bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
- }
-
- bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- if (!IsExact()) return false;
- if (!IsArrayClass()) return false;
- return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
- }
-
- bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- if (!IsExact()) return false;
- if (!IsArrayClass()) return false;
- if (!rti.IsArrayClass()) return false;
- return GetTypeHandle()->GetComponentType()->IsAssignableFrom(
- rti.GetTypeHandle()->GetComponentType());
- }
-
- Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
-
- bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- DCHECK(rti.IsValid());
- return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
- }
-
- bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK(IsValid());
- DCHECK(rti.IsValid());
- return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
- GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
- }
-
- // Returns true if the type information provide the same amount of details.
- // Note that it does not mean that the instructions have the same actual type
- // (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) {
- if (!IsValid() && !rti.IsValid()) {
- // Invalid types are equal.
- return true;
- }
- if (!IsValid() || !rti.IsValid()) {
- // One is valid, the other not.
- return false;
- }
- return IsExact() == rti.IsExact()
- && GetTypeHandle().Get() == rti.GetTypeHandle().Get();
- }
-
- private:
- ReferenceTypeInfo();
- ReferenceTypeInfo(TypeHandle type_handle, bool is_exact);
-
- // The class of the object.
- TypeHandle type_handle_;
- // Whether or not the type is exact or a superclass of the actual type.
- // Whether or not we have any information about this type.
- bool is_exact_;
-};
-
-std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
-
class HInstruction : public ArenaObject<kArenaAllocInstruction> {
public:
HInstruction(SideEffects side_effects, uint32_t dex_pc)
@@ -1972,11 +1978,18 @@
void MoveBeforeFirstUserAndOutOfLoops();
#define INSTRUCTION_TYPE_CHECK(type, super) \
+ bool Is##type() const; \
+ const H##type* As##type() const; \
+ H##type* As##type();
+
+ FOR_EACH_CONCRETE_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
+#undef INSTRUCTION_TYPE_CHECK
+
+#define INSTRUCTION_TYPE_CHECK(type, super) \
bool Is##type() const { return (As##type() != nullptr); } \
virtual const H##type* As##type() const { return nullptr; } \
virtual H##type* As##type() { return nullptr; }
-
- FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
+ FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
#undef INSTRUCTION_TYPE_CHECK
// Returns whether the instruction can be moved within the graph.
@@ -1999,7 +2012,12 @@
// 2) Their inputs are identical.
bool Equals(HInstruction* other) const;
- virtual InstructionKind GetKind() const = 0;
+ // TODO: Remove this indirection when the [[pure]] attribute proposal (n3744)
 + // is adopted and implemented by our C++ compiler(s). For now, we need to hide
+ // the virtual function because the __attribute__((__pure__)) doesn't really
+ // apply the strong requirement for virtual functions, preventing optimizations.
+ InstructionKind GetKind() const PURE;
+ virtual InstructionKind GetKindInternal() const = 0;
virtual size_t ComputeHashCode() const {
size_t result = GetKind();
@@ -2045,6 +2063,7 @@
protected:
virtual const HUserRecord<HInstruction*> InputRecordAt(size_t i) const = 0;
virtual void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) = 0;
+ void SetSideEffects(SideEffects other) { side_effects_ = other; }
private:
void RemoveEnvironmentUser(HUseListNode<HEnvironment*>* use_node) { env_uses_.Remove(use_node); }
@@ -2297,7 +2316,7 @@
virtual uint64_t GetValueAsUint64() const = 0;
- DECLARE_INSTRUCTION(Constant);
+ DECLARE_ABSTRACT_INSTRUCTION(Constant);
private:
DISALLOW_COPY_AND_ASSIGN(HConstant);
@@ -2558,7 +2577,7 @@
virtual HConstant* Evaluate(HIntConstant* x) const = 0;
virtual HConstant* Evaluate(HLongConstant* x) const = 0;
- DECLARE_INSTRUCTION(UnaryOperation);
+ DECLARE_ABSTRACT_INSTRUCTION(UnaryOperation);
private:
DISALLOW_COPY_AND_ASSIGN(HUnaryOperation);
@@ -2651,7 +2670,7 @@
// one. Otherwise it returns null.
HInstruction* GetLeastConstantLeft() const;
- DECLARE_INSTRUCTION(BinaryOperation);
+ DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation);
private:
DISALLOW_COPY_AND_ASSIGN(HBinaryOperation);
@@ -2679,7 +2698,7 @@
// `instruction`, and disregard moves in between.
bool IsBeforeWhenDisregardMoves(HInstruction* instruction) const;
- DECLARE_INSTRUCTION(Condition);
+ DECLARE_ABSTRACT_INSTRUCTION(Condition);
virtual IfCondition GetCondition() const = 0;
@@ -3228,7 +3247,8 @@
};
enum class Intrinsics {
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache) k ## Name,
+#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions) \
+ k ## Name,
#include "intrinsics_list.h"
kNone,
INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
@@ -3242,6 +3262,18 @@
kNeedsEnvironmentOrCache // Intrinsic requires an environment or requires a dex cache.
};
+enum IntrinsicSideEffects {
+ kNoSideEffects, // Intrinsic does not have any heap memory side effects.
+ kReadSideEffects, // Intrinsic may read heap memory.
+ kWriteSideEffects, // Intrinsic may write heap memory.
+ kAllSideEffects // Intrinsic may read or write heap memory, or trigger GC.
+};
+
+enum IntrinsicExceptions {
+ kNoThrow, // Intrinsic does not throw any exceptions.
+ kCanThrow // Intrinsic may throw exceptions.
+};
+
class HInvoke : public HInstruction {
public:
size_t InputCount() const OVERRIDE { return inputs_.size(); }
@@ -3260,7 +3292,6 @@
Primitive::Type GetType() const OVERRIDE { return return_type_; }
-
uint32_t GetDexMethodIndex() const { return dex_method_index_; }
const DexFile& GetDexFile() const { return GetEnvironment()->GetDexFile(); }
@@ -3270,13 +3301,22 @@
return intrinsic_;
}
- void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironmentOrCache needs_env_or_cache);
+ void SetIntrinsic(Intrinsics intrinsic,
+ IntrinsicNeedsEnvironmentOrCache needs_env_or_cache,
+ IntrinsicSideEffects side_effects,
+ IntrinsicExceptions exceptions);
bool IsFromInlinedInvoke() const {
return GetEnvironment()->IsFromInlinedInvoke();
}
- bool CanThrow() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return can_throw_; }
+
+ bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ return intrinsic_ != Intrinsics::kNone && intrinsic_ == other->AsInvoke()->intrinsic_;
+ }
uint32_t* GetIntrinsicOptimizations() {
return &intrinsic_optimizations_;
@@ -3288,7 +3328,7 @@
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
- DECLARE_INSTRUCTION(Invoke);
+ DECLARE_ABSTRACT_INSTRUCTION(Invoke);
protected:
HInvoke(ArenaAllocator* arena,
@@ -3306,6 +3346,7 @@
return_type_(return_type),
dex_method_index_(dex_method_index),
original_invoke_type_(original_invoke_type),
+ can_throw_(true),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
}
@@ -3318,11 +3359,14 @@
inputs_[index] = input;
}
+ void SetCanThrow(bool can_throw) { can_throw_ = can_throw; }
+
uint32_t number_of_arguments_;
ArenaVector<HUserRecord<HInstruction*>> inputs_;
const Primitive::Type return_type_;
const uint32_t dex_method_index_;
const InvokeType original_invoke_type_;
+ bool can_throw_;
Intrinsics intrinsic_;
// A magic word holding optimizations for intrinsics. See intrinsics.h.
@@ -4417,7 +4461,16 @@
void RemoveInputAt(size_t index);
Primitive::Type GetType() const OVERRIDE { return type_; }
- void SetType(Primitive::Type type) { type_ = type; }
+ void SetType(Primitive::Type new_type) {
+ // Make sure that only valid type changes occur. The following are allowed:
+ // (1) int -> float/ref (primitive type propagation),
+ // (2) long -> double (primitive type propagation).
+ DCHECK(type_ == new_type ||
+ (type_ == Primitive::kPrimInt && new_type == Primitive::kPrimFloat) ||
+ (type_ == Primitive::kPrimInt && new_type == Primitive::kPrimNot) ||
+ (type_ == Primitive::kPrimLong && new_type == Primitive::kPrimDouble));
+ type_ = new_type;
+ }
bool CanBeNull() const OVERRIDE { return can_be_null_; }
void SetCanBeNull(bool can_be_null) { can_be_null_ = can_be_null; }
@@ -4657,7 +4710,21 @@
return false;
}
- void SetType(Primitive::Type type) { type_ = type; }
+ bool IsEquivalentOf(HArrayGet* other) const {
+ bool result = (GetDexPc() == other->GetDexPc());
+ if (kIsDebugBuild && result) {
+ DCHECK_EQ(GetBlock(), other->GetBlock());
+ DCHECK_EQ(GetArray(), other->GetArray());
+ DCHECK_EQ(GetIndex(), other->GetIndex());
+ if (Primitive::IsIntOrLongType(GetType())) {
+ DCHECK(Primitive::IsFloatingPointType(other->GetType()));
+ } else {
+ DCHECK(Primitive::IsFloatingPointType(GetType()));
+ DCHECK(Primitive::IsIntOrLongType(other->GetType()));
+ }
+ }
+ return result;
+ }
HInstruction* GetArray() const { return InputAt(0); }
HInstruction* GetIndex() const { return InputAt(1); }
@@ -4854,6 +4921,23 @@
DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
+// Pseudo-instruction which provides the native debugger with mapping information.
+// It ensures that we can generate line number and local variables at this point.
+class HNativeDebugInfo : public HTemplateInstruction<0> {
+ public:
+ explicit HNativeDebugInfo(uint32_t dex_pc)
+ : HTemplateInstruction<0>(SideEffects::None(), dex_pc) {}
+
+ bool NeedsEnvironment() const OVERRIDE {
+ return true;
+ }
+
+ DECLARE_INSTRUCTION(NativeDebugInfo);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HNativeDebugInfo);
+};
+
/**
* Instruction to load a Class object.
*/
@@ -5869,6 +5953,18 @@
return &lhs == &rhs;
}
+#define INSTRUCTION_TYPE_CHECK(type, super) \
+ inline bool HInstruction::Is##type() const { return GetKind() == k##type; } \
+ inline const H##type* HInstruction::As##type() const { \
+ return Is##type() ? down_cast<const H##type*>(this) : nullptr; \
+ } \
+ inline H##type* HInstruction::As##type() { \
+ return Is##type() ? static_cast<H##type*>(this) : nullptr; \
+ }
+
+ FOR_EACH_CONCRETE_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
+#undef INSTRUCTION_TYPE_CHECK
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 831b626..3de870e 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -501,11 +501,8 @@
CompilerDriver* driver,
OptimizingCompilerStats* stats,
const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
- ScopedThreadSuspension sts(soa.Self(), kNative);
-
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) {
ArenaAllocator* arena = graph->GetArena();
HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
@@ -522,29 +519,23 @@
LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
- ReferenceTypePropagation* type_propagation =
- new (arena) ReferenceTypePropagation(graph, &handles);
HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_after_types");
- InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_after_bce");
- InstructionSimplifier* simplify4 = new (arena) InstructionSimplifier(
+ InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver);
HOptimization* optimizations1[] = {
intrinsics,
+ sharpening,
fold1,
simplify1,
- type_propagation,
- sharpening,
dce1,
- simplify2
};
RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
- MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, &handles);
+ MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
HOptimization* optimizations2[] = {
// BooleanSimplifier depends on the InstructionSimplifier removing
@@ -557,13 +548,13 @@
induction,
bce,
fold3, // evaluates code generated by dynamic bce
- simplify3,
+ simplify2,
lse,
dce2,
// The codegen has a few assumptions that only the instruction simplifier
// can satisfy. For example, the code generator does not expect to see a
// HTypeConversion from a type to the same type.
- simplify4,
+ simplify3,
};
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
@@ -768,14 +759,29 @@
}
VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName();
+
if (run_optimizations_) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ ScopedThreadSuspension sts(soa.Self(), kNative);
+
{
PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
- if (!graph->TryBuildingSsa()) {
- // We could not transform the graph to SSA, bailout.
- LOG(INFO) << "Skipping compilation of " << pass_observer.GetMethodName()
- << ": it contains a non natural loop";
- MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
+ BuildSsaResult result = graph->TryBuildingSsa(&handles);
+ if (result != kBuildSsaSuccess) {
+ switch (result) {
+ case kBuildSsaFailNonNaturalLoop:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledNonNaturalLoop);
+ break;
+ case kBuildSsaFailThrowCatchLoop:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
+ break;
+ case kBuildSsaFailAmbiguousArrayGet:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayGet);
+ break;
+ case kBuildSsaSuccess:
+ UNREACHABLE();
+ }
pass_observer.SetGraphInBadState();
return nullptr;
}
@@ -786,7 +792,8 @@
compiler_driver,
compilation_stats_.get(),
dex_compilation_unit,
- &pass_observer);
+ &pass_observer,
+ &handles);
codegen->CompileOptimized(code_allocator);
} else {
codegen->CompileBaseline(code_allocator);
@@ -880,7 +887,11 @@
bool IsCompilingWithCoreImage() {
const std::string& image = Runtime::Current()->GetImageLocation();
- return EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art");
+ // TODO: This is under-approximating...
+ if (EndsWith(image, "core.art") || EndsWith(image, "core-optimizing.art")) {
+ return true;
+ }
+ return false;
}
bool OptimizingCompiler::JitCompile(Thread* self,
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 6296eed..4713514 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -38,7 +38,9 @@
kRemovedDeadInstruction,
kRemovedNullCheck,
kNotCompiledBranchOutsideMethodCode,
- kNotCompiledCannotBuildSSA,
+ kNotCompiledNonNaturalLoop,
+ kNotCompiledThrowCatchLoop,
+ kNotCompiledAmbiguousArrayGet,
kNotCompiledHugeMethod,
kNotCompiledLargeMethodNoBranches,
kNotCompiledMalformedOpcode,
@@ -104,7 +106,9 @@
case kRemovedDeadInstruction: name = "RemovedDeadInstruction"; break;
case kRemovedNullCheck: name = "RemovedNullCheck"; break;
case kNotCompiledBranchOutsideMethodCode: name = "NotCompiledBranchOutsideMethodCode"; break;
- case kNotCompiledCannotBuildSSA : name = "NotCompiledCannotBuildSSA"; break;
+ case kNotCompiledNonNaturalLoop : name = "NotCompiledNonNaturalLoop"; break;
+ case kNotCompiledThrowCatchLoop : name = "NotCompiledThrowCatchLoop"; break;
+ case kNotCompiledAmbiguousArrayGet : name = "NotCompiledAmbiguousArrayGet"; break;
case kNotCompiledHugeMethod : name = "NotCompiledHugeMethod"; break;
case kNotCompiledLargeMethodNoBranches : name = "NotCompiledLargeMethodNoBranches"; break;
case kNotCompiledMalformedOpcode : name = "NotCompiledMalformedOpcode"; break;
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 350f0b1..af3a005 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -19,9 +19,13 @@
#include "nodes.h"
#include "builder.h"
+#include "common_compiler_test.h"
#include "compiler/dex/pass_manager.h"
#include "dex_file.h"
#include "dex_instruction.h"
+#include "handle_scope-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "gtest/gtest.h"
@@ -42,7 +46,6 @@
#define FIVE_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(5, __VA_ARGS__)
#define SIX_REGISTERS_CODE_ITEM(...) N_REGISTERS_CODE_ITEM(6, __VA_ARGS__)
-
LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
ArenaAllocator* allocator,
@@ -111,6 +114,12 @@
return instruction->GetBlock() == nullptr;
}
+inline void TransformToSsa(HGraph* graph) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ EXPECT_EQ(graph->TryBuildingSsa(&handles), kBuildSsaSuccess);
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 30bcf19..176c50c 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -169,7 +169,7 @@
// If `other_move` was swapped, we iterate again to find a new
// potential cycle.
required_swap = nullptr;
- i = 0;
+ i = -1;
} else if (required_swap != nullptr) {
// A move is required to swap. We walk back the cycle to find the
// move by just returning from this `PerforrmMove`.
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 46e6f3e..5e8fe37 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -609,4 +609,36 @@
}
}
+TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves2) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+
+ {
+ TypeParam resolver(&allocator);
+ HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
+ moves->AddMove(
+ Location::RegisterLocation(0),
+ Location::RegisterLocation(3),
+ Primitive::kPrimInt,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterPairLocation(2, 3),
+ Location::RegisterPairLocation(0, 1),
+ Primitive::kPrimLong,
+ nullptr);
+ moves->AddMove(
+ Location::RegisterLocation(7),
+ Location::RegisterLocation(2),
+ Primitive::kPrimInt,
+ nullptr);
+ resolver.EmitNativeCode(moves);
+ if (TestFixture::has_swap) {
+ ASSERT_STREQ("(2,3 <-> 0,1) (2 -> 3) (7 -> 2)", resolver.GetMessage().c_str());
+ } else {
+ ASSERT_STREQ("(2,3 -> T0,T1) (0 -> 3) (T0,T1 -> 0,1) (7 -> 2)",
+ resolver.GetMessage().c_str());
+ }
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index b383f1e..a385448 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -15,6 +15,7 @@
*/
#include "pc_relative_fixups_x86.h"
+#include "code_generator_x86.h"
namespace art {
namespace x86 {
@@ -79,6 +80,10 @@
}
void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ if (switch_insn->GetNumEntries() <=
+ InstructionCodeGeneratorX86::kPackedSwitchJumpTableThreshold) {
+ return;
+ }
// We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
// address the constant area.
InitializePCRelativeBasePointer();
diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
deleted file mode 100644
index bde54ee..0000000
--- a/compiler/optimizing/primitive_type_propagation.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "primitive_type_propagation.h"
-
-#include "nodes.h"
-#include "ssa_builder.h"
-
-namespace art {
-
-static Primitive::Type MergeTypes(Primitive::Type existing, Primitive::Type new_type) {
- // We trust the verifier has already done the necessary checking.
- switch (existing) {
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- case Primitive::kPrimNot:
- return existing;
- default:
- // Phis are initialized with a void type, so if we are asked
- // to merge with a void type, we should use the existing one.
- return new_type == Primitive::kPrimVoid
- ? existing
- : HPhi::ToPhiType(new_type);
- }
-}
-
-// Re-compute and update the type of the instruction. Returns
-// whether or not the type was changed.
-bool PrimitiveTypePropagation::UpdateType(HPhi* phi) {
- DCHECK(phi->IsLive());
- Primitive::Type existing = phi->GetType();
-
- Primitive::Type new_type = existing;
- for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
- Primitive::Type input_type = phi->InputAt(i)->GetType();
- new_type = MergeTypes(new_type, input_type);
- }
- phi->SetType(new_type);
-
- if (new_type == Primitive::kPrimDouble
- || new_type == Primitive::kPrimFloat
- || new_type == Primitive::kPrimNot) {
- // If the phi is of floating point type, we need to update its inputs to that
- // type. For inputs that are phis, we need to recompute their types.
- for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
- HInstruction* input = phi->InputAt(i);
- if (input->GetType() != new_type) {
- HInstruction* equivalent = (new_type == Primitive::kPrimNot)
- ? SsaBuilder::GetReferenceTypeEquivalent(input)
- : SsaBuilder::GetFloatOrDoubleEquivalent(phi, input, new_type);
- phi->ReplaceInput(equivalent, i);
- if (equivalent->IsPhi()) {
- AddToWorklist(equivalent->AsPhi());
- } else if (equivalent == input) {
- // The input has changed its type. It can be an input of other phis,
- // so we need to put phi users in the work list.
- AddDependentInstructionsToWorklist(equivalent);
- }
- }
- }
- }
-
- return existing != new_type;
-}
-
-void PrimitiveTypePropagation::Run() {
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
- }
- ProcessWorklist();
-}
-
-void PrimitiveTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- if (block->IsLoopHeader()) {
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- if (phi->IsLive()) {
- AddToWorklist(phi);
- }
- }
- } else {
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- // Eagerly compute the type of the phi, for quicker convergence. Note
- // that we don't need to add users to the worklist because we are
- // doing a reverse post-order visit, therefore either the phi users are
- // non-loop phi and will be visited later in the visit, or are loop-phis,
- // and they are already in the work list.
- HPhi* phi = it.Current()->AsPhi();
- if (phi->IsLive()) {
- UpdateType(phi);
- }
- }
- }
-}
-
-void PrimitiveTypePropagation::ProcessWorklist() {
- while (!worklist_.empty()) {
- HPhi* instruction = worklist_.back();
- worklist_.pop_back();
- if (UpdateType(instruction)) {
- AddDependentInstructionsToWorklist(instruction);
- }
- }
-}
-
-void PrimitiveTypePropagation::AddToWorklist(HPhi* instruction) {
- DCHECK(instruction->IsLive());
- worklist_.push_back(instruction);
-}
-
-void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
- for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->GetUser()->AsPhi();
- if (phi != nullptr && phi->IsLive() && phi->GetType() != instruction->GetType()) {
- AddToWorklist(phi);
- }
- }
-}
-
-} // namespace art
diff --git a/compiler/optimizing/primitive_type_propagation.h b/compiler/optimizing/primitive_type_propagation.h
deleted file mode 100644
index 212fcfc..0000000
--- a/compiler/optimizing/primitive_type_propagation.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_PRIMITIVE_TYPE_PROPAGATION_H_
-#define ART_COMPILER_OPTIMIZING_PRIMITIVE_TYPE_PROPAGATION_H_
-
-#include "base/arena_containers.h"
-#include "nodes.h"
-
-namespace art {
-
-// Compute and propagate primitive types of phis in the graph.
-class PrimitiveTypePropagation : public ValueObject {
- public:
- explicit PrimitiveTypePropagation(HGraph* graph)
- : graph_(graph), worklist_(graph->GetArena()->Adapter(kArenaAllocPrimitiveTypePropagation)) {
- worklist_.reserve(kDefaultWorklistSize);
- }
-
- void Run();
-
- private:
- void VisitBasicBlock(HBasicBlock* block);
- void ProcessWorklist();
- void AddToWorklist(HPhi* phi);
- void AddDependentInstructionsToWorklist(HInstruction* instruction);
- bool UpdateType(HPhi* phi);
-
- HGraph* const graph_;
- ArenaVector<HPhi*> worklist_;
-
- static constexpr size_t kDefaultWorklistSize = 8;
-
- DISALLOW_COPY_AND_ASSIGN(PrimitiveTypePropagation);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_OPTIMIZING_PRIMITIVE_TYPE_PROPAGATION_H_
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fea903d..94a297c 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -40,7 +40,6 @@
throwable_class_handle_(throwable_class_handle),
worklist_(worklist) {}
- void VisitNullConstant(HNullConstant* null_constant) OVERRIDE;
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
void VisitClinitCheck(HClinitCheck* clinit_check) OVERRIDE;
@@ -71,8 +70,6 @@
ReferenceTypeInfo::TypeHandle string_class_handle_;
ReferenceTypeInfo::TypeHandle throwable_class_handle_;
ArenaVector<HInstruction*>* worklist_;
-
- static constexpr size_t kDefaultWorklistSize = 8;
};
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
@@ -171,9 +168,13 @@
ScopedObjectAccess soa(Thread::Current());
for (HReversePostOrderIterator block_it(*graph); !block_it.Done(); block_it.Advance()) {
for (HInstructionIterator it(block_it.Current()->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- fn(instr);
+ HPhi* phi = it.Current()->AsPhi();
+ // Note that the graph may contain dead phis when run from the SsaBuilder.
+ // Skip those as they might have a type conflict and will be removed anyway.
+ if (phi->IsLive() &&
+ phi->GetType() == Primitive::kPrimNot &&
+ !phi->GetReferenceTypeInfo().IsValid()) {
+ fn(phi);
}
}
for (HInstructionIterator it(block_it.Current()->GetInstructions()); !it.Done(); it.Advance()) {
@@ -376,6 +377,75 @@
}
}
+// Returns true if one of the patterns below has been recognized. If so, the
+// InstanceOf instruction together with the true branch of `ifInstruction` will
+// be returned using the out parameters.
+// Recognized patterns:
+// (1) patterns equivalent to `if (obj instanceof X)`
+// (a) InstanceOf -> Equal to 1 -> If
+// (b) InstanceOf -> NotEqual to 0 -> If
+// (c) InstanceOf -> If
+// (2) patterns equivalent to `if (!(obj instanceof X))`
+// (a) InstanceOf -> Equal to 0 -> If
+// (b) InstanceOf -> NotEqual to 1 -> If
+// (c) InstanceOf -> BooleanNot -> If
+static bool MatchIfInstanceOf(HIf* ifInstruction,
+ /* out */ HInstanceOf** instanceOf,
+ /* out */ HBasicBlock** trueBranch) {
+ HInstruction* input = ifInstruction->InputAt(0);
+
+ if (input->IsEqual()) {
+ HInstruction* rhs = input->AsEqual()->GetConstantRight();
+ if (rhs != nullptr) {
+ HInstruction* lhs = input->AsEqual()->GetLeastConstantLeft();
+ if (lhs->IsInstanceOf() && rhs->IsIntConstant()) {
+ if (rhs->AsIntConstant()->IsOne()) {
+ // Case (1a)
+ *trueBranch = ifInstruction->IfTrueSuccessor();
+ } else {
+ // Case (2a)
+ DCHECK(rhs->AsIntConstant()->IsZero());
+ *trueBranch = ifInstruction->IfFalseSuccessor();
+ }
+ *instanceOf = lhs->AsInstanceOf();
+ return true;
+ }
+ }
+ } else if (input->IsNotEqual()) {
+ HInstruction* rhs = input->AsNotEqual()->GetConstantRight();
+ if (rhs != nullptr) {
+ HInstruction* lhs = input->AsNotEqual()->GetLeastConstantLeft();
+ if (lhs->IsInstanceOf() && rhs->IsIntConstant()) {
+ if (rhs->AsIntConstant()->IsZero()) {
+ // Case (1b)
+ *trueBranch = ifInstruction->IfTrueSuccessor();
+ } else {
+ // Case (2b)
+ DCHECK(rhs->AsIntConstant()->IsOne());
+ *trueBranch = ifInstruction->IfFalseSuccessor();
+ }
+ *instanceOf = lhs->AsInstanceOf();
+ return true;
+ }
+ }
+ } else if (input->IsInstanceOf()) {
+ // Case (1c)
+ *instanceOf = input->AsInstanceOf();
+ *trueBranch = ifInstruction->IfTrueSuccessor();
+ return true;
+ } else if (input->IsBooleanNot()) {
+ HInstruction* not_input = input->InputAt(0);
+ if (not_input->IsInstanceOf()) {
+ // Case (2c)
+ *instanceOf = not_input->AsInstanceOf();
+ *trueBranch = ifInstruction->IfFalseSuccessor();
+ return true;
+ }
+ }
+
+ return false;
+}
+
// Detects if `block` is the True block for the pattern
// `if (x instanceof ClassX) { }`
// If that's the case insert an HBoundType instruction to bound the type of `x`
@@ -385,22 +455,11 @@
if (ifInstruction == nullptr) {
return;
}
- HInstruction* ifInput = ifInstruction->InputAt(0);
- HInstruction* instanceOf = nullptr;
- HBasicBlock* instanceOfTrueBlock = nullptr;
- // The instruction simplifier has transformed:
- // - `if (a instanceof A)` into an HIf with an HInstanceOf input
- // - `if (!(a instanceof A)` into an HIf with an HBooleanNot input (which in turn
- // has an HInstanceOf input)
- // So we should not see the usual HEqual here.
- if (ifInput->IsInstanceOf()) {
- instanceOf = ifInput;
- instanceOfTrueBlock = ifInstruction->IfTrueSuccessor();
- } else if (ifInput->IsBooleanNot() && ifInput->InputAt(0)->IsInstanceOf()) {
- instanceOf = ifInput->InputAt(0);
- instanceOfTrueBlock = ifInstruction->IfFalseSuccessor();
- } else {
+ // Try to recognize common `if (instanceof)` and `if (!instanceof)` patterns.
+ HInstanceOf* instanceOf = nullptr;
+ HBasicBlock* instanceOfTrueBlock = nullptr;
+ if (!MatchIfInstanceOf(ifInstruction, &instanceOf, &instanceOfTrueBlock)) {
return;
}
@@ -505,13 +564,6 @@
SetClassAsTypeInfo(instr, dex_cache->GetResolvedType(type_idx), is_exact);
}
-void RTPVisitor::VisitNullConstant(HNullConstant* instr) {
- // TODO: The null constant could be bound contextually (e.g. based on return statements)
- // to a more precise type.
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
-}
-
void RTPVisitor::VisitNewInstance(HNewInstance* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
@@ -523,7 +575,11 @@
static mirror::Class* GetClassFromDexCache(Thread* self, const DexFile& dex_file, uint16_t type_idx)
SHARED_REQUIRES(Locks::mutator_lock_) {
mirror::DexCache* dex_cache =
- Runtime::Current()->GetClassLinker()->FindDexCache(self, dex_file, false);
+ Runtime::Current()->GetClassLinker()->FindDexCache(self, dex_file, /* allow_failure */ true);
+ if (dex_cache == nullptr) {
+ // Dex cache could not be found. This should only happen during gtests.
+ return nullptr;
+ }
// Get type from dex cache assuming it was populated by the verifier.
return dex_cache->GetResolvedType(type_idx);
}
@@ -540,17 +596,24 @@
void RTPVisitor::UpdateFieldAccessTypeInfo(HInstruction* instr,
const FieldInfo& info) {
- // The field index is unknown only during tests.
- if (instr->GetType() != Primitive::kPrimNot || info.GetFieldIndex() == kUnknownFieldIndex) {
+ if (instr->GetType() != Primitive::kPrimNot) {
return;
}
ScopedObjectAccess soa(Thread::Current());
- ClassLinker* cl = Runtime::Current()->GetClassLinker();
- ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), info.GetDexCache().Get());
- // TODO: There are certain cases where we can't resolve the field.
- // b/21914925 is open to keep track of a repro case for this issue.
- mirror::Class* klass = (field == nullptr) ? nullptr : field->GetType<false>();
+ mirror::Class* klass = nullptr;
+
+ // The field index is unknown only during tests.
+ if (info.GetFieldIndex() != kUnknownFieldIndex) {
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ ArtField* field = cl->GetResolvedField(info.GetFieldIndex(), info.GetDexCache().Get());
+ // TODO: There are certain cases where we can't resolve the field.
+ // b/21914925 is open to keep track of a repro case for this issue.
+ if (field != nullptr) {
+ klass = field->GetType<false>();
+ }
+ }
+
SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
}
@@ -666,7 +729,7 @@
}
void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
- if (phi->GetType() != Primitive::kPrimNot) {
+ if (phi->IsDead() || phi->GetType() != Primitive::kPrimNot) {
return;
}
@@ -824,6 +887,8 @@
// NullConstant inputs are ignored during merging as they do not provide any useful information.
// If all the inputs are NullConstants then the type of the phi will be set to Object.
void ReferenceTypePropagation::UpdatePhi(HPhi* instr) {
+ DCHECK(instr->IsLive());
+
size_t input_count = instr->InputCount();
size_t first_input_index_not_null = 0;
while (first_input_index_not_null < input_count &&
@@ -868,7 +933,7 @@
// Re-computes and updates the nullability of the instruction. Returns whether or
// not the nullability was changed.
bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) {
- DCHECK(instr->IsPhi()
+ DCHECK((instr->IsPhi() && instr->AsPhi()->IsLive())
|| instr->IsBoundType()
|| instr->IsNullCheck()
|| instr->IsArrayGet());
@@ -916,7 +981,7 @@
void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {
for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
HInstruction* user = it.Current()->GetUser();
- if (user->IsPhi()
+ if ((user->IsPhi() && user->AsPhi()->IsLive())
|| user->IsBoundType()
|| user->IsNullCheck()
|| (user->IsArrayGet() && (user->GetType() == Primitive::kPrimNot))) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 080f970..306a457 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -28,13 +28,13 @@
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
-#include "gtest/gtest.h"
-
namespace art {
// Note: the register allocator tests rely on the fact that constants have live
// intervals and registers get allocated to them.
+class RegisterAllocatorTest : public CommonCompilerTest {};
+
static bool Check(const uint16_t* data) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
@@ -42,7 +42,7 @@
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
builder.BuildGraph(*item);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -57,7 +57,7 @@
* Unit testing of RegisterAllocator::ValidateIntervals. Register allocator
* tests are based on this validation method.
*/
-TEST(RegisterAllocatorTest, ValidateIntervals) {
+TEST_F(RegisterAllocatorTest, ValidateIntervals) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HGraph* graph = CreateGraph(&allocator);
@@ -146,7 +146,7 @@
}
}
-TEST(RegisterAllocatorTest, CFG1) {
+TEST_F(RegisterAllocatorTest, CFG1) {
/*
* Test the following snippet:
* return 0;
@@ -166,7 +166,7 @@
ASSERT_TRUE(Check(data));
}
-TEST(RegisterAllocatorTest, Loop1) {
+TEST_F(RegisterAllocatorTest, Loop1) {
/*
* Test the following snippet:
* int a = 0;
@@ -205,7 +205,7 @@
ASSERT_TRUE(Check(data));
}
-TEST(RegisterAllocatorTest, Loop2) {
+TEST_F(RegisterAllocatorTest, Loop2) {
/*
* Test the following snippet:
* int a = 0;
@@ -259,11 +259,11 @@
HGraphBuilder builder(graph);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
builder.BuildGraph(*item);
- graph->TryBuildingSsa();
+ TransformToSsa(graph);
return graph;
}
-TEST(RegisterAllocatorTest, Loop3) {
+TEST_F(RegisterAllocatorTest, Loop3) {
/*
* Test the following snippet:
* int a = 0
@@ -326,7 +326,7 @@
ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
}
-TEST(RegisterAllocatorTest, FirstRegisterUse) {
+TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8,
@@ -366,7 +366,7 @@
ASSERT_EQ(new_interval->FirstRegisterUse(), last_xor->GetLifetimePosition());
}
-TEST(RegisterAllocatorTest, DeadPhi) {
+TEST_F(RegisterAllocatorTest, DeadPhi) {
/* Test for a dead loop phi taking as back-edge input a phi that also has
* this loop phi as input. Walking backwards in SsaDeadPhiElimination
* does not solve the problem because the loop phi will be visited last.
@@ -407,7 +407,7 @@
* that share the same register. It should split the interval it is currently
* allocating for at the minimum lifetime position between the two inactive intervals.
*/
-TEST(RegisterAllocatorTest, FreeUntil) {
+TEST_F(RegisterAllocatorTest, FreeUntil) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -472,7 +472,7 @@
HInstruction** input2) {
HGraph* graph = CreateGraph(allocator);
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
HInstruction* parameter = new (allocator) HParameterValue(
@@ -539,7 +539,7 @@
return graph;
}
-TEST(RegisterAllocatorTest, PhiHint) {
+TEST_F(RegisterAllocatorTest, PhiHint) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HPhi *phi;
@@ -624,7 +624,7 @@
HInstruction** field,
HInstruction** ret) {
HGraph* graph = CreateGraph(allocator);
- NullHandle<mirror::DexCache> dex_cache;
+ ScopedNullHandle<mirror::DexCache> dex_cache;
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -658,7 +658,7 @@
return graph;
}
-TEST(RegisterAllocatorTest, ExpectedInRegisterHint) {
+TEST_F(RegisterAllocatorTest, ExpectedInRegisterHint) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *field, *ret;
@@ -726,7 +726,7 @@
return graph;
}
-TEST(RegisterAllocatorTest, SameAsFirstInputHint) {
+TEST_F(RegisterAllocatorTest, SameAsFirstInputHint) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *first_sub, *second_sub;
@@ -795,7 +795,7 @@
return graph;
}
-TEST(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
+TEST_F(RegisterAllocatorTest, ExpectedExactInRegisterAndSameOutputHint) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HInstruction *div;
@@ -819,7 +819,7 @@
// Test a bug in the register allocator, where allocating a blocked
// register would lead to spilling an inactive interval at the wrong
// position.
-TEST(RegisterAllocatorTest, SpillInactive) {
+TEST_F(RegisterAllocatorTest, SpillInactive) {
ArenaPool pool;
// Create a synthesized graph to please the register_allocator and
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 9e6cfbe..9e869e1 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -17,214 +17,11 @@
#include "ssa_builder.h"
#include "nodes.h"
-#include "primitive_type_propagation.h"
+#include "reference_type_propagation.h"
#include "ssa_phi_elimination.h"
namespace art {
-// Returns whether this is a loop header phi which was eagerly created but later
-// found inconsistent due to the vreg being undefined in one of its predecessors.
-// Such phi is marked dead and should be ignored until its removal in SsaPhiElimination.
-static bool IsUndefinedLoopHeaderPhi(HPhi* phi) {
- return phi->IsLoopHeaderPhi() && phi->InputCount() != phi->GetBlock()->GetPredecessors().size();
-}
-
-/**
- * A debuggable application may require to reviving phis, to ensure their
- * associated DEX register is available to a debugger. This class implements
- * the logic for statement (c) of the SsaBuilder (see ssa_builder.h). It
- * also makes sure that phis with incompatible input types are not revived
- * (statement (b) of the SsaBuilder).
- *
- * This phase must be run after detecting dead phis through the
- * DeadPhiElimination phase, and before deleting the dead phis.
- */
-class DeadPhiHandling : public ValueObject {
- public:
- explicit DeadPhiHandling(HGraph* graph)
- : graph_(graph), worklist_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)) {
- worklist_.reserve(kDefaultWorklistSize);
- }
-
- void Run();
-
- private:
- void VisitBasicBlock(HBasicBlock* block);
- void ProcessWorklist();
- void AddToWorklist(HPhi* phi);
- void AddDependentInstructionsToWorklist(HPhi* phi);
- bool UpdateType(HPhi* phi);
-
- HGraph* const graph_;
- ArenaVector<HPhi*> worklist_;
-
- static constexpr size_t kDefaultWorklistSize = 8;
-
- DISALLOW_COPY_AND_ASSIGN(DeadPhiHandling);
-};
-
-static bool HasConflictingEquivalent(HPhi* phi) {
- if (phi->GetNext() == nullptr) {
- return false;
- }
- HPhi* next = phi->GetNext()->AsPhi();
- if (next->GetRegNumber() == phi->GetRegNumber()) {
- if (next->GetType() == Primitive::kPrimVoid) {
- // We only get a void type for an equivalent phi we processed and found out
- // it was conflicting.
- return true;
- } else {
- // Go to the next phi, in case it is also an equivalent.
- return HasConflictingEquivalent(next);
- }
- }
- return false;
-}
-
-bool DeadPhiHandling::UpdateType(HPhi* phi) {
- if (phi->IsDead()) {
- // Phi was rendered dead while waiting in the worklist because it was replaced
- // with an equivalent.
- return false;
- }
-
- Primitive::Type existing = phi->GetType();
-
- bool conflict = false;
- Primitive::Type new_type = existing;
- for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
- HInstruction* input = phi->InputAt(i);
- if (input->IsPhi() && input->AsPhi()->IsDead()) {
- // We are doing a reverse post order visit of the graph, reviving
- // phis that have environment uses and updating their types. If an
- // input is a phi, and it is dead (because its input types are
- // conflicting), this phi must be marked dead as well.
- conflict = true;
- break;
- }
- Primitive::Type input_type = HPhi::ToPhiType(input->GetType());
-
- // The only acceptable transitions are:
- // - From void to typed: first time we update the type of this phi.
- // - From int to reference (or reference to int): the phi has to change
- // to reference type. If the integer input cannot be converted to a
- // reference input, the phi will remain dead.
- if (new_type == Primitive::kPrimVoid) {
- new_type = input_type;
- } else if (new_type == Primitive::kPrimNot && input_type == Primitive::kPrimInt) {
- if (input->IsPhi() && HasConflictingEquivalent(input->AsPhi())) {
- // If we already asked for an equivalent of the input phi, but that equivalent
- // ended up conflicting, make this phi conflicting too.
- conflict = true;
- break;
- }
- HInstruction* equivalent = SsaBuilder::GetReferenceTypeEquivalent(input);
- if (equivalent == nullptr) {
- conflict = true;
- break;
- }
- phi->ReplaceInput(equivalent, i);
- if (equivalent->IsPhi()) {
- DCHECK_EQ(equivalent->GetType(), Primitive::kPrimNot);
- // We created a new phi, but that phi has the same inputs as the old phi. We
- // add it to the worklist to ensure its inputs can also be converted to reference.
- // If not, it will remain dead, and the algorithm will make the current phi dead
- // as well.
- equivalent->AsPhi()->SetLive();
- AddToWorklist(equivalent->AsPhi());
- }
- } else if (new_type == Primitive::kPrimInt && input_type == Primitive::kPrimNot) {
- new_type = Primitive::kPrimNot;
- // Start over, we may request reference equivalents for the inputs of the phi.
- i = -1;
- } else if (new_type != input_type) {
- conflict = true;
- break;
- }
- }
-
- if (conflict) {
- phi->SetType(Primitive::kPrimVoid);
- phi->SetDead();
- return true;
- } else if (existing == new_type) {
- return false;
- }
-
- DCHECK(phi->IsLive());
- phi->SetType(new_type);
-
- // There might exist a `new_type` equivalent of `phi` already. In that case,
- // we replace the equivalent with the, now live, `phi`.
- HPhi* equivalent = phi->GetNextEquivalentPhiWithSameType();
- if (equivalent != nullptr) {
- // There cannot be more than two equivalents with the same type.
- DCHECK(equivalent->GetNextEquivalentPhiWithSameType() == nullptr);
- // If doing fix-point iteration, the equivalent might be in `worklist_`.
- // Setting it dead will make UpdateType skip it.
- equivalent->SetDead();
- equivalent->ReplaceWith(phi);
- }
-
- return true;
-}
-
-void DeadPhiHandling::VisitBasicBlock(HBasicBlock* block) {
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- if (IsUndefinedLoopHeaderPhi(phi)) {
- DCHECK(phi->IsDead());
- continue;
- }
- if (phi->IsDead() && phi->HasEnvironmentUses()) {
- phi->SetLive();
- if (block->IsLoopHeader()) {
- // Loop phis must have a type to guarantee convergence of the algorithm.
- DCHECK_NE(phi->GetType(), Primitive::kPrimVoid);
- AddToWorklist(phi);
- } else {
- // Because we are doing a reverse post order visit, all inputs of
- // this phi have been visited and therefore had their (initial) type set.
- UpdateType(phi);
- }
- }
- }
-}
-
-void DeadPhiHandling::ProcessWorklist() {
- while (!worklist_.empty()) {
- HPhi* instruction = worklist_.back();
- worklist_.pop_back();
- // Note that the same equivalent phi can be added multiple times in the work list, if
- // used by multiple phis. The first call to `UpdateType` will know whether the phi is
- // dead or live.
- if (instruction->IsLive() && UpdateType(instruction)) {
- AddDependentInstructionsToWorklist(instruction);
- }
- }
-}
-
-void DeadPhiHandling::AddToWorklist(HPhi* instruction) {
- DCHECK(instruction->IsLive());
- worklist_.push_back(instruction);
-}
-
-void DeadPhiHandling::AddDependentInstructionsToWorklist(HPhi* instruction) {
- for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->GetUser()->AsPhi();
- if (phi != nullptr && !phi->IsDead()) {
- AddToWorklist(phi);
- }
- }
-}
-
-void DeadPhiHandling::Run() {
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
- }
- ProcessWorklist();
-}
-
void SsaBuilder::SetLoopHeaderPhiInputs() {
for (size_t i = loop_headers_.size(); i > 0; --i) {
HBasicBlock* block = loop_headers_[i - 1];
@@ -285,10 +82,11 @@
HPhi* phi = it.Current()->AsPhi();
HPhi* next = phi->GetNextEquivalentPhiWithSameType();
if (next != nullptr) {
- // Make sure we do not replace a live phi with a dead phi. A live phi has been
- // handled by the type propagation phase, unlike a dead phi.
+ // Make sure we do not replace a live phi with a dead phi. A live phi
+ // has been handled by the type propagation phase, unlike a dead phi.
if (next->IsLive()) {
phi->ReplaceWith(next);
+ phi->SetDead();
} else {
next->ReplaceWith(phi);
}
@@ -300,64 +98,7 @@
}
}
-void SsaBuilder::BuildSsa() {
- // 1) Visit in reverse post order. We need to have all predecessors of a block visited
- // (with the exception of loops) in order to create the right environment for that
- // block. For loops, we create phis whose inputs will be set in 2).
- for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
- VisitBasicBlock(it.Current());
- }
-
- // 2) Set inputs of loop phis.
- SetLoopHeaderPhiInputs();
-
- // 3) Mark dead phis. This will mark phis that are only used by environments:
- // at the DEX level, the type of these phis does not need to be consistent, but
- // our code generator will complain if the inputs of a phi do not have the same
- // type. The marking allows the type propagation to know which phis it needs
- // to handle. We mark but do not eliminate: the elimination will be done in
- // step 9).
- SsaDeadPhiElimination dead_phis_for_type_propagation(GetGraph());
- dead_phis_for_type_propagation.MarkDeadPhis();
-
- // 4) Propagate types of phis. At this point, phis are typed void in the general
- // case, or float/double/reference when we created an equivalent phi. So we
- // need to propagate the types across phis to give them a correct type.
- PrimitiveTypePropagation type_propagation(GetGraph());
- type_propagation.Run();
-
- // 5) When creating equivalent phis we copy the inputs of the original phi which
- // may be improperly typed. This was fixed during the type propagation in 4) but
- // as a result we may end up with two equivalent phis with the same type for
- // the same dex register. This pass cleans them up.
- EquivalentPhisCleanup();
-
- // 6) Mark dead phis again. Step 4) may have introduced new phis.
- // Step 5) might enable the death of new phis.
- SsaDeadPhiElimination dead_phis(GetGraph());
- dead_phis.MarkDeadPhis();
-
- // 7) Now that the graph is correctly typed, we can get rid of redundant phis.
- // Note that we cannot do this phase before type propagation, otherwise
- // we could get rid of phi equivalents, whose presence is a requirement for the
- // type propagation phase. Note that this is to satisfy statement (a) of the
- // SsaBuilder (see ssa_builder.h).
- SsaRedundantPhiElimination redundant_phi(GetGraph());
- redundant_phi.Run();
-
- // 8) Fix the type for null constants which are part of an equality comparison.
- // We need to do this after redundant phi elimination, to ensure the only cases
- // that we can see are reference comparison against 0. The redundant phi
- // elimination ensures we do not see a phi taking two 0 constants in a HEqual
- // or HNotEqual.
- FixNullConstantType();
-
- // 9) Make sure environments use the right phi "equivalent": a phi marked dead
- // can have a phi equivalent that is not dead. We must therefore update
- // all environment uses of the dead phi to use its equivalent. Note that there
- // can be multiple phis for the same Dex register that are live (for example
- // when merging constants), in which case it is OK for the environments
- // to just reference one.
+void SsaBuilder::FixEnvironmentPhis() {
for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
for (HInstructionIterator it_phis(block->GetPhis()); !it_phis.Done(); it_phis.Advance()) {
@@ -378,24 +119,345 @@
phi->ReplaceWith(next);
}
}
+}
- // 10) Deal with phis to guarantee liveness of phis in case of a debuggable
- // application. This is for satisfying statement (c) of the SsaBuilder
- // (see ssa_builder.h).
- if (GetGraph()->IsDebuggable()) {
- DeadPhiHandling dead_phi_handler(GetGraph());
- dead_phi_handler.Run();
+static void AddDependentInstructionsToWorklist(HInstruction* instruction,
+ ArenaVector<HPhi*>* worklist) {
+ // If `instruction` is a dead phi, type conflict was just identified. All its
+ // live phi users, and transitively users of those users, therefore need to be
+ // marked dead/conflicting too, so we add them to the worklist. Otherwise we
+ // add users whose type does not match and needs to be updated.
+ bool add_all_live_phis = instruction->IsPhi() && instruction->AsPhi()->IsDead();
+ for (HUseIterator<HInstruction*> it(instruction->GetUses()); !it.Done(); it.Advance()) {
+ HInstruction* user = it.Current()->GetUser();
+ if (user->IsPhi() && user->AsPhi()->IsLive()) {
+ if (add_all_live_phis || user->GetType() != instruction->GetType()) {
+ worklist->push_back(user->AsPhi());
+ }
+ }
+ }
+}
+
+// Find a candidate primitive type for `phi` by merging the type of its inputs.
+// Return false if conflict is identified.
+static bool TypePhiFromInputs(HPhi* phi) {
+ Primitive::Type common_type = phi->GetType();
+
+ for (HInputIterator it(phi); !it.Done(); it.Advance()) {
+ HInstruction* input = it.Current();
+ if (input->IsPhi() && input->AsPhi()->IsDead()) {
+ // Phis are constructed live so if an input is a dead phi, it must have
+ // been made dead due to type conflict. Mark this phi conflicting too.
+ return false;
+ }
+
+ Primitive::Type input_type = HPhi::ToPhiType(input->GetType());
+ if (common_type == input_type) {
+ // No change in type.
+ } else if (Primitive::ComponentSize(common_type) != Primitive::ComponentSize(input_type)) {
+ // Types are of different sizes, e.g. int vs. long. Must be a conflict.
+ return false;
+ } else if (Primitive::IsIntegralType(common_type)) {
+ // Previous inputs were integral, this one is not but is of the same size.
+ // This does not imply conflict since some bytecode instruction types are
+ // ambiguous. TypeInputsOfPhi will either type them or detect a conflict.
+ DCHECK(Primitive::IsFloatingPointType(input_type) || input_type == Primitive::kPrimNot);
+ common_type = input_type;
+ } else if (Primitive::IsIntegralType(input_type)) {
+ // Input is integral, common type is not. Same as in the previous case, if
+ // there is a conflict, it will be detected during TypeInputsOfPhi.
+ DCHECK(Primitive::IsFloatingPointType(common_type) || common_type == Primitive::kPrimNot);
+ } else {
+ // Combining float and reference types. Clearly a conflict.
+ DCHECK((common_type == Primitive::kPrimFloat && input_type == Primitive::kPrimNot) ||
+ (common_type == Primitive::kPrimNot && input_type == Primitive::kPrimFloat));
+ return false;
+ }
}
- // 11) Now that the right phis are used for the environments, and we
- // have potentially revive dead phis in case of a debuggable application,
- // we can eliminate phis we do not need. Regardless of the debuggable status,
- // this phase is necessary for statement (b) of the SsaBuilder (see ssa_builder.h),
- // as well as for the code generation, which does not deal with phis of conflicting
- // input types.
- dead_phis.EliminateDeadPhis();
+ // We have found a candidate type for the phi. Set it and return true. We may
+ // still discover conflict whilst typing the individual inputs in TypeInputsOfPhi.
+ phi->SetType(common_type);
+ return true;
+}
- // 12) Clear locals.
+// Replace inputs of `phi` to match its type. Return false if conflict is identified.
+bool SsaBuilder::TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+ Primitive::Type common_type = phi->GetType();
+ if (common_type == Primitive::kPrimVoid || Primitive::IsIntegralType(common_type)) {
+ // Phi either contains only other untyped phis (common_type == kPrimVoid),
+ // or `common_type` is integral and we do not need to retype ambiguous inputs
+ // because they are always constructed with the integral type candidate.
+ if (kIsDebugBuild) {
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ if (common_type == Primitive::kPrimVoid) {
+ DCHECK(input->IsPhi() && input->GetType() == Primitive::kPrimVoid);
+ } else {
+ DCHECK((input->IsPhi() && input->GetType() == Primitive::kPrimVoid) ||
+ HPhi::ToPhiType(input->GetType()) == common_type);
+ }
+ }
+ }
+ // Inputs did not need to be replaced, hence no conflict. Report success.
+ return true;
+ } else {
+ DCHECK(common_type == Primitive::kPrimNot || Primitive::IsFloatingPointType(common_type));
+ for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
+ HInstruction* input = phi->InputAt(i);
+ if (input->GetType() != common_type) {
+ // Input type does not match phi's type. Try to retype the input or
+ // generate a suitably typed equivalent.
+ HInstruction* equivalent = (common_type == Primitive::kPrimNot)
+ ? GetReferenceTypeEquivalent(input)
+ : GetFloatOrDoubleEquivalent(input, common_type);
+ if (equivalent == nullptr) {
+ // Input could not be typed. Report conflict.
+ return false;
+ }
+ // Make sure the input did not change its type and we do not need to
+ // update its users.
+ DCHECK_NE(input, equivalent);
+
+ phi->ReplaceInput(equivalent, i);
+ if (equivalent->IsPhi()) {
+ worklist->push_back(equivalent->AsPhi());
+ }
+ }
+ }
+ // All inputs either matched the type of the phi or we successfully replaced
+ // them with a suitable equivalent. Report success.
+ return true;
+ }
+}
+
+// Attempt to set the primitive type of `phi` to match its inputs. Return whether
+// it was changed by the algorithm or not.
+bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist) {
+ DCHECK(phi->IsLive());
+ Primitive::Type original_type = phi->GetType();
+
+ // Try to type the phi in two stages:
+ // (1) find a candidate type for the phi by merging types of all its inputs,
+ // (2) try to type the phi's inputs to that candidate type.
+ // Either of these stages may detect a type conflict and fail, in which case
+ // we immediately abort.
+ if (!TypePhiFromInputs(phi) || !TypeInputsOfPhi(phi, worklist)) {
+ // Conflict detected. Mark the phi dead and return true because it changed.
+ phi->SetDead();
+ return true;
+ }
+
+ // Return true if the type of the phi has changed.
+ return phi->GetType() != original_type;
+}
+
+void SsaBuilder::RunPrimitiveTypePropagation() {
+ ArenaVector<HPhi*> worklist(GetGraph()->GetArena()->Adapter());
+
+ for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (block->IsLoopHeader()) {
+ for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* phi = phi_it.Current()->AsPhi();
+ if (phi->IsLive()) {
+ worklist.push_back(phi);
+ }
+ }
+ } else {
+ for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ // Eagerly compute the type of the phi, for quicker convergence. Note
+ // that we don't need to add users to the worklist because we are
+ // doing a reverse post-order visit, therefore either the phi users are
+ // non-loop phi and will be visited later in the visit, or are loop-phis,
+ // and they are already in the work list.
+ HPhi* phi = phi_it.Current()->AsPhi();
+ if (phi->IsLive()) {
+ UpdatePrimitiveType(phi, &worklist);
+ }
+ }
+ }
+ }
+
+ ProcessPrimitiveTypePropagationWorklist(&worklist);
+ EquivalentPhisCleanup();
+}
+
+void SsaBuilder::ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist) {
+ // Process the worklist.
+ while (!worklist->empty()) {
+ HPhi* phi = worklist->back();
+ worklist->pop_back();
+ // The phi could have been made dead as a result of conflicts while in the
+ // worklist. If it is now dead, there is no point in updating its type.
+ if (phi->IsLive() && UpdatePrimitiveType(phi, worklist)) {
+ AddDependentInstructionsToWorklist(phi, worklist);
+ }
+ }
+}
+
+static HArrayGet* FindFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) {
+ Primitive::Type type = aget->GetType();
+ DCHECK(Primitive::IsIntOrLongType(type));
+ HArrayGet* next = aget->GetNext()->AsArrayGet();
+ return (next != nullptr && next->IsEquivalentOf(aget)) ? next : nullptr;
+}
+
+static HArrayGet* CreateFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) {
+ Primitive::Type type = aget->GetType();
+ DCHECK(Primitive::IsIntOrLongType(type));
+ DCHECK(FindFloatOrDoubleEquivalentOfArrayGet(aget) == nullptr);
+
+ HArrayGet* equivalent = new (aget->GetBlock()->GetGraph()->GetArena()) HArrayGet(
+ aget->GetArray(),
+ aget->GetIndex(),
+ type == Primitive::kPrimInt ? Primitive::kPrimFloat : Primitive::kPrimDouble,
+ aget->GetDexPc());
+ aget->GetBlock()->InsertInstructionAfter(equivalent, aget);
+ return equivalent;
+}
+
+// Returns true if the array input of `aget` is either of type int[] or long[].
+// Should only be called on ArrayGets with ambiguous type (int/float, long/double)
+// on arrays which were typed to an array class by RTP.
+static bool IsArrayGetOnIntegralArray(HArrayGet* aget) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo array_type = aget->GetArray()->GetReferenceTypeInfo();
+ DCHECK(array_type.IsPrimitiveArrayClass());
+ ReferenceTypeInfo::TypeHandle array_type_handle = array_type.GetTypeHandle();
+
+ bool is_integral_type;
+ if (Primitive::Is64BitType(aget->GetType())) {
+ is_integral_type = array_type_handle->GetComponentType()->IsPrimitiveLong();
+ DCHECK(is_integral_type || array_type_handle->GetComponentType()->IsPrimitiveDouble());
+ } else {
+ is_integral_type = array_type_handle->GetComponentType()->IsPrimitiveInt();
+ DCHECK(is_integral_type || array_type_handle->GetComponentType()->IsPrimitiveFloat());
+ }
+ return is_integral_type;
+}
+
+bool SsaBuilder::FixAmbiguousArrayGets() {
+ if (ambiguous_agets_.empty()) {
+ return true;
+ }
+
+ // The wrong ArrayGet equivalent may still have Phi uses coming from ArraySet
+ // uses (because they are untyped) and environment uses (if --debuggable).
+ // After resolving all ambiguous ArrayGets, we will re-run primitive type
+ // propagation on the Phis which need to be updated.
+ ArenaVector<HPhi*> worklist(GetGraph()->GetArena()->Adapter());
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+
+ for (HArrayGet* aget_int : ambiguous_agets_) {
+ if (!aget_int->GetArray()->GetReferenceTypeInfo().IsPrimitiveArrayClass()) {
+ // RTP did not type the input array. Bail.
+ return false;
+ }
+
+ HArrayGet* aget_float = FindFloatOrDoubleEquivalentOfArrayGet(aget_int);
+ if (IsArrayGetOnIntegralArray(aget_int)) {
+ if (aget_float != nullptr) {
+ // There is a float/double equivalent. We must replace it and re-run
+ // primitive type propagation on all dependent instructions.
+ aget_float->ReplaceWith(aget_int);
+ aget_float->GetBlock()->RemoveInstruction(aget_float);
+ AddDependentInstructionsToWorklist(aget_int, &worklist);
+ }
+ } else {
+ if (aget_float == nullptr) {
+ // This is a float/double ArrayGet but there were no typed uses which
+ // would create the typed equivalent. Create it now.
+ aget_float = CreateFloatOrDoubleEquivalentOfArrayGet(aget_int);
+ }
+ // Replace the original int/long instruction. Note that it may have phi
+ // uses, environment uses, as well as real uses (from untyped ArraySets).
+ // We need to re-run primitive type propagation on its dependent instructions.
+ aget_int->ReplaceWith(aget_float);
+ aget_int->GetBlock()->RemoveInstruction(aget_int);
+ AddDependentInstructionsToWorklist(aget_float, &worklist);
+ }
+ }
+ }
+
+ // Set a flag stating that types of ArrayGets have been resolved. This is used
+ // by GetFloatOrDoubleEquivalentOfArrayGet to report conflict.
+ agets_fixed_ = true;
+
+ if (!worklist.empty()) {
+ ProcessPrimitiveTypePropagationWorklist(&worklist);
+ EquivalentPhisCleanup();
+ }
+
+ return true;
+}
+
+BuildSsaResult SsaBuilder::BuildSsa() {
+ // 1) Visit in reverse post order. We need to have all predecessors of a block
+ // visited (with the exception of loops) in order to create the right environment
+ // for that block. For loops, we create phis whose inputs will be set in 2).
+ for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
+ VisitBasicBlock(it.Current());
+ }
+
+ // 2) Set inputs of loop header phis.
+ SetLoopHeaderPhiInputs();
+
+ // 3) Propagate types of phis. At this point, phis are typed void in the general
+ // case, or float/double/reference if we created an equivalent phi. So we need
+ // to propagate the types across phis to give them a correct type. If a type
+ // conflict is detected in this stage, the phi is marked dead.
+ RunPrimitiveTypePropagation();
+
+ // 4) Now that the correct primitive types have been assigned, we can get rid
+ // of redundant phis. Note that we cannot do this phase before type propagation,
+ // otherwise we could get rid of phi equivalents, whose presence is a requirement
+ // for the type propagation phase. Note that this is to satisfy statement (a)
+ // of the SsaBuilder (see ssa_builder.h).
+ SsaRedundantPhiElimination(GetGraph()).Run();
+
+ // 5) Fix the type for null constants which are part of an equality comparison.
+ // We need to do this after redundant phi elimination, to ensure the only cases
+ // that we can see are reference comparison against 0. The redundant phi
+ // elimination ensures we do not see a phi taking two 0 constants in a HEqual
+ // or HNotEqual.
+ FixNullConstantType();
+
+ // 6) Compute type of reference type instructions. The pass assumes that
+ // NullConstant has been fixed up.
+ ReferenceTypePropagation(GetGraph(), handles_).Run();
+
+ // 7) Step 1) duplicated ArrayGet instructions with ambiguous type (int/float
+ // or long/double). Now that RTP computed the type of the array input, the
+ // ambiguity can be resolved and the correct equivalent kept.
+ if (!FixAmbiguousArrayGets()) {
+ return kBuildSsaFailAmbiguousArrayGet;
+ }
+
+ // 8) Mark dead phis. This will mark phis which are not used by instructions
+ // or other live phis. If compiling as debuggable code, phis will also be kept
+ // live if they have an environment use.
+ SsaDeadPhiElimination dead_phi_elimimation(GetGraph());
+ dead_phi_elimimation.MarkDeadPhis();
+
+ // 9) Make sure environments use the right phi equivalent: a phi marked dead
+ // can have a phi equivalent that is not dead. In that case we have to replace
+ // it with the live equivalent because deoptimization and try/catch rely on
+ // environments containing values of all live vregs at that point. Note that
+ // there can be multiple phis for the same Dex register that are live
+ // (for example when merging constants), in which case it is okay for the
+ // environments to just reference one.
+ FixEnvironmentPhis();
+
+ // 10) Now that the right phis are used for the environments, we can eliminate
+ // phis we do not need. Regardless of the debuggable status, this phase is
+ // necessary for statement (b) of the SsaBuilder (see ssa_builder.h), as well
+ // as for the code generation, which does not deal with phis of conflicting
+ // input types.
+ dead_phi_elimimation.EliminateDeadPhis();
+
+ // 11) Clear locals.
for (HInstructionIterator it(GetGraph()->GetEntryBlock()->GetInstructions());
!it.Done();
it.Advance()) {
@@ -404,6 +466,8 @@
current->GetBlock()->RemoveInstruction(current);
}
}
+
+ return kBuildSsaSuccess;
}
ArenaVector<HInstruction*>* SsaBuilder::GetLocalsFor(HBasicBlock* block) {
@@ -591,6 +655,8 @@
* phi with a floating point / reference type.
*/
HPhi* SsaBuilder::GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type) {
+ DCHECK(phi->IsLive()) << "Cannot get equivalent of a dead phi since it would create a live one.";
+
// We place the floating point /reference phi next to this phi.
HInstruction* next = phi->GetNext();
if (next != nullptr
@@ -606,35 +672,50 @@
ArenaAllocator* allocator = phi->GetBlock()->GetGraph()->GetArena();
HPhi* new_phi = new (allocator) HPhi(allocator, phi->GetRegNumber(), phi->InputCount(), type);
for (size_t i = 0, e = phi->InputCount(); i < e; ++i) {
- // Copy the inputs. Note that the graph may not be correctly typed by doing this copy,
- // but the type propagation phase will fix it.
+ // Copy the inputs. Note that the graph may not be correctly typed
+ // by doing this copy, but the type propagation phase will fix it.
new_phi->SetRawInputAt(i, phi->InputAt(i));
}
phi->GetBlock()->InsertPhiAfter(new_phi, phi);
+ DCHECK(new_phi->IsLive());
return new_phi;
} else {
+ // An existing equivalent was found. If it is dead, conflict was previously
+ // identified and we return nullptr instead.
HPhi* next_phi = next->AsPhi();
DCHECK_EQ(next_phi->GetType(), type);
- if (next_phi->IsDead()) {
- // TODO(dbrazdil): Remove this SetLive (we should not need to revive phis)
- // once we stop running MarkDeadPhis before PrimitiveTypePropagation. This
- // cannot revive undefined loop header phis because they cannot have uses.
- DCHECK(!IsUndefinedLoopHeaderPhi(next_phi));
- next_phi->SetLive();
- }
- return next_phi;
+ return next_phi->IsLive() ? next_phi : nullptr;
}
}
-HInstruction* SsaBuilder::GetFloatOrDoubleEquivalent(HInstruction* user,
- HInstruction* value,
- Primitive::Type type) {
+HArrayGet* SsaBuilder::GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) {
+ DCHECK(Primitive::IsIntegralType(aget->GetType()));
+
+ if (!Primitive::IsIntOrLongType(aget->GetType())) {
+ // Cannot type boolean, char, byte, short to float/double.
+ return nullptr;
+ }
+
+ DCHECK(ContainsElement(ambiguous_agets_, aget));
+ if (agets_fixed_) {
+ // This used to be an ambiguous ArrayGet but its type has been resolved to
+ // int/long. Requesting a float/double equivalent should lead to a conflict.
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current());
+ DCHECK(IsArrayGetOnIntegralArray(aget));
+ }
+ return nullptr;
+ } else {
+ // This is an ambiguous ArrayGet which has not been resolved yet. Return an
+ // equivalent float/double instruction to use until it is resolved.
+ HArrayGet* equivalent = FindFloatOrDoubleEquivalentOfArrayGet(aget);
+ return (equivalent == nullptr) ? CreateFloatOrDoubleEquivalentOfArrayGet(aget) : equivalent;
+ }
+}
+
+HInstruction* SsaBuilder::GetFloatOrDoubleEquivalent(HInstruction* value, Primitive::Type type) {
if (value->IsArrayGet()) {
- // The verifier has checked that values in arrays cannot be used for both
- // floating point and non-floating point operations. It is therefore safe to just
- // change the type of the operation.
- value->AsArrayGet()->SetType(type);
- return value;
+ return GetFloatOrDoubleEquivalentOfArrayGet(value->AsArrayGet());
} else if (value->IsLongConstant()) {
return GetDoubleEquivalent(value->AsLongConstant());
} else if (value->IsIntConstant()) {
@@ -642,12 +723,7 @@
} else if (value->IsPhi()) {
return GetFloatDoubleOrReferenceEquivalentOfPhi(value->AsPhi(), type);
} else {
- // For other instructions, we assume the verifier has checked that the dex format is correctly
- // typed and the value in a dex register will not be used for both floating point and
- // non-floating point operations. So the only reason an instruction would want a floating
- // point equivalent is for an unused phi that will be removed by the dead phi elimination phase.
- DCHECK(user->IsPhi()) << "is actually " << user->DebugName() << " (" << user->GetId() << ")";
- return value;
+ return nullptr;
}
}
@@ -662,15 +738,17 @@
}
void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
+ Primitive::Type load_type = load->GetType();
HInstruction* value = (*current_locals_)[load->GetLocal()->GetRegNumber()];
// If the operation requests a specific type, we make sure its input is of that type.
- if (load->GetType() != value->GetType()) {
- if (load->GetType() == Primitive::kPrimFloat || load->GetType() == Primitive::kPrimDouble) {
- value = GetFloatOrDoubleEquivalent(load, value, load->GetType());
- } else if (load->GetType() == Primitive::kPrimNot) {
+ if (load_type != value->GetType()) {
+ if (load_type == Primitive::kPrimFloat || load_type == Primitive::kPrimDouble) {
+ value = GetFloatOrDoubleEquivalent(value, load_type);
+ } else if (load_type == Primitive::kPrimNot) {
value = GetReferenceTypeEquivalent(value);
}
}
+
load->ReplaceWith(value);
load->GetBlock()->RemoveInstruction(load);
}
@@ -760,4 +838,13 @@
temp->GetBlock()->RemoveInstruction(temp);
}
+void SsaBuilder::VisitArrayGet(HArrayGet* aget) {
+ Primitive::Type type = aget->GetType();
+ DCHECK(!Primitive::IsFloatingPointType(type));
+ if (Primitive::IsIntOrLongType(type)) {
+ ambiguous_agets_.push_back(aget);
+ }
+ VisitInstruction(aget);
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index dcce5e4..ed6f5ca 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -49,17 +49,20 @@
*/
class SsaBuilder : public HGraphVisitor {
public:
- explicit SsaBuilder(HGraph* graph)
+ explicit SsaBuilder(HGraph* graph, StackHandleScopeCollection* handles)
: HGraphVisitor(graph),
+ handles_(handles),
+ agets_fixed_(false),
current_locals_(nullptr),
loop_headers_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
+ ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
locals_for_(graph->GetBlocks().size(),
ArenaVector<HInstruction*>(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)),
graph->GetArena()->Adapter(kArenaAllocSsaBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
- void BuildSsa();
+ BuildSsaResult BuildSsa();
// Returns locals vector for `block`. If it is a catch block, the vector will be
// prepopulated with catch phis for vregs which are defined in `current_locals_`.
@@ -71,23 +74,38 @@
void VisitStoreLocal(HStoreLocal* store);
void VisitInstruction(HInstruction* instruction);
void VisitTemporary(HTemporary* instruction);
-
- static HInstruction* GetFloatOrDoubleEquivalent(HInstruction* user,
- HInstruction* instruction,
- Primitive::Type type);
-
- static HInstruction* GetReferenceTypeEquivalent(HInstruction* instruction);
+ void VisitArrayGet(HArrayGet* aget);
static constexpr const char* kSsaBuilderPassName = "ssa_builder";
private:
void SetLoopHeaderPhiInputs();
+ void FixEnvironmentPhis();
void FixNullConstantType();
void EquivalentPhisCleanup();
+ void RunPrimitiveTypePropagation();
- static HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
- static HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
- static HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type);
+ // Attempts to resolve types of aget and aget-wide instructions from reference
+ // type information on the input array. Returns false if the type of the array
+ // is unknown.
+ bool FixAmbiguousArrayGets();
+
+ bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist);
+ bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist);
+ void ProcessPrimitiveTypePropagationWorklist(ArenaVector<HPhi*>* worklist);
+
+ HInstruction* GetFloatOrDoubleEquivalent(HInstruction* instruction, Primitive::Type type);
+ HInstruction* GetReferenceTypeEquivalent(HInstruction* instruction);
+
+ HFloatConstant* GetFloatEquivalent(HIntConstant* constant);
+ HDoubleConstant* GetDoubleEquivalent(HLongConstant* constant);
+ HPhi* GetFloatDoubleOrReferenceEquivalentOfPhi(HPhi* phi, Primitive::Type type);
+ HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget);
+
+ StackHandleScopeCollection* const handles_;
+
+ // True if types of ambiguous ArrayGets have been resolved.
+ bool agets_fixed_;
// Locals for the current block being visited.
ArenaVector<HInstruction*>* current_locals_;
@@ -96,6 +114,8 @@
// over these blocks to set the inputs of their phis.
ArenaVector<HBasicBlock*> loop_headers_;
+ ArenaVector<HArrayGet*> ambiguous_agets_;
+
// HEnvironment for each block.
ArenaVector<ArenaVector<HInstruction*>> locals_for_;
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index a3219dc..63aba88 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -40,15 +40,17 @@
continue;
}
- bool has_non_phi_use = false;
- for (HUseIterator<HInstruction*> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) {
- if (!use_it.Current()->GetUser()->IsPhi()) {
- has_non_phi_use = true;
- break;
+ bool keep_alive = (graph_->IsDebuggable() && phi->HasEnvironmentUses());
+ if (!keep_alive) {
+ for (HUseIterator<HInstruction*> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) {
+ if (!use_it.Current()->GetUser()->IsPhi()) {
+ keep_alive = true;
+ break;
+ }
}
}
- if (has_non_phi_use) {
+ if (keep_alive) {
worklist_.push_back(phi);
} else {
phi->SetDead();
@@ -94,8 +96,8 @@
for (HUseIterator<HInstruction*> use_it(phi->GetUses()); !use_it.Done();
use_it.Advance()) {
HInstruction* user = use_it.Current()->GetUser();
- DCHECK(user->IsLoopHeaderPhi()) << user->GetId();
- DCHECK(user->AsPhi()->IsDead()) << user->GetId();
+ DCHECK(user->IsLoopHeaderPhi());
+ DCHECK(user->AsPhi()->IsDead());
}
}
// Remove the phi from use lists of its inputs.
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 024278f..d2885a8 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -28,6 +28,8 @@
namespace art {
+class SsaTest : public CommonCompilerTest {};
+
class SsaPrettyPrinter : public HPrettyPrinter {
public:
explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
@@ -83,11 +85,10 @@
bool graph_built = builder.BuildGraph(*item);
ASSERT_TRUE(graph_built);
- graph->BuildDominatorTree();
+ TransformToSsa(graph);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
- graph->TransformToSsa();
ReNumberInstructions(graph);
// Test that phis had their type set.
@@ -103,7 +104,7 @@
ASSERT_STREQ(expected, printer.str().c_str());
}
-TEST(SsaTest, CFG1) {
+TEST_F(SsaTest, CFG1) {
// Test that we get rid of loads and stores.
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -131,7 +132,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, CFG2) {
+TEST_F(SsaTest, CFG2) {
// Test that we create a phi for the join block of an if control flow instruction
// when there is only code in the else branch.
const char* expected =
@@ -162,7 +163,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, CFG3) {
+TEST_F(SsaTest, CFG3) {
// Test that we create a phi for the join block of an if control flow instruction
// when both branches update a local.
const char* expected =
@@ -195,7 +196,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop1) {
+TEST_F(SsaTest, Loop1) {
// Test that we create a phi for an initialized local at entry of a loop.
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -228,7 +229,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop2) {
+TEST_F(SsaTest, Loop2) {
// Simple loop with one preheader and one back edge.
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -258,7 +259,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop3) {
+TEST_F(SsaTest, Loop3) {
// Test that a local not yet defined at the entry of a loop is handled properly.
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -290,7 +291,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop4) {
+TEST_F(SsaTest, Loop4) {
// Make sure we support a preheader of a loop not being the first predecessor
// in the predecessor list of the header.
const char* expected =
@@ -325,7 +326,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop5) {
+TEST_F(SsaTest, Loop5) {
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
const char* expected =
@@ -367,7 +368,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop6) {
+TEST_F(SsaTest, Loop6) {
// Test a loop with one preheader and two back edges (e.g. continue).
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -406,7 +407,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, Loop7) {
+TEST_F(SsaTest, Loop7) {
// Test a loop with one preheader, one back edge, and two exit edges (e.g. break).
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -448,7 +449,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, DeadLocal) {
+TEST_F(SsaTest, DeadLocal) {
// Test that we correctly handle a local not being used.
const char* expected =
"BasicBlock 0, succ: 1\n"
@@ -466,7 +467,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, LocalInIf) {
+TEST_F(SsaTest, LocalInIf) {
// Test that we do not create a phi in the join block when one predecessor
// does not update the local.
const char* expected =
@@ -496,7 +497,7 @@
TestCode(data, expected);
}
-TEST(SsaTest, MultiplePredecessors) {
+TEST_F(SsaTest, MultiplePredecessors) {
// Test that we do not create a phi when one predecessor
// does not update the local.
const char* expected =
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 98a1a8f..b79c2f0 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -878,7 +878,15 @@
Register rn,
Opcode opcode,
uint32_t immediate,
+ SetCc set_cc,
ShifterOperand* shifter_op) = 0;
+ bool ShifterOperandCanHold(Register rd,
+ Register rn,
+ Opcode opcode,
+ uint32_t immediate,
+ ShifterOperand* shifter_op) {
+ return ShifterOperandCanHold(rd, rn, opcode, immediate, kCcDontCare, shifter_op);
+ }
virtual bool ShifterOperandCanAlwaysHold(uint32_t immediate) = 0;
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index a7dbacd..ebca25b 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -57,6 +57,7 @@
Register rn ATTRIBUTE_UNUSED,
Opcode opcode ATTRIBUTE_UNUSED,
uint32_t immediate,
+ SetCc set_cc ATTRIBUTE_UNUSED,
ShifterOperand* shifter_op) {
return ShifterOperandCanHoldArm32(immediate, shifter_op);
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index ce3a872..bf332fe 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -297,7 +297,9 @@
Register rn,
Opcode opcode,
uint32_t immediate,
+ SetCc set_cc,
ShifterOperand* shifter_op) OVERRIDE;
+ using ArmAssembler::ShifterOperandCanHold; // Don't hide the non-virtual override.
bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index cdeb443..f341030 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -500,6 +500,7 @@
Register rn ATTRIBUTE_UNUSED,
Opcode opcode,
uint32_t immediate,
+ SetCc set_cc,
ShifterOperand* shifter_op) {
shifter_op->type_ = ShifterOperand::kImmediate;
shifter_op->immed_ = immediate;
@@ -508,7 +509,8 @@
switch (opcode) {
case ADD:
case SUB:
- if (immediate < (1 << 12)) { // Less than (or equal to) 12 bits can always be done.
+ // Less than (or equal to) 12 bits can be done if we don't need to set condition codes.
+ if (immediate < (1 << 12) && set_cc != kCcSet) {
return true;
}
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
@@ -1239,7 +1241,10 @@
// The only thumb1 instructions with a register and an immediate are ADD and SUB
// with a 3-bit immediate, and RSB with zero immediate.
if (opcode == ADD || opcode == SUB) {
- if (!IsUint<3>(so.GetImmediate())) {
+ if ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet) {
+ return true; // Cannot match "setflags".
+ }
+ if (!IsUint<3>(so.GetImmediate()) && !IsUint<3>(-so.GetImmediate())) {
return true;
}
} else {
@@ -1249,8 +1254,12 @@
// ADD, SUB, CMP and MOV may be thumb1 only if the immediate is 8 bits.
if (!(opcode == ADD || opcode == SUB || opcode == MOV || opcode == CMP)) {
return true;
+ } else if (opcode != CMP && ((cond == AL) ? set_cc == kCcKeep : set_cc == kCcSet)) {
+ return true; // Cannot match "setflags" for ADD, SUB or MOV.
} else {
- if (!IsUint<8>(so.GetImmediate())) {
+ // For ADD and SUB allow also negative 8-bit immediate as we will emit the opposite opcode.
+ if (!IsUint<8>(so.GetImmediate()) &&
+ (opcode == MOV || opcode == CMP || !IsUint<8>(-so.GetImmediate()))) {
return true;
}
}
@@ -1602,12 +1611,18 @@
uint8_t rn_shift = 3;
uint8_t immediate_shift = 0;
bool use_immediate = false;
- uint32_t immediate = 0; // Should be at most 9 bits but keep the full immediate for CHECKs.
+ uint32_t immediate = 0; // Should be at most 10 bits but keep the full immediate for CHECKs.
uint8_t thumb_opcode;
if (so.IsImmediate()) {
use_immediate = true;
immediate = so.GetImmediate();
+ if (!IsUint<10>(immediate)) {
+ // Flip ADD/SUB.
+ opcode = (opcode == ADD) ? SUB : ADD;
+ immediate = -immediate;
+ DCHECK(IsUint<10>(immediate)); // More stringent checks below.
+ }
}
switch (opcode) {
@@ -1644,7 +1659,7 @@
dp_opcode = 2U /* 0b10 */;
thumb_opcode = 3U /* 0b11 */;
opcode_shift = 12;
- CHECK_LT(immediate, (1u << 9));
+ CHECK(IsUint<9>(immediate));
CHECK_ALIGNED(immediate, 4);
// Remove rd and rn from instruction by orring it with immed and clearing bits.
@@ -1658,7 +1673,7 @@
dp_opcode = 2U /* 0b10 */;
thumb_opcode = 5U /* 0b101 */;
opcode_shift = 11;
- CHECK_LT(immediate, (1u << 10));
+ CHECK(IsUint<10>(immediate));
CHECK_ALIGNED(immediate, 4);
// Remove rn from instruction.
@@ -1668,11 +1683,13 @@
immediate >>= 2;
} else if (rn != rd) {
// Must use T1.
+ CHECK(IsUint<3>(immediate));
opcode_shift = 9;
thumb_opcode = 14U /* 0b01110 */;
immediate_shift = 6;
} else {
// T2 encoding.
+ CHECK(IsUint<8>(immediate));
opcode_shift = 11;
thumb_opcode = 6U /* 0b110 */;
rd_shift = 8;
@@ -1702,7 +1719,7 @@
dp_opcode = 2U /* 0b10 */;
thumb_opcode = 0x61 /* 0b1100001 */;
opcode_shift = 7;
- CHECK_LT(immediate, (1u << 9));
+ CHECK(IsUint<9>(immediate));
CHECK_ALIGNED(immediate, 4);
// Remove rd and rn from instruction by orring it with immed and clearing bits.
@@ -1713,11 +1730,13 @@
immediate >>= 2;
} else if (rn != rd) {
// Must use T1.
+ CHECK(IsUint<3>(immediate));
opcode_shift = 9;
thumb_opcode = 15U /* 0b01111 */;
immediate_shift = 6;
} else {
// T2 encoding.
+ CHECK(IsUint<8>(immediate));
opcode_shift = 11;
thumb_opcode = 7U /* 0b111 */;
rd_shift = 8;
@@ -3401,25 +3420,30 @@
// positive values and sub for negatives ones, which would slightly improve
// the readability of generated code for some constants.
ShifterOperand shifter_op;
- if (ShifterOperandCanHold(rd, rn, ADD, value, &shifter_op)) {
+ if (ShifterOperandCanHold(rd, rn, ADD, value, set_cc, &shifter_op)) {
add(rd, rn, shifter_op, cond, set_cc);
- } else if (ShifterOperandCanHold(rd, rn, SUB, -value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(rd, rn, SUB, -value, set_cc, &shifter_op)) {
sub(rd, rn, shifter_op, cond, set_cc);
} else {
CHECK(rn != IP);
- if (ShifterOperandCanHold(rd, rn, MVN, ~value, &shifter_op)) {
- mvn(IP, shifter_op, cond, kCcKeep);
- add(rd, rn, ShifterOperand(IP), cond, set_cc);
- } else if (ShifterOperandCanHold(rd, rn, MVN, ~(-value), &shifter_op)) {
- mvn(IP, shifter_op, cond, kCcKeep);
- sub(rd, rn, ShifterOperand(IP), cond, set_cc);
+ // If rd != rn, use rd as temp. This allows 16-bit ADD/SUB in more situations than using IP.
+ Register temp = (rd != rn) ? rd : IP;
+ if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~value, set_cc, &shifter_op)) {
+ mvn(temp, shifter_op, cond, kCcKeep);
+ add(rd, rn, ShifterOperand(temp), cond, set_cc);
+ } else if (ShifterOperandCanHold(temp, kNoRegister, MVN, ~(-value), set_cc, &shifter_op)) {
+ mvn(temp, shifter_op, cond, kCcKeep);
+ sub(rd, rn, ShifterOperand(temp), cond, set_cc);
+ } else if (High16Bits(-value) == 0) {
+ movw(temp, Low16Bits(-value), cond);
+ sub(rd, rn, ShifterOperand(temp), cond, set_cc);
} else {
- movw(IP, Low16Bits(value), cond);
+ movw(temp, Low16Bits(value), cond);
uint16_t value_high = High16Bits(value);
if (value_high != 0) {
- movt(IP, value_high, cond);
+ movt(temp, value_high, cond);
}
- add(rd, rn, ShifterOperand(IP), cond, set_cc);
+ add(rd, rn, ShifterOperand(temp), cond, set_cc);
}
}
}
@@ -3429,9 +3453,9 @@
// positive values and sub for negatives ones, which would slightly improve
// the readability of generated code for some constants.
ShifterOperand shifter_op;
- if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, &shifter_op)) {
+ if (ShifterOperandCanHold(kNoRegister, rn, CMP, value, kCcSet, &shifter_op)) {
cmp(rn, shifter_op, cond);
- } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, &shifter_op)) {
+ } else if (ShifterOperandCanHold(kNoRegister, rn, CMN, ~value, kCcSet, &shifter_op)) {
cmn(rn, shifter_op, cond);
} else {
CHECK(rn != IP);
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 9aeece8..bf07b2d 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -342,7 +342,9 @@
Register rn,
Opcode opcode,
uint32_t immediate,
+ SetCc set_cc,
ShifterOperand* shifter_op) OVERRIDE;
+ using ArmAssembler::ShifterOperandCanHold; // Don't hide the non-virtual override.
bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 5ae2cc2..0ef0dc1 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -135,7 +135,8 @@
toolsdir.c_str(), filename);
if (kPrintResults) {
// Print the results only, don't check. This is used to generate new output for inserting
- // into the .inc file.
+ // into the .inc file, so let's add the appropriate prefix/suffix needed in the C++ code.
+ strcat(cmd, " | sed '-es/^/ \"/' | sed '-es/$/\\\\n\",/'");
int cmd_result3 = system(cmd);
ASSERT_EQ(cmd_result3, 0) << strerror(errno);
} else {
@@ -1379,6 +1380,252 @@
EmitAndCheck(&assembler, "CompareAndBranch");
}
+TEST(Thumb2AssemblerTest, AddConstant) {
+ arm::Thumb2Assembler assembler;
+
+ // Low registers, Rd != Rn.
+ __ AddConstant(R0, R1, 0); // MOV.
+ __ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1.
+ __ AddConstant(R0, R1, 7); // 16-bit ADDS, encoding T1.
+ __ AddConstant(R0, R1, 8); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 255); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 256); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 257); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R1, 0xfff); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R1, 0x1000); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 0x1001); // MVN+SUB.
+ __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
+ __ AddConstant(R0, R1, 0xffff); // MOVW+ADD.
+ __ AddConstant(R0, R1, 0x10000); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 0x10001); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 0x10002); // MVN+SUB.
+ __ AddConstant(R0, R1, 0x10003); // MOVW+MOVT+ADD.
+ __ AddConstant(R0, R1, -1); // 16-bit SUBS.
+ __ AddConstant(R0, R1, -7); // 16-bit SUBS.
+ __ AddConstant(R0, R1, -8); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -255); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -256); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -257); // 32-bit SUB, encoding T4.
+ __ AddConstant(R0, R1, -0xfff); // 32-bit SUB, encoding T4.
+ __ AddConstant(R0, R1, -0x1000); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -0x1001); // MVN+ADD.
+ __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
+ __ AddConstant(R0, R1, -0xffff); // MOVW+SUB.
+ __ AddConstant(R0, R1, -0x10000); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -0x10001); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -0x10002); // MVN+ADD.
+ __ AddConstant(R0, R1, -0x10003); // MOVW+MOVT+ADD.
+
+ // Low registers, Rd == Rn.
+ __ AddConstant(R0, R0, 0); // Nothing.
+ __ AddConstant(R1, R1, 1); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R0, R0, 7); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R1, R1, 8); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R0, R0, 255); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R1, R1, 256); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R0, 257); // 32-bit ADD, encoding T4.
+ __ AddConstant(R1, R1, 0xfff); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R0, 0x1000); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, 0x1001); // MVN+SUB.
+ __ AddConstant(R0, R0, 0x1002); // MOVW+ADD.
+ __ AddConstant(R1, R1, 0xffff); // MOVW+ADD.
+ __ AddConstant(R0, R0, 0x10000); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, 0x10001); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R0, 0x10002); // MVN+SUB.
+ __ AddConstant(R1, R1, 0x10003); // MOVW+MOVT+ADD.
+ __ AddConstant(R0, R0, -1); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R1, R1, -7); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R0, R0, -8); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R1, R1, -255); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R0, R0, -256); // 32-bit SUB, encoding T3.
+ __ AddConstant(R1, R1, -257); // 32-bit SUB, encoding T4.
+ __ AddConstant(R0, R0, -0xfff); // 32-bit SUB, encoding T4.
+ __ AddConstant(R1, R1, -0x1000); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -0x1001); // MVN+ADD.
+ __ AddConstant(R1, R1, -0x1002); // MOVW+SUB.
+ __ AddConstant(R0, R0, -0xffff); // MOVW+SUB.
+ __ AddConstant(R1, R1, -0x10000); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -0x10001); // 32-bit SUB, encoding T3.
+ __ AddConstant(R1, R1, -0x10002); // MVN+ADD.
+ __ AddConstant(R0, R0, -0x10003); // MOVW+MOVT+ADD.
+
+ // High registers.
+ __ AddConstant(R8, R8, 0); // Nothing.
+ __ AddConstant(R8, R1, 1); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R8, 7); // 32-bit ADD, encoding T3.
+ __ AddConstant(R8, R8, 8); // 32-bit ADD, encoding T3.
+ __ AddConstant(R8, R1, 255); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R8, 256); // 32-bit ADD, encoding T3.
+ __ AddConstant(R8, R8, 257); // 32-bit ADD, encoding T4.
+ __ AddConstant(R8, R1, 0xfff); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R8, 0x1000); // 32-bit ADD, encoding T3.
+ __ AddConstant(R8, R8, 0x1001); // MVN+SUB.
+ __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
+ __ AddConstant(R0, R8, 0xffff); // MOVW+ADD.
+ __ AddConstant(R8, R8, 0x10000); // 32-bit ADD, encoding T3.
+ __ AddConstant(R8, R1, 0x10001); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R8, 0x10002); // MVN+SUB.
+ __ AddConstant(R0, R8, 0x10003); // MOVW+MOVT+ADD.
+ __ AddConstant(R8, R8, -1); // 32-bit ADD, encoding T3.
+ __ AddConstant(R8, R1, -7); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R8, -8); // 32-bit SUB, encoding T3.
+ __ AddConstant(R8, R8, -255); // 32-bit SUB, encoding T3.
+ __ AddConstant(R8, R1, -256); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R8, -257); // 32-bit SUB, encoding T4.
+ __ AddConstant(R8, R8, -0xfff); // 32-bit SUB, encoding T4.
+ __ AddConstant(R8, R1, -0x1000); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R8, -0x1001); // MVN+ADD.
+ __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
+ __ AddConstant(R8, R1, -0xffff); // MOVW+SUB.
+ __ AddConstant(R0, R8, -0x10000); // 32-bit SUB, encoding T3.
+ __ AddConstant(R8, R8, -0x10001); // 32-bit SUB, encoding T3.
+ __ AddConstant(R8, R1, -0x10002); // MVN+ADD.
+ __ AddConstant(R0, R8, -0x10003); // MOVW+MOVT+ADD.
+
+ // Low registers, Rd != Rn, kCcKeep.
+ __ AddConstant(R0, R1, 0, AL, kCcKeep); // MOV.
+ __ AddConstant(R0, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R1, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
+ __ AddConstant(R0, R1, 0x1002, AL, kCcKeep); // MOVW+ADD.
+ __ AddConstant(R0, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
+ __ AddConstant(R0, R1, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, 0x10002, AL, kCcKeep); // MVN+SUB.
+ __ AddConstant(R0, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
+ __ AddConstant(R0, R1, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
+ __ AddConstant(R0, R1, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
+ __ AddConstant(R0, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -0x1001, AL, kCcKeep); // MVN+ADD.
+ __ AddConstant(R0, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
+ __ AddConstant(R0, R1, -0xffff, AL, kCcKeep); // MOVW+SUB.
+ __ AddConstant(R0, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
+ __ AddConstant(R0, R1, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
+
+ // Low registers, Rd == Rn, kCcKeep.
+ __ AddConstant(R0, R0, 0, AL, kCcKeep); // Nothing.
+ __ AddConstant(R1, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R0, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R0, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R0, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
+ __ AddConstant(R1, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
+ __ AddConstant(R0, R0, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
+ __ AddConstant(R0, R0, 0x1002, AL, kCcKeep); // MOVW+ADD.
+ __ AddConstant(R1, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
+ __ AddConstant(R0, R0, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R0, R0, 0x10002, AL, kCcKeep); // MVN+SUB.
+ __ AddConstant(R1, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
+ __ AddConstant(R0, R0, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
+ __ AddConstant(R1, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R1, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R1, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
+ __ AddConstant(R0, R0, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
+ __ AddConstant(R1, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -0x1001, AL, kCcKeep); // MVN+ADD.
+ __ AddConstant(R1, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
+ __ AddConstant(R0, R0, -0xffff, AL, kCcKeep); // MOVW+SUB.
+ __ AddConstant(R1, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
+ __ AddConstant(R1, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
+ __ AddConstant(R0, R0, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
+
+ // Low registers, Rd != Rn, kCcSet.
+ __ AddConstant(R0, R1, 0, AL, kCcSet); // 16-bit ADDS.
+ __ AddConstant(R0, R1, 1, AL, kCcSet); // 16-bit ADDS.
+ __ AddConstant(R0, R1, 7, AL, kCcSet); // 16-bit ADDS.
+ __ AddConstant(R0, R1, 8, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R1, 255, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R1, 257, AL, kCcSet); // MVN+SUBS.
+ __ AddConstant(R0, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
+ __ AddConstant(R0, R1, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
+ __ AddConstant(R0, R1, 0x1002, AL, kCcSet); // MOVW+ADDS.
+ __ AddConstant(R0, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
+ __ AddConstant(R0, R1, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R1, 0x10002, AL, kCcSet); // MVN+SUBS.
+ __ AddConstant(R0, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
+ __ AddConstant(R0, R1, -1, AL, kCcSet); // 16-bit SUBS.
+ __ AddConstant(R0, R1, -7, AL, kCcSet); // 16-bit SUBS.
+ __ AddConstant(R0, R1, -8, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R1, -255, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R1, -256, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R1, -257, AL, kCcSet); // MVN+ADDS.
+ __ AddConstant(R0, R1, -0xfff, AL, kCcSet); // MOVW+SUBS.
+ __ AddConstant(R0, R1, -0x1000, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R1, -0x1001, AL, kCcSet); // MVN+ADDS.
+ __ AddConstant(R0, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
+ __ AddConstant(R0, R1, -0xffff, AL, kCcSet); // MOVW+SUBS.
+ __ AddConstant(R0, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R1, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
+ __ AddConstant(R0, R1, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
+
+ // Low registers, Rd == Rn, kCcSet.
+ __ AddConstant(R0, R0, 0, AL, kCcSet); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R1, R1, 1, AL, kCcSet); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R0, R0, 7, AL, kCcSet); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R1, R1, 8, AL, kCcSet); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R0, R0, 255, AL, kCcSet); // 16-bit ADDS, encoding T2.
+ __ AddConstant(R1, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R0, 257, AL, kCcSet); // MVN+SUBS.
+ __ AddConstant(R1, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
+ __ AddConstant(R0, R0, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R1, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
+ __ AddConstant(R0, R0, 0x1002, AL, kCcSet); // MOVW+ADDS.
+ __ AddConstant(R1, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
+ __ AddConstant(R0, R0, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R1, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
+ __ AddConstant(R0, R0, 0x10002, AL, kCcSet); // MVN+SUBS.
+ __ AddConstant(R1, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
+ __ AddConstant(R0, R0, -1, AL, kCcSet); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R1, R1, -7, AL, kCcSet); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R0, R0, -8, AL, kCcSet); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R1, R1, -255, AL, kCcSet); // 16-bit SUBS, encoding T2.
+ __ AddConstant(R0, R0, -256, AL, kCcSet); // 32-bit SUB, encoding T3.
+ __ AddConstant(R1, R1, -257, AL, kCcSet); // MVN+ADDS.
+ __ AddConstant(R0, R0, -0xfff, AL, kCcSet); // MOVW+SUBS.
+ __ AddConstant(R1, R1, -0x1000, AL, kCcSet); // 32-bit SUB, encoding T3.
+ __ AddConstant(R0, R0, -0x1001, AL, kCcSet); // MVN+ADDS.
+ __ AddConstant(R1, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
+ __ AddConstant(R0, R0, -0xffff, AL, kCcSet); // MOVW+SUBS.
+ __ AddConstant(R1, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R0, R0, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
+ __ AddConstant(R1, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
+ __ AddConstant(R0, R0, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
+
+ __ it(EQ);
+ __ AddConstant(R0, R1, 1, EQ, kCcSet); // 32-bit ADDS, encoding T3.
+ __ it(NE);
+ __ AddConstant(R0, R1, 1, NE, kCcKeep); // 16-bit ADDS, encoding T1.
+ __ it(GE);
+ __ AddConstant(R0, R0, 1, GE, kCcSet); // 32-bit ADDS, encoding T3.
+ __ it(LE);
+ __ AddConstant(R0, R0, 1, LE, kCcKeep); // 16-bit ADDS, encoding T2.
+
+ EmitAndCheck(&assembler, "AddConstant");
+}
+
#undef __
} // namespace arm
} // namespace art
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 886295e..f07f8c7 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5052,6 +5052,324 @@
nullptr
};
+const char* AddConstantResults[] = {
+ " 0: 4608 mov r0, r1\n",
+ " 2: 1c48 adds r0, r1, #1\n",
+ " 4: 1dc8 adds r0, r1, #7\n",
+ " 6: f101 0008 add.w r0, r1, #8\n",
+ " a: f101 00ff add.w r0, r1, #255 ; 0xff\n",
+ " e: f501 7080 add.w r0, r1, #256 ; 0x100\n",
+ " 12: f201 1001 addw r0, r1, #257 ; 0x101\n",
+ " 16: f601 70ff addw r0, r1, #4095 ; 0xfff\n",
+ " 1a: f501 5080 add.w r0, r1, #4096 ; 0x1000\n",
+ " 1e: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 22: 1a08 subs r0, r1, r0\n",
+ " 24: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 28: 1808 adds r0, r1, r0\n",
+ " 2a: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 2e: 1808 adds r0, r1, r0\n",
+ " 30: f501 3080 add.w r0, r1, #65536 ; 0x10000\n",
+ " 34: f101 1001 add.w r0, r1, #65537 ; 0x10001\n",
+ " 38: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 3c: 1a08 subs r0, r1, r0\n",
+ " 3e: f240 0003 movw r0, #3\n",
+ " 42: f2c0 0001 movt r0, #1\n",
+ " 46: 1808 adds r0, r1, r0\n",
+ " 48: 1e48 subs r0, r1, #1\n",
+ " 4a: 1fc8 subs r0, r1, #7\n",
+ " 4c: f1a1 0008 sub.w r0, r1, #8\n",
+ " 50: f1a1 00ff sub.w r0, r1, #255 ; 0xff\n",
+ " 54: f5a1 7080 sub.w r0, r1, #256 ; 0x100\n",
+ " 58: f2a1 1001 subw r0, r1, #257 ; 0x101\n",
+ " 5c: f6a1 70ff subw r0, r1, #4095 ; 0xfff\n",
+ " 60: f5a1 5080 sub.w r0, r1, #4096 ; 0x1000\n",
+ " 64: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 68: 1808 adds r0, r1, r0\n",
+ " 6a: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 6e: 1a08 subs r0, r1, r0\n",
+ " 70: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 74: 1a08 subs r0, r1, r0\n",
+ " 76: f5a1 3080 sub.w r0, r1, #65536 ; 0x10000\n",
+ " 7a: f1a1 1001 sub.w r0, r1, #65537 ; 0x10001\n",
+ " 7e: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 82: 1808 adds r0, r1, r0\n",
+ " 84: f64f 70fd movw r0, #65533 ; 0xfffd\n",
+ " 88: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
+ " 8c: 1808 adds r0, r1, r0\n",
+ " 8e: 3101 adds r1, #1\n",
+ " 90: 3007 adds r0, #7\n",
+ " 92: 3108 adds r1, #8\n",
+ " 94: 30ff adds r0, #255 ; 0xff\n",
+ " 96: f501 7180 add.w r1, r1, #256 ; 0x100\n",
+ " 9a: f200 1001 addw r0, r0, #257 ; 0x101\n",
+ " 9e: f601 71ff addw r1, r1, #4095 ; 0xfff\n",
+ " a2: f500 5080 add.w r0, r0, #4096 ; 0x1000\n",
+ " a6: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " aa: eba1 010c sub.w r1, r1, ip\n",
+ " ae: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " b2: 4460 add r0, ip\n",
+ " b4: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " b8: 4461 add r1, ip\n",
+ " ba: f500 3080 add.w r0, r0, #65536 ; 0x10000\n",
+ " be: f101 1101 add.w r1, r1, #65537 ; 0x10001\n",
+ " c2: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " c6: eba0 000c sub.w r0, r0, ip\n",
+ " ca: f240 0c03 movw ip, #3\n",
+ " ce: f2c0 0c01 movt ip, #1\n",
+ " d2: 4461 add r1, ip\n",
+ " d4: 3801 subs r0, #1\n",
+ " d6: 3907 subs r1, #7\n",
+ " d8: 3808 subs r0, #8\n",
+ " da: 39ff subs r1, #255 ; 0xff\n",
+ " dc: f5a0 7080 sub.w r0, r0, #256 ; 0x100\n",
+ " e0: f2a1 1101 subw r1, r1, #257 ; 0x101\n",
+ " e4: f6a0 70ff subw r0, r0, #4095 ; 0xfff\n",
+ " e8: f5a1 5180 sub.w r1, r1, #4096 ; 0x1000\n",
+ " ec: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " f0: 4460 add r0, ip\n",
+ " f2: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " f6: eba1 010c sub.w r1, r1, ip\n",
+ " fa: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " fe: eba0 000c sub.w r0, r0, ip\n",
+ " 102: f5a1 3180 sub.w r1, r1, #65536 ; 0x10000\n",
+ " 106: f1a0 1001 sub.w r0, r0, #65537 ; 0x10001\n",
+ " 10a: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 10e: 4461 add r1, ip\n",
+ " 110: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 114: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " 118: 4460 add r0, ip\n",
+ " 11a: f101 0801 add.w r8, r1, #1\n",
+ " 11e: f108 0007 add.w r0, r8, #7\n",
+ " 122: f108 0808 add.w r8, r8, #8\n",
+ " 126: f101 08ff add.w r8, r1, #255 ; 0xff\n",
+ " 12a: f508 7080 add.w r0, r8, #256 ; 0x100\n",
+ " 12e: f208 1801 addw r8, r8, #257 ; 0x101\n",
+ " 132: f601 78ff addw r8, r1, #4095 ; 0xfff\n",
+ " 136: f508 5080 add.w r0, r8, #4096 ; 0x1000\n",
+ " 13a: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 13e: eba8 080c sub.w r8, r8, ip\n",
+ " 142: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 146: 1808 adds r0, r1, r0\n",
+ " 148: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 14c: eb08 0000 add.w r0, r8, r0\n",
+ " 150: f508 3880 add.w r8, r8, #65536 ; 0x10000\n",
+ " 154: f101 1801 add.w r8, r1, #65537 ; 0x10001\n",
+ " 158: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 15c: eba8 0000 sub.w r0, r8, r0\n",
+ " 160: f240 0003 movw r0, #3\n",
+ " 164: f2c0 0001 movt r0, #1\n",
+ " 168: eb08 0000 add.w r0, r8, r0\n",
+ " 16c: f108 38ff add.w r8, r8, #4294967295 ; 0xffffffff\n",
+ " 170: f1a1 0807 sub.w r8, r1, #7\n",
+ " 174: f1a8 0008 sub.w r0, r8, #8\n",
+ " 178: f1a8 08ff sub.w r8, r8, #255 ; 0xff\n",
+ " 17c: f5a1 7880 sub.w r8, r1, #256 ; 0x100\n",
+ " 180: f2a8 1001 subw r0, r8, #257 ; 0x101\n",
+ " 184: f6a8 78ff subw r8, r8, #4095 ; 0xfff\n",
+ " 188: f5a1 5880 sub.w r8, r1, #4096 ; 0x1000\n",
+ " 18c: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 190: eb08 0000 add.w r0, r8, r0\n",
+ " 194: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 198: 1a08 subs r0, r1, r0\n",
+ " 19a: f64f 78ff movw r8, #65535 ; 0xffff\n",
+ " 19e: eba1 0808 sub.w r8, r1, r8\n",
+ " 1a2: f5a8 3080 sub.w r0, r8, #65536 ; 0x10000\n",
+ " 1a6: f1a8 1801 sub.w r8, r8, #65537 ; 0x10001\n",
+ " 1aa: f06f 1801 mvn.w r8, #65537 ; 0x10001\n",
+ " 1ae: eb01 0808 add.w r8, r1, r8\n",
+ " 1b2: f64f 70fd movw r0, #65533 ; 0xfffd\n",
+ " 1b6: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
+ " 1ba: eb08 0000 add.w r0, r8, r0\n",
+ " 1be: 4608 mov r0, r1\n",
+ " 1c0: f101 0001 add.w r0, r1, #1\n",
+ " 1c4: f101 0007 add.w r0, r1, #7\n",
+ " 1c8: f101 0008 add.w r0, r1, #8\n",
+ " 1cc: f101 00ff add.w r0, r1, #255 ; 0xff\n",
+ " 1d0: f501 7080 add.w r0, r1, #256 ; 0x100\n",
+ " 1d4: f201 1001 addw r0, r1, #257 ; 0x101\n",
+ " 1d8: f601 70ff addw r0, r1, #4095 ; 0xfff\n",
+ " 1dc: f501 5080 add.w r0, r1, #4096 ; 0x1000\n",
+ " 1e0: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 1e4: eba1 0000 sub.w r0, r1, r0\n",
+ " 1e8: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 1ec: eb01 0000 add.w r0, r1, r0\n",
+ " 1f0: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 1f4: eb01 0000 add.w r0, r1, r0\n",
+ " 1f8: f501 3080 add.w r0, r1, #65536 ; 0x10000\n",
+ " 1fc: f101 1001 add.w r0, r1, #65537 ; 0x10001\n",
+ " 200: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 204: eba1 0000 sub.w r0, r1, r0\n",
+ " 208: f240 0003 movw r0, #3\n",
+ " 20c: f2c0 0001 movt r0, #1\n",
+ " 210: eb01 0000 add.w r0, r1, r0\n",
+ " 214: f101 30ff add.w r0, r1, #4294967295 ; 0xffffffff\n",
+ " 218: f1a1 0007 sub.w r0, r1, #7\n",
+ " 21c: f1a1 0008 sub.w r0, r1, #8\n",
+ " 220: f1a1 00ff sub.w r0, r1, #255 ; 0xff\n",
+ " 224: f5a1 7080 sub.w r0, r1, #256 ; 0x100\n",
+ " 228: f2a1 1001 subw r0, r1, #257 ; 0x101\n",
+ " 22c: f6a1 70ff subw r0, r1, #4095 ; 0xfff\n",
+ " 230: f5a1 5080 sub.w r0, r1, #4096 ; 0x1000\n",
+ " 234: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 238: eb01 0000 add.w r0, r1, r0\n",
+ " 23c: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 240: eba1 0000 sub.w r0, r1, r0\n",
+ " 244: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 248: eba1 0000 sub.w r0, r1, r0\n",
+ " 24c: f5a1 3080 sub.w r0, r1, #65536 ; 0x10000\n",
+ " 250: f1a1 1001 sub.w r0, r1, #65537 ; 0x10001\n",
+ " 254: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 258: eb01 0000 add.w r0, r1, r0\n",
+ " 25c: f64f 70fd movw r0, #65533 ; 0xfffd\n",
+ " 260: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
+ " 264: eb01 0000 add.w r0, r1, r0\n",
+ " 268: f101 0101 add.w r1, r1, #1\n",
+ " 26c: f100 0007 add.w r0, r0, #7\n",
+ " 270: f101 0108 add.w r1, r1, #8\n",
+ " 274: f100 00ff add.w r0, r0, #255 ; 0xff\n",
+ " 278: f501 7180 add.w r1, r1, #256 ; 0x100\n",
+ " 27c: f200 1001 addw r0, r0, #257 ; 0x101\n",
+ " 280: f601 71ff addw r1, r1, #4095 ; 0xfff\n",
+ " 284: f500 5080 add.w r0, r0, #4096 ; 0x1000\n",
+ " 288: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 28c: eba1 010c sub.w r1, r1, ip\n",
+ " 290: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 294: 4460 add r0, ip\n",
+ " 296: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 29a: 4461 add r1, ip\n",
+ " 29c: f500 3080 add.w r0, r0, #65536 ; 0x10000\n",
+ " 2a0: f101 1101 add.w r1, r1, #65537 ; 0x10001\n",
+ " 2a4: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 2a8: eba0 000c sub.w r0, r0, ip\n",
+ " 2ac: f240 0c03 movw ip, #3\n",
+ " 2b0: f2c0 0c01 movt ip, #1\n",
+ " 2b4: 4461 add r1, ip\n",
+ " 2b6: f100 30ff add.w r0, r0, #4294967295 ; 0xffffffff\n",
+ " 2ba: f1a1 0107 sub.w r1, r1, #7\n",
+ " 2be: f1a0 0008 sub.w r0, r0, #8\n",
+ " 2c2: f1a1 01ff sub.w r1, r1, #255 ; 0xff\n",
+ " 2c6: f5a0 7080 sub.w r0, r0, #256 ; 0x100\n",
+ " 2ca: f2a1 1101 subw r1, r1, #257 ; 0x101\n",
+ " 2ce: f6a0 70ff subw r0, r0, #4095 ; 0xfff\n",
+ " 2d2: f5a1 5180 sub.w r1, r1, #4096 ; 0x1000\n",
+ " 2d6: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 2da: 4460 add r0, ip\n",
+ " 2dc: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 2e0: eba1 010c sub.w r1, r1, ip\n",
+ " 2e4: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 2e8: eba0 000c sub.w r0, r0, ip\n",
+ " 2ec: f5a1 3180 sub.w r1, r1, #65536 ; 0x10000\n",
+ " 2f0: f1a0 1001 sub.w r0, r0, #65537 ; 0x10001\n",
+ " 2f4: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 2f8: 4461 add r1, ip\n",
+ " 2fa: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 2fe: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " 302: 4460 add r0, ip\n",
+ " 304: 1c08 adds r0, r1, #0\n",
+ " 306: 1c48 adds r0, r1, #1\n",
+ " 308: 1dc8 adds r0, r1, #7\n",
+ " 30a: f111 0008 adds.w r0, r1, #8\n",
+ " 30e: f111 00ff adds.w r0, r1, #255 ; 0xff\n",
+ " 312: f511 7080 adds.w r0, r1, #256 ; 0x100\n",
+ " 316: f46f 7080 mvn.w r0, #256 ; 0x100\n",
+ " 31a: 1a08 subs r0, r1, r0\n",
+ " 31c: f640 70ff movw r0, #4095 ; 0xfff\n",
+ " 320: 1808 adds r0, r1, r0\n",
+ " 322: f511 5080 adds.w r0, r1, #4096 ; 0x1000\n",
+ " 326: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 32a: 1a08 subs r0, r1, r0\n",
+ " 32c: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 330: 1808 adds r0, r1, r0\n",
+ " 332: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 336: 1808 adds r0, r1, r0\n",
+ " 338: f511 3080 adds.w r0, r1, #65536 ; 0x10000\n",
+ " 33c: f111 1001 adds.w r0, r1, #65537 ; 0x10001\n",
+ " 340: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 344: 1a08 subs r0, r1, r0\n",
+ " 346: f240 0003 movw r0, #3\n",
+ " 34a: f2c0 0001 movt r0, #1\n",
+ " 34e: 1808 adds r0, r1, r0\n",
+ " 350: 1e48 subs r0, r1, #1\n",
+ " 352: 1fc8 subs r0, r1, #7\n",
+ " 354: f1b1 0008 subs.w r0, r1, #8\n",
+ " 358: f1b1 00ff subs.w r0, r1, #255 ; 0xff\n",
+ " 35c: f5b1 7080 subs.w r0, r1, #256 ; 0x100\n",
+ " 360: f46f 7080 mvn.w r0, #256 ; 0x100\n",
+ " 364: 1808 adds r0, r1, r0\n",
+ " 366: f640 70ff movw r0, #4095 ; 0xfff\n",
+ " 36a: 1a08 subs r0, r1, r0\n",
+ " 36c: f5b1 5080 subs.w r0, r1, #4096 ; 0x1000\n",
+ " 370: f46f 5080 mvn.w r0, #4096 ; 0x1000\n",
+ " 374: 1808 adds r0, r1, r0\n",
+ " 376: f241 0002 movw r0, #4098 ; 0x1002\n",
+ " 37a: 1a08 subs r0, r1, r0\n",
+ " 37c: f64f 70ff movw r0, #65535 ; 0xffff\n",
+ " 380: 1a08 subs r0, r1, r0\n",
+ " 382: f5b1 3080 subs.w r0, r1, #65536 ; 0x10000\n",
+ " 386: f1b1 1001 subs.w r0, r1, #65537 ; 0x10001\n",
+ " 38a: f06f 1001 mvn.w r0, #65537 ; 0x10001\n",
+ " 38e: 1808 adds r0, r1, r0\n",
+ " 390: f64f 70fd movw r0, #65533 ; 0xfffd\n",
+ " 394: f6cf 70fe movt r0, #65534 ; 0xfffe\n",
+ " 398: 1808 adds r0, r1, r0\n",
+ " 39a: 3000 adds r0, #0\n",
+ " 39c: 3101 adds r1, #1\n",
+ " 39e: 3007 adds r0, #7\n",
+ " 3a0: 3108 adds r1, #8\n",
+ " 3a2: 30ff adds r0, #255 ; 0xff\n",
+ " 3a4: f511 7180 adds.w r1, r1, #256 ; 0x100\n",
+ " 3a8: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 3ac: ebb0 000c subs.w r0, r0, ip\n",
+ " 3b0: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 3b4: eb11 010c adds.w r1, r1, ip\n",
+ " 3b8: f510 5080 adds.w r0, r0, #4096 ; 0x1000\n",
+ " 3bc: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 3c0: ebb1 010c subs.w r1, r1, ip\n",
+ " 3c4: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 3c8: eb10 000c adds.w r0, r0, ip\n",
+ " 3cc: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 3d0: eb11 010c adds.w r1, r1, ip\n",
+ " 3d4: f510 3080 adds.w r0, r0, #65536 ; 0x10000\n",
+ " 3d8: f111 1101 adds.w r1, r1, #65537 ; 0x10001\n",
+ " 3dc: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 3e0: ebb0 000c subs.w r0, r0, ip\n",
+ " 3e4: f240 0c03 movw ip, #3\n",
+ " 3e8: f2c0 0c01 movt ip, #1\n",
+ " 3ec: eb11 010c adds.w r1, r1, ip\n",
+ " 3f0: 3801 subs r0, #1\n",
+ " 3f2: 3907 subs r1, #7\n",
+ " 3f4: 3808 subs r0, #8\n",
+ " 3f6: 39ff subs r1, #255 ; 0xff\n",
+ " 3f8: f5b0 7080 subs.w r0, r0, #256 ; 0x100\n",
+ " 3fc: f46f 7c80 mvn.w ip, #256 ; 0x100\n",
+ " 400: eb11 010c adds.w r1, r1, ip\n",
+ " 404: f640 7cff movw ip, #4095 ; 0xfff\n",
+ " 408: ebb0 000c subs.w r0, r0, ip\n",
+ " 40c: f5b1 5180 subs.w r1, r1, #4096 ; 0x1000\n",
+ " 410: f46f 5c80 mvn.w ip, #4096 ; 0x1000\n",
+ " 414: eb10 000c adds.w r0, r0, ip\n",
+ " 418: f241 0c02 movw ip, #4098 ; 0x1002\n",
+ " 41c: ebb1 010c subs.w r1, r1, ip\n",
+ " 420: f64f 7cff movw ip, #65535 ; 0xffff\n",
+ " 424: ebb0 000c subs.w r0, r0, ip\n",
+ " 428: f5b1 3180 subs.w r1, r1, #65536 ; 0x10000\n",
+ " 42c: f1b0 1001 subs.w r0, r0, #65537 ; 0x10001\n",
+ " 430: f06f 1c01 mvn.w ip, #65537 ; 0x10001\n",
+ " 434: eb11 010c adds.w r1, r1, ip\n",
+ " 438: f64f 7cfd movw ip, #65533 ; 0xfffd\n",
+ " 43c: f6cf 7cfe movt ip, #65534 ; 0xfffe\n",
+ " 440: eb10 000c adds.w r0, r0, ip\n",
+ " 444: bf08 it eq\n",
+ " 446: f111 0001 addseq.w r0, r1, #1\n",
+ " 44a: bf18 it ne\n",
+ " 44c: 1c48 addne r0, r1, #1\n",
+ " 44e: bfa8 it ge\n",
+ " 450: f110 0001 addsge.w r0, r0, #1\n",
+ " 454: bfd8 it le\n",
+ " 456: 3001 addle r0, #1\n",
+ nullptr
+};
+
std::map<std::string, const char* const*> test_results;
void setup_results() {
test_results["SimpleMov"] = SimpleMovResults;
@@ -5102,4 +5420,5 @@
test_results["LoadStoreLiteral"] = LoadStoreLiteralResults;
test_results["LoadStoreLimits"] = LoadStoreLimitsResults;
test_results["CompareAndBranch"] = CompareAndBranchResults;
+ test_results["AddConstant"] = AddConstantResults;
}
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 107d5bb..cfd8421 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -616,6 +616,14 @@
EmitI21(0x3E, rs, imm21);
}
+void Mips64Assembler::Bc1eqz(FpuRegister ft, uint16_t imm16) {
+ EmitFI(0x11, 0x9, ft, imm16);
+}
+
+void Mips64Assembler::Bc1nez(FpuRegister ft, uint16_t imm16) {
+ EmitFI(0x11, 0xD, ft, imm16);
+}
+
void Mips64Assembler::EmitBcondc(BranchCondition cond,
GpuRegister rs,
GpuRegister rt,
@@ -669,6 +677,14 @@
case kCondGEU:
Bgeuc(rs, rt, imm16_21);
break;
+ case kCondF:
+ CHECK_EQ(rt, ZERO);
+ Bc1eqz(static_cast<FpuRegister>(rs), imm16_21);
+ break;
+ case kCondT:
+ CHECK_EQ(rt, ZERO);
+ Bc1nez(static_cast<FpuRegister>(rs), imm16_21);
+ break;
case kUncond:
LOG(FATAL) << "Unexpected branch condition " << cond;
UNREACHABLE();
@@ -827,6 +843,86 @@
EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);
}
+void Mips64Assembler::CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x01);
+}
+
+void Mips64Assembler::CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x02);
+}
+
+void Mips64Assembler::CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x03);
+}
+
+void Mips64Assembler::CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x04);
+}
+
+void Mips64Assembler::CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x05);
+}
+
+void Mips64Assembler::CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x06);
+}
+
+void Mips64Assembler::CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x07);
+}
+
+void Mips64Assembler::CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x11);
+}
+
+void Mips64Assembler::CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x12);
+}
+
+void Mips64Assembler::CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x14, ft, fs, fd, 0x13);
+}
+
+void Mips64Assembler::CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x01);
+}
+
+void Mips64Assembler::CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x02);
+}
+
+void Mips64Assembler::CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x03);
+}
+
+void Mips64Assembler::CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x04);
+}
+
+void Mips64Assembler::CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x05);
+}
+
+void Mips64Assembler::CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x06);
+}
+
+void Mips64Assembler::CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x07);
+}
+
+void Mips64Assembler::CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x11);
+}
+
+void Mips64Assembler::CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x12);
+}
+
+void Mips64Assembler::CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x15, ft, fs, fd, 0x13);
+}
+
void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);
}
@@ -1134,6 +1230,10 @@
CHECK_NE(lhs_reg, ZERO);
CHECK_EQ(rhs_reg, ZERO);
break;
+ case kCondF:
+ case kCondT:
+ CHECK_EQ(rhs_reg, ZERO);
+ break;
case kUncond:
UNREACHABLE();
}
@@ -1188,6 +1288,10 @@
return kCondGEU;
case kCondGEU:
return kCondLTU;
+ case kCondF:
+ return kCondT;
+ case kCondT:
+ return kCondF;
case kUncond:
LOG(FATAL) << "Unexpected branch condition " << cond;
}
@@ -1567,7 +1671,7 @@
case Branch::kCondBranch:
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
EmitBcondc(condition, lhs, rhs, offset);
- Nop(); // TODO: improve by filling the forbidden slot.
+ Nop(); // TODO: improve by filling the forbidden/delay slot.
break;
case Branch::kCall:
CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
@@ -1657,6 +1761,14 @@
Bcond(label, kCondNEZ, rs);
}
+void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label) {
+ Bcond(label, kCondF, static_cast<GpuRegister>(ft), ZERO);
+}
+
+void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label) {
+ Bcond(label, kCondT, static_cast<GpuRegister>(ft), ZERO);
+}
+
void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
int32_t offset) {
if (!IsInt<16>(offset)) {
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 57fc19a..883f013 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -227,6 +227,8 @@
void Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16);
void Beqzc(GpuRegister rs, uint32_t imm21);
void Bnezc(GpuRegister rs, uint32_t imm21);
+ void Bc1eqz(FpuRegister ft, uint16_t imm16);
+ void Bc1nez(FpuRegister ft, uint16_t imm16);
void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
@@ -266,6 +268,26 @@
void MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void Cvtsw(FpuRegister fd, FpuRegister fs);
void Cvtdw(FpuRegister fd, FpuRegister fs);
@@ -317,6 +339,8 @@
void Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label);
void Beqzc(GpuRegister rs, Mips64Label* label);
void Bnezc(GpuRegister rs, Mips64Label* label);
+ void Bc1eqz(FpuRegister ft, Mips64Label* label);
+ void Bc1nez(FpuRegister ft, Mips64Label* label);
void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
@@ -474,6 +498,8 @@
kCondNEZ,
kCondLTU,
kCondGEU,
+ kCondF, // Floating-point predicate false.
+ kCondT, // Floating-point predicate true.
kUncond,
};
friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 29a5a88..bac4375 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -403,6 +403,106 @@
DriverStr(RepeatFFF(&mips64::Mips64Assembler::MaxD, "max.d ${reg1}, ${reg2}, ${reg3}"), "max.d");
}
+TEST_F(AssemblerMIPS64Test, CmpUnS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUnS, "cmp.un.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.un.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpEqS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpEqS, "cmp.eq.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.eq.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUeqS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUeqS, "cmp.ueq.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ueq.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpLtS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLtS, "cmp.lt.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.lt.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUltS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUltS, "cmp.ult.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ult.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpLeS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLeS, "cmp.le.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.le.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUleS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUleS, "cmp.ule.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ule.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpOrS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpOrS, "cmp.or.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.or.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUneS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUneS, "cmp.une.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.une.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpNeS) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpNeS, "cmp.ne.s ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ne.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUnD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUnD, "cmp.un.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.un.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpEqD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpEqD, "cmp.eq.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.eq.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUeqD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUeqD, "cmp.ueq.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ueq.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpLtD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLtD, "cmp.lt.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.lt.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUltD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUltD, "cmp.ult.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ult.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpLeD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLeD, "cmp.le.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.le.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUleD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUleD, "cmp.ule.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ule.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpOrD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpOrD, "cmp.or.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.or.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpUneD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUneD, "cmp.une.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.une.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CmpNeD) {
+ DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpNeD, "cmp.ne.d ${reg1}, ${reg2}, ${reg3}"),
+ "cmp.ne.d");
+}
+
TEST_F(AssemblerMIPS64Test, CvtDL) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "cvt.d.l");
}
@@ -591,6 +691,58 @@
BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc");
}
+TEST_F(AssemblerMIPS64Test, Bc1eqz) {
+ mips64::Mips64Label label;
+ __ Bc1eqz(mips64::F0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bc1eqz(mips64::F31, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bc1eqz $f0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bc1eqz $f31, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bc1eqz");
+}
+
+TEST_F(AssemblerMIPS64Test, Bc1nez) {
+ mips64::Mips64Label label;
+ __ Bc1nez(mips64::F0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
+ }
+ __ Bc1nez(mips64::F31, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bc1nez $f0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bc1nez $f31, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bc1nez");
+}
+
TEST_F(AssemblerMIPS64Test, LongBeqc) {
mips64::Mips64Label label;
__ Beqc(mips64::A0, mips64::A1, &label);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 9eb5e67..db07267 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1213,6 +1213,7 @@
void X86_64Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32());
EmitOperandSizeOverride();
EmitOptionalRex32(address);
EmitComplex(7, address, imm);
@@ -1221,6 +1222,7 @@
void X86_64Assembler::cmpl(CpuRegister reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32());
EmitOptionalRex32(reg);
EmitComplex(7, Operand(reg), imm);
}
@@ -1252,6 +1254,7 @@
void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int32());
EmitOptionalRex32(address);
EmitComplex(7, address, imm);
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 6fae8e4..50480d9 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -193,7 +193,7 @@
UsageError(" corresponding to the file descriptor specified by --zip-fd.");
UsageError(" Example: --zip-location=/system/app/Calculator.apk");
UsageError("");
- UsageError(" --oat-file=<file.oat>: specifies the oat output destination via a filename.");
+ UsageError(" --oat-file=<file.oat>: specifies an oat output destination via a filename.");
UsageError(" Example: --oat-file=/system/framework/boot.oat");
UsageError("");
UsageError(" --oat-fd=<number>: specifies the oat output destination via a file descriptor.");
@@ -203,10 +203,10 @@
UsageError(" to the file descriptor specified by --oat-fd.");
UsageError(" Example: --oat-location=/data/dalvik-cache/system@app@Calculator.apk.oat");
UsageError("");
- UsageError(" --oat-symbols=<file.oat>: specifies the oat output destination with full symbols.");
+ UsageError(" --oat-symbols=<file.oat>: specifies an oat output destination with full symbols.");
UsageError(" Example: --oat-symbols=/symbols/system/framework/boot.oat");
UsageError("");
- UsageError(" --image=<file.art>: specifies the output image filename.");
+ UsageError(" --image=<file.art>: specifies an output image filename.");
UsageError(" Example: --image=/system/framework/boot.art");
UsageError("");
UsageError(" --image-format=(uncompressed|lz4):");
@@ -315,10 +315,13 @@
UsageError(" stripped using standard command line tools such as strip or objcopy.");
UsageError(" (enabled by default in debug builds, disabled by default otherwise)");
UsageError("");
- UsageError(" --debuggable: Produce debuggable code. Implies --generate-debug-info.");
- UsageError("");
UsageError(" --no-generate-debug-info: Do not generate debug information for native debugging.");
UsageError("");
+ UsageError(" --debuggable: Produce code debuggable with Java debugger. Implies -g.");
+ UsageError("");
+ UsageError(" --native-debuggable: Produce code debuggable with native debugger (like LLDB).");
+ UsageError(" Implies --debuggable.");
+ UsageError("");
UsageError(" --runtime-arg <argument>: used to specify various arguments for the runtime,");
UsageError(" such as initial heap size, maximum heap size, and verbose output.");
UsageError(" Use a separate --runtime-arg switch for each argument.");
@@ -352,6 +355,9 @@
UsageError(" --app-image-file=<file-name>: specify a file name for app image.");
UsageError(" Example: --app-image-file=/data/dalvik-cache/system@app@Calculator.apk.art");
UsageError("");
+ UsageError(" --multi-image: specify that separate oat and image files be generated for each "
+ "input dex file.");
+ UsageError("");
std::cerr << "See log for usage error information\n";
exit(EXIT_FAILURE);
}
@@ -533,7 +539,9 @@
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
dex_file.release();
}
- oat_file_.release();
+ for (std::unique_ptr<File>& oat_file : oat_files_) {
+ oat_file.release();
+ }
runtime_.release();
verification_results_.release();
key_value_store_.release();
@@ -541,7 +549,7 @@
}
struct ParserOptions {
- std::string oat_symbols;
+ std::vector<const char*> oat_symbols;
std::string boot_image_filename;
bool watch_dog_enabled = true;
bool requested_specific_compiler = false;
@@ -641,8 +649,8 @@
}
}
- void ProcessOptions(ParserOptions* parser_options) {
- boot_image_ = !image_filename_.empty();
+ void ProcessOptions(ParserOptions* parser_options, bool multi_image) {
+ boot_image_ = !image_filenames_.empty();
app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
if (IsAppImage() && IsBootImage()) {
@@ -654,11 +662,11 @@
compiler_options_->debuggable_ = true;
}
- if (oat_filename_.empty() && oat_fd_ == -1) {
+ if (oat_filenames_.empty() && oat_fd_ == -1) {
Usage("Output must be supplied with either --oat-file or --oat-fd");
}
- if (!oat_filename_.empty() && oat_fd_ != -1) {
+ if (!oat_filenames_.empty() && oat_fd_ != -1) {
Usage("--oat-file should not be used with --oat-fd");
}
@@ -670,10 +678,19 @@
Usage("--oat-symbols should not be used with --host");
}
- if (oat_fd_ != -1 && !image_filename_.empty()) {
+ if (oat_fd_ != -1 && !image_filenames_.empty()) {
Usage("--oat-fd should not be used with --image");
}
+ if (!parser_options->oat_symbols.empty() &&
+ parser_options->oat_symbols.size() != oat_filenames_.size()) {
+ Usage("--oat-file arguments do not match --oat-symbols arguments");
+ }
+
+ if (!image_filenames_.empty() && image_filenames_.size() != oat_filenames_.size()) {
+ Usage("--oat-file arguments do not match --image arguments");
+ }
+
if (android_root_.empty()) {
const char* android_root_env_var = getenv("ANDROID_ROOT");
if (android_root_env_var == nullptr) {
@@ -734,6 +751,12 @@
Usage("--dex-location arguments do not match --dex-file arguments");
}
+ if (!dex_filenames_.empty() && !oat_filenames_.empty()) {
+ if (oat_filenames_.size() != 1 && oat_filenames_.size() != dex_filenames_.size()) {
+ Usage("--oat-file arguments must be singular or match --dex-file arguments");
+ }
+ }
+
if (zip_fd_ != -1 && zip_location_.empty()) {
Usage("--zip-location should be supplied with --zip-fd");
}
@@ -744,11 +767,8 @@
}
}
- oat_stripped_ = oat_filename_;
if (!parser_options->oat_symbols.empty()) {
- oat_unstripped_ = parser_options->oat_symbols;
- } else {
- oat_unstripped_ = oat_filename_;
+ oat_unstripped_ = std::move(parser_options->oat_symbols);
}
// If no instruction set feature was given, use the default one for the target
@@ -813,6 +833,89 @@
compiler_options_->verbose_methods_ = verbose_methods_.empty() ? nullptr : &verbose_methods_;
+ if (!IsBootImage() && multi_image) {
+ Usage("--multi-image can only be used when creating boot images");
+ }
+ if (IsBootImage() && multi_image && image_filenames_.size() > 1) {
+ Usage("--multi-image cannot be used with multiple image names");
+ }
+
+ // For now, if we're on the host and compile the boot image, *always* use multiple image files.
+ if (!kIsTargetBuild && IsBootImage()) {
+ if (image_filenames_.size() == 1) {
+ multi_image = true;
+ }
+ }
+
+ if (IsBootImage() && multi_image) {
+ // Expand the oat and image filenames.
+ std::string base_oat = oat_filenames_[0];
+ size_t last_oat_slash = base_oat.rfind('/');
+ if (last_oat_slash == std::string::npos) {
+ Usage("--multi-image used with unusable oat filename %s", base_oat.c_str());
+ }
+ // We also need to honor path components that were encoded through '@'. Otherwise the loading
+ // code won't be able to find the images.
+ if (base_oat.find('@', last_oat_slash) != std::string::npos) {
+ last_oat_slash = base_oat.rfind('@');
+ }
+ base_oat = base_oat.substr(0, last_oat_slash + 1);
+
+ std::string base_img = image_filenames_[0];
+ size_t last_img_slash = base_img.rfind('/');
+ if (last_img_slash == std::string::npos) {
+ Usage("--multi-image used with unusable image filename %s", base_img.c_str());
+ }
+ // We also need to honor path components that were encoded through '@'. Otherwise the loading
+ // code won't be able to find the images.
+ if (base_img.find('@', last_img_slash) != std::string::npos) {
+ last_img_slash = base_img.rfind('@');
+ }
+
+ // Get the prefix, which is the primary image name (without path components). Strip the
+ // extension.
+ std::string prefix = base_img.substr(last_img_slash + 1);
+ if (prefix.rfind('.') != std::string::npos) {
+ prefix = prefix.substr(0, prefix.rfind('.'));
+ }
+ if (!prefix.empty()) {
+ prefix = prefix + "-";
+ }
+
+ base_img = base_img.substr(0, last_img_slash + 1);
+
+ // Note: we have some special case here for our testing. We have to inject the differentiating
+ // parts for the different core images.
+ std::string infix; // Empty infix by default.
+ {
+ // Check the first name.
+ std::string dex_file = oat_filenames_[0];
+ size_t last_dex_slash = dex_file.rfind('/');
+ if (last_dex_slash != std::string::npos) {
+ dex_file = dex_file.substr(last_dex_slash + 1);
+ }
+ size_t last_dex_dot = dex_file.rfind('.');
+ if (last_dex_dot != std::string::npos) {
+ dex_file = dex_file.substr(0, last_dex_dot);
+ }
+ if (StartsWith(dex_file, "core-")) {
+ infix = dex_file.substr(strlen("core"));
+ }
+ }
+
+ // Now create the other names. Use a counted loop to skip the first one.
+ for (size_t i = 1; i < dex_locations_.size(); ++i) {
+ // TODO: Make everything properly std::string.
+ std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".art");
+ char_backing_storage_.push_back(base_img + image_name);
+ image_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+
+ std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat");
+ char_backing_storage_.push_back(base_oat + oat_name);
+ oat_filenames_.push_back((char_backing_storage_.end() - 1)->c_str());
+ }
+ }
+
// Done with usage checks, enable watchdog if requested
if (parser_options->watch_dog_enabled) {
watchdog_.reset(new WatchDog(true));
@@ -822,6 +925,37 @@
key_value_store_.reset(new SafeMap<std::string, std::string>());
}
+ // Modify the input string in the following way:
+ // 0) Assume input is /a/b/c.d
+ // 1) Strip the path -> c.d
+ // 2) Inject prefix p -> pc.d
+ // 3) Inject infix i -> pci.d
+ // 4) Replace suffix with s if it's "jar" -> d == "jar" -> pci.s
+ static std::string CreateMultiImageName(std::string in,
+ const std::string& prefix,
+ const std::string& infix,
+ const char* replace_suffix) {
+ size_t last_dex_slash = in.rfind('/');
+ if (last_dex_slash != std::string::npos) {
+ in = in.substr(last_dex_slash + 1);
+ }
+ if (!prefix.empty()) {
+ in = prefix + in;
+ }
+ if (!infix.empty()) {
+ // Inject infix.
+ size_t last_dot = in.rfind('.');
+ if (last_dot != std::string::npos) {
+ in.insert(last_dot, infix);
+ }
+ }
+ if (EndsWith(in, ".jar")) {
+ in = in.substr(0, in.length() - strlen(".jar")) +
+ (replace_suffix != nullptr ? replace_suffix : "");
+ }
+ return in;
+ }
+
void InsertCompileOptions(int argc, char** argv) {
std::ostringstream oss;
for (int i = 0; i < argc; ++i) {
@@ -862,6 +996,8 @@
std::unique_ptr<ParserOptions> parser_options(new ParserOptions());
compiler_options_.reset(new CompilerOptions());
+ bool multi_image = false;
+
for (int i = 0; i < argc; i++) {
const StringPiece option(argv[i]);
const bool log_options = false;
@@ -877,9 +1013,9 @@
} else if (option.starts_with("--zip-location=")) {
zip_location_ = option.substr(strlen("--zip-location=")).data();
} else if (option.starts_with("--oat-file=")) {
- oat_filename_ = option.substr(strlen("--oat-file=")).data();
+ oat_filenames_.push_back(option.substr(strlen("--oat-file=")).data());
} else if (option.starts_with("--oat-symbols=")) {
- parser_options->oat_symbols = option.substr(strlen("--oat-symbols=")).data();
+ parser_options->oat_symbols.push_back(option.substr(strlen("--oat-symbols=")).data());
} else if (option.starts_with("--oat-fd=")) {
ParseOatFd(option);
} else if (option == "--watch-dog") {
@@ -891,7 +1027,7 @@
} else if (option.starts_with("--oat-location=")) {
oat_location_ = option.substr(strlen("--oat-location=")).data();
} else if (option.starts_with("--image=")) {
- image_filename_ = option.substr(strlen("--image=")).data();
+ image_filenames_.push_back(option.substr(strlen("--image=")).data());
} else if (option.starts_with("--image-classes=")) {
image_classes_filename_ = option.substr(strlen("--image-classes=")).data();
} else if (option.starts_with("--image-classes-zip=")) {
@@ -958,41 +1094,56 @@
// conditional on having verbost methods.
gLogVerbosity.compiler = false;
Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_);
+ } else if (option == "--multi-image") {
+ multi_image = true;
+ } else if (option.starts_with("--no-inline-from=")) {
+ no_inline_from_string_ = option.substr(strlen("--no-inline-from=")).data();
} else if (!compiler_options_->ParseCompilerOption(option, Usage)) {
Usage("Unknown argument %s", option.data());
}
}
- ProcessOptions(parser_options.get());
+ ProcessOptions(parser_options.get(), multi_image);
// Insert some compiler things.
InsertCompileOptions(argc, argv);
}
- // Check whether the oat output file is writable, and open it for later. Also open a swap file,
- // if a name is given.
+ // Check whether the oat output files are writable, and open them for later. Also open a swap
+ // file, if a name is given.
bool OpenFile() {
- bool create_file = !oat_unstripped_.empty(); // as opposed to using open file descriptor
+ bool create_file = oat_fd_ == -1; // as opposed to using open file descriptor
if (create_file) {
- oat_file_.reset(OS::CreateEmptyFile(oat_unstripped_.c_str()));
- if (oat_location_.empty()) {
- oat_location_ = oat_filename_;
+ for (const char* oat_filename : oat_filenames_) {
+ std::unique_ptr<File> oat_file(OS::CreateEmptyFile(oat_filename));
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to create oat file: " << oat_filename;
+ return false;
+ }
+ if (create_file && fchmod(oat_file->Fd(), 0644) != 0) {
+ PLOG(ERROR) << "Failed to make oat file world readable: " << oat_filename;
+ oat_file->Erase();
+ return false;
+ }
+ oat_files_.push_back(std::move(oat_file));
}
} else {
- oat_file_.reset(new File(oat_fd_, oat_location_, true));
- oat_file_->DisableAutoClose();
- if (oat_file_->SetLength(0) != 0) {
+ std::unique_ptr<File> oat_file(new File(oat_fd_, oat_location_, true));
+ oat_file->DisableAutoClose();
+ if (oat_file->SetLength(0) != 0) {
PLOG(WARNING) << "Truncating oat file " << oat_location_ << " failed.";
}
- }
- if (oat_file_.get() == nullptr) {
- PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
- return false;
- }
- if (create_file && fchmod(oat_file_->Fd(), 0644) != 0) {
- PLOG(ERROR) << "Failed to make oat file world readable: " << oat_location_;
- oat_file_->Erase();
- return false;
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
+ return false;
+ }
+ if (create_file && fchmod(oat_file->Fd(), 0644) != 0) {
+ PLOG(ERROR) << "Failed to make oat file world readable: " << oat_location_;
+ oat_file->Erase();
+ return false;
+ }
+ oat_filenames_.push_back(oat_location_.c_str());
+ oat_files_.push_back(std::move(oat_file));
}
// Swap file handling.
@@ -1017,10 +1168,12 @@
return true;
}
- void EraseOatFile() {
- DCHECK(oat_file_.get() != nullptr);
- oat_file_->Erase();
- oat_file_.reset();
+ void EraseOatFiles() {
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ DCHECK(oat_files_[i].get() != nullptr);
+ oat_files_[i]->Erase();
+ oat_files_[i].reset();
+ }
}
void Shutdown() {
@@ -1155,9 +1308,40 @@
}
}
+ // Organize inputs, handling multi-dex and multiple oat file outputs.
+ CreateDexOatMappings();
+
return true;
}
+ void CreateDexOatMappings() {
+ if (oat_files_.size() > 1) {
+ // TODO: This needs to change, as it is not a stable mapping. If a dex file is missing,
+ // the images will be out of whack. b/26317072
+ size_t index = 0;
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ std::vector<const DexFile*> dex_files;
+ if (index < dex_files_.size()) {
+ dex_files.push_back(dex_files_[index]);
+ dex_file_oat_filename_map_.emplace(dex_files_[index], oat_filenames_[i]);
+ index++;
+ while (index < dex_files_.size() &&
+ (dex_files_[index]->GetBaseLocation() == dex_files_[index - 1]->GetBaseLocation())) {
+ dex_file_oat_filename_map_.emplace(dex_files_[index], oat_filenames_[i]);
+ dex_files.push_back(dex_files_[index]);
+ index++;
+ }
+ }
+ dex_files_per_oat_file_.push_back(std::move(dex_files));
+ }
+ } else {
+ dex_files_per_oat_file_.push_back(dex_files_);
+ for (const DexFile* dex_file : dex_files_) {
+ dex_file_oat_filename_map_.emplace(dex_file, oat_filenames_[0]);
+ }
+ }
+ }
+
// Create and invoke the compiler driver. This will compile all the dex files.
void Compile() {
TimingLogger::ScopedTiming t("dex2oat Compile", timings_);
@@ -1188,6 +1372,96 @@
class_loader = class_linker->CreatePathClassLoader(self, dex_files_, class_path_class_loader);
}
+ // Find the dex file we should not inline from.
+
+ // For now, on the host always have core-oj removed.
+ if (!kIsTargetBuild && no_inline_from_string_.empty()) {
+ no_inline_from_string_ = "core-oj";
+ }
+
+ if (!no_inline_from_string_.empty()) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ std::vector<const DexFile*> class_path_files = MakeNonOwningPointerVector(class_path_files_);
+ std::vector<const std::vector<const DexFile*>*> dex_file_vectors = {
+ &class_linker->GetBootClassPath(),
+ &class_path_files,
+ &dex_files_
+ };
+ for (const std::vector<const DexFile*>* dex_file_vector : dex_file_vectors) {
+ if (dex_file_vector == nullptr) {
+ continue;
+ }
+
+ bool found = false;
+
+ for (const DexFile* dex_file : *dex_file_vector) {
+ // Try the complete location first.
+ found = no_inline_from_string_ == dex_file->GetLocation();
+ // Then try just the name.
+ if (!found) {
+ size_t last_slash = dex_file->GetLocation().rfind('/');
+ if (last_slash != std::string::npos) {
+ found = StartsWith(dex_file->GetLocation().substr(last_slash + 1),
+ no_inline_from_string_.c_str());
+ }
+ }
+
+ if (found) {
+ VLOG(compiler) << "Disabling inlining from " << dex_file->GetLocation();
+ compiler_options_->no_inline_from_ = dex_file;
+ break;
+ }
+ }
+
+ if (found) {
+ break;
+ }
+ }
+ }
+
+ if (IsBootImage() && image_filenames_.size() > 1) {
+ // If we're compiling the boot image, store the boot classpath into the Key-Value store. If
+ // the image filename was adapted (e.g., for our tests), we need to change this here, too, but
+ // need to strip all path components (they will be re-established when loading).
+ // We need this for the multi-image case.
+ std::ostringstream bootcp_oss;
+ bool first_bootcp = true;
+ for (size_t i = 0; i < dex_locations_.size(); ++i) {
+ if (!first_bootcp) {
+ bootcp_oss << ":";
+ }
+
+ std::string dex_loc = dex_locations_[i];
+ std::string image_filename = image_filenames_[i];
+
+ // Use the dex_loc path, but the image_filename name (without path elements).
+ size_t dex_last_slash = dex_loc.rfind('/');
+
+ // npos is max(size_t). That makes this a bit ugly.
+ size_t image_last_slash = image_filename.rfind('/');
+ size_t image_last_at = image_filename.rfind('@');
+ size_t image_last_sep = (image_last_slash == std::string::npos)
+ ? image_last_at
+ : (image_last_at == std::string::npos)
+ ? std::string::npos
+ : std::max(image_last_slash, image_last_at);
+ // Note: whenever image_last_sep == npos, +1 overflow means using the full string.
+
+ if (dex_last_slash == std::string::npos) {
+ dex_loc = image_filename.substr(image_last_sep + 1);
+ } else {
+ dex_loc = dex_loc.substr(0, dex_last_slash + 1) +
+ image_filename.substr(image_last_sep + 1);
+ }
+
+ // Image filenames already end with .art, no need to replace.
+
+ bootcp_oss << dex_loc;
+ first_bootcp = false;
+ }
+ key_value_store_->Put(OatHeader::kBootClassPath, bootcp_oss.str());
+ }
+
driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
&method_inliner_map_,
@@ -1205,12 +1479,14 @@
dump_cfg_append_,
compiler_phases_timings_.get(),
swap_fd_,
- profile_file_));
+ profile_file_,
+ &dex_file_oat_filename_map_));
driver_->SetDexFilesForOatFile(dex_files_);
driver_->CompileAll(class_loader, dex_files_, timings_);
}
+ // TODO: Update comments about how this works for multi image. b/26317072
// Notes on the interleaving of creating the image and oat file to
// ensure the references between the two are correct.
//
@@ -1272,17 +1548,16 @@
// Steps 1.-3. are done by the CreateOatFile() above, steps 4.-5.
// are done by the CreateImageFile() below.
-
// Write out the generated code part. Calls the OatWriter and ElfBuilder. Also prepares the
// ImageWriter, if necessary.
// Note: Flushing (and closing) the file is the caller's responsibility, except for the failure
// case (when the file will be explicitly erased).
- bool CreateOatFile() {
+ bool CreateOatFiles() {
CHECK(key_value_store_.get() != nullptr);
TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
- std::unique_ptr<OatWriter> oat_writer;
+ std::vector<std::unique_ptr<OatWriter>> oat_writers;
{
TimingLogger::ScopedTiming t2("dex2oat OatWriter", timings_);
std::string image_file_location;
@@ -1291,10 +1566,14 @@
int32_t image_patch_delta = 0;
if (app_image_ && image_base_ == 0) {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- image_base_ = RoundUp(
- reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
- kPageSize);
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ // TODO: IS THIS IN ORDER? JUST TAKE THE LAST ONE?
+ image_base_ = std::max(image_base_, RoundUp(
+ reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
+ kPageSize));
+ }
VLOG(compiler) << "App image base=" << reinterpret_cast<void*>(image_base_);
}
@@ -1304,27 +1583,36 @@
if (!IsBootImage()) {
TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ image_file_location_oat_checksum = image_spaces[0]->GetImageHeader().GetOatChecksum();
image_file_location_oat_data_begin =
- reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatDataBegin());
- image_file_location = image_space->GetImageFilename();
- image_patch_delta = image_space->GetImageHeader().GetPatchDelta();
+ reinterpret_cast<uintptr_t>(image_spaces[0]->GetImageHeader().GetOatDataBegin());
+ image_patch_delta = image_spaces[0]->GetImageHeader().GetPatchDelta();
+ std::vector<std::string> image_filenames;
+ for (const gc::space::ImageSpace* image_space : image_spaces) {
+ image_filenames.push_back(image_space->GetImageFilename());
+ }
+ image_file_location = Join(image_filenames, ':');
}
if (!image_file_location.empty()) {
key_value_store_->Put(OatHeader::kImageLocationKey, image_file_location);
}
- oat_writer.reset(new OatWriter(dex_files_,
- image_file_location_oat_checksum,
- image_file_location_oat_data_begin,
- image_patch_delta,
- driver_.get(),
- image_writer_.get(),
- IsBootImage(),
- timings_,
- key_value_store_.get()));
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ std::vector<const DexFile*>& dex_files = dex_files_per_oat_file_[i];
+ std::unique_ptr<OatWriter> oat_writer(new OatWriter(dex_files,
+ image_file_location_oat_checksum,
+ image_file_location_oat_data_begin,
+ image_patch_delta,
+ driver_.get(),
+ image_writer_.get(),
+ IsBootImage(),
+ timings_,
+ key_value_store_.get()));
+ oat_writers.push_back(std::move(oat_writer));
+ }
}
if (IsImage()) {
@@ -1339,37 +1627,56 @@
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
- std::unique_ptr<ElfWriter> elf_writer =
- CreateElfWriterQuick(instruction_set_, compiler_options_.get(), oat_file_.get());
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ std::unique_ptr<File>& oat_file = oat_files_[i];
+ std::unique_ptr<OatWriter>& oat_writer = oat_writers[i];
+ std::unique_ptr<ElfWriter> elf_writer =
+ CreateElfWriterQuick(instruction_set_, compiler_options_.get(), oat_file.get());
- elf_writer->Start();
+ elf_writer->Start();
- OutputStream* rodata = elf_writer->StartRoData();
- if (!oat_writer->WriteRodata(rodata)) {
- LOG(ERROR) << "Failed to write .rodata section to the ELF file " << oat_file_->GetPath();
- return false;
- }
- elf_writer->EndRoData(rodata);
+ OutputStream* rodata = elf_writer->StartRoData();
+ if (!oat_writer->WriteRodata(rodata)) {
+ LOG(ERROR) << "Failed to write .rodata section to the ELF file " << oat_file->GetPath();
+ return false;
+ }
+ elf_writer->EndRoData(rodata);
- OutputStream* text = elf_writer->StartText();
- if (!oat_writer->WriteCode(text)) {
- LOG(ERROR) << "Failed to write .text section to the ELF file " << oat_file_->GetPath();
- return false;
- }
- elf_writer->EndText(text);
+ OutputStream* text = elf_writer->StartText();
+ if (!oat_writer->WriteCode(text)) {
+ LOG(ERROR) << "Failed to write .text section to the ELF file " << oat_file->GetPath();
+ return false;
+ }
+ elf_writer->EndText(text);
- elf_writer->SetBssSize(oat_writer->GetBssSize());
- elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
- elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
+ elf_writer->SetBssSize(oat_writer->GetBssSize());
+ elf_writer->WriteDynamicSection();
+ elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations());
- if (!elf_writer->End()) {
- LOG(ERROR) << "Failed to write ELF file " << oat_file_->GetPath();
- return false;
+ if (!elf_writer->End()) {
+ LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
+ return false;
+ }
+
+ // Flush the oat file.
+ if (oat_files_[i] != nullptr) {
+ if (oat_files_[i]->Flush() != 0) {
+ PLOG(ERROR) << "Failed to flush oat file: " << oat_filenames_[i];
+ oat_files_[i]->Erase();
+ return false;
+ }
+ }
+
+ if (IsImage()) {
+ // Update oat estimates.
+ UpdateImageWriter(i);
+ }
+
+ VLOG(compiler) << "Oat file written successfully: " << oat_filenames_[i];
}
}
- VLOG(compiler) << "Oat file written successfully (unstripped): " << oat_location_;
return true;
}
@@ -1380,70 +1687,80 @@
if (!CreateImageFile()) {
return false;
}
- VLOG(compiler) << "Image written successfully: " << image_filename_;
+ VLOG(compiler) << "Images written successfully";
}
return true;
}
- // Create a copy from unstripped to stripped.
- bool CopyUnstrippedToStripped() {
- // If we don't want to strip in place, copy from unstripped location to stripped location.
- // We need to strip after image creation because FixupElf needs to use .strtab.
- if (oat_unstripped_ != oat_stripped_) {
- // If the oat file is still open, flush it.
- if (oat_file_.get() != nullptr && oat_file_->IsOpened()) {
- if (!FlushCloseOatFile()) {
+ // Create a copy from stripped to unstripped.
+ bool CopyStrippedToUnstripped() {
+ for (size_t i = 0; i < oat_unstripped_.size(); ++i) {
+ // If we don't want to strip in place, copy from stripped location to unstripped location.
+ // We need to strip after image creation because FixupElf needs to use .strtab.
+ if (strcmp(oat_unstripped_[i], oat_filenames_[i]) != 0) {
+ // If the oat file is still open, flush it.
+ if (oat_files_[i].get() != nullptr && oat_files_[i]->IsOpened()) {
+ if (!FlushCloseOatFile(i)) {
+ return false;
+ }
+ }
+
+ TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
+ std::unique_ptr<File> in(OS::OpenFileForReading(oat_filenames_[i]));
+ std::unique_ptr<File> out(OS::CreateEmptyFile(oat_unstripped_[i]));
+ size_t buffer_size = 8192;
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
+ while (true) {
+ int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
+ if (bytes_read <= 0) {
+ break;
+ }
+ bool write_ok = out->WriteFully(buffer.get(), bytes_read);
+ CHECK(write_ok);
+ }
+ if (out->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_unstripped_[i];
+ return false;
+ }
+ VLOG(compiler) << "Oat file copied successfully (unstripped): " << oat_unstripped_[i];
+ }
+ }
+ return true;
+ }
+
+ bool FlushOatFiles() {
+ TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ if (oat_files_[i].get() != nullptr) {
+ if (oat_files_[i]->Flush() != 0) {
+ PLOG(ERROR) << "Failed to flush oat file: " << oat_filenames_[i];
+ oat_files_[i]->Erase();
return false;
}
}
-
- TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
- std::unique_ptr<File> in(OS::OpenFileForReading(oat_unstripped_.c_str()));
- std::unique_ptr<File> out(OS::CreateEmptyFile(oat_stripped_.c_str()));
- size_t buffer_size = 8192;
- std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
- while (true) {
- int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
- if (bytes_read <= 0) {
- break;
- }
- bool write_ok = out->WriteFully(buffer.get(), bytes_read);
- CHECK(write_ok);
- }
- if (out->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_stripped_;
- return false;
- }
- VLOG(compiler) << "Oat file copied successfully (stripped): " << oat_stripped_;
}
return true;
}
- bool FlushOatFile() {
- if (oat_file_.get() != nullptr) {
- TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
- if (oat_file_->Flush() != 0) {
- PLOG(ERROR) << "Failed to flush oat file: " << oat_location_ << " / "
- << oat_filename_;
- oat_file_->Erase();
- return false;
- }
- }
- return true;
- }
-
- bool FlushCloseOatFile() {
- if (oat_file_.get() != nullptr) {
- std::unique_ptr<File> tmp(oat_file_.release());
+ bool FlushCloseOatFile(size_t i) {
+ if (oat_files_[i].get() != nullptr) {
+ std::unique_ptr<File> tmp(oat_files_[i].release());
if (tmp->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close oat file: " << oat_location_ << " / "
- << oat_filename_;
+ PLOG(ERROR) << "Failed to flush and close oat file: " << oat_filenames_[i];
return false;
}
}
return true;
}
+ bool FlushCloseOatFiles() {
+ bool result = true;
+ for (size_t i = 0; i < oat_files_.size(); ++i) {
+ result &= FlushCloseOatFile(i);
+ }
+ return result;
+ }
+
void DumpTiming() {
if (dump_timing_ || (dump_slow_timing_ && timings_->GetTotalNs() > MsToNs(1000))) {
LOG(INFO) << Dumpable<TimingLogger>(*timings_);
@@ -1704,42 +2021,52 @@
image_base,
compiler_options_->GetCompilePic(),
IsAppImage(),
- image_storage_mode_));
+ image_storage_mode_,
+ oat_filenames_,
+ dex_file_oat_filename_map_));
}
- // Let the ImageWriter write the image file. If we do not compile PIC, also fix up the oat file.
+ // Let the ImageWriter write the image files. If we do not compile PIC, also fix up the oat files.
bool CreateImageFile()
REQUIRES(!Locks::mutator_lock_) {
CHECK(image_writer_ != nullptr);
- if (!image_writer_->Write(app_image_fd_,
- IsBootImage() ? image_filename_ : app_image_file_name_,
- oat_unstripped_,
- oat_location_)) {
- LOG(ERROR) << "Failed to create image file " << image_filename_;
+ if (!IsBootImage()) {
+ image_filenames_.push_back(app_image_file_name_.c_str());
+ }
+ if (!image_writer_->Write(app_image_fd_, image_filenames_, oat_filenames_)) {
+ LOG(ERROR) << "Failure during image file creation";
return false;
}
- uintptr_t oat_data_begin = image_writer_->GetOatDataBegin();
+ // We need the OatDataBegin entries.
+ std::map<const char*, uintptr_t> oat_data_begins;
+ for (const char* oat_filename : oat_filenames_) {
+ oat_data_begins.emplace(oat_filename, image_writer_->GetOatDataBegin(oat_filename));
+ }
// Destroy ImageWriter before doing FixupElf.
image_writer_.reset();
- // Do not fix up the ELF file if we are --compile-pic or compiing the app image
- if (!compiler_options_->GetCompilePic() && IsBootImage()) {
- std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_unstripped_.c_str()));
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open ELF file: " << oat_unstripped_;
- return false;
- }
+ for (const char* oat_filename : oat_filenames_) {
+ // Do not fix up the ELF file if we are --compile-pic or compiling the app image
+ if (!compiler_options_->GetCompilePic() && IsBootImage()) {
+ std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
+ if (oat_file.get() == nullptr) {
+ PLOG(ERROR) << "Failed to open ELF file: " << oat_filename;
+ return false;
+ }
- if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
- oat_file->Erase();
- LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
- return false;
- }
+ uintptr_t oat_data_begin = oat_data_begins.find(oat_filename)->second;
- if (oat_file->FlushCloseOrErase()) {
- PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
- return false;
+ if (!ElfWriter::Fixup(oat_file.get(), oat_data_begin)) {
+ oat_file->Erase();
+ LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
+ return false;
+ }
+
+ if (oat_file->FlushCloseOrErase()) {
+ PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
+ return false;
+ }
}
}
@@ -1842,6 +2169,33 @@
"");
}
+ std::string StripIsaFrom(const char* image_filename, InstructionSet isa) {
+ std::string res(image_filename);
+ size_t last_slash = res.rfind('/');
+ if (last_slash == std::string::npos || last_slash == 0) {
+ return res;
+ }
+ size_t penultimate_slash = res.rfind('/', last_slash - 1);
+ if (penultimate_slash == std::string::npos) {
+ return res;
+ }
+ // Check that the string in-between is the expected one.
+ if (res.substr(penultimate_slash + 1, last_slash - penultimate_slash - 1) !=
+ GetInstructionSetString(isa)) {
+ LOG(WARNING) << "Unexpected string when trying to strip isa: " << res;
+ return res;
+ }
+ return res.substr(0, penultimate_slash) + res.substr(last_slash);
+ }
+
+ // Update the estimate for the oat file with the given index.
+ void UpdateImageWriter(size_t index) {
+ DCHECK(image_writer_ != nullptr);
+ DCHECK_LT(index, oat_filenames_.size());
+
+ image_writer_->UpdateOatFile(oat_filenames_[index]);
+ }
+
std::unique_ptr<CompilerOptions> compiler_options_;
Compiler::Kind compiler_kind_;
@@ -1863,11 +2217,10 @@
size_t thread_count_;
uint64_t start_ns_;
std::unique_ptr<WatchDog> watchdog_;
- std::unique_ptr<File> oat_file_;
- std::string oat_stripped_;
- std::string oat_unstripped_;
+ std::vector<std::unique_ptr<File>> oat_files_;
std::string oat_location_;
- std::string oat_filename_;
+ std::vector<const char*> oat_filenames_;
+ std::vector<const char*> oat_unstripped_;
int oat_fd_;
std::vector<const char*> dex_filenames_;
std::vector<const char*> dex_locations_;
@@ -1875,7 +2228,7 @@
std::string zip_location_;
std::string boot_image_filename_;
std::vector<const char*> runtime_args_;
- std::string image_filename_;
+ std::vector<const char*> image_filenames_;
uintptr_t image_base_;
const char* image_classes_zip_filename_;
const char* image_classes_filename_;
@@ -1892,6 +2245,7 @@
bool is_host_;
std::string android_root_;
std::vector<const DexFile*> dex_files_;
+ std::string no_inline_from_string_;
std::vector<jobject> dex_caches_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
@@ -1912,6 +2266,11 @@
std::string profile_file_; // Profile file to use
TimingLogger* timings_;
std::unique_ptr<CumulativeLogger> compiler_phases_timings_;
+ std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_;
+ std::unordered_map<const DexFile*, const char*> dex_file_oat_filename_map_;
+
+ // Backing storage.
+ std::vector<std::string> char_backing_storage_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
};
@@ -1939,19 +2298,18 @@
static int CompileImage(Dex2Oat& dex2oat) {
dex2oat.Compile();
- // Create the boot.oat.
- if (!dex2oat.CreateOatFile()) {
- dex2oat.EraseOatFile();
+ if (!dex2oat.CreateOatFiles()) {
+ dex2oat.EraseOatFiles();
return EXIT_FAILURE;
}
- // Flush and close the boot.oat. We always expect the output file by name, and it will be
- // re-opened from the unstripped name.
- if (!dex2oat.FlushCloseOatFile()) {
+ // Close the image oat files. We always expect the output file by name, and it will be
+ // re-opened from the unstripped name. Note: it's easier to *flush* and close...
+ if (!dex2oat.FlushCloseOatFiles()) {
return EXIT_FAILURE;
}
- // Creates the boot.art and patches the boot.oat.
+ // Creates the boot.art and patches the oat files.
if (!dex2oat.HandleImage()) {
return EXIT_FAILURE;
}
@@ -1962,13 +2320,13 @@
return EXIT_SUCCESS;
}
- // Copy unstripped to stripped location, if necessary.
- if (!dex2oat.CopyUnstrippedToStripped()) {
+ // Copy stripped to unstripped location, if necessary.
+ if (!dex2oat.CopyStrippedToUnstripped()) {
return EXIT_FAILURE;
}
- // FlushClose again, as stripping might have re-opened the oat file.
- if (!dex2oat.FlushCloseOatFile()) {
+ // FlushClose again, as stripping might have re-opened the oat files.
+ if (!dex2oat.FlushCloseOatFiles()) {
return EXIT_FAILURE;
}
@@ -1979,21 +2337,17 @@
static int CompileApp(Dex2Oat& dex2oat) {
dex2oat.Compile();
- // Create the app oat.
- if (!dex2oat.CreateOatFile()) {
- dex2oat.EraseOatFile();
+ if (!dex2oat.CreateOatFiles()) {
+ dex2oat.EraseOatFiles();
return EXIT_FAILURE;
}
- // Do not close the oat file here. We might haven gotten the output file by file descriptor,
+ // Do not close the oat files here. We might have gotten the output file by file descriptor,
// which we would lose.
- if (!dex2oat.FlushOatFile()) {
- return EXIT_FAILURE;
- }
// When given --host, finish early without stripping.
if (dex2oat.IsHost()) {
- if (!dex2oat.FlushCloseOatFile()) {
+ if (!dex2oat.FlushCloseOatFiles()) {
return EXIT_FAILURE;
}
@@ -2001,14 +2355,14 @@
return EXIT_SUCCESS;
}
- // Copy unstripped to stripped location, if necessary. This will implicitly flush & close the
- // unstripped version. If this is given, we expect to be able to open writable files by name.
- if (!dex2oat.CopyUnstrippedToStripped()) {
+ // Copy stripped to unstripped location, if necessary. This will implicitly flush & close the
+ // stripped versions. If this is given, we expect to be able to open writable files by name.
+ if (!dex2oat.CopyStrippedToUnstripped()) {
return EXIT_FAILURE;
}
- // Flush and close the file.
- if (!dex2oat.FlushCloseOatFile()) {
+ // Flush and close the files.
+ if (!dex2oat.FlushCloseOatFiles()) {
return EXIT_FAILURE;
}
@@ -2044,7 +2398,7 @@
}
if (!dex2oat.Setup()) {
- dex2oat.EraseOatFile();
+ dex2oat.EraseOatFiles();
return EXIT_FAILURE;
}
diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc
index 4230cb2..9819233 100644
--- a/dexdump/dexdump_test.cc
+++ b/dexdump/dexdump_test.cc
@@ -37,7 +37,7 @@
virtual void SetUp() {
CommonRuntimeTest::SetUp();
// Dogfood our own lib core dex file.
- dex_file_ = GetLibCoreDexFileName();
+ dex_file_ = GetLibCoreDexFileNames()[0];
}
// Runs test with given arguments.
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 82179de..9a65ba6 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -37,7 +37,7 @@
virtual void SetUp() {
CommonRuntimeTest::SetUp();
// Dogfood our own lib core dex file.
- dex_file_ = GetLibCoreDexFileName();
+ dex_file_ = GetLibCoreDexFileNames()[0];
}
// Runs test with given arguments.
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 5e71053..93a0974 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -49,7 +49,7 @@
public:
explicit ImgDiagDumper(std::ostream* os,
const ImageHeader& image_header,
- const char* image_location,
+ const std::string& image_location,
pid_t image_diff_pid)
: os_(os),
image_header_(image_header),
@@ -163,7 +163,7 @@
std::string error_msg;
// Walk the bytes and diff against our boot image
- const ImageHeader& boot_image_header = GetBootImageHeader();
+ const ImageHeader& boot_image_header = image_header_;
os << "\nObserving boot image header at address "
<< reinterpret_cast<const void*>(&boot_image_header)
@@ -812,14 +812,6 @@
return page_frame_number != page_frame_number_clean;
}
- static const ImageHeader& GetBootImageHeader() {
- gc::Heap* heap = Runtime::Current()->GetHeap();
- gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
- CHECK(image_space != nullptr);
- const ImageHeader& image_header = image_space->GetImageHeader();
- return image_header;
- }
-
private:
// Return the image location, stripped of any directories, e.g. "boot.art" or "core.art"
std::string GetImageLocationBaseName() const {
@@ -828,28 +820,31 @@
std::ostream* os_;
const ImageHeader& image_header_;
- const char* image_location_;
+ const std::string image_location_;
pid_t image_diff_pid_; // Dump image diff against boot.art if pid is non-negative
DISALLOW_COPY_AND_ASSIGN(ImgDiagDumper);
};
-static int DumpImage(Runtime* runtime, const char* image_location,
- std::ostream* os, pid_t image_diff_pid) {
+static int DumpImage(Runtime* runtime, std::ostream* os, pid_t image_diff_pid) {
ScopedObjectAccess soa(Thread::Current());
gc::Heap* heap = runtime->GetHeap();
- gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
- CHECK(image_space != nullptr);
- const ImageHeader& image_header = image_space->GetImageHeader();
- if (!image_header.IsValid()) {
- fprintf(stderr, "Invalid image header %s\n", image_location);
- return EXIT_FAILURE;
+ std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
+ CHECK(!image_spaces.empty());
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ if (!image_header.IsValid()) {
+ fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
+ return EXIT_FAILURE;
+ }
+
+ ImgDiagDumper img_diag_dumper(
+ os, image_header, image_space->GetImageLocation(), image_diff_pid);
+ if (!img_diag_dumper.Dump()) {
+ return EXIT_FAILURE;
+ }
}
-
- ImgDiagDumper img_diag_dumper(os, image_header, image_location, image_diff_pid);
-
- bool success = img_diag_dumper.Dump();
- return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
+ return EXIT_SUCCESS;
}
struct ImgDiagArgs : public CmdlineArgs {
@@ -935,7 +930,6 @@
CHECK(args_ != nullptr);
return DumpImage(runtime,
- args_->boot_image_location_,
args_->os_,
args_->image_diff_pid_) == EXIT_SUCCESS;
}
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index a926ca5..dc101e5 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -47,9 +47,10 @@
CommonRuntimeTest::SetUp();
// We loaded the runtime with an explicit image. Therefore the image space must exist.
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- ASSERT_TRUE(image_space != nullptr);
- boot_image_location_ = image_space->GetImageLocation();
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ ASSERT_TRUE(!image_spaces.empty());
+ boot_image_location_ = image_spaces[0]->GetImageLocation();
}
virtual void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE {
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index a3ef38d..5c75f20 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -74,14 +74,14 @@
.PHONY: dump-oat-boot-$(TARGET_ARCH)
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
dump-oat-boot-$(TARGET_ARCH): $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
- $(OATDUMP) --image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION) \
+ $(OATDUMP) $(addprefix --image=,$(DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
--output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_ARCH).oatdump.txt --instruction-set=$(TARGET_ARCH)
@echo Output in $(ART_DUMP_OAT_PATH)/boot.$(TARGET_ARCH).oatdump.txt
endif
ifdef TARGET_2ND_ARCH
dump-oat-boot-$(TARGET_2ND_ARCH): $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
- $(OATDUMP) --image=$(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION) \
+ $(OATDUMP) $(addprefix --image=,$(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
--output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt --instruction-set=$(TARGET_2ND_ARCH)
@echo Output in $(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt
endif
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 5833129..52c6524 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1492,6 +1492,8 @@
os << "MAGIC: " << image_header_.GetMagic() << "\n\n";
+ os << "IMAGE LOCATION: " << image_space_.GetImageLocation() << "\n\n";
+
os << "IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetImageBegin()) << "\n\n";
os << "IMAGE SIZE: " << image_header_.GetImageSize() << "\n\n";
@@ -1599,9 +1601,8 @@
os << "OBJECTS:\n" << std::flush;
- // Loop through all the image spaces and dump their objects.
+ // Loop through the image space and dump its objects.
gc::Heap* heap = runtime->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
Thread* self = Thread::Current();
{
{
@@ -1629,21 +1630,16 @@
}
}
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- for (const auto& space : spaces) {
- if (space->IsImageSpace()) {
- auto* image_space = space->AsImageSpace();
- // Dump the normal objects before ArtMethods.
- image_space->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
- indent_os << "\n";
- // TODO: Dump fields.
- // Dump methods after.
- const auto& methods_section = image_header_.GetMethodsSection();
- const size_t pointer_size =
- InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet());
- DumpArtMethodVisitor visitor(this);
- methods_section.VisitPackedArtMethods(&visitor, image_space->Begin(), pointer_size);
- }
- }
+ // Dump the normal objects before ArtMethods.
+ image_space_.GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+ indent_os << "\n";
+ // TODO: Dump fields.
+ // Dump methods after.
+ const auto& methods_section = image_header_.GetMethodsSection();
+ const size_t pointer_size =
+ InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet());
+ DumpArtMethodVisitor visitor(this);
+ methods_section.VisitPackedArtMethods(&visitor, image_space_.Begin(), pointer_size);
// Dump the large objects separately.
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
indent_os << "\n";
@@ -2163,6 +2159,9 @@
size_t sum_of_expansion = 0;
size_t sum_of_expansion_squared = 0;
size_t n = method_outlier_size.size();
+ if (n == 0) {
+ return;
+ }
for (size_t i = 0; i < n; i++) {
size_t cur_size = method_outlier_size[i];
sum_of_sizes += cur_size;
@@ -2377,26 +2376,28 @@
DISALLOW_COPY_AND_ASSIGN(ImageDumper);
};
-static int DumpImage(Runtime* runtime, const char* image_location, OatDumperOptions* options,
- std::ostream* os) {
+static int DumpImage(Runtime* runtime, OatDumperOptions* options, std::ostream* os) {
// Dumping the image, no explicit class loader.
- NullHandle<mirror::ClassLoader> null_class_loader;
+ ScopedNullHandle<mirror::ClassLoader> null_class_loader;
options->class_loader_ = &null_class_loader;
ScopedObjectAccess soa(Thread::Current());
gc::Heap* heap = runtime->GetHeap();
- gc::space::ImageSpace* image_space = heap->GetBootImageSpace();
- CHECK(image_space != nullptr);
- const ImageHeader& image_header = image_space->GetImageHeader();
- if (!image_header.IsValid()) {
- fprintf(stderr, "Invalid image header %s\n", image_location);
- return EXIT_FAILURE;
+ std::vector<gc::space::ImageSpace*> image_spaces = heap->GetBootImageSpaces();
+ CHECK(!image_spaces.empty());
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ if (!image_header.IsValid()) {
+ fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
+ return EXIT_FAILURE;
+ }
+
+ ImageDumper image_dumper(os, *image_space, image_header, options);
+ if (!image_dumper.Dump()) {
+ return EXIT_FAILURE;
+ }
}
-
- ImageDumper image_dumper(os, *image_space, image_header, options);
-
- bool success = image_dumper.Dump();
- return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
+ return EXIT_SUCCESS;
}
static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOptions* options,
@@ -2439,7 +2440,7 @@
static int DumpOatWithoutRuntime(OatFile* oat_file, OatDumperOptions* options, std::ostream* os) {
CHECK(oat_file != nullptr && options != nullptr);
// No image = no class loader.
- NullHandle<mirror::ClassLoader> null_class_loader;
+ ScopedNullHandle<mirror::ClassLoader> null_class_loader;
options->class_loader_ = &null_class_loader;
OatDumper oat_dumper(*oat_file, *options);
@@ -2689,8 +2690,7 @@
args_->os_) == EXIT_SUCCESS;
}
- return DumpImage(runtime, args_->image_location_, oat_dumper_options_.get(), args_->os_)
- == EXIT_SUCCESS;
+ return DumpImage(runtime, oat_dumper_options_.get(), args_->os_) == EXIT_SUCCESS;
}
std::unique_ptr<OatDumperOptions> oat_dumper_options_;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 46ab34b..b403abd 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -118,6 +118,38 @@
return true;
}
+static File* CreateOrOpen(const char* name, bool* created) {
+ if (OS::FileExists(name)) {
+ *created = false;
+ return OS::OpenFileReadWrite(name);
+ } else {
+ *created = true;
+ std::unique_ptr<File> f(OS::CreateEmptyFile(name));
+ if (f.get() != nullptr) {
+ if (fchmod(f->Fd(), 0644) != 0) {
+ PLOG(ERROR) << "Unable to make " << name << " world readable";
+ TEMP_FAILURE_RETRY(unlink(name));
+ return nullptr;
+ }
+ }
+ return f.release();
+ }
+}
+
+// Either try to close the file (close=true), or erase it.
+static bool FinishFile(File* file, bool close) {
+ if (close) {
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close file.";
+ return false;
+ }
+ return true;
+ } else {
+ file->Erase();
+ return false;
+ }
+}
+
bool PatchOat::Patch(const std::string& image_location, off_t delta,
File* output_image, InstructionSet isa,
TimingLogger* timings) {
@@ -195,11 +227,12 @@
LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
return false;
}
- gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpace();
+ // TODO: Support multi-image when patchoat is only patching images. Ever used? b/26317072
+ gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpaces()[0];
PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings);
t.NewTiming("Patching files");
- if (!p.PatchImage()) {
+ if (!p.PatchImage(true)) {
LOG(ERROR) << "Failed to patch image file " << input_image->GetPath();
return false;
}
@@ -214,7 +247,7 @@
bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t delta,
File* output_oat, File* output_image, InstructionSet isa,
TimingLogger* timings,
- bool output_oat_opened_from_fd,
+ bool output_oat_opened_from_fd ATTRIBUTE_UNUSED,
bool new_oat_out) {
CHECK(Runtime::Current() == nullptr);
CHECK(output_image != nullptr);
@@ -236,31 +269,6 @@
isa = GetInstructionSetFromELF(elf_hdr.e_machine, elf_hdr.e_flags);
}
const char* isa_name = GetInstructionSetString(isa);
- std::string image_filename;
- if (!LocationToFilename(image_location, isa, &image_filename)) {
- LOG(ERROR) << "Unable to find image at location " << image_location;
- return false;
- }
- std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str()));
- if (input_image.get() == nullptr) {
- LOG(ERROR) << "unable to open input image file at " << image_filename
- << " for location " << image_location;
- return false;
- }
- int64_t image_len = input_image->GetLength();
- if (image_len < 0) {
- LOG(ERROR) << "Error while getting image length";
- return false;
- }
- ImageHeader image_header;
- if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
- sizeof(image_header), 0)) {
- LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
- }
-
- /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
- // Nothing special to do right now since the image always needs to get patched.
- // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
// Set up the runtime
RuntimeOptions options;
@@ -279,70 +287,169 @@
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
ScopedObjectAccess soa(Thread::Current());
+ std::string output_directory =
+ output_image->GetPath().substr(0, output_image->GetPath().find_last_of("/"));
t.NewTiming("Image and oat Patching setup");
- // Create the map where we will write the image patches to.
- std::string error_msg;
- std::unique_ptr<MemMap> image(MemMap::MapFile(image_len,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg));
- if (image.get() == nullptr) {
- LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
- return false;
- }
- gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpace();
+ std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ std::map<gc::space::ImageSpace*, std::unique_ptr<File>> space_to_file_map;
+ std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>> space_to_memmap_map;
+ std::map<gc::space::ImageSpace*, PatchOat> space_to_patchoat_map;
+ std::map<gc::space::ImageSpace*, bool> space_to_skip_patching_map;
- std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat,
- PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
- if (elf.get() == nullptr) {
- LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg;
- return false;
- }
-
- bool skip_patching_oat = false;
- MaybePic is_oat_pic = IsOatPic(elf.get());
- if (is_oat_pic >= ERROR_FIRST) {
- // Error logged by IsOatPic
- return false;
- } else if (is_oat_pic == PIC) {
- // Do not need to do ELF-file patching. Create a symlink and skip the ELF patching.
- if (!ReplaceOatFileWithSymlink(input_oat->GetPath(),
- output_oat->GetPath(),
- output_oat_opened_from_fd,
- new_oat_out)) {
- // Errors already logged by above call.
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
+ std::string input_image_filename = space->GetImageFilename();
+ std::unique_ptr<File> input_image(OS::OpenFileForReading(input_image_filename.c_str()));
+ if (input_image.get() == nullptr) {
+ LOG(ERROR) << "Unable to open input image file at " << input_image_filename;
return false;
}
- // Don't patch the OAT, since we just symlinked it. Image still needs patching.
- skip_patching_oat = true;
- } else {
- CHECK(is_oat_pic == NOT_PIC);
+
+ int64_t image_len = input_image->GetLength();
+ if (image_len < 0) {
+ LOG(ERROR) << "Error while getting image length";
+ return false;
+ }
+ ImageHeader image_header;
+ if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
+ sizeof(image_header), 0)) {
+ LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
+ }
+
+ /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
+ // Nothing special to do right now since the image always needs to get patched.
+ // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
+
+ // Create the map where we will write the image patches to.
+ std::string error_msg;
+ std::unique_ptr<MemMap> image(MemMap::MapFile(image_len,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ input_image->Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image->GetPath().c_str(),
+ &error_msg));
+ if (image.get() == nullptr) {
+ LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
+ return false;
+ }
+ space_to_file_map.emplace(space, std::move(input_image));
+ space_to_memmap_map.emplace(space, std::move(image));
}
- PatchOat p(isa, elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
- delta, timings);
- t.NewTiming("Patching files");
- if (!skip_patching_oat && !p.PatchElf()) {
- LOG(ERROR) << "Failed to patch oat file " << input_oat->GetPath();
- return false;
- }
- if (!p.PatchImage()) {
- LOG(ERROR) << "Failed to patch image file " << input_image->GetPath();
- return false;
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
+ std::string input_image_filename = space->GetImageFilename();
+ std::string input_oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(input_image_filename);
+ std::unique_ptr<File> input_oat_file(OS::OpenFileForReading(input_oat_filename.c_str()));
+ if (input_oat_file.get() == nullptr) {
+ LOG(ERROR) << "Unable to open input oat file at " << input_oat_filename;
+ return false;
+ }
+ std::string error_msg;
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat_file.get(),
+ PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
+ if (elf.get() == nullptr) {
+ LOG(ERROR) << "Unable to open oat file " << input_oat_file->GetPath() << " : " << error_msg;
+ return false;
+ }
+
+ bool skip_patching_oat = false;
+ MaybePic is_oat_pic = IsOatPic(elf.get());
+ if (is_oat_pic >= ERROR_FIRST) {
+ // Error logged by IsOatPic
+ return false;
+ } else if (is_oat_pic == PIC) {
+ // Do not need to do ELF-file patching. Create a symlink and skip the ELF patching.
+
+ std::string converted_image_filename = space->GetImageLocation();
+ std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
+ std::string output_image_filename = output_directory +
+ (StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
+ std::string output_oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
+
+ if (!ReplaceOatFileWithSymlink(input_oat_file->GetPath(),
+ output_oat_filename,
+ false,
+ true)) {
+ // Errors already logged by above call.
+ return false;
+ }
+ // Don't patch the OAT, since we just symlinked it. Image still needs patching.
+ skip_patching_oat = true;
+ } else {
+ CHECK(is_oat_pic == NOT_PIC);
+ }
+
+ PatchOat& p = space_to_patchoat_map.emplace(space,
+ PatchOat(
+ isa,
+ elf.release(),
+ space_to_memmap_map.find(space)->second.get(),
+ space->GetLiveBitmap(),
+ space->GetMemMap(),
+ delta,
+ &space_to_memmap_map,
+ timings)).first->second;
+
+ t.NewTiming("Patching files");
+ if (!skip_patching_oat && !p.PatchElf()) {
+ LOG(ERROR) << "Failed to patch oat file " << input_oat_file->GetPath();
+ return false;
+ }
+ if (!p.PatchImage(i == 0)) {
+ LOG(ERROR) << "Failed to patch image file " << input_image_filename;
+ return false;
+ }
+
+ space_to_skip_patching_map.emplace(space, skip_patching_oat);
}
- t.NewTiming("Writing files");
- if (!skip_patching_oat && !p.WriteElf(output_oat)) {
- LOG(ERROR) << "Failed to write oat file " << input_oat->GetPath();
- return false;
- }
- if (!p.WriteImage(output_image)) {
- LOG(ERROR) << "Failed to write image file " << input_image->GetPath();
- return false;
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
+ std::string input_image_filename = space->GetImageFilename();
+
+ t.NewTiming("Writing files");
+ std::string converted_image_filename = space->GetImageLocation();
+ std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
+ std::string output_image_filename = output_directory +
+ (StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
+ std::unique_ptr<File>
+ output_image_file(CreateOrOpen(output_image_filename.c_str(), &new_oat_out));
+ if (output_image_file.get() == nullptr) {
+ LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
+ return false;
+ }
+
+ PatchOat& p = space_to_patchoat_map.find(space)->second;
+
+ if (!p.WriteImage(output_image_file.get())) {
+ LOG(ERROR) << "Failed to write image file " << output_image_file->GetPath();
+ return false;
+ }
+ FinishFile(output_image_file.get(), true);
+
+ bool skip_patching_oat = space_to_skip_patching_map.find(space)->second;
+ if (!skip_patching_oat) {
+ std::string output_oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
+ std::unique_ptr<File>
+ output_oat_file(CreateOrOpen(output_oat_filename.c_str(), &new_oat_out));
+ if (output_oat_file.get() == nullptr) {
+ LOG(ERROR) << "Failed to open output oat file at " << output_oat_filename;
+ return false;
+ }
+ if (!p.WriteElf(output_oat_file.get())) {
+ LOG(ERROR) << "Failed to write oat file " << output_oat_file->GetPath();
+ return false;
+ }
+ FinishFile(output_oat_file.get(), true);
+ }
}
return true;
}
@@ -616,7 +723,7 @@
}
}
-bool PatchOat::PatchImage() {
+bool PatchOat::PatchImage(bool primary_image) {
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
CHECK_GT(image_->Size(), sizeof(ImageHeader));
// These are the roots from the original file.
@@ -630,9 +737,12 @@
// Patch dex file int/long arrays which point to ArtFields.
PatchDexFileArrays(img_roots);
- VisitObject(img_roots);
+ if (primary_image) {
+ VisitObject(img_roots);
+ }
+
if (!image_header->IsValid()) {
- LOG(ERROR) << "reloction renders image header invalid";
+ LOG(ERROR) << "relocation renders image header invalid";
return false;
}
@@ -655,7 +765,8 @@
void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
- DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
+ // TODO: Modify check for multi-image support? b/26317072
+ // DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
@@ -664,7 +775,8 @@
mirror::Reference* ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
- DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
+ // TODO: Modify check for multi-image support? b/26317072
+ // DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
@@ -691,7 +803,7 @@
klass->FixupNativePointers(copy_klass, pointer_size, native_visitor);
auto* vtable = klass->GetVTable();
if (vtable != nullptr) {
- vtable->Fixup(RelocatedCopyOf(vtable), pointer_size, native_visitor);
+ vtable->Fixup(RelocatedCopyOfFollowImages(vtable), pointer_size, native_visitor);
}
auto* iftable = klass->GetIfTable();
if (iftable != nullptr) {
@@ -699,7 +811,9 @@
if (iftable->GetMethodArrayCount(i) > 0) {
auto* method_array = iftable->GetMethodArray(i);
CHECK(method_array != nullptr);
- method_array->Fixup(RelocatedCopyOf(method_array), pointer_size, native_visitor);
+ method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
+ pointer_size,
+ native_visitor);
}
}
}
@@ -972,38 +1086,6 @@
return true;
}
-static File* CreateOrOpen(const char* name, bool* created) {
- if (OS::FileExists(name)) {
- *created = false;
- return OS::OpenFileReadWrite(name);
- } else {
- *created = true;
- std::unique_ptr<File> f(OS::CreateEmptyFile(name));
- if (f.get() != nullptr) {
- if (fchmod(f->Fd(), 0644) != 0) {
- PLOG(ERROR) << "Unable to make " << name << " world readable";
- TEMP_FAILURE_RETRY(unlink(name));
- return nullptr;
- }
- }
- return f.release();
- }
-}
-
-// Either try to close the file (close=true), or erase it.
-static bool FinishFile(File* file, bool close) {
- if (close) {
- if (file->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close file.";
- return false;
- }
- return true;
- } else {
- file->Erase();
- return false;
- }
-}
-
static int patchoat(int argc, char **argv) {
InitLogging(argv);
MemMap::Init();
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 38bd865..cb0d14b 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -23,8 +23,10 @@
#include "elf_file.h"
#include "elf_utils.h"
#include "gc/accounting/space_bitmap.h"
+#include "gc/space/image_space.h"
#include "gc/heap.h"
#include "os.h"
+#include "runtime.h"
namespace art {
@@ -57,21 +59,23 @@
bool output_oat_opened_from_fd, // Was this using --oatput-oat-fd ?
bool new_oat_out); // Output oat was a new file created by us?
+ ~PatchOat() {}
+ PatchOat(PatchOat&&) = default;
+
private:
// Takes ownership only of the ElfFile. All other pointers are only borrowed.
PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings)
: oat_file_(oat_file), image_(nullptr), bitmap_(nullptr), heap_(nullptr), delta_(delta),
- isa_(kNone), timings_(timings) {}
+ isa_(kNone), space_map_(nullptr), timings_(timings) {}
PatchOat(InstructionSet isa, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
MemMap* heap, off_t delta, TimingLogger* timings)
: image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), isa_(isa), timings_(timings) {}
+ delta_(delta), isa_(isa), space_map_(nullptr), timings_(timings) {}
PatchOat(InstructionSet isa, ElfFile* oat_file, MemMap* image,
gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
- TimingLogger* timings)
+ std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* map, TimingLogger* timings)
: oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), isa_(isa), timings_(timings) {}
- ~PatchOat() {}
+ delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
// Was the .art image at image_path made with --compile-pic ?
static bool IsImagePic(const ImageHeader& image_header, const std::string& image_path);
@@ -111,7 +115,7 @@
template <typename ElfFileImpl>
bool PatchOatHeader(ElfFileImpl* oat_file);
- bool PatchImage() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
@@ -129,15 +133,34 @@
if (obj == nullptr) {
return nullptr;
}
- DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
- DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
+ // TODO: Fix these checks for multi-image. Some may still be valid. b/26317072
+ // DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
+ // DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
uintptr_t heap_off =
reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
- DCHECK_LT(heap_off, image_->Size());
+ // DCHECK_LT(heap_off, image_->Size());
return reinterpret_cast<T*>(image_->Begin() + heap_off);
}
template <typename T>
+ T* RelocatedCopyOfFollowImages(T* obj) const {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ // Find ImageSpace this belongs to.
+ auto image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ if (image_space->Contains(obj)) {
+ uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
+ reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
+ return reinterpret_cast<T*>(space_map_->find(image_space)->second->Begin() + heap_off);
+ }
+ }
+ LOG(FATAL) << "Did not find object in boot image space " << obj;
+ UNREACHABLE();
+ }
+
+ template <typename T>
T* RelocatedAddressOfPointer(T* obj) const {
if (obj == nullptr) {
return obj;
@@ -197,6 +220,8 @@
// Active instruction set, used to know the entrypoint size.
const InstructionSet isa_;
+ const std::map<gc::space::ImageSpace*, std::unique_ptr<MemMap>>* space_map_;
+
TimingLogger* timings_;
friend class FixupRootVisitor;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 4096117..de4314c 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -102,11 +102,13 @@
jdwp/jdwp_socket.cc \
jdwp/object_registry.cc \
jni_env_ext.cc \
+ jit/debugger_interface.cc \
jit/jit.cc \
jit/jit_code_cache.cc \
jit/jit_instrumentation.cc \
jit/offline_profiling_info.cc \
jit/profiling_info.cc \
+ jit/profile_saver.cc \
lambda/art_lambda_method.cc \
lambda/box_table.cc \
lambda/closure.cc \
@@ -362,6 +364,8 @@
thread_state.h \
verifier/method_verifier.h
+LIBOPENJDKJVM_SRC_FILES := openjdkjvm/OpenjdkJvm.cc
+
LIBART_CFLAGS := -DBUILDING_LIBART=1
LIBART_TARGET_CFLAGS :=
@@ -398,8 +402,9 @@
# $(1): target or host
# $(2): ndebug or debug
-# $(3): static or shared (empty means shared, applies only for host)
-define build-libart
+# $(3): static or shared (note that static only applies for host)
+# $(4): module name : either libart or libopenjdkjvm
+define build-runtime-library
ifneq ($(1),target)
ifneq ($(1),host)
$$(error expected target or host for argument 1, received $(1))
@@ -410,6 +415,11 @@
$$(error expected ndebug or debug for argument 2, received $(2))
endif
endif
+ ifneq ($(4),libart)
+ ifneq ($(4),libopenjdkjvm)
+ $$(error expected libart or libopenjdkjvm for argument 4, received $(4))
+ endif
+ endif
art_target_or_host := $(1)
art_ndebug_or_debug := $(2)
@@ -418,12 +428,12 @@
include $$(CLEAR_VARS)
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
- LOCAL_MODULE := libart
+ LOCAL_MODULE := $(4)
ifeq ($$(art_target_or_host),target)
LOCAL_FDO_SUPPORT := true
endif
else # debug
- LOCAL_MODULE := libartd
+ LOCAL_MODULE := $(4)d
endif
LOCAL_MODULE_TAGS := optional
@@ -434,17 +444,25 @@
LOCAL_MODULE_CLASS := SHARED_LIBRARIES
endif
- ifeq ($$(art_target_or_host),target)
- LOCAL_SRC_FILES := $$(LIBART_TARGET_SRC_FILES)
- $$(foreach arch,$$(ART_TARGET_SUPPORTED_ARCH), \
- $$(eval LOCAL_SRC_FILES_$$(arch) := $$$$(LIBART_TARGET_SRC_FILES_$$(arch))))
- else # host
- LOCAL_SRC_FILES := $$(LIBART_HOST_SRC_FILES)
- LOCAL_SRC_FILES_32 := $$(LIBART_HOST_SRC_FILES_32)
- LOCAL_SRC_FILES_64 := $$(LIBART_HOST_SRC_FILES_64)
- LOCAL_IS_HOST_MODULE := true
+ ifeq ($(4),libart)
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_SRC_FILES := $$(LIBART_TARGET_SRC_FILES)
+ $$(foreach arch,$$(ART_TARGET_SUPPORTED_ARCH), \
+ $$(eval LOCAL_SRC_FILES_$$(arch) := $$$$(LIBART_TARGET_SRC_FILES_$$(arch))))
+ else # host
+ LOCAL_SRC_FILES := $$(LIBART_HOST_SRC_FILES)
+ LOCAL_SRC_FILES_32 := $$(LIBART_HOST_SRC_FILES_32)
+ LOCAL_SRC_FILES_64 := $$(LIBART_HOST_SRC_FILES_64)
+ LOCAL_IS_HOST_MODULE := true
+ endif
+ else # libopenjdkjvm
+ LOCAL_SRC_FILES := $$(LIBOPENJDKJVM_SRC_FILES)
+ ifeq ($$(art_target_or_host),host)
+ LOCAL_IS_HOST_MODULE := true
+ endif
endif
+ifeq ($(4),libart)
GENERATED_SRC_DIR := $$(call local-generated-sources-dir)
ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,$$(LIBART_ENUM_OPERATOR_OUT_HEADER_FILES))
ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES))
@@ -455,6 +473,7 @@
$$(transform-generated-source)
LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
+endif
LOCAL_CFLAGS := $$(LIBART_CFLAGS)
LOCAL_LDFLAGS := $$(LIBART_LDFLAGS)
@@ -536,6 +555,15 @@
LOCAL_SHARED_LIBRARIES += libcutils
endif
endif
+
+ ifeq ($(4),libopenjdkjvm)
+ ifeq ($$(art_ndebug_or_debug),ndebug)
+ LOCAL_SHARED_LIBRARIES += libart
+ else
+ LOCAL_SHARED_LIBRARIES += libartd
+ endif
+ LOCAL_NOTICE_FILE := $(LOCAL_PATH)/openjdkjvm/NOTICE
+ endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $$(LOCAL_PATH)/Android.mk
@@ -572,24 +600,30 @@
# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since
# they are used to cross compile for the target.
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
- $(eval $(call build-libart,host,ndebug))
+ $(eval $(call build-runtime-library,host,ndebug,shared,libart))
+ $(eval $(call build-runtime-library,host,ndebug,shared,libopenjdkjvm))
ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-libart,host,ndebug,static))
+ $(eval $(call build-runtime-library,host,ndebug,static,libart))
+ $(eval $(call build-runtime-library,host,ndebug,static,libopenjdkjvm))
endif
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
- $(eval $(call build-libart,host,debug))
+ $(eval $(call build-runtime-library,host,debug,shared,libart))
+ $(eval $(call build-runtime-library,host,debug,shared,libopenjdkjvm))
ifeq ($(ART_BUILD_HOST_STATIC),true)
- $(eval $(call build-libart,host,debug,static))
+ $(eval $(call build-runtime-library,host,debug,static,libart))
+ $(eval $(call build-runtime-library,host,debug,static,libopenjdkjvm))
endif
endif
ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
-# $(error $(call build-libart,target,ndebug))
- $(eval $(call build-libart,target,ndebug))
+# $(error $(call build-runtime-library,target,ndebug))
+ $(eval $(call build-runtime-library,target,ndebug,shared,libart))
+ $(eval $(call build-runtime-library,target,ndebug,shared,libopenjdkjvm))
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
- $(eval $(call build-libart,target,debug))
+ $(eval $(call build-runtime-library,target,debug,shared,libart))
+ $(eval $(call build-runtime-library,target,debug,shared,libopenjdkjvm))
endif
# Clear locally defined variables.
@@ -620,4 +654,4 @@
LIBART_CFLAGS :=
LIBART_TARGET_CFLAGS :=
LIBART_HOST_CFLAGS :=
-build-libart :=
+build-runtime-library :=
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d6ba304..1680bbd 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -30,6 +30,13 @@
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
}
+ // Do not do any of the finalization. We don't want to run any code, we don't need the heap
+ // prepared, it actually will be a problem with setting the instruction set to x86_64 in
+ // SetUpRuntimeOptions.
+ void FinalizeSetup() OVERRIDE {
+ ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
+ }
+
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
Runtime* const runtime = Runtime::Current();
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 28d1942..51f992b 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -42,15 +42,15 @@
// Look for variants that have divide support.
static const char* arm_variants_with_div[] = {
"cortex-a7", "cortex-a12", "cortex-a15", "cortex-a17", "cortex-a53", "cortex-a57",
- "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
- "cyclone", "denver", "krait", "swift"};
+ "cortex-a53.a57", "cortex-m3", "cortex-m4", "cortex-r4", "cortex-r5",
+ "cyclone", "denver", "krait", "swift" };
bool has_div = FindVariantInArray(arm_variants_with_div, arraysize(arm_variants_with_div),
variant);
// Look for variants that have LPAE support.
static const char* arm_variants_with_lpae[] = {
- "cortex-a7", "cortex-a15", "krait", "denver"
+ "cortex-a7", "cortex-a15", "krait", "denver", "cortex-a53", "cortex-a57", "cortex-a53.a57"
};
bool has_lpae = FindVariantInArray(arm_variants_with_lpae, arraysize(arm_variants_with_lpae),
variant);
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index ef39999..42f5df4 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -45,6 +45,11 @@
"silvermont",
};
+static constexpr const char* x86_variants_prefer_locked_add_sync[] = {
+ "atom",
+ "silvermont",
+};
+
const X86InstructionSetFeatures* X86InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
bool x86_64) {
@@ -60,6 +65,10 @@
bool has_AVX = false;
bool has_AVX2 = false;
+ bool prefers_locked_add = FindVariantInArray(x86_variants_prefer_locked_add_sync,
+ arraysize(x86_variants_prefer_locked_add_sync),
+ variant);
+
bool known_variant = FindVariantInArray(x86_known_variants, arraysize(x86_known_variants),
variant);
if (!known_variant && variant != "default") {
@@ -68,10 +77,10 @@
if (x86_64) {
return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ has_AVX2, prefers_locked_add);
} else {
return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ has_AVX2, prefers_locked_add);
}
}
@@ -83,11 +92,13 @@
bool has_SSE4_2 = (bitmap & kSse4_2Bitfield) != 0;
bool has_AVX = (bitmap & kAvxBitfield) != 0;
bool has_AVX2 = (bitmap & kAvxBitfield) != 0;
+ bool prefers_locked_add = (bitmap & kPrefersLockedAdd) != 0;
if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
+ has_AVX, has_AVX2, prefers_locked_add);
} else {
- return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2,
+ has_AVX, has_AVX2, prefers_locked_add);
}
}
@@ -124,11 +135,15 @@
const bool has_AVX2 = true;
#endif
+ // No #define for memory synchronization preference.
+ const bool prefers_locked_add = false;
+
if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2, prefers_locked_add);
} else {
return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ has_AVX2, prefers_locked_add);
}
}
@@ -141,6 +156,8 @@
bool has_SSE4_2 = false;
bool has_AVX = false;
bool has_AVX2 = false;
+ // No cpuinfo for memory synchronization preference.
+ const bool prefers_locked_add = false;
std::ifstream in("/proc/cpuinfo");
if (!in.fail()) {
@@ -177,10 +194,11 @@
LOG(ERROR) << "Failed to open /proc/cpuinfo";
}
if (x86_64) {
- return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2);
+ return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2, prefers_locked_add);
} else {
return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ has_AVX2, prefers_locked_add);
}
}
@@ -204,7 +222,8 @@
(has_SSE4_1_ == other_as_x86->has_SSE4_1_) &&
(has_SSE4_2_ == other_as_x86->has_SSE4_2_) &&
(has_AVX_ == other_as_x86->has_AVX_) &&
- (has_AVX2_ == other_as_x86->has_AVX2_);
+ (has_AVX2_ == other_as_x86->has_AVX2_) &&
+ (prefers_locked_add_ == other_as_x86->prefers_locked_add_);
}
uint32_t X86InstructionSetFeatures::AsBitmap() const {
@@ -213,7 +232,8 @@
(has_SSE4_1_ ? kSse4_1Bitfield : 0) |
(has_SSE4_2_ ? kSse4_2Bitfield : 0) |
(has_AVX_ ? kAvxBitfield : 0) |
- (has_AVX2_ ? kAvx2Bitfield : 0);
+ (has_AVX2_ ? kAvx2Bitfield : 0) |
+ (prefers_locked_add_ ? kPrefersLockedAdd : 0);
}
std::string X86InstructionSetFeatures::GetFeatureString() const {
@@ -248,6 +268,11 @@
} else {
result += ",-avx2";
}
+ if (prefers_locked_add_) {
+ result += ",lock_add";
+ } else {
+ result += ",-lock_add";
+ }
return result;
}
@@ -259,6 +284,7 @@
bool has_SSE4_2 = has_SSE4_2_;
bool has_AVX = has_AVX_;
bool has_AVX2 = has_AVX2_;
+ bool prefers_locked_add = prefers_locked_add_;
for (auto i = features.begin(); i != features.end(); i++) {
std::string feature = Trim(*i);
if (feature == "ssse3") {
@@ -281,6 +307,10 @@
has_AVX2 = true;
} else if (feature == "-avx2") {
has_AVX2 = false;
+ } else if (feature == "lock_add") {
+ prefers_locked_add = true;
+ } else if (feature == "-lock_add") {
+ prefers_locked_add = false;
} else {
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
@@ -288,10 +318,10 @@
}
if (x86_64) {
return new X86_64InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ has_AVX2, prefers_locked_add);
} else {
return new X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
- has_AVX2);
+ has_AVX2, prefers_locked_add);
}
}
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 7b61245..2b845f8 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -60,6 +60,8 @@
bool HasSSE4_1() const { return has_SSE4_1_; }
+ bool PrefersLockedAddSynchronization() const { return prefers_locked_add_; }
+
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
virtual const InstructionSetFeatures*
@@ -73,9 +75,10 @@
bool x86_64, std::string* error_msg) const;
X86InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2,
- bool has_AVX, bool has_AVX2)
+ bool has_AVX, bool has_AVX2, bool prefers_locked_add)
: InstructionSetFeatures(smp), has_SSSE3_(has_SSSE3), has_SSE4_1_(has_SSE4_1),
- has_SSE4_2_(has_SSE4_2), has_AVX_(has_AVX), has_AVX2_(has_AVX2) {
+ has_SSE4_2_(has_SSE4_2), has_AVX_(has_AVX), has_AVX2_(has_AVX2),
+ prefers_locked_add_(prefers_locked_add) {
}
private:
@@ -87,6 +90,7 @@
kSse4_2Bitfield = 8,
kAvxBitfield = 16,
kAvx2Bitfield = 32,
+ kPrefersLockedAdd = 64,
};
const bool has_SSSE3_; // x86 128bit SIMD - Supplemental SSE.
@@ -94,6 +98,7 @@
const bool has_SSE4_2_; // x86 128bit SIMD SSE4.2.
const bool has_AVX_; // x86 256bit SIMD AVX.
const bool has_AVX2_; // x86 256bit SIMD AVX 2.0.
+ const bool prefers_locked_add_; // x86 use locked add for memory synchronization.
DISALLOW_COPY_AND_ASSIGN(X86InstructionSetFeatures);
};
diff --git a/runtime/arch/x86/instruction_set_features_x86_test.cc b/runtime/arch/x86/instruction_set_features_x86_test.cc
index 25a406b..e8d01e6 100644
--- a/runtime/arch/x86/instruction_set_features_x86_test.cc
+++ b/runtime/arch/x86/instruction_set_features_x86_test.cc
@@ -27,7 +27,8 @@
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
- EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-lock_add",
+ x86_features->GetFeatureString().c_str());
EXPECT_EQ(x86_features->AsBitmap(), 1U);
}
@@ -39,8 +40,9 @@
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
- EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_features->AsBitmap(), 3U);
+ EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2,lock_add",
+ x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 67U);
// Build features for a 32-bit x86 default processor.
std::unique_ptr<const InstructionSetFeatures> x86_default_features(
@@ -48,7 +50,7 @@
ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
- EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-lock_add",
x86_default_features->GetFeatureString().c_str());
EXPECT_EQ(x86_default_features->AsBitmap(), 1U);
@@ -58,9 +60,9 @@
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
- EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ EXPECT_STREQ("smp,ssse3,-sse4.1,-sse4.2,-avx,-avx2,lock_add",
x86_64_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_64_features->AsBitmap(), 3U);
+ EXPECT_EQ(x86_64_features->AsBitmap(), 67U);
EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
@@ -75,8 +77,9 @@
ASSERT_TRUE(x86_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_features->GetInstructionSet(), kX86);
EXPECT_TRUE(x86_features->Equals(x86_features.get()));
- EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2", x86_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_features->AsBitmap(), 15U);
+ EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2,lock_add",
+ x86_features->GetFeatureString().c_str());
+ EXPECT_EQ(x86_features->AsBitmap(), 79U);
// Build features for a 32-bit x86 default processor.
std::unique_ptr<const InstructionSetFeatures> x86_default_features(
@@ -84,7 +87,7 @@
ASSERT_TRUE(x86_default_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_default_features->GetInstructionSet(), kX86);
EXPECT_TRUE(x86_default_features->Equals(x86_default_features.get()));
- EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-lock_add",
x86_default_features->GetFeatureString().c_str());
EXPECT_EQ(x86_default_features->AsBitmap(), 1U);
@@ -94,9 +97,9 @@
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
- EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2",
+ EXPECT_STREQ("smp,ssse3,sse4.1,sse4.2,-avx,-avx2,lock_add",
x86_64_features->GetFeatureString().c_str());
- EXPECT_EQ(x86_64_features->AsBitmap(), 15U);
+ EXPECT_EQ(x86_64_features->AsBitmap(), 79U);
EXPECT_FALSE(x86_64_features->Equals(x86_features.get()));
EXPECT_FALSE(x86_64_features->Equals(x86_default_features.get()));
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64.h b/runtime/arch/x86_64/instruction_set_features_x86_64.h
index 3280177..b8000d0 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64.h
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64.h
@@ -74,8 +74,9 @@
private:
X86_64InstructionSetFeatures(bool smp, bool has_SSSE3, bool has_SSE4_1, bool has_SSE4_2,
- bool has_AVX, bool has_AVX2)
- : X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX, has_AVX2) {
+ bool has_AVX, bool has_AVX2, bool prefers_locked_add)
+ : X86InstructionSetFeatures(smp, has_SSSE3, has_SSE4_1, has_SSE4_2, has_AVX,
+ has_AVX2, prefers_locked_add) {
}
friend class X86InstructionSetFeatures;
diff --git a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
index 5171080..4562c64 100644
--- a/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
+++ b/runtime/arch/x86_64/instruction_set_features_x86_64_test.cc
@@ -27,7 +27,7 @@
ASSERT_TRUE(x86_64_features.get() != nullptr) << error_msg;
EXPECT_EQ(x86_64_features->GetInstructionSet(), kX86_64);
EXPECT_TRUE(x86_64_features->Equals(x86_64_features.get()));
- EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2",
+ EXPECT_STREQ("smp,-ssse3,-sse4.1,-sse4.2,-avx,-avx2,-lock_add",
x86_64_features->GetFeatureString().c_str());
EXPECT_EQ(x86_64_features->AsBitmap(), 1U);
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 238d9f3..effa1c5 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -146,10 +146,9 @@
mirror::IfTable* iftable = GetDeclaringClass()->GetIfTable();
for (size_t i = 0; i < iftable->Count() && result == nullptr; i++) {
mirror::Class* interface = iftable->GetInterface(i);
- for (size_t j = 0; j < interface->NumVirtualMethods(); ++j) {
- ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size);
- if (HasSameNameAndSignature(interface_method->GetInterfaceMethodIfProxy(sizeof(void*)))) {
- result = interface_method;
+ for (ArtMethod& interface_method : interface->GetVirtualMethods(pointer_size)) {
+ if (HasSameNameAndSignature(interface_method.GetInterfaceMethodIfProxy(pointer_size))) {
+ result = &interface_method;
break;
}
}
@@ -157,8 +156,8 @@
}
}
DCHECK(result == nullptr ||
- GetInterfaceMethodIfProxy(sizeof(void*))->HasSameNameAndSignature(
- result->GetInterfaceMethodIfProxy(sizeof(void*))));
+ GetInterfaceMethodIfProxy(pointer_size)->HasSameNameAndSignature(
+ result->GetInterfaceMethodIfProxy(pointer_size)));
return result;
}
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index c86614c..2b4826e 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -160,16 +160,16 @@
ADD_TEST_EQ(size_t(MIRROR_OBJECT_HEADER_SIZE), sizeof(art::mirror::Object))
// Offsets within java.lang.Class.
-#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
art::mirror::Class::ComponentTypeOffset().Int32Value())
-#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (72 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (36 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (116 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 87de506..0faa3c6 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -199,6 +199,11 @@
return this->load(std::memory_order_relaxed);
}
+ // Load from memory with acquire ordering.
+ T LoadAcquire() const {
+ return this->load(std::memory_order_acquire);
+ }
+
// Word tearing allowed, but may race.
// TODO: Optimize?
// There has been some discussion of eventually disallowing word
diff --git a/runtime/base/array_slice.h b/runtime/base/array_slice.h
new file mode 100644
index 0000000..19ad302
--- /dev/null
+++ b/runtime/base/array_slice.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_ARRAY_SLICE_H_
+#define ART_RUNTIME_BASE_ARRAY_SLICE_H_
+
+#include "length_prefixed_array.h"
+#include "stride_iterator.h"
+#include "base/bit_utils.h"
+#include "base/casts.h"
+#include "base/iteration_range.h"
+
+namespace art {
+
+// An ArraySlice is an abstraction over an array or a part of an array of a particular type. It does
+// bounds checking and can be made from several common array-like structures in Art.
+template<typename T>
+class ArraySlice {
+ public:
+ // Create an empty array slice.
+ ArraySlice() : array_(nullptr), size_(0), element_size_(0) {}
+
+ // Create an array slice of the first 'length' elements of the array, with each element being
+ // element_size bytes long.
+ ArraySlice(T* array,
+ size_t length,
+ size_t element_size = sizeof(T))
+ : array_(array),
+ size_(dchecked_integral_cast<uint32_t>(length)),
+ element_size_(element_size) {
+ DCHECK(array_ != nullptr || length == 0);
+ }
+
+ // Create an array slice of the elements between start_offset and end_offset of the array with
+ // each element being element_size bytes long. Both start_offset and end_offset are in
+ // element_size units.
+ ArraySlice(T* array,
+ uint32_t start_offset,
+ uint32_t end_offset,
+ size_t element_size = sizeof(T))
+ : array_(nullptr),
+ size_(end_offset - start_offset),
+ element_size_(element_size) {
+ DCHECK(array_ != nullptr || size_ == 0);
+ DCHECK_LE(start_offset, end_offset);
+ if (size_ != 0) {
+ uintptr_t offset = start_offset * element_size_;
+ array_ = *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array) + offset);
+ }
+ }
+
+ // Create an array slice of the elements between start_offset and end_offset of the array with
+ // each element being element_size bytes long and having the given alignment. Both start_offset
+ // and end_offset are in element_size units.
+ ArraySlice(LengthPrefixedArray<T>* array,
+ uint32_t start_offset,
+ uint32_t end_offset,
+ size_t element_size = sizeof(T),
+ size_t alignment = alignof(T))
+ : array_(nullptr),
+ size_(end_offset - start_offset),
+ element_size_(element_size) {
+ DCHECK(array != nullptr || size_ == 0);
+ if (size_ != 0) {
+ DCHECK_LE(start_offset, end_offset);
+ DCHECK_LE(start_offset, array->size());
+ DCHECK_LE(end_offset, array->size());
+ array_ = &array->At(start_offset, element_size_, alignment);
+ }
+ }
+
+ T& At(size_t index) {
+ DCHECK_LT(index, size_);
+ return AtUnchecked(index);
+ }
+
+ const T& At(size_t index) const {
+ DCHECK_LT(index, size_);
+ return AtUnchecked(index);
+ }
+
+ T& operator[](size_t index) {
+ return At(index);
+ }
+
+ const T& operator[](size_t index) const {
+ return At(index);
+ }
+
+ StrideIterator<T> begin() {
+ return StrideIterator<T>(&AtUnchecked(0), element_size_);
+ }
+
+ StrideIterator<const T> begin() const {
+ return StrideIterator<const T>(&AtUnchecked(0), element_size_);
+ }
+
+ StrideIterator<T> end() {
+ return StrideIterator<T>(&AtUnchecked(size_), element_size_);
+ }
+
+ StrideIterator<const T> end() const {
+ return StrideIterator<const T>(&AtUnchecked(size_), element_size_);
+ }
+
+ IterationRange<StrideIterator<T>> AsRange() {
+ return size() != 0 ? MakeIterationRange(begin(), end())
+ : MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
+ }
+
+ size_t size() const {
+ return size_;
+ }
+
+ size_t ElementSize() const {
+ return element_size_;
+ }
+
+ private:
+ T& AtUnchecked(size_t index) {
+ return *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array_) + index * element_size_);
+ }
+
+ const T& AtUnchecked(size_t index) const {
+ return *reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(array_) + index * element_size_);
+ }
+
+ T* array_;
+ size_t size_;
+ size_t element_size_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_ARRAY_SLICE_H_
diff --git a/runtime/length_prefixed_array.h b/runtime/base/length_prefixed_array.h
similarity index 95%
rename from runtime/length_prefixed_array.h
rename to runtime/base/length_prefixed_array.h
index e01b6cc..d632871 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/base/length_prefixed_array.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_LENGTH_PREFIXED_ARRAY_H_
-#define ART_RUNTIME_LENGTH_PREFIXED_ARRAY_H_
+#ifndef ART_RUNTIME_BASE_LENGTH_PREFIXED_ARRAY_H_
+#define ART_RUNTIME_BASE_LENGTH_PREFIXED_ARRAY_H_
#include <stddef.h> // for offsetof()
@@ -110,4 +110,4 @@
} // namespace art
-#endif // ART_RUNTIME_LENGTH_PREFIXED_ARRAY_H_
+#endif // ART_RUNTIME_BASE_LENGTH_PREFIXED_ARRAY_H_
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d4c9057..263f50d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -63,6 +63,9 @@
kLambdaTableLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
+ kRosAllocGlobalLock,
+ kRosAllocBracketLock,
+ kRosAllocBulkFreeLock,
kTransactionLogLock,
kMarkSweepMarkStackLock,
kJniWeakGlobalsLock,
@@ -73,9 +76,6 @@
kReferenceQueueClearedReferencesLock,
kReferenceProcessorLock,
kJitCodeCacheLock,
- kRosAllocGlobalLock,
- kRosAllocBracketLock,
- kRosAllocBulkFreeLock,
kAllocSpaceLock,
kBumpPointerSpaceBlockLock,
kArenaPoolLock,
diff --git a/runtime/base/time_utils.cc b/runtime/base/time_utils.cc
index 48b0a09..b7cf207 100644
--- a/runtime/base/time_utils.cc
+++ b/runtime/base/time_utils.cc
@@ -174,8 +174,6 @@
}
void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
- int64_t endSec;
-
if (absolute) {
#if !defined(__APPLE__)
clock_gettime(clock, ts);
@@ -190,13 +188,13 @@
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
- endSec = ts->tv_sec + ms / 1000;
- if (UNLIKELY(endSec >= 0x7fffffff)) {
- std::ostringstream ss;
- LOG(INFO) << "Note: end time exceeds epoch: " << ss.str();
- endSec = 0x7ffffffe;
+
+ int64_t end_sec = ts->tv_sec + ms / 1000;
+ if (UNLIKELY(end_sec >= 0x7fffffff)) {
+ LOG(INFO) << "Note: end time exceeds INT32_MAX: " << end_sec;
+ end_sec = 0x7ffffffe;
}
- ts->tv_sec = endSec;
+ ts->tv_sec = end_sec;
ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
// Catch rollover.
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index a5d10b2..ea1afa8 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -30,7 +30,7 @@
namespace art {
inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* descriptor) {
- return FindClass(self, descriptor, NullHandle<mirror::ClassLoader>());
+ return FindClass(self, descriptor, ScopedNullHandle<mirror::ClassLoader>());
}
inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** element_class) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f5085ed..0518911 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -330,7 +330,7 @@
Runtime* const runtime = Runtime::Current();
gc::Heap* const heap = runtime->GetHeap();
- CHECK(!heap->HasImageSpace()) << "Runtime has image. We should use it.";
+ CHECK(!heap->HasBootImageSpace()) << "Runtime has image. We should use it.";
CHECK(!init_done_);
// Use the pointer size from the runtime since we are probably creating the image.
@@ -736,7 +736,7 @@
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
- gc::space::ImageSpace* space)
+ std::vector<gc::space::ImageSpace*> spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (m->IsRuntimeMethod()) {
CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m);
@@ -745,18 +745,22 @@
} else if (expected_class != nullptr) {
CHECK_EQ(m->GetDeclaringClassUnchecked(), expected_class) << PrettyMethod(m);
}
- if (space != nullptr) {
- auto& header = space->GetImageHeader();
- auto& methods = header.GetMethodsSection();
- auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
- CHECK(methods.Contains(offset)) << m << " not in " << methods;
+ if (!spaces.empty()) {
+ bool contains = false;
+ for (gc::space::ImageSpace* space : spaces) {
+ auto& header = space->GetImageHeader();
+ auto& methods = header.GetMethodsSection();
+ auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin();
+ contains |= methods.Contains(offset);
+ }
+ CHECK(contains) << m << " not found";
}
}
static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr,
mirror::Class* expected_class,
size_t pointer_size,
- gc::space::ImageSpace* space)
+ std::vector<gc::space::ImageSpace*> spaces)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(arr != nullptr);
for (int32_t j = 0; j < arr->GetLength(); ++j) {
@@ -766,11 +770,12 @@
CHECK(method != nullptr);
}
if (method != nullptr) {
- SanityCheckArtMethod(method, expected_class, space);
+ SanityCheckArtMethod(method, expected_class, spaces);
}
}
}
+/* TODO: Modify check to support multiple image spaces and reenable. b/26317072
static void SanityCheckArtMethodPointerArray(
ArtMethod** arr,
size_t size,
@@ -790,6 +795,7 @@
}
}
}
+*/
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -805,32 +811,30 @@
CHECK_EQ(field.GetDeclaringClass(), klass);
}
auto* runtime = Runtime::Current();
- auto* image_space = runtime->GetHeap()->GetBootImageSpace();
+ auto image_spaces = runtime->GetHeap()->GetBootImageSpaces();
auto pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
- for (auto& m : klass->GetDirectMethods(pointer_size)) {
- SanityCheckArtMethod(&m, klass, image_space);
- }
- for (auto& m : klass->GetVirtualMethods(pointer_size)) {
- SanityCheckArtMethod(&m, klass, image_space);
+ for (auto& m : klass->GetMethods(pointer_size)) {
+ SanityCheckArtMethod(&m, klass, image_spaces);
}
auto* vtable = klass->GetVTable();
if (vtable != nullptr) {
- SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_space);
+ SanityCheckArtMethodPointerArray(vtable, nullptr, pointer_size, image_spaces);
}
if (klass->ShouldHaveEmbeddedImtAndVTable()) {
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
- SanityCheckArtMethod(klass->GetEmbeddedImTableEntry(i, pointer_size), nullptr, image_space);
+ SanityCheckArtMethod(
+ klass->GetEmbeddedImTableEntry(i, pointer_size), nullptr, image_spaces);
}
for (int32_t i = 0; i < klass->GetEmbeddedVTableLength(); ++i) {
- SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_space);
+ SanityCheckArtMethod(klass->GetEmbeddedVTableEntry(i, pointer_size), nullptr, image_spaces);
}
}
auto* iftable = klass->GetIfTable();
if (iftable != nullptr) {
for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
if (iftable->GetMethodArrayCount(i) > 0) {
- SanityCheckArtMethodPointerArray(iftable->GetMethodArray(i), nullptr, pointer_size,
- image_space);
+ SanityCheckArtMethodPointerArray(
+ iftable->GetMethodArray(i), nullptr, pointer_size, image_spaces);
}
}
}
@@ -859,6 +863,33 @@
DISALLOW_COPY_AND_ASSIGN(SetInterpreterEntrypointArtMethodVisitor);
};
+struct TrampolineCheckData {
+ const void* quick_resolution_trampoline;
+ const void* quick_imt_conflict_trampoline;
+ const void* quick_generic_jni_trampoline;
+ const void* quick_to_interpreter_bridge_trampoline;
+ size_t pointer_size;
+ ArtMethod* m;
+ bool error;
+};
+static void CheckTrampolines(mirror::Object* obj, void* arg) NO_THREAD_SAFETY_ANALYSIS {
+ if (obj->IsClass()) {
+ mirror::Class* klass = obj->AsClass();
+ TrampolineCheckData* d = reinterpret_cast<TrampolineCheckData*>(arg);
+ for (ArtMethod& m : klass->GetMethods(d->pointer_size)) {
+ const void* entrypoint = m.GetEntryPointFromQuickCompiledCodePtrSize(d->pointer_size);
+ if (entrypoint == d->quick_resolution_trampoline ||
+ entrypoint == d->quick_imt_conflict_trampoline ||
+ entrypoint == d->quick_generic_jni_trampoline ||
+ entrypoint == d->quick_to_interpreter_bridge_trampoline) {
+ d->m = &m;
+ d->error = true;
+ return;
+ }
+ }
+ }
+}
+
bool ClassLinker::InitFromImage(std::string* error_msg) {
VLOG(startup) << "ClassLinker::InitFromImage entering";
CHECK(!init_done_);
@@ -866,28 +897,71 @@
Runtime* const runtime = Runtime::Current();
Thread* const self = Thread::Current();
gc::Heap* const heap = runtime->GetHeap();
- gc::space::ImageSpace* const space = heap->GetBootImageSpace();
- CHECK(space != nullptr);
- image_pointer_size_ = space->GetImageHeader().GetPointerSize();
+ std::vector<gc::space::ImageSpace*> spaces = heap->GetBootImageSpaces();
+ CHECK(!spaces.empty());
+ image_pointer_size_ = spaces[0]->GetImageHeader().GetPointerSize();
dex_cache_boot_image_class_lookup_required_ = true;
- const OatFile* oat_file = runtime->GetOatFileManager().RegisterImageOatFile(space);
- DCHECK(oat_file != nullptr);
- CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
- CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
- const char* image_file_location = oat_file->GetOatHeader().
+ std::vector<const OatFile*> oat_files =
+ runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
+ DCHECK(!oat_files.empty());
+ const OatHeader& default_oat_header = oat_files[0]->GetOatHeader();
+ CHECK_EQ(default_oat_header.GetImageFileLocationOatChecksum(), 0U);
+ CHECK_EQ(default_oat_header.GetImageFileLocationOatDataBegin(), 0U);
+ const char* image_file_location = oat_files[0]->GetOatHeader().
GetStoreValueByKey(OatHeader::kImageLocationKey);
CHECK(image_file_location == nullptr || *image_file_location == 0);
- quick_resolution_trampoline_ = oat_file->GetOatHeader().GetQuickResolutionTrampoline();
- quick_imt_conflict_trampoline_ = oat_file->GetOatHeader().GetQuickImtConflictTrampoline();
- quick_generic_jni_trampoline_ = oat_file->GetOatHeader().GetQuickGenericJniTrampoline();
- quick_to_interpreter_bridge_trampoline_ = oat_file->GetOatHeader().GetQuickToInterpreterBridge();
- StackHandleScope<2> hs(self);
- mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches(
- hs.NewHandle(dex_caches_object->AsObjectArray<mirror::DexCache>()));
+ quick_resolution_trampoline_ = default_oat_header.GetQuickResolutionTrampoline();
+ quick_imt_conflict_trampoline_ = default_oat_header.GetQuickImtConflictTrampoline();
+ quick_generic_jni_trampoline_ = default_oat_header.GetQuickGenericJniTrampoline();
+ quick_to_interpreter_bridge_trampoline_ = default_oat_header.GetQuickToInterpreterBridge();
+ if (kIsDebugBuild) {
+ // Check that the other images use the same trampoline.
+ for (size_t i = 1; i < oat_files.size(); ++i) {
+ const OatHeader& ith_oat_header = oat_files[i]->GetOatHeader();
+ const void* ith_quick_resolution_trampoline =
+ ith_oat_header.GetQuickResolutionTrampoline();
+ const void* ith_quick_imt_conflict_trampoline =
+ ith_oat_header.GetQuickImtConflictTrampoline();
+ const void* ith_quick_generic_jni_trampoline =
+ ith_oat_header.GetQuickGenericJniTrampoline();
+ const void* ith_quick_to_interpreter_bridge_trampoline =
+ ith_oat_header.GetQuickToInterpreterBridge();
+ if (ith_quick_resolution_trampoline != quick_resolution_trampoline_ ||
+ ith_quick_imt_conflict_trampoline != quick_imt_conflict_trampoline_ ||
+ ith_quick_generic_jni_trampoline != quick_generic_jni_trampoline_ ||
+ ith_quick_to_interpreter_bridge_trampoline != quick_to_interpreter_bridge_trampoline_) {
+ // Make sure that all methods in this image do not contain those trampolines as
+ // entrypoints. Otherwise the class-linker won't be able to work with a single set.
+ TrampolineCheckData data;
+ data.error = false;
+ data.pointer_size = GetImagePointerSize();
+ data.quick_resolution_trampoline = ith_quick_resolution_trampoline;
+ data.quick_imt_conflict_trampoline = ith_quick_imt_conflict_trampoline;
+ data.quick_generic_jni_trampoline = ith_quick_generic_jni_trampoline;
+ data.quick_to_interpreter_bridge_trampoline = ith_quick_to_interpreter_bridge_trampoline;
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ spaces[i]->GetLiveBitmap()->Walk(CheckTrampolines, &data);
+ if (data.error) {
+ ArtMethod* m = data.m;
+ LOG(ERROR) << "Found a broken ArtMethod: " << PrettyMethod(m);
+ *error_msg = "Found an ArtMethod with a bad entrypoint";
+ return false;
+ }
+ }
+ }
+ }
- Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
- space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
+ StackHandleScopeCollection handles(self);
+ std::vector<Handle<mirror::ObjectArray<mirror::DexCache>>> dex_caches_vector;
+ for (gc::space::ImageSpace* space : spaces) {
+ Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches(handles.NewHandle(
+ space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches)->
+ AsObjectArray<mirror::DexCache>()));
+ dex_caches_vector.push_back(dex_caches);
+ }
+
+ Handle<mirror::ObjectArray<mirror::Class>> class_roots(handles.NewHandle(
+ spaces[0]->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
AsObjectArray<mirror::Class>()));
class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get());
@@ -899,56 +973,70 @@
java_lang_Object->SetObjectSize(sizeof(mirror::Object));
// Allocate in non-movable so that it's possible to check if a JNI weak global ref has been
// cleared without triggering the read barrier and unintentionally mark the sentinel alive.
- runtime->SetSentinel(heap->AllocNonMovableObject<true>(self,
- java_lang_Object,
- java_lang_Object->GetObjectSize(),
- VoidFunctor()));
+ runtime->SetSentinel(heap->AllocNonMovableObject<true>(
+ self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
- if (oat_file->GetOatHeader().GetDexFileCount() !=
- static_cast<uint32_t>(dex_caches->GetLength())) {
+ uint32_t dex_file_count = 0;
+ for (const OatFile* oat_file : oat_files) {
+ dex_file_count += oat_file->GetOatHeader().GetDexFileCount();
+ }
+ uint32_t dex_caches_count = 0;
+ for (auto dex_caches : dex_caches_vector) {
+ dex_caches_count += dex_caches->GetLength();
+ }
+ if (dex_file_count != dex_caches_count) {
*error_msg = "Dex cache count and dex file count mismatch while trying to initialize from "
"image";
return false;
}
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- StackHandleScope<1> hs2(self);
- Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i)));
- const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location.c_str(),
- nullptr);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Failed finding oat dex file for %s %s",
- oat_file->GetLocation().c_str(),
- dex_file_location.c_str());
- return false;
- }
- std::string inner_error_msg;
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&inner_error_msg);
- if (dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to open dex file %s from within oat file %s error '%s'",
- dex_file_location.c_str(),
- oat_file->GetLocation().c_str(),
- inner_error_msg.c_str());
- return false;
- }
+ for (auto dex_caches : dex_caches_vector) {
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ StackHandleScope<1> hs2(self);
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i)));
+ const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
+ const OatFile::OatDexFile* oat_dex_file = nullptr;
+ for (const OatFile* oat_file : oat_files) {
+ const OatFile::OatDexFile* oat_dex =
+ oat_file->GetOatDexFile(dex_file_location.c_str(), nullptr, false);
+ if (oat_dex != nullptr) {
+ DCHECK(oat_dex_file == nullptr);
+ oat_dex_file = oat_dex;
+ }
+ }
- if (kSanityCheckObjects) {
- SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(),
- dex_cache->NumResolvedMethods(),
- image_pointer_size_,
- space);
- }
+ if (oat_dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed finding oat dex file for %s",
+ dex_file_location.c_str());
+ return false;
+ }
+ std::string inner_error_msg;
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&inner_error_msg);
+ if (dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file %s error '%s'",
+ dex_file_location.c_str(),
+ inner_error_msg.c_str());
+ return false;
+ }
- if (dex_file->GetLocationChecksum() != oat_dex_file->GetDexFileLocationChecksum()) {
- *error_msg = StringPrintf("Checksums do not match for %s: %x vs %x",
- dex_file_location.c_str(),
- dex_file->GetLocationChecksum(),
- oat_dex_file->GetDexFileLocationChecksum());
- return false;
- }
+ // TODO: Modify check to support multiple image spaces and reenable.
+// if (kSanityCheckObjects) {
+// SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(),
+// dex_cache->NumResolvedMethods(),
+// image_pointer_size_,
+// spaces);
+// }
- AppendToBootClassPath(*dex_file.get(), dex_cache);
- opened_dex_files_.push_back(std::move(dex_file));
+ if (dex_file->GetLocationChecksum() != oat_dex_file->GetDexFileLocationChecksum()) {
+ *error_msg = StringPrintf("Checksums do not match for %s: %x vs %x",
+ dex_file_location.c_str(),
+ dex_file->GetLocationChecksum(),
+ oat_dex_file->GetDexFileLocationChecksum());
+ return false;
+ }
+
+ AppendToBootClassPath(*dex_file.get(), dex_cache);
+ opened_dex_files_.push_back(std::move(dex_file));
+ }
}
if (!ValidPointerSize(image_pointer_size_)) {
@@ -971,12 +1059,14 @@
}
if (kSanityCheckObjects) {
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- auto* dex_cache = dex_caches->Get(i);
- for (size_t j = 0; j < dex_cache->NumResolvedFields(); ++j) {
- auto* field = dex_cache->GetResolvedField(j, image_pointer_size_);
- if (field != nullptr) {
- CHECK(field->GetDeclaringClass()->GetClass() != nullptr);
+ for (auto dex_caches : dex_caches_vector) {
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ auto* dex_cache = dex_caches->Get(i);
+ for (size_t j = 0; j < dex_cache->NumResolvedFields(); ++j) {
+ auto* field = dex_cache->GetResolvedField(j, image_pointer_size_);
+ if (field != nullptr) {
+ CHECK(field->GetDeclaringClass()->GetClass() != nullptr);
+ }
}
}
}
@@ -985,10 +1075,12 @@
// Set entry point to interpreter if in InterpretOnly mode.
if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) {
- const ImageHeader& header = space->GetImageHeader();
- const ImageSection& methods = header.GetMethodsSection();
- SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
- methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
+ for (gc::space::ImageSpace* space : spaces) {
+ const ImageHeader& header = space->GetImageHeader();
+ const ImageSection& methods = header.GetMethodsSection();
+ SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_);
+ methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_);
+ }
}
// reinit class_roots_
@@ -1017,13 +1109,15 @@
mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable));
mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
- const ImageHeader& header = space->GetImageHeader();
- const ImageSection& section = header.GetImageSection(ImageHeader::kSectionClassTable);
- if (section.Size() > 0u) {
- WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
- class_table->ReadFromMemory(space->Begin() + section.Offset());
- dex_cache_boot_image_class_lookup_required_ = false;
+ for (gc::space::ImageSpace* space : spaces) {
+ const ImageHeader& header = space->GetImageHeader();
+ const ImageSection& section = header.GetImageSection(ImageHeader::kSectionClassTable);
+ if (section.Size() > 0u) {
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ClassTable* const class_table = InsertClassTableForClassLoader(nullptr);
+ class_table->ReadFromMemory(space->Begin() + section.Offset());
+ dex_cache_boot_image_class_lookup_required_ = false;
+ }
}
FinishInit(self);
@@ -1442,8 +1536,12 @@
if (klass != nullptr) {
*result = EnsureResolved(self, descriptor, klass);
} else {
- *result = DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(),
- *pair.first, *pair.second);
+ *result = DefineClass(self,
+ descriptor,
+ hash,
+ ScopedNullHandle<mirror::ClassLoader>(),
+ *pair.first,
+ *pair.second);
}
if (*result == nullptr) {
CHECK(self->IsExceptionPending()) << descriptor;
@@ -1568,7 +1666,11 @@
// The boot class loader, search the boot class path.
ClassPathEntry pair = FindInClassPath(descriptor, hash, boot_class_path_);
if (pair.second != nullptr) {
- return DefineClass(self, descriptor, hash, NullHandle<mirror::ClassLoader>(), *pair.first,
+ return DefineClass(self,
+ descriptor,
+ hash,
+ ScopedNullHandle<mirror::ClassLoader>(),
+ *pair.first,
*pair.second);
} else {
// The boot class loader is searched ahead of the application class loader, failures are
@@ -1877,12 +1979,10 @@
// We're invoking a virtual method directly (thanks to sharpening), compute the oat_method_index
// by search for its position in the declared virtual methods.
oat_method_index = declaring_class->NumDirectMethods();
- size_t end = declaring_class->NumVirtualMethods();
bool found_virtual = false;
- for (size_t i = 0; i < end; i++) {
+ for (ArtMethod& art_method : declaring_class->GetVirtualMethods(image_pointer_size_)) {
// Check method index instead of identity in case of duplicate method definitions.
- if (method->GetDexMethodIndex() ==
- declaring_class->GetVirtualMethod(i, image_pointer_size_)->GetDexMethodIndex()) {
+ if (method->GetDexMethodIndex() == art_method.GetDexMethodIndex()) {
found_virtual = true;
break;
}
@@ -1971,7 +2071,7 @@
}
Runtime* runtime = Runtime::Current();
if (!runtime->IsStarted()) {
- if (runtime->IsAotCompiler() || runtime->GetHeap()->HasImageSpace()) {
+ if (runtime->IsAotCompiler() || runtime->GetHeap()->HasBootImageSpace()) {
return; // OAT file unavailable.
}
}
@@ -2245,11 +2345,14 @@
klass->SetIFieldsPtr(ifields);
DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
// Load methods.
- klass->SetDirectMethodsPtr(AllocArtMethodArray(self, allocator, it.NumDirectMethods()));
- klass->SetVirtualMethodsPtr(AllocArtMethodArray(self, allocator, it.NumVirtualMethods()));
+ klass->SetMethodsPtr(
+ AllocArtMethodArray(self, allocator, it.NumDirectMethods() + it.NumVirtualMethods()),
+ it.NumDirectMethods(),
+ it.NumVirtualMethods());
size_t class_def_method_index = 0;
uint32_t last_dex_method_index = DexFile::kDexNoIndex;
size_t last_class_def_method_index = 0;
+ // TODO These should really use the iterators.
for (size_t i = 0; it.HasNextDirectMethod(); i++, it.Next()) {
ArtMethod* method = klass->GetDirectMethodUnchecked(i, image_pointer_size_);
LoadMethod(self, dex_file, it, klass, method);
@@ -2728,9 +2831,12 @@
return nullptr;
}
-void ClassLinker::UpdateClassVirtualMethods(mirror::Class* klass,
- LengthPrefixedArray<ArtMethod>* new_methods) {
- klass->SetVirtualMethodsPtr(new_methods);
+// TODO This should really be in mirror::Class.
+void ClassLinker::UpdateClassMethods(mirror::Class* klass,
+ LengthPrefixedArray<ArtMethod>* new_methods) {
+ klass->SetMethodsPtrUnchecked(new_methods,
+ klass->NumDirectMethods(),
+ klass->NumDeclaredVirtualMethods());
// Need to mark the card so that the remembered sets and mod union tables get updated.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass);
}
@@ -2774,23 +2880,27 @@
return result;
}
-static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches(gc::space::ImageSpace* image_space)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- CHECK(image_space != nullptr);
- mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- DCHECK(root != nullptr);
- return root->AsObjectArray<mirror::DexCache>();
+static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
+ std::vector<gc::space::ImageSpace*> image_spaces) SHARED_REQUIRES(Locks::mutator_lock_) {
+ CHECK(!image_spaces.empty());
+ std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ DCHECK(root != nullptr);
+ dex_caches_vector.push_back(root->AsObjectArray<mirror::DexCache>());
+ }
+ return dex_caches_vector;
}
void ClassLinker::AddBootImageClassesToClassTable() {
if (dex_cache_boot_image_class_lookup_required_) {
- AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpace(),
+ AddImageClassesToClassTable(Runtime::Current()->GetHeap()->GetBootImageSpaces(),
/*class_loader*/nullptr);
dex_cache_boot_image_class_lookup_required_ = false;
}
}
-void ClassLinker::AddImageClassesToClassTable(gc::space::ImageSpace* image_space,
+void ClassLinker::AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
mirror::ClassLoader* class_loader) {
Thread* self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -2798,25 +2908,28 @@
ClassTable* const class_table = InsertClassTableForClassLoader(class_loader);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(image_space);
std::string temp;
- for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
- for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
- mirror::Class* klass = types[j].Read();
- if (klass != nullptr) {
- DCHECK_EQ(klass->GetClassLoader(), class_loader);
- const char* descriptor = klass->GetDescriptor(&temp);
- size_t hash = ComputeModifiedUtf8Hash(descriptor);
- mirror::Class* existing = class_table->Lookup(descriptor, hash);
- if (existing != nullptr) {
- CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
- << PrettyClassAndClassLoader(klass);
- } else {
- class_table->Insert(klass);
- if (log_new_class_table_roots_) {
- new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
+ std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
+ GetImageDexCaches(image_spaces);
+ for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
+ for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
+ mirror::Class* klass = types[j].Read();
+ if (klass != nullptr) {
+ DCHECK_EQ(klass->GetClassLoader(), class_loader);
+ const char* descriptor = klass->GetDescriptor(&temp);
+ size_t hash = ComputeModifiedUtf8Hash(descriptor);
+ mirror::Class* existing = class_table->Lookup(descriptor, hash);
+ if (existing != nullptr) {
+ CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
+ << PrettyClassAndClassLoader(klass);
+ } else {
+ class_table->Insert(klass);
+ if (log_new_class_table_roots_) {
+ new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
+ }
}
}
}
@@ -2847,18 +2960,20 @@
mirror::Class* ClassLinker::LookupClassFromBootImage(const char* descriptor) {
ScopedAssertNoThreadSuspension ants(Thread::Current(), "Image class lookup");
- mirror::ObjectArray<mirror::DexCache>* dex_caches = GetImageDexCaches(
- Runtime::Current()->GetHeap()->GetBootImageSpace());
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Try binary searching the type index by descriptor.
- const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
- if (type_id != nullptr) {
- uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
- if (klass != nullptr) {
- return klass;
+ std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector =
+ GetImageDexCaches(Runtime::Current()->GetHeap()->GetBootImageSpaces());
+ for (mirror::ObjectArray<mirror::DexCache>* dex_caches : dex_caches_vector) {
+ for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ // Try binary searching the type index by descriptor.
+ const DexFile::TypeId* type_id = dex_file->FindTypeId(descriptor);
+ if (type_id != nullptr) {
+ uint16_t type_idx = dex_file->GetIndexForTypeId(*type_id);
+ mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
+ if (klass != nullptr) {
+ return klass;
+ }
}
}
}
@@ -3158,7 +3273,7 @@
// the runtime isn't started. On the other hand, app classes can be re-verified even if they are
// already pre-opted, as then the runtime is started.
if (!Runtime::Current()->IsAotCompiler() &&
- !Runtime::Current()->GetHeap()->HasImageSpace() &&
+ !Runtime::Current()->GetHeap()->HasBootImageSpace() &&
klass->GetClassLoader() != nullptr) {
return false;
}
@@ -3210,11 +3325,8 @@
void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
Handle<mirror::Class> klass) {
- for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
- ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i, image_pointer_size_));
- }
- for (size_t i = 0; i < klass->NumVirtualMethods(); i++) {
- ResolveMethodExceptionHandlerTypes(dex_file, klass->GetVirtualMethod(i, image_pointer_size_));
+ for (ArtMethod& method : klass->GetMethods(image_pointer_size_)) {
+ ResolveMethodExceptionHandlerTypes(dex_file, &method);
}
}
@@ -3302,29 +3414,30 @@
throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
// Proxies have 1 direct method, the constructor
- LengthPrefixedArray<ArtMethod>* directs = AllocArtMethodArray(self, allocator, 1);
- // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
- // want to throw OOM in the future.
- if (UNLIKELY(directs == nullptr)) {
- self->AssertPendingOOMException();
- return nullptr;
- }
- klass->SetDirectMethodsPtr(directs);
- CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_));
+ const size_t num_direct_methods = 1;
- // Create virtual method using specified prototypes.
+ // They have as many virtual methods as the array
auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods));
DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass())
<< PrettyClass(h_methods->GetClass());
const size_t num_virtual_methods = h_methods->GetLength();
- auto* virtuals = AllocArtMethodArray(self, allocator, num_virtual_methods);
+
+ // Create the methods array.
+ LengthPrefixedArray<ArtMethod>* proxy_class_methods = AllocArtMethodArray(
+ self, allocator, num_direct_methods + num_virtual_methods);
// Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
// want to throw OOM in the future.
- if (UNLIKELY(virtuals == nullptr)) {
+ if (UNLIKELY(proxy_class_methods == nullptr)) {
self->AssertPendingOOMException();
return nullptr;
}
- klass->SetVirtualMethodsPtr(virtuals);
+ klass->SetMethodsPtr(proxy_class_methods, num_direct_methods, num_virtual_methods);
+
+ // Create the single direct method.
+ CreateProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_));
+
+ // Create virtual method using specified prototypes.
+ // TODO These should really use the iterators.
for (size_t i = 0; i < num_virtual_methods; ++i) {
auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
auto* prototype = h_methods->Get(i)->GetArtMethod();
@@ -3434,7 +3547,7 @@
void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
// Create constructor for Proxy that must initialize the method.
- CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 16u);
+ CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 19u);
ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked(
2, image_pointer_size_);
// Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden
@@ -4102,14 +4215,8 @@
}
DCHECK_EQ(temp_class->NumDirectMethods(), 0u);
- for (auto& method : new_class->GetDirectMethods(image_pointer_size_)) {
- if (method.GetDeclaringClass() == temp_class) {
- method.SetDeclaringClass(new_class);
- }
- }
-
DCHECK_EQ(temp_class->NumVirtualMethods(), 0u);
- for (auto& method : new_class->GetVirtualMethods(image_pointer_size_)) {
+ for (auto& method : new_class->GetMethods(image_pointer_size_)) {
if (method.GetDeclaringClass() == temp_class) {
method.SetDeclaringClass(new_class);
}
@@ -4193,8 +4300,7 @@
// ArtMethod array pointers. If this occurs, it causes bugs in remembered sets since the GC
// may not see any references to the target space and clean the card for a class if another
// class had the same array pointer.
- klass->SetDirectMethodsPtrUnchecked(nullptr);
- klass->SetVirtualMethodsPtr(nullptr);
+ klass->SetMethodsPtrUnchecked(nullptr, 0, 0);
klass->SetSFieldsPtrUnchecked(nullptr);
klass->SetIFieldsPtrUnchecked(nullptr);
if (UNLIKELY(h_new_class.Get() == nullptr)) {
@@ -4959,12 +5065,10 @@
for (size_t k = ifstart + 1; k < iftable_count; k++) {
// Skip ifstart since our current interface obviously cannot override itself.
current_iface.Assign(iftable->GetInterface(k));
- size_t num_instance_methods = current_iface->NumVirtualMethods();
- // Iterate through every method on this interface. The order does not matter so we go forwards.
- for (size_t m = 0; m < num_instance_methods; m++) {
- ArtMethod* current_method = current_iface->GetVirtualMethodUnchecked(m, image_pointer_size);
+ // Iterate through every method on this interface. The order does not matter.
+ for (ArtMethod& current_method : current_iface->GetDeclaredVirtualMethods(image_pointer_size)) {
if (UNLIKELY(target.HasSameNameAndSignature(
- current_method->GetInterfaceMethodIfProxy(image_pointer_size)))) {
+ current_method.GetInterfaceMethodIfProxy(image_pointer_size)))) {
// Check if the i'th interface is a subtype of this one.
if (iface->IsAssignableFrom(current_iface.Get())) {
return true;
@@ -5017,10 +5121,9 @@
DCHECK_LT(k, iftable->Count());
iface.Assign(iftable->GetInterface(k));
- size_t num_instance_methods = iface->NumVirtualMethods();
- // Iterate through every method on this interface. The order does not matter so we go forwards.
- for (size_t m = 0; m < num_instance_methods; m++) {
- ArtMethod* current_method = iface->GetVirtualMethodUnchecked(m, image_pointer_size_);
+ // Iterate through every declared method on this interface. The order does not matter.
+ for (auto& method_iter : iface->GetDeclaredVirtualMethods(image_pointer_size_)) {
+ ArtMethod* current_method = &method_iter;
// Skip abstract methods and methods with different names.
if (current_method->IsAbstract() ||
!target_name_comparator.HasSameNameAndSignature(
@@ -5327,6 +5430,26 @@
return nullptr;
}
+static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::PointerArray* check_vtable = klass->GetVTableDuringLinking();
+ mirror::Class* superclass = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr;
+ int32_t super_vtable_length = (superclass != nullptr) ? superclass->GetVTableLength() : 0;
+ for (int32_t i = 0; i < check_vtable->GetLength(); ++i) {
+ ArtMethod* m = check_vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
+ CHECK(m != nullptr);
+
+ ArraySlice<ArtMethod> virtuals = klass->GetVirtualMethodsSliceUnchecked(pointer_size);
+ auto is_same_method = [m] (const ArtMethod& meth) {
+ return &meth == m;
+ };
+ CHECK((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) ||
+ std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end())
+ << "While linking class '" << PrettyClass(klass.Get()) << "' unable to find owning class "
+ << "of '" << PrettyMethod(m) << "' (vtable index: " << i << ").";
+ }
+}
+
bool ClassLinker::LinkInterfaceMethods(
Thread* self,
Handle<mirror::Class> klass,
@@ -5449,25 +5572,31 @@
const bool super_interface = is_super && extend_super_iftable;
auto method_array(hs2.NewHandle(iftable->GetMethodArray(i)));
- LengthPrefixedArray<ArtMethod>* input_virtual_methods = nullptr;
- Handle<mirror::PointerArray> input_vtable_array = NullHandle<mirror::PointerArray>();
+ ArraySlice<ArtMethod> input_virtual_methods;
+ ScopedNullHandle<mirror::PointerArray> null_handle;
+ Handle<mirror::PointerArray> input_vtable_array(null_handle);
int32_t input_array_length = 0;
+
// TODO Cleanup Needed: In the presence of default methods this optimization is rather dirty
// and confusing. Default methods should always look through all the superclasses
// because they are the last choice of an implementation. We get around this by looking
// at the super-classes iftable methods (copied into method_array previously) when we are
// looking for the implementation of a super-interface method but that is rather dirty.
+ bool using_virtuals;
if (super_interface) {
- // We are overwriting a super class interface, try to only virtual methods instead of the
+ // If we are overwriting a super class interface, try to only virtual methods instead of the
// whole vtable.
- input_virtual_methods = klass->GetVirtualMethodsPtr();
- input_array_length = klass->NumVirtualMethods();
+ using_virtuals = true;
+ input_virtual_methods = klass->GetDeclaredMethodsSlice(image_pointer_size_);
+ input_array_length = input_virtual_methods.size();
} else {
- // A new interface, we need the whole vtable in case a new interface method is implemented
- // in the whole superclass.
+ // For a new interface, however, we need the whole vtable in case a new
+ // interface method is implemented in the whole superclass.
+ using_virtuals = false;
input_vtable_array = vtable;
input_array_length = input_vtable_array->GetLength();
}
+
// For each method in interface
for (size_t j = 0; j < num_methods; ++j) {
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
@@ -5488,8 +5617,8 @@
bool found_impl = false;
ArtMethod* vtable_impl = nullptr;
for (int32_t k = input_array_length - 1; k >= 0; --k) {
- ArtMethod* vtable_method = input_virtual_methods != nullptr ?
- &input_virtual_methods->At(k, method_size, method_alignment) :
+ ArtMethod* vtable_method = using_virtuals ?
+ &input_virtual_methods[k] :
input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
ArtMethod* vtable_method_for_name_comparison =
vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
@@ -5650,38 +5779,39 @@
VLOG(class_linker) << PrettyClass(klass.Get()) << ": miranda_methods=" << miranda_methods.size()
<< " default_methods=" << default_methods.size()
<< " default_conflict_methods=" << default_conflict_methods.size();
- const size_t old_method_count = klass->NumVirtualMethods();
+ const size_t old_method_count = klass->NumMethods();
const size_t new_method_count = old_method_count +
miranda_methods.size() +
default_methods.size() +
default_conflict_methods.size();
// Attempt to realloc to save RAM if possible.
- LengthPrefixedArray<ArtMethod>* old_virtuals = klass->GetVirtualMethodsPtr();
- // The Realloced virtual methods aren't visiblef from the class roots, so there is no issue
+ LengthPrefixedArray<ArtMethod>* old_methods = klass->GetMethodsPtr();
+ // The Realloced virtual methods aren't visible from the class roots, so there is no issue
// where GCs could attempt to mark stale pointers due to memcpy. And since we overwrite the
// realloced memory with out->CopyFrom, we are guaranteed to have objects in the to space since
// CopyFrom has internal read barriers.
- const size_t old_size = old_virtuals != nullptr
- ? LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count,
- method_size,
- method_alignment)
- : 0u;
+ //
+ // TODO We should maybe move some of this into mirror::Class or at least into another method.
+ const size_t old_size = LengthPrefixedArray<ArtMethod>::ComputeSize(old_method_count,
+ method_size,
+ method_alignment);
const size_t new_size = LengthPrefixedArray<ArtMethod>::ComputeSize(new_method_count,
method_size,
method_alignment);
- auto* virtuals = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- runtime->GetLinearAlloc()->Realloc(self, old_virtuals, old_size, new_size));
- if (UNLIKELY(virtuals == nullptr)) {
+ const size_t old_methods_ptr_size = (old_methods != nullptr) ? old_size : 0;
+ auto* methods = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
+ runtime->GetLinearAlloc()->Realloc(self, old_methods, old_methods_ptr_size, new_size));
+ if (UNLIKELY(methods == nullptr)) {
self->AssertPendingOOMException();
self->EndAssertNoThreadSuspension(old_cause);
return false;
}
ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
- if (virtuals != old_virtuals) {
+ if (methods != old_methods) {
// Maps from heap allocated miranda method to linear alloc miranda method.
- StrideIterator<ArtMethod> out = virtuals->begin(method_size, method_alignment);
- // Copy over the old methods + miranda methods.
- for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
+ StrideIterator<ArtMethod> out = methods->begin(method_size, method_alignment);
+ // Copy over the old methods.
+ for (auto& m : klass->GetMethods(image_pointer_size_)) {
move_table.emplace(&m, &*out);
// The CopyFrom is only necessary to not miss read barriers since Realloc won't do read
// barriers when it copies.
@@ -5689,8 +5819,7 @@
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->begin(method_size, method_alignment)
- + old_method_count);
+ StrideIterator<ArtMethod> out(methods->begin(method_size, method_alignment) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
for (ArtMethod* mir_method : miranda_methods) {
@@ -5702,9 +5831,8 @@
move_table.emplace(mir_method, &new_method);
++out;
}
- // We need to copy the default methods into our own virtual method table since the runtime
- // requires that every method on a class's vtable be in that respective class's virtual method
- // table.
+ // We need to copy the default methods into our own method table since the runtime requires that
+ // every method on a class's vtable be in that respective class's virtual method table.
// NOTE This means that two classes might have the same implementation of a method from the same
// interface but will have different ArtMethod*s for them. This also means we cannot compare a
// default method found on a class with one found on the declaring interface directly and must
@@ -5738,8 +5866,8 @@
move_table.emplace(conf_method, &new_method);
++out;
}
- virtuals->SetSize(new_method_count);
- UpdateClassVirtualMethods(klass.Get(), virtuals);
+ methods->SetSize(new_method_count);
+ UpdateClassMethods(klass.Get(), methods);
// Done copying methods, they are all roots in the class now, so we can end the no thread
// suspension assert.
self->EndAssertNoThreadSuspension(old_cause);
@@ -5755,7 +5883,7 @@
self->AssertPendingOOMException();
return false;
}
- out = virtuals->begin(method_size, method_alignment) + old_method_count;
+ out = methods->begin(method_size, method_alignment) + old_method_count;
size_t vtable_pos = old_vtable_count;
for (size_t i = old_method_count; i < new_method_count; ++i) {
// Leave the declaring class alone as type indices are relative to it
@@ -5809,8 +5937,16 @@
}
}
+ if (kIsDebugBuild) {
+ for (size_t i = 0; i < new_vtable_count; ++i) {
+ CHECK(move_table.find(vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_)) ==
+ move_table.end());
+ }
+ }
+
klass->SetVTable(vtable.Get());
- // Go fix up all the stale miranda pointers.
+ // Go fix up all the stale (old miranda or default method) pointers.
+ // First do it on the iftable.
for (size_t i = 0; i < ifcount; ++i) {
for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
auto* method_array = iftable->GetMethodArray(i);
@@ -5824,7 +5960,7 @@
}
}
}
- // Fix up IMT in case it has any miranda methods in it.
+ // Fix up IMT next
for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
auto it = move_table.find(out_imt[i]);
if (it != move_table.end()) {
@@ -5836,25 +5972,26 @@
auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods();
for (size_t i = 0, count = klass->GetDexCache()->NumResolvedMethods(); i < count; ++i) {
auto* m = mirror::DexCache::GetElementPtrSize(resolved_methods, i, image_pointer_size_);
- // We don't remove default methods from the move table since we need them to update the
- // vtable. Therefore just skip them for this check.
- if (!m->IsDefault()) {
- CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m);
- }
+ CHECK(move_table.find(m) == move_table.end() ||
+ // The original versions of copied methods will still be present so allow those too.
+ // Note that if the first check passes this might fail to GetDeclaringClass().
+ std::find_if(m->GetDeclaringClass()->GetMethods(image_pointer_size_).begin(),
+ m->GetDeclaringClass()->GetMethods(image_pointer_size_).end(),
+ [m] (ArtMethod& meth) {
+ return &meth == m;
+ }) != m->GetDeclaringClass()->GetMethods(image_pointer_size_).end())
+ << "Obsolete methods " << PrettyMethod(m) << " is in dex cache!";
}
}
- // Put some random garbage in old virtuals to help find stale pointers.
- if (virtuals != old_virtuals) {
- memset(old_virtuals, 0xFEu, old_size);
+ // Put some random garbage in old methods to help find stale pointers.
+ if (methods != old_methods && old_methods != nullptr) {
+ memset(old_methods, 0xFEu, old_size);
}
} else {
self->EndAssertNoThreadSuspension(old_cause);
}
if (kIsDebugBuild) {
- auto* check_vtable = klass->GetVTableDuringLinking();
- for (int i = 0; i < check_vtable->GetLength(); ++i) {
- CHECK(check_vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_) != nullptr);
- }
+ SanityCheckVTable(klass, image_pointer_size_);
}
return true;
}
@@ -5929,6 +6066,20 @@
// we want a relatively stable order so that adding new fields
// minimizes disruption of C++ version such as Class and Method.
+ //
+ // The overall sort order order is:
+ // 1) All object reference fields, sorted alphabetically.
+ // 2) All java long (64-bit) integer fields, sorted alphabetically.
+ // 3) All java double (64-bit) floating point fields, sorted alphabetically.
+ // 4) All java int (32-bit) integer fields, sorted alphabetically.
+ // 5) All java float (32-bit) floating point fields, sorted alphabetically.
+ // 6) All java char (16-bit) integer fields, sorted alphabetically.
+ // 7) All java short (16-bit) integer fields, sorted alphabetically.
+ // 8) All java boolean (8-bit) integer fields, sorted alphabetically.
+ // 9) All java byte (8-bit) integer fields, sorted alphabetically.
+ //
+ // Once the fields are sorted in this order we will attempt to fill any gaps that might be present
+ // in the memory layout of the structure. See ShuffleForward for how this is done.
std::deque<ArtField*> grouped_and_sorted_fields;
const char* old_no_suspend_cause = self->StartAssertNoThreadSuspension(
"Naked ArtField references in deque");
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 0d3bc1e..9d432c6 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -505,7 +505,7 @@
SHARED_REQUIRES(Locks::mutator_lock_);
// Add image classes to the class table.
- void AddImageClassesToClassTable(gc::space::ImageSpace* image_space,
+ void AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -983,8 +983,8 @@
bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents)
SHARED_REQUIRES(Locks::mutator_lock_);
- void UpdateClassVirtualMethods(mirror::Class* klass,
- LengthPrefixedArray<ArtMethod>* new_methods)
+ void UpdateClassMethods(mirror::Class* klass,
+ LengthPrefixedArray<ArtMethod>* new_methods)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 2c086c5..99353c5 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -254,10 +254,20 @@
EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
}
- for (ArtMethod& method : klass->GetVirtualMethods(sizeof(void*))) {
+ for (ArtMethod& method : klass->GetDeclaredVirtualMethods(sizeof(void*))) {
AssertMethod(&method);
EXPECT_FALSE(method.IsDirect());
- EXPECT_TRUE(method.GetDeclaringClass()->IsAssignableFrom(klass.Get()));
+ EXPECT_EQ(klass.Get(), method.GetDeclaringClass());
+ }
+
+ for (ArtMethod& method : klass->GetCopiedMethods(sizeof(void*))) {
+ AssertMethod(&method);
+ EXPECT_FALSE(method.IsDirect());
+ EXPECT_TRUE(method.IsMiranda() || method.IsDefault() || method.IsDefaultConflicting());
+ EXPECT_TRUE(method.GetDeclaringClass()->IsInterface())
+ << "declaring class: " << PrettyClass(method.GetDeclaringClass());
+ EXPECT_TRUE(method.GetDeclaringClass()->IsAssignableFrom(klass.Get()))
+ << "declaring class: " << PrettyClass(method.GetDeclaringClass());
}
for (size_t i = 0; i < klass->NumInstanceFields(); i++) {
@@ -491,18 +501,20 @@
struct ClassOffsets : public CheckOffsets<mirror::Class> {
ClassOffsets() : CheckOffsets<mirror::Class>(false, "Ljava/lang/Class;") {
addOffset(OFFSETOF_MEMBER(mirror::Class, access_flags_), "accessFlags");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, annotation_type_), "annotationType");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_flags_), "classFlags");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_loader_), "classLoader");
addOffset(OFFSETOF_MEMBER(mirror::Class, class_size_), "classSize");
addOffset(OFFSETOF_MEMBER(mirror::Class, clinit_thread_id_), "clinitThreadId");
addOffset(OFFSETOF_MEMBER(mirror::Class, component_type_), "componentType");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, copied_methods_offset_), "copiedMethodsOffset");
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_), "dexCache");
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_cache_strings_), "dexCacheStrings");
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_class_def_idx_), "dexClassDefIndex");
addOffset(OFFSETOF_MEMBER(mirror::Class, dex_type_idx_), "dexTypeIndex");
- addOffset(OFFSETOF_MEMBER(mirror::Class, direct_methods_), "directMethods");
addOffset(OFFSETOF_MEMBER(mirror::Class, ifields_), "iFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, iftable_), "ifTable");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, methods_), "methods");
addOffset(OFFSETOF_MEMBER(mirror::Class, name_), "name");
addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_instance_fields_),
"numReferenceInstanceFields");
@@ -516,7 +528,7 @@
addOffset(OFFSETOF_MEMBER(mirror::Class, status_), "status");
addOffset(OFFSETOF_MEMBER(mirror::Class, super_class_), "superClass");
addOffset(OFFSETOF_MEMBER(mirror::Class, verify_error_), "verifyError");
- addOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_), "virtualMethods");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, virtual_methods_offset_), "virtualMethodsOffset");
addOffset(OFFSETOF_MEMBER(mirror::Class, vtable_), "vtable");
};
};
@@ -524,15 +536,15 @@
struct StringOffsets : public CheckOffsets<mirror::String> {
StringOffsets() : CheckOffsets<mirror::String>(false, "Ljava/lang/String;") {
addOffset(OFFSETOF_MEMBER(mirror::String, count_), "count");
- addOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hashCode");
+ addOffset(OFFSETOF_MEMBER(mirror::String, hash_code_), "hash");
};
};
struct ThrowableOffsets : public CheckOffsets<mirror::Throwable> {
ThrowableOffsets() : CheckOffsets<mirror::Throwable>(false, "Ljava/lang/Throwable;") {
+ addOffset(OFFSETOF_MEMBER(mirror::Throwable, backtrace_), "backtrace");
addOffset(OFFSETOF_MEMBER(mirror::Throwable, cause_), "cause");
addOffset(OFFSETOF_MEMBER(mirror::Throwable, detail_message_), "detailMessage");
- addOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_state_), "stackState");
addOffset(OFFSETOF_MEMBER(mirror::Throwable, stack_trace_), "stackTrace");
addOffset(OFFSETOF_MEMBER(mirror::Throwable, suppressed_exceptions_), "suppressedExceptions");
};
@@ -601,7 +613,7 @@
struct AccessibleObjectOffsets : public CheckOffsets<mirror::AccessibleObject> {
AccessibleObjectOffsets() : CheckOffsets<mirror::AccessibleObject>(
false, "Ljava/lang/reflect/AccessibleObject;") {
- addOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "flag");
+ addOffset(mirror::AccessibleObject::FlagOffset().Uint32Value(), "override");
};
};
@@ -844,7 +856,7 @@
// Validate that the "value" field is always the 0th field in each of java.lang's box classes.
// This lets UnboxPrimitive avoid searching for the field by name at runtime.
ScopedObjectAccess soa(Thread::Current());
- NullHandle<mirror::ClassLoader> class_loader;
+ ScopedNullHandle<mirror::ClassLoader> class_loader;
mirror::Class* c;
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader);
EXPECT_STREQ("value", c->GetIFieldsPtr()->At(0).GetName());
@@ -1090,7 +1102,7 @@
TEST_F(ClassLinkerTest, ValidatePredefinedClassSizes) {
ScopedObjectAccess soa(Thread::Current());
- NullHandle<mirror::ClassLoader> class_loader;
+ ScopedNullHandle<mirror::ClassLoader> class_loader;
mirror::Class* c;
c = class_linker_->FindClass(soa.Self(), "Ljava/lang/Class;", class_loader);
@@ -1122,10 +1134,7 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccPreverified) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
- for (auto& m : c->GetDirectMethods(sizeof(void*))) {
- CheckMethod(&m, preverified);
- }
- for (auto& m : c->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(sizeof(void*))) {
CheckMethod(&m, preverified);
}
}
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f705a50..403dd4c 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -303,7 +303,12 @@
RuntimeOptions options;
- std::string boot_class_path_string = "-Xbootclasspath:" + GetLibCoreDexFileName();
+ std::string boot_class_path_string = "-Xbootclasspath";
+ for (const std::string &core_dex_file_name : GetLibCoreDexFileNames()) {
+ boot_class_path_string += ":";
+ boot_class_path_string += core_dex_file_name;
+ }
+
options.push_back(std::make_pair(boot_class_path_string, nullptr));
options.push_back(std::make_pair("-Xcheck:jni", nullptr));
options.push_back(std::make_pair(min_heap_string, nullptr));
@@ -328,6 +333,19 @@
class_linker_ = runtime_->GetClassLinker();
class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
+ // Runtime::Create acquired the mutator_lock_ that is normally given away when we
+ // call Runtime::Start. Give it away now and then switch to a more manageable ScopedObjectAccess.
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+
+ // Get the boot class path from the runtime so it can be used in tests.
+ boot_class_path_ = class_linker_->GetBootClassPath();
+ ASSERT_FALSE(boot_class_path_.empty());
+ java_lang_dex_file_ = boot_class_path_[0];
+
+ FinalizeSetup();
+}
+
+void CommonRuntimeTest::FinalizeSetup() {
// Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
// set up.
if (!unstarted_initialized_) {
@@ -335,14 +353,10 @@
unstarted_initialized_ = true;
}
- class_linker_->RunRootClinits();
- boot_class_path_ = class_linker_->GetBootClassPath();
- java_lang_dex_file_ = boot_class_path_[0];
-
-
- // Runtime::Create acquired the mutator_lock_ that is normally given away when we
- // Runtime::Start, give it away now and then switch to a more managable ScopedObjectAccess.
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ class_linker_->RunRootClinits();
+ }
// We're back in native, take the opportunity to initialize well known classes.
WellKnownClasses::Init(Thread::Current()->GetJniEnv());
@@ -353,11 +367,6 @@
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
// Reduce timinig-dependent flakiness in OOME behavior (eg StubTest.AllocObject).
runtime_->GetHeap()->SetMinIntervalHomogeneousSpaceCompactionByOom(0U);
-
- // Get the boot class path from the runtime so it can be used in tests.
- boot_class_path_ = class_linker_->GetBootClassPath();
- ASSERT_FALSE(boot_class_path_.empty());
- java_lang_dex_file_ = boot_class_path_[0];
}
void CommonRuntimeTest::ClearDirectory(const char* dirpath) {
@@ -405,10 +414,30 @@
(*icu_cleanup_fn)();
Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption after the test
+
+ // Manually close the JNI libraries.
+ // Runtime does not support repeatedly doing JNI->CreateVM, thus we need to manually clean up the
+ // dynamic linking loader so that gtests would not fail.
+ // Bug: 25785594
+ if (runtime_->IsStarted()) {
+ {
+ // We retrieve the handle by calling dlopen on the library. To close it, we need to call
+ // dlclose twice, the first time to undo our dlopen and the second time to actually unload it.
+ // See man dlopen.
+ void* handle = dlopen("libjavacore.so", RTLD_LAZY);
+ dlclose(handle);
+ CHECK_EQ(0, dlclose(handle));
+ }
+ {
+ void* handle = dlopen("libopenjdk.so", RTLD_LAZY);
+ dlclose(handle);
+ CHECK_EQ(0, dlclose(handle));
+ }
+ }
}
-std::string CommonRuntimeTest::GetLibCoreDexFileName() {
- return GetDexFileName("core-libart");
+std::vector<std::string> CommonRuntimeTest::GetLibCoreDexFileNames() {
+ return std::vector<std::string>({GetDexFileName("core-oj"), GetDexFileName("core-libart")});
}
std::string CommonRuntimeTest::GetDexFileName(const std::string& jar_prefix) {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 6da2bef..8d9e628 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -77,8 +77,8 @@
CommonRuntimeTest();
~CommonRuntimeTest();
- // Gets the path of the libcore dex file.
- static std::string GetLibCoreDexFileName();
+ // Gets the paths of the libcore dex files.
+ static std::vector<std::string> GetLibCoreDexFileNames();
// Returns bin directory which contains host's prebuild tools.
static std::string GetAndroidHostToolsDir();
@@ -114,6 +114,10 @@
// Called after the runtime is created.
virtual void PostRuntimeCreate() {}
+ // Called to finish up runtime creation and fill in test fields. By default runs root
+ // initializers, initializes well-known classes, and creates the heap thread pool.
+ virtual void FinalizeSetup();
+
// Gets the path of the specified dex file for host or target.
static std::string GetDexFileName(const std::string& jar_prefix);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index e5d648b..f009fe6 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -730,7 +730,8 @@
if (o == nullptr) {
return JDWP::ERR_INVALID_OBJECT;
}
- expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
+ DCHECK(o->IsClass());
+ expandBufAddObjectId(pReply, gRegistry->Add(o->AsClass()->GetClassLoader()));
return JDWP::ERR_NONE;
}
@@ -1491,25 +1492,20 @@
return error;
}
- size_t direct_method_count = c->NumDirectMethods();
- size_t virtual_method_count = c->NumVirtualMethods();
-
- expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
+ expandBufAdd4BE(pReply, c->NumMethods());
auto* cl = Runtime::Current()->GetClassLinker();
auto ptr_size = cl->GetImagePointerSize();
- for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
- ArtMethod* m = i < direct_method_count ?
- c->GetDirectMethod(i, ptr_size) : c->GetVirtualMethod(i - direct_method_count, ptr_size);
- expandBufAddMethodId(pReply, ToMethodId(m));
- expandBufAddUtf8String(pReply, m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
+ for (ArtMethod& m : c->GetMethods(ptr_size)) {
+ expandBufAddMethodId(pReply, ToMethodId(&m));
+ expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
expandBufAddUtf8String(pReply,
- m->GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
+ m.GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
if (with_generic) {
const char* generic_signature = "";
expandBufAddUtf8String(pReply, generic_signature);
}
- expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
+ expandBufAdd4BE(pReply, MangleAccessFlags(m.GetAccessFlags()));
}
return JDWP::ERR_NONE;
}
@@ -2038,29 +2034,28 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(thread_group != nullptr);
- // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
- ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
- mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
- {
- // The "groups" field is declared as a java.util.List: check it really is
- // an instance of java.util.ArrayList.
- CHECK(groups_array_list != nullptr);
- mirror::Class* java_util_ArrayList_class =
- soa.Decode<mirror::Class*>(WellKnownClasses::java_util_ArrayList);
- CHECK(groups_array_list->InstanceOf(java_util_ArrayList_class));
+ // Get the int "ngroups" count of this thread group...
+ ArtField* ngroups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
+ CHECK(ngroups_field != nullptr);
+ const int32_t size = ngroups_field->GetInt(thread_group);
+ if (size == 0) {
+ return;
}
- // Get the array and size out of the ArrayList<ThreadGroup>...
- ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
- ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
- mirror::ObjectArray<mirror::Object>* groups_array =
- array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
- const int32_t size = size_field->GetInt(groups_array_list);
+ // Get the ThreadGroup[] "groups" out of this thread group...
+ ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
+ mirror::Object* groups_array = groups_field->GetObject(thread_group);
+
+ CHECK(groups_array != nullptr);
+ CHECK(groups_array->IsObjectArray());
+
+ mirror::ObjectArray<mirror::Object>* groups_array_as_array =
+ groups_array->AsObjectArray<mirror::Object>();
// Copy the first 'size' elements out of the array into the result.
ObjectRegistry* registry = Dbg::GetObjectRegistry();
for (int32_t i = 0; i < size; ++i) {
- child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
+ child_thread_group_ids->push_back(registry->Add(groups_array_as_array->Get(i)));
}
}
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 880d3e0..bc8ba97 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -1383,8 +1383,11 @@
if (annotation_item == nullptr) {
return nullptr;
}
- mirror::Object* obj = GetAnnotationValue(
- klass, annotation_item, "value", NullHandle<mirror::Class>(), kDexAnnotationType);
+ mirror::Object* obj = GetAnnotationValue(klass,
+ annotation_item,
+ "value",
+ ScopedNullHandle<mirror::Class>(),
+ kDexAnnotationType);
if (obj == nullptr) {
return nullptr;
}
@@ -1410,8 +1413,11 @@
return nullptr;
}
AnnotationValue annotation_value;
- if (!ProcessAnnotationValue(
- klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllRaw)) {
+ if (!ProcessAnnotationValue(klass,
+ &annotation,
+ &annotation_value,
+ ScopedNullHandle<mirror::Class>(),
+ kAllRaw)) {
return nullptr;
}
if (annotation_value.type_ != kDexAnnotationMethod) {
@@ -1439,7 +1445,7 @@
return nullptr;
}
return GetAnnotationValue(
- klass, annotation_item, "value", NullHandle<mirror::Class>(), kDexAnnotationMethod);
+ klass, annotation_item, "value", ScopedNullHandle<mirror::Class>(), kDexAnnotationMethod);
}
bool DexFile::GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const {
@@ -1457,8 +1463,11 @@
return false;
}
AnnotationValue annotation_value;
- if (!ProcessAnnotationValue(
- klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllObjects)) {
+ if (!ProcessAnnotationValue(klass,
+ &annotation,
+ &annotation_value,
+ ScopedNullHandle<mirror::Class>(),
+ kAllObjects)) {
return false;
}
if (annotation_value.type_ != kDexAnnotationNull &&
@@ -1484,8 +1493,11 @@
return false;
}
AnnotationValue annotation_value;
- if (!ProcessAnnotationValue(
- klass, &annotation, &annotation_value, NullHandle<mirror::Class>(), kAllRaw)) {
+ if (!ProcessAnnotationValue(klass,
+ &annotation,
+ &annotation_value,
+ ScopedNullHandle<mirror::Class>(),
+ kAllRaw)) {
return false;
}
if (annotation_value.type_ != kDexAnnotationInt) {
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 0a167bb..796701d 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -206,7 +206,7 @@
uint32_t checksum;
ScopedObjectAccess soa(Thread::Current());
std::string error_msg;
- EXPECT_TRUE(DexFile::GetChecksum(GetLibCoreDexFileName().c_str(), &checksum, &error_msg))
+ EXPECT_TRUE(DexFile::GetChecksum(GetLibCoreDexFileNames()[0].c_str(), &checksum, &error_msg))
<< error_msg;
EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksum);
}
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 2819670..57d623e 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -27,89 +27,12 @@
#include "base/unix_file/fd_file.h"
#include "elf_file_impl.h"
#include "elf_utils.h"
+#include "jit/debugger_interface.h"
#include "leb128.h"
#include "utils.h"
namespace art {
-// -------------------------------------------------------------------
-// Binary GDB JIT Interface as described in
-// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
-extern "C" {
- typedef enum {
- JIT_NOACTION = 0,
- JIT_REGISTER_FN,
- JIT_UNREGISTER_FN
- } JITAction;
-
- struct JITCodeEntry {
- JITCodeEntry* next_;
- JITCodeEntry* prev_;
- const uint8_t *symfile_addr_;
- uint64_t symfile_size_;
- };
-
- struct JITDescriptor {
- uint32_t version_;
- uint32_t action_flag_;
- JITCodeEntry* relevant_entry_;
- JITCodeEntry* first_entry_;
- };
-
- // GDB will place breakpoint into this function.
- // To prevent GCC from inlining or removing it we place noinline attribute
- // and inline assembler statement inside.
- void __attribute__((noinline)) __jit_debug_register_code();
- void __attribute__((noinline)) __jit_debug_register_code() {
- __asm__("");
- }
-
- // GDB will inspect contents of this descriptor.
- // Static initialization is necessary to prevent GDB from seeing
- // uninitialized descriptor.
- JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
-}
-
-
-static JITCodeEntry* CreateCodeEntry(const uint8_t *symfile_addr,
- uintptr_t symfile_size) {
- JITCodeEntry* entry = new JITCodeEntry;
- entry->symfile_addr_ = symfile_addr;
- entry->symfile_size_ = symfile_size;
- entry->prev_ = nullptr;
-
- // TODO: Do we need a lock here?
- entry->next_ = __jit_debug_descriptor.first_entry_;
- if (entry->next_ != nullptr) {
- entry->next_->prev_ = entry;
- }
- __jit_debug_descriptor.first_entry_ = entry;
- __jit_debug_descriptor.relevant_entry_ = entry;
-
- __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
- __jit_debug_register_code();
- return entry;
-}
-
-
-static void UnregisterCodeEntry(JITCodeEntry* entry) {
- // TODO: Do we need a lock here?
- if (entry->prev_ != nullptr) {
- entry->prev_->next_ = entry->next_;
- } else {
- __jit_debug_descriptor.first_entry_ = entry->next_;
- }
-
- if (entry->next_ != nullptr) {
- entry->next_->prev_ = entry->prev_;
- }
-
- __jit_debug_descriptor.relevant_entry_ = entry;
- __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
- __jit_debug_register_code();
- delete entry;
-}
-
template <typename ElfTypes>
ElfFileImpl<ElfTypes>::ElfFileImpl(File* file, bool writable,
bool program_header_only,
@@ -352,7 +275,7 @@
delete dynsym_symbol_table_;
delete jit_elf_image_;
if (jit_gdb_entry_) {
- UnregisterCodeEntry(jit_gdb_entry_);
+ DeleteJITCodeEntry(jit_gdb_entry_);
}
}
@@ -1511,7 +1434,7 @@
return;
}
- jit_gdb_entry_ = CreateCodeEntry(all.Begin(), all.Size());
+ jit_gdb_entry_ = CreateJITCodeEntry(all.Begin(), all.Size());
gdb_file_mapping_.reset(all_ptr.release());
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 87e29ae..915d9ab 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -306,11 +306,13 @@
mirror::Method* interface_method = soa.Decode<mirror::Method*>(interface_method_jobj);
ArtMethod* proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(
interface_method->GetArtMethod(), sizeof(void*));
- auto* virtual_methods = proxy_class->GetVirtualMethodsPtr();
+ auto virtual_methods = proxy_class->GetVirtualMethodsSlice(sizeof(void*));
size_t num_virtuals = proxy_class->NumVirtualMethods();
size_t method_size = ArtMethod::Size(sizeof(void*));
+ // Rely on the fact that the methods are contiguous to determine the index of the method in
+ // the slice.
int throws_index = (reinterpret_cast<uintptr_t>(proxy_method) -
- reinterpret_cast<uintptr_t>(virtual_methods)) / method_size;
+ reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size;
CHECK_LT(throws_index, static_cast<int>(num_virtuals));
mirror::ObjectArray<mirror::Class>* declared_exceptions =
proxy_class->GetThrows()->Get(throws_index);
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 25c0bda..1850254 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -574,9 +574,9 @@
constexpr ReadBarrierOption kReadBarrierOption =
kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, kReadBarrierOption, true>(obj,
- MemberOffset(offset),
- ref_addr);
+ ReadBarrier::Barrier<mirror::Object, kReadBarrierOption>(obj,
+ MemberOffset(offset),
+ ref_addr);
return result;
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 4e85913..01e22a4 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -31,6 +31,13 @@
options->push_back(std::make_pair("imageinstructionset", "x86_64"));
}
+ // Do not do any of the finalization. We don't want to run any code, we don't need the heap
+ // prepared, it actually will be a problem with setting the instruction set to x86_64 in
+ // SetUpRuntimeOptions.
+ void FinalizeSetup() OVERRIDE {
+ ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
+ }
+
static ArtMethod* CreateCalleeSaveMethod(InstructionSet isa, Runtime::CalleeSaveType type)
NO_THREAD_SAFETY_ANALYSIS {
Runtime* r = Runtime::Current();
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 8f7bb94..d16afd9 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -487,7 +487,9 @@
// Mark all references to the alloc space(s).
void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
- auto* image_space = heap_->GetBootImageSpace();
+ // TODO: Needs better support for multi-images? b/26317072
+ space::ImageSpace* image_space =
+ heap_->GetBootImageSpaces().empty() ? nullptr : heap_->GetBootImageSpaces()[0];
// If we don't have an image space, just pass in space_ as the immune space. Pass in the same
// space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor.
CardBitVisitor bit_visitor(visitor, space_, image_space != nullptr ? image_space : space_,
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 3be7181..61c67f8 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -167,7 +167,10 @@
uintptr_t* address = &bitmap_begin_[index];
uintptr_t old_word = *address;
if (kSetBit) {
- *address = old_word | mask;
+ if ((old_word & mask) == 0) {
+ // Avoid dirtying the page if possible.
+ *address = old_word | mask;
+ }
} else {
*address = old_word & ~mask;
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index e9497a2..99e98bb 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -367,37 +367,26 @@
GetTimings());
table->UpdateAndMarkReferences(this);
DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
- } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
- // If the space has no mod union table (the non-moving space and main spaces when the bump
- // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty
- // cards as roots (including the objects on the live stack which have just marked in the live
- // bitmap above in MarkAllocStackAsLive().)
- DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "collect_from_space_only_=" << collect_from_space_only_;
+ } else if ((space->IsImageSpace() || collect_from_space_only_) &&
+ space->GetLiveBitmap() != nullptr) {
+ // If the space has no mod union table (the non-moving space, app image spaces, main spaces
+ // when the bump pointer space only collection is enabled,) then we need to scan its live
+ // bitmap or dirty cards as roots (including the objects on the live stack which have just
+ // been marked in the live bitmap above in MarkAllocStackAsLive().)
accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
- if (kUseRememberedSet) {
+ if (!space->IsImageSpace()) {
+ DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
+ << "Space " << space->GetName() << " "
+ << "generational_=" << generational_ << " "
+ << "collect_from_space_only_=" << collect_from_space_only_;
// App images currently do not have remembered sets.
- DCHECK((space->IsImageSpace() && space != heap_->GetBootImageSpace()) ||
- rem_set != nullptr);
+ DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
} else {
DCHECK(rem_set == nullptr);
}
if (rem_set != nullptr) {
TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
rem_set->UpdateAndMarkReferences(from_space_, this);
- if (kIsDebugBuild) {
- // Verify that there are no from-space references that
- // remain in the space, that is, the remembered set (and the
- // card table) didn't miss any from-space references in the
- // space.
- accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->End()),
- visitor);
- }
} else {
TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -406,6 +395,17 @@
reinterpret_cast<uintptr_t>(space->End()),
visitor);
}
+ if (kIsDebugBuild) {
+ // Verify that there are no from-space references that
+ // remain in the space, that is, the remembered set (and the
+ // card table) didn't miss any from-space references in the
+ // space.
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->End()),
+ visitor);
+ }
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6d72f31..7f67ae4 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -233,8 +233,7 @@
backtrace_lock_(nullptr),
seen_backtrace_count_(0u),
unique_backtrace_count_(0u),
- gc_disabled_for_shutdown_(false),
- boot_image_space_(nullptr) {
+ gc_disabled_for_shutdown_(false) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -260,23 +259,63 @@
CHECK_GE(300 * MB, non_moving_space_capacity);
requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
}
+
+ // Load image space(s).
if (!image_file_name.empty()) {
- ATRACE_BEGIN("ImageSpace::Create");
- std::string error_msg;
- boot_image_space_ = space::ImageSpace::Create(image_file_name.c_str(),
- image_instruction_set,
- &error_msg);
- ATRACE_END();
- if (boot_image_space_ != nullptr) {
- AddSpace(boot_image_space_);
- // Oat files referenced by image files immediately follow them in memory, ensure alloc space
- // isn't going to get in the middle
- uint8_t* oat_file_end_addr = boot_image_space_->GetImageHeader().GetOatFileEnd();
- CHECK_GT(oat_file_end_addr, boot_image_space_->End());
- requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
- } else {
- LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
- << "Attempting to fall back to imageless running. Error was: " << error_msg;
+ // For code reuse, handle this like a work queue.
+ std::vector<std::string> image_file_names;
+ image_file_names.push_back(image_file_name);
+ // The loaded spaces. Secondary images may fail to load, in which case we need to remove
+ // already added spaces.
+ std::vector<space::Space*> added_image_spaces;
+
+ for (size_t index = 0; index < image_file_names.size(); ++index) {
+ std::string& image_name = image_file_names[index];
+ ATRACE_BEGIN("ImageSpace::Create");
+ std::string error_msg;
+ space::ImageSpace* boot_image_space = space::ImageSpace::Create(image_name.c_str(),
+ image_instruction_set,
+ index > 0,
+ &error_msg);
+ ATRACE_END();
+ if (boot_image_space != nullptr) {
+ AddSpace(boot_image_space);
+ added_image_spaces.push_back(boot_image_space);
+ // Oat files referenced by image files immediately follow them in memory, ensure alloc space
+ // isn't going to get in the middle
+ uint8_t* oat_file_end_addr = boot_image_space->GetImageHeader().GetOatFileEnd();
+ CHECK_GT(oat_file_end_addr, boot_image_space->End());
+ requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
+ boot_image_spaces_.push_back(boot_image_space);
+
+ if (index == 0) {
+ // If this was the first space, check whether there are more images to load.
+ const OatFile* boot_oat_file = boot_image_space->GetOatFile();
+ if (boot_oat_file == nullptr) {
+ continue;
+ }
+
+ const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader();
+ const char* boot_classpath =
+ boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPath);
+ if (boot_classpath == nullptr) {
+ continue;
+ }
+
+ space::ImageSpace::CreateMultiImageLocations(image_file_name,
+ boot_classpath,
+ &image_file_names);
+ }
+ } else {
+ LOG(ERROR) << "Could not create image space with image file '" << image_file_name << "'. "
+ << "Attempting to fall back to imageless running. Error was: " << error_msg
+ << "\nAttempted image: " << image_name;
+ // Remove already loaded spaces.
+ for (space::Space* loaded_space : added_image_spaces) {
+ RemoveSpace(loaded_space);
+ }
+ break;
+ }
}
}
/*
@@ -456,13 +495,15 @@
rb_table_.reset(new accounting::ReadBarrierTable());
DCHECK(rb_table_->IsAllCleared());
}
- if (GetBootImageSpace() != nullptr) {
+ if (HasBootImageSpace()) {
// Don't add the image mod union table if we are running without an image, this can crash if
// we use the CardCache implementation.
- accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
- "Image mod-union table", this, GetBootImageSpace());
- CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
- AddModUnionTable(mod_union_table);
+ for (space::ImageSpace* image_space : GetBootImageSpaces()) {
+ accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
+ "Image mod-union table", this, image_space);
+ CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
+ AddModUnionTable(mod_union_table);
+ }
}
if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
accounting::RememberedSet* non_moving_space_rem_set =
@@ -525,11 +566,12 @@
garbage_collectors_.push_back(mark_compact_collector_);
}
}
- if (GetBootImageSpace() != nullptr && non_moving_space_ != nullptr &&
+ if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
(is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
// Check that there's no gap between the image space and the non moving space so that the
// immune region won't break (eg. due to a large object allocated in the gap). This is only
// required when we're the zygote or using GSS.
+ /* TODO: Modify this check to support multi-images. b/26317072
bool no_gap = MemMap::CheckNoGaps(GetBootImageSpace()->GetMemMap(),
non_moving_space_->GetMemMap());
if (!no_gap) {
@@ -537,6 +579,7 @@
MemMap::DumpMaps(LOG(ERROR), true);
LOG(FATAL) << "There's a gap between the image space and the non-moving space";
}
+ */
}
instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
if (gc_stress_mode_) {
@@ -1202,8 +1245,8 @@
return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
-space::ImageSpace* Heap::GetBootImageSpace() const {
- return boot_image_space_;
+std::vector<space::ImageSpace*> Heap::GetBootImageSpaces() const {
+ return boot_image_spaces_;
}
void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index e23b1a3..e7ea983 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -580,9 +580,8 @@
// Unbind any bound bitmaps.
void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
- // Returns the boot image space. There may be multiple image spaces, but there is only one boot
- // image space.
- space::ImageSpace* GetBootImageSpace() const;
+ // Returns the boot image spaces. There may be multiple boot image spaces.
+ std::vector<space::ImageSpace*> GetBootImageSpaces() const;
// Permenantly disable moving garbage collection.
void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
@@ -660,8 +659,8 @@
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
- bool HasImageSpace() const {
- return boot_image_space_ != nullptr;
+ bool HasBootImageSpace() const {
+ return !boot_image_spaces_.empty();
}
ReferenceProcessor* GetReferenceProcessor() {
@@ -1322,8 +1321,8 @@
// allocating.
bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
- // Boot image space.
- space::ImageSpace* boot_image_space_;
+ // Boot image spaces.
+ std::vector<space::ImageSpace*> boot_image_spaces_;
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
index ab921d9..dc23afe 100644
--- a/runtime/gc/reference_queue_test.cc
+++ b/runtime/gc/reference_queue_test.cc
@@ -35,7 +35,7 @@
ASSERT_EQ(queue.GetLength(), 0U);
auto ref_class = hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
- NullHandle<mirror::ClassLoader>()));
+ ScopedNullHandle<mirror::ClassLoader>()));
ASSERT_TRUE(ref_class.Get() != nullptr);
auto ref1(hs.NewHandle(ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref1.Get() != nullptr);
@@ -65,11 +65,11 @@
queue.Dump(LOG(INFO));
auto weak_ref_class = hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
- NullHandle<mirror::ClassLoader>()));
+ ScopedNullHandle<mirror::ClassLoader>()));
ASSERT_TRUE(weak_ref_class.Get() != nullptr);
auto finalizer_ref_class = hs.NewHandle(
Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/FinalizerReference;",
- NullHandle<mirror::ClassLoader>()));
+ ScopedNullHandle<mirror::ClassLoader>()));
ASSERT_TRUE(finalizer_ref_class.Get() != nullptr);
auto ref1(hs.NewHandle(weak_ref_class->AllocObject(self)->AsReference()));
ASSERT_TRUE(ref1.Get() != nullptr);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 8f67c21..dfdbd04 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -43,12 +43,17 @@
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
-ImageSpace::ImageSpace(const std::string& image_filename, const char* image_location,
- MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap,
- uint8_t* end)
+ImageSpace::ImageSpace(const std::string& image_filename,
+ const char* image_location,
+ MemMap* mem_map,
+ accounting::ContinuousSpaceBitmap* live_bitmap,
+ uint8_t* end,
+ MemMap* shadow_map)
: MemMapSpace(image_filename, mem_map, mem_map->Begin(), end, end,
kGcRetentionPolicyNeverCollect),
- image_location_(image_location) {
+ oat_file_non_owned_(nullptr),
+ image_location_(image_location),
+ shadow_map_(shadow_map) {
DCHECK(live_bitmap != nullptr);
live_bitmap_.reset(live_bitmap);
}
@@ -470,6 +475,7 @@
ImageSpace* ImageSpace::Create(const char* image_location,
const InstructionSet image_isa,
+ bool secondary_image,
std::string* error_msg) {
std::string system_filename;
bool has_system = false;
@@ -481,7 +487,7 @@
&has_system, &cache_filename, &dalvik_cache_exists,
&has_cache, &is_global_cache);
- if (Runtime::Current()->IsZygote()) {
+ if (Runtime::Current()->IsZygote() && !secondary_image) {
MarkZygoteStart(image_isa, Runtime::Current()->GetZygoteMaxFailedBoots());
}
@@ -517,6 +523,9 @@
} else if (!ImageCreationAllowed(is_global_cache, &reason)) {
// Whether we can write to the cache.
success = false;
+ } else if (secondary_image) {
+ reason = "Should not have to patch secondary image.";
+ success = false;
} else {
// Try to relocate.
success = RelocateImage(image_location, cache_filename.c_str(), image_isa, &reason);
@@ -609,6 +618,9 @@
return nullptr;
} else if (!ImageCreationAllowed(is_global_cache, error_msg)) {
return nullptr;
+ } else if (secondary_image) {
+ *error_msg = "Cannot compile a secondary image.";
+ return nullptr;
} else if (!GenerateImage(cache_filename, image_isa, error_msg)) {
*error_msg = StringPrintf("Failed to generate image '%s': %s",
cache_filename.c_str(), error_msg->c_str());
@@ -686,7 +698,7 @@
return nullptr;
}
- if (kIsDebugBuild) {
+ if (VLOG_IS_ON(startup)) {
LOG(INFO) << "Dumping image sections";
for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
@@ -799,11 +811,52 @@
return nullptr;
}
+ // In case of multi-images, the images are spaced apart so that the bitmaps don't overlap. We
+ // need to reserve the slack, as otherwise the large object space might allocate in there.
+ // TODO: Reconsider the multi-image layout. b/26317072
+ std::unique_ptr<MemMap> shadow_map;
+ {
+ uintptr_t image_begin = reinterpret_cast<uintptr_t>(image_header.GetImageBegin());
+ uintptr_t image_end = RoundUp(image_begin + image_header.GetImageSize(), kPageSize);
+ uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_header.GetOatFileBegin());
+ if (image_end < oat_begin) {
+ // There's a gap. Could be multi-image, could be the oat file spaced apart. Go ahead and
+ // dummy-reserve the space covered by the bitmap (which will be a shadow that introduces
+ // a gap to the next image).
+ uintptr_t heap_size = bitmap->HeapSize();
+ uintptr_t bitmap_coverage_end = RoundUp(image_begin + heap_size, kPageSize);
+ if (bitmap_coverage_end > image_end) {
+ VLOG(startup) << "Reserving bitmap shadow ["
+ << std::hex << image_end << ";"
+ << std::hex << bitmap_coverage_end << ";] (oat file begins at "
+ << std::hex << oat_begin;
+ // Note: we cannot use MemMap::Dummy here, as that won't reserve the space in 32-bit mode.
+ shadow_map.reset(MemMap::MapAnonymous("Image bitmap shadow",
+ reinterpret_cast<uint8_t*>(image_end),
+ bitmap_coverage_end - image_end,
+ PROT_NONE,
+ false,
+ false,
+ error_msg));
+ if (shadow_map == nullptr) {
+ return nullptr;
+ }
+ // madvise it away, we don't really want it, just reserve the address space.
+ // TODO: Should we use MadviseDontNeedAndZero? b/26317072
+ madvise(shadow_map->BaseBegin(), shadow_map->BaseSize(), MADV_DONTNEED);
+ }
+ }
+ }
+
// We only want the mirror object, not the ArtFields and ArtMethods.
uint8_t* const image_end =
map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End();
- std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename, image_location,
- map.release(), bitmap.release(), image_end));
+ std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
+ image_location,
+ map.release(),
+ bitmap.release(),
+ image_end,
+ shadow_map.release()));
// VerifyImageAllocations() will be called later in Runtime::Init()
// as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
@@ -826,16 +879,18 @@
Runtime* runtime = Runtime::Current();
runtime->SetInstructionSet(space->oat_file_->GetOatHeader().GetInstructionSet());
- runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
- runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
- runtime->SetImtUnimplementedMethod(
- image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod));
- runtime->SetCalleeSaveMethod(
- image_header.GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll);
- runtime->SetCalleeSaveMethod(
- image_header.GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
- runtime->SetCalleeSaveMethod(
- image_header.GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
+ if (!runtime->HasResolutionMethod()) {
+ runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
+ runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
+ runtime->SetImtUnimplementedMethod(
+ image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kCalleeSaveMethod), Runtime::kSaveAll);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kRefsOnlySaveMethod), Runtime::kRefsOnly);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kRefsAndArgsSaveMethod), Runtime::kRefsAndArgs);
+ }
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "ImageSpace::Init exiting (" << PrettyDuration(NanoTime() - start_time)
@@ -920,6 +975,67 @@
<< ",name=\"" << GetName() << "\"]";
}
+void ImageSpace::CreateMultiImageLocations(const std::string& input_image_file_name,
+ const std::string& boot_classpath,
+ std::vector<std::string>* image_file_names) {
+ DCHECK(image_file_names != nullptr);
+
+ std::vector<std::string> images;
+ Split(boot_classpath, ':', &images);
+
+ // Add the rest into the list. We have to adjust locations, possibly:
+ //
+ // For example, image_file_name is /a/b/c/d/e.art
+ // images[0] is f/c/d/e.art
+ // ----------------------------------------------
+ // images[1] is g/h/i/j.art -> /a/b/h/i/j.art
+
+ // Derive pattern.
+ std::vector<std::string> left;
+ Split(input_image_file_name, '/', &left);
+ std::vector<std::string> right;
+ Split(images[0], '/', &right);
+
+ size_t common = 1;
+ while (common < left.size() && common < right.size()) {
+ if (left[left.size() - common - 1] != right[right.size() - common - 1]) {
+ break;
+ }
+ common++;
+ }
+
+ std::vector<std::string> prefix_vector(left.begin(), left.end() - common);
+ std::string common_prefix = Join(prefix_vector, '/');
+ if (!common_prefix.empty() && common_prefix[0] != '/' && input_image_file_name[0] == '/') {
+ common_prefix = "/" + common_prefix;
+ }
+
+ // Apply pattern to images[1] .. images[n].
+ for (size_t i = 1; i < images.size(); ++i) {
+ std::string image = images[i];
+
+ size_t rslash = std::string::npos;
+ for (size_t j = 0; j < common; ++j) {
+ if (rslash != std::string::npos) {
+ rslash--;
+ }
+
+ rslash = image.rfind('/', rslash);
+ if (rslash == std::string::npos) {
+ rslash = 0;
+ }
+ if (rslash == 0) {
+ break;
+ }
+ }
+ std::string image_part = image.substr(rslash);
+
+ std::string new_image = common_prefix + (StartsWith(image_part, "/") ? "" : "/") +
+ image_part;
+ image_file_names->push_back(new_image);
+ }
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index babd672..b8ae4a0 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -43,7 +43,10 @@
// creation of the alloc space. The ReleaseOatFile will later be
// used to transfer ownership of the OatFile to the ClassLinker when
// it is initialized.
- static ImageSpace* Create(const char* image, InstructionSet image_isa, std::string* error_msg)
+ static ImageSpace* Create(const char* image,
+ InstructionSet image_isa,
+ bool secondary_image,
+ std::string* error_msg)
SHARED_REQUIRES(Locks::mutator_lock_);
// Reads the image header from the specified image location for the
@@ -119,6 +122,12 @@
bool* has_data,
bool *is_global_cache);
+ // Use the input image filename to adapt the names in the given boot classpath to establish
+ // complete locations for secondary images.
+ static void CreateMultiImageLocations(const std::string& input_image_file_name,
+ const std::string& boot_classpath,
+ std::vector<std::string>* image_filenames);
+
// Return the end of the image which includes non-heap objects such as ArtMethods and ArtFields.
uint8_t* GetImageEnd() const {
return Begin() + GetImageHeader().GetImageSize();
@@ -158,8 +167,12 @@
std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
- ImageSpace(const std::string& name, const char* image_location,
- MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap, uint8_t* end);
+ ImageSpace(const std::string& name,
+ const char* image_location,
+ MemMap* mem_map,
+ accounting::ContinuousSpaceBitmap* live_bitmap,
+ uint8_t* end,
+ MemMap* shadow_map = nullptr);
// The OatFile associated with the image during early startup to
// reserve space contiguous to the image. It is later released to
@@ -172,6 +185,10 @@
const std::string image_location_;
+ // A MemMap reserving the space of the bitmap "shadow," so that we don't allocate into it. Only
+ // used in the multi-image case.
+ std::unique_ptr<MemMap> shadow_map_;
+
private:
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
};
diff --git a/runtime/handle.h b/runtime/handle.h
index f939ec5..5b3bb60 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -64,7 +64,7 @@
ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
- // Special case so that we work with NullHandles.
+ // Special case so that we work with null handles.
return nullptr;
}
return reinterpret_cast<jobject>(reference_);
@@ -147,12 +147,12 @@
template<size_t kNumReferences> friend class StackHandleScope;
};
-// A special case of Handle that only holds references to null.
+// A special case of Handle that only holds references to null. Invalid if it goes out of
+// scope. Example: Handle<T> h = ScopedNullHandle<T>() will leave h undefined.
template<class T>
-class NullHandle : public Handle<T> {
+class ScopedNullHandle : public Handle<T> {
public:
- NullHandle() : Handle<T>(&null_ref_) {
- }
+ ScopedNullHandle() : Handle<T>(&null_ref_) {}
private:
StackReference<mirror::Object> null_ref_;
diff --git a/runtime/image.cc b/runtime/image.cc
index 7d2ef75..3856787 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -55,7 +55,6 @@
CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
- CHECK_LT(image_begin, image_roots);
CHECK_LT(image_roots, oat_file_begin);
CHECK_LE(oat_file_begin, oat_data_begin);
CHECK_LT(oat_data_begin, oat_data_end);
@@ -100,9 +99,6 @@
if (oat_file_begin_ >= oat_data_begin_) {
return false;
}
- if (image_roots_ <= image_begin_ || oat_file_begin_ <= image_roots_) {
- return false;
- }
if (!IsAligned<kPageSize>(patch_delta_)) {
return false;
}
@@ -126,7 +122,7 @@
mirror::ObjectArray<mirror::Object>* image_roots =
reinterpret_cast<mirror::ObjectArray<mirror::Object>*>(image_roots_);
mirror::ObjectArray<mirror::Object>* result =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Object>, kWithReadBarrier, true>(
+ ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Object>, kWithReadBarrier>(
&image_roots);
DCHECK_EQ(image_roots, result);
return result;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 264cd2c..9f61449 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -93,11 +93,8 @@
// We need the class to be resolved to install/uninstall stubs. Otherwise its methods
// could not be initialized or linked with regards to class inheritance.
} else {
- for (size_t i = 0, e = klass->NumDirectMethods(); i < e; i++) {
- InstallStubsForMethod(klass->GetDirectMethod(i, sizeof(void*)));
- }
- for (size_t i = 0, e = klass->NumVirtualMethods(); i < e; i++) {
- InstallStubsForMethod(klass->GetVirtualMethod(i, sizeof(void*)));
+ for (ArtMethod& method : klass->GetMethods(sizeof(void*))) {
+ InstallStubsForMethod(&method);
}
}
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index e2e4782..d035f5d 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -187,24 +187,27 @@
if (image_added_to_intern_table_) {
return nullptr;
}
- gc::space::ImageSpace* image = Runtime::Current()->GetHeap()->GetBootImageSpace();
- if (image == nullptr) {
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
return nullptr; // No image present.
}
- mirror::Object* root = image->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
- mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
const std::string utf8 = s->ToModifiedUtf8();
- for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
- mirror::DexCache* dex_cache = dex_caches->Get(i);
- const DexFile* dex_file = dex_cache->GetDexFile();
- // Binary search the dex file for the string index.
- const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
- if (string_id != nullptr) {
- uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
- // GetResolvedString() contains a RB.
- mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
- if (image_string != nullptr) {
- return image_string;
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ mirror::Object* root = image_space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
+ mirror::ObjectArray<mirror::DexCache>* dex_caches = root->AsObjectArray<mirror::DexCache>();
+ for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
+ mirror::DexCache* dex_cache = dex_caches->Get(i);
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ // Binary search the dex file for the string index.
+ const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
+ if (string_id != nullptr) {
+ uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
+ // GetResolvedString() contains a RB.
+ mirror::String* image_string = dex_cache->GetResolvedString(string_idx);
+ if (image_string != nullptr) {
+ return image_string;
+ }
}
}
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 92b6e4f..60ad0cb 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -128,8 +128,13 @@
}
StackHandleScope<1> hs(self);
Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result,
- "Class.forName", true, false);
+ UnstartedRuntimeFindClass(self,
+ h_class_name,
+ ScopedNullHandle<mirror::ClassLoader>(),
+ result,
+ "Class.forName",
+ true,
+ false);
CheckExceptionGenerateClassNotFound(self);
}
@@ -704,7 +709,7 @@
Handle<mirror::Class> h_class(hs.NewHandle(
runtime->GetClassLinker()->FindClass(self,
"Ljava/io/StringReader;",
- NullHandle<mirror::ClassLoader>())));
+ ScopedNullHandle<mirror::ClassLoader>())));
if (h_class.Get() == nullptr) {
AbortTransactionOrFail(self, "Could not find StringReader class");
return;
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 15f5122..5c44193 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -717,8 +717,8 @@
}
bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
- jstring library_path, jstring permitted_path,
- std::string* error_msg) {
+ bool is_shared_namespace, jstring library_path,
+ jstring permitted_path, std::string* error_msg) {
error_msg->clear();
// See if we've already loaded this library. If we have, and the class loader
@@ -777,8 +777,9 @@
Locks::mutator_lock_->AssertNotHeld(self);
const char* path_str = path.empty() ? nullptr : path.c_str();
- void* handle = android::OpenNativeLibrary(env, runtime_->GetTargetSdkVersion(),
- path_str, class_loader, library_path, permitted_path);
+ void* handle = android::OpenNativeLibrary(env, runtime_->GetTargetSdkVersion(), path_str,
+ class_loader, is_shared_namespace, library_path,
+ permitted_path);
bool needs_native_bridge = false;
if (handle == nullptr) {
if (android::NativeBridgeIsSupported(path_str)) {
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 8559769..8cae1e5 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -85,8 +85,9 @@
* Returns 'true' on success. On failure, sets 'error_msg' to a
* human-readable description of the error.
*/
- bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject javaLoader,
- jstring library_path, jstring permitted_path, std::string* error_msg);
+ bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
+ bool is_shared_namespace, jstring library_path, jstring permitted_path,
+ std::string* error_msg);
// Unload native libraries with cleared class loaders.
void UnloadNativeLibraries()
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
new file mode 100644
index 0000000..3c2898b
--- /dev/null
+++ b/runtime/jit/debugger_interface.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "debugger_interface.h"
+
+namespace art {
+
+// -------------------------------------------------------------------
+// Binary GDB JIT Interface as described in
+// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
+// -------------------------------------------------------------------
+extern "C" {
+ typedef enum {
+ JIT_NOACTION = 0,
+ JIT_REGISTER_FN,
+ JIT_UNREGISTER_FN
+ } JITAction;
+
+ struct JITCodeEntry {
+ JITCodeEntry* next_;
+ JITCodeEntry* prev_;
+ const uint8_t *symfile_addr_;
+ uint64_t symfile_size_;
+ };
+
+ struct JITDescriptor {
+ uint32_t version_;
+ uint32_t action_flag_;
+ JITCodeEntry* relevant_entry_;
+ JITCodeEntry* first_entry_;
+ };
+
+  // GDB will place a breakpoint in this function.
+  // To prevent GCC from inlining or removing it we add the noinline attribute
+  // and an inline assembler statement inside.
+ void __attribute__((noinline)) __jit_debug_register_code();
+ void __attribute__((noinline)) __jit_debug_register_code() {
+ __asm__("");
+ }
+
+  // GDB will inspect the contents of this descriptor.
+  // Static initialization is necessary to prevent GDB from seeing
+  // an uninitialized descriptor.
+ JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+}
+
+JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size) {
+ JITCodeEntry* entry = new JITCodeEntry;
+ entry->symfile_addr_ = symfile_addr;
+ entry->symfile_size_ = symfile_size;
+ entry->prev_ = nullptr;
+
+ // TODO: Do we need a lock here?
+ entry->next_ = __jit_debug_descriptor.first_entry_;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __jit_debug_descriptor.first_entry_ = entry;
+ __jit_debug_descriptor.relevant_entry_ = entry;
+
+ __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_register_code();
+ return entry;
+}
+
+void DeleteJITCodeEntry(JITCodeEntry* entry) {
+ // TODO: Do we need a lock here?
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __jit_debug_descriptor.first_entry_ = entry->next_;
+ }
+
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+
+ __jit_debug_descriptor.relevant_entry_ = entry;
+ __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_register_code();
+ delete entry;
+}
+
+} // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
new file mode 100644
index 0000000..a784ef5
--- /dev/null
+++ b/runtime/jit/debugger_interface.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
+#define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
+
+#include <inttypes.h>
+
+namespace art {
+
+extern "C" {
+ struct JITCodeEntry;
+}
+
+// Notify the native debugger about new JITed code by passing an in-memory ELF file.
+JITCodeEntry* CreateJITCodeEntry(const uint8_t *symfile_addr, uintptr_t symfile_size);
+
+// Notify the native debugger that JITed code has been removed.
+void DeleteJITCodeEntry(JITCodeEntry* entry);
+
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index a653440..ab70f4c 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -26,6 +26,7 @@
#include "jit_instrumentation.h"
#include "oat_file_manager.h"
#include "offline_profiling_info.h"
+#include "profile_saver.h"
#include "runtime.h"
#include "runtime_options.h"
#include "utils.h"
@@ -52,10 +53,10 @@
}
void Jit::DumpInfo(std::ostream& os) {
- os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
- << " data cache size=" << PrettySize(code_cache_->DataCacheSize())
- << " number of compiled code=" << code_cache_->NumberOfCompiledCode()
- << "\n";
+ os << "JIT code cache size=" << PrettySize(code_cache_->CodeCacheSize()) << "\n"
+ << "JIT data cache size=" << PrettySize(code_cache_->DataCacheSize()) << "\n"
+ << "JIT current capacity=" << PrettySize(code_cache_->GetCurrentCapacity()) << "\n"
+ << "JIT number of compiled code=" << code_cache_->NumberOfCompiledCode() << "\n";
cumulative_timings_.Dump(os);
}
@@ -66,7 +67,7 @@
Jit::Jit()
: jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
jit_compile_method_(nullptr), dump_info_on_shutdown_(false),
- cumulative_timings_("JIT timings") {
+ cumulative_timings_("JIT timings"), save_profiling_info_(false) {
}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
@@ -80,14 +81,12 @@
if (jit->GetCodeCache() == nullptr) {
return nullptr;
}
- jit->offline_profile_info_.reset(nullptr);
- if (options->GetSaveProfilingInfo()) {
- jit->offline_profile_info_.reset(new OfflineProfilingInfo());
- }
+ jit->save_profiling_info_ = options->GetSaveProfilingInfo();
LOG(INFO) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
<< ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
- << ", compile_threshold=" << options->GetCompileThreshold();
+ << ", compile_threshold=" << options->GetCompileThreshold()
+ << ", save_profiling_info=" << options->GetSaveProfilingInfo();
return jit.release();
}
@@ -173,34 +172,21 @@
}
}
-void Jit::SaveProfilingInfo(const std::string& filename) {
- if (offline_profile_info_ == nullptr) {
- return;
+void Jit::StartProfileSaver(const std::string& filename,
+ const std::vector<std::string>& code_paths) {
+ if (save_profiling_info_) {
+ ProfileSaver::Start(filename, code_cache_.get(), code_paths);
}
- // Note that we can't check the PrimaryOatFile when constructing the offline_profilie_info_
- // because it becomes known to the Runtime after we create and initialize the JIT.
- const OatFile* primary_oat_file = Runtime::Current()->GetOatFileManager().GetPrimaryOatFile();
- if (primary_oat_file == nullptr) {
- LOG(WARNING) << "Couldn't find a primary oat file when trying to save profile info to "
- << filename;
- return;
- }
+}
- uint64_t last_update_ns = code_cache_->GetLastUpdateTimeNs();
- if (offline_profile_info_->NeedsSaving(last_update_ns)) {
- VLOG(profiler) << "Initiate save profiling information to: " << filename;
- std::set<ArtMethod*> methods;
- {
- ScopedObjectAccess soa(Thread::Current());
- code_cache_->GetCompiledArtMethods(primary_oat_file, methods);
- }
- offline_profile_info_->SaveProfilingInfo(filename, last_update_ns, methods);
- } else {
- VLOG(profiler) << "No need to save profiling information to: " << filename;
+void Jit::StopProfileSaver() {
+ if (save_profiling_info_ && ProfileSaver::IsStarted()) {
+ ProfileSaver::Stop();
}
}
Jit::~Jit() {
+ DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted());
if (dump_info_on_shutdown_) {
DumpInfo(LOG(INFO));
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 630eba3..0edce2f 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -72,7 +72,12 @@
return instrumentation_cache_.get();
}
- void SaveProfilingInfo(const std::string& filename);
+ void StartProfileSaver(const std::string& filename, const std::vector<std::string>& code_paths);
+ void StopProfileSaver();
+
+ void DumpForSigQuit(std::ostream& os) {
+ DumpInfo(os);
+ }
private:
Jit();
@@ -93,7 +98,8 @@
std::unique_ptr<jit::JitCodeCache> code_cache_;
CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
- std::unique_ptr<OfflineProfilingInfo> offline_profile_info_;
+ bool save_profiling_info_;
+
DISALLOW_COPY_AND_ASSIGN(Jit);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 3342e92..08eac0e 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,6 +19,7 @@
#include <sstream>
#include "art_method-inl.h"
+#include "base/stl_util.h"
#include "base/time_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
@@ -316,7 +317,7 @@
// code.
GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
}
- last_update_time_ns_ = NanoTime();
+ last_update_time_ns_.StoreRelease(NanoTime());
VLOG(jit)
<< "JIT added "
<< PrettyMethod(method) << "@" << method
@@ -687,19 +688,18 @@
}
}
-void JitCodeCache::GetCompiledArtMethods(const OatFile* oat_file,
- std::set<ArtMethod*>& methods) {
+void JitCodeCache::GetCompiledArtMethods(const std::set<const std::string>& dex_base_locations,
+ std::vector<ArtMethod*>& methods) {
MutexLock mu(Thread::Current(), lock_);
for (auto it : method_code_map_) {
- if (it.second->GetDexFile()->GetOatDexFile()->GetOatFile() == oat_file) {
- methods.insert(it.second);
+ if (ContainsElement(dex_base_locations, it.second->GetDexFile()->GetBaseLocation())) {
+ methods.push_back(it.second);
}
}
}
-uint64_t JitCodeCache::GetLastUpdateTimeNs() {
- MutexLock mu(Thread::Current(), lock_);
- return last_update_time_ns_;
+uint64_t JitCodeCache::GetLastUpdateTimeNs() const {
+ return last_update_time_ns_.LoadAcquire();
}
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4e82916..1c842e4 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -146,12 +146,18 @@
void* MoreCore(const void* mspace, intptr_t increment);
- // Adds to `methods` all the compiled ArtMethods which are part of the given `oat_file`.
- void GetCompiledArtMethods(const OatFile* oat_file, std::set<ArtMethod*>& methods)
+ // Adds to `methods` all the compiled ArtMethods which are part of any of the given dex locations.
+ void GetCompiledArtMethods(const std::set<const std::string>& dex_base_locations,
+ std::vector<ArtMethod*>& methods)
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- uint64_t GetLastUpdateTimeNs() REQUIRES(!lock_);
+ uint64_t GetLastUpdateTimeNs() const;
+
+ size_t GetCurrentCapacity() REQUIRES(!lock_) {
+ MutexLock lock(Thread::Current(), lock_);
+ return current_capacity_;
+ }
private:
// Take ownership of maps.
@@ -243,7 +249,8 @@
bool has_done_one_collection_ GUARDED_BY(lock_);
// Last time the the code_cache was updated.
- uint64_t last_update_time_ns_ GUARDED_BY(lock_);
+ // It is atomic to avoid locking when reading it.
+ Atomic<uint64_t> last_update_time_ns_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index 7615870..5dc0e45 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -17,35 +17,21 @@
#include "offline_profiling_info.h"
#include <fstream>
-#include <set>
+#include <vector>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include "art_method-inl.h"
#include "base/mutex.h"
+#include "base/stl_util.h"
#include "jit/profiling_info.h"
#include "safe_map.h"
-#include "utils.h"
namespace art {
-// An arbitrary value to throttle save requests. Set to 500ms for now.
-static constexpr const uint64_t kMilisecondsToNano = 1000000;
-static constexpr const uint64_t kMinimumTimeBetweenSavesNs = 500 * kMilisecondsToNano;
-
-bool OfflineProfilingInfo::NeedsSaving(uint64_t last_update_time_ns) const {
- return last_update_time_ns - last_update_time_ns_.LoadRelaxed() > kMinimumTimeBetweenSavesNs;
-}
-
void OfflineProfilingInfo::SaveProfilingInfo(const std::string& filename,
- uint64_t last_update_time_ns,
- const std::set<ArtMethod*>& methods) {
- if (!NeedsSaving(last_update_time_ns)) {
- VLOG(profiler) << "No need to saved profile info to " << filename;
- return;
- }
-
+ const std::vector<ArtMethod*>& methods) {
if (methods.empty()) {
VLOG(profiler) << "No info to save to " << filename;
return;
@@ -62,9 +48,8 @@
// This doesn't need locking because we are trying to lock the file for exclusive
// access and fail immediately if we can't.
if (Serialize(filename, info)) {
- last_update_time_ns_.StoreRelaxed(last_update_time_ns);
- VLOG(profiler) << "Successfully saved profile info to "
- << filename << " with time stamp: " << last_update_time_ns;
+ VLOG(profiler) << "Successfully saved profile info to " << filename
+ << " Size: " << GetFileSizeBytes(filename);
}
}
@@ -146,11 +131,11 @@
/**
* Serialization format:
- * multidex_suffix1,dex_location_checksum1,method_id11,method_id12...
- * multidex_suffix2,dex_location_checksum2,method_id21,method_id22...
+ * dex_location1,dex_location_checksum1,method_id11,method_id12...
+ * dex_location2,dex_location_checksum2,method_id21,method_id22...
* e.g.
- * ,131232145,11,23,454,54 -> this is the first dex file, it has no multidex suffix
- * :classes5.dex,218490184,39,13,49,1 -> this is the fifth dex file.
+ * /system/priv-app/app/app.apk,131232145,11,23,454,54
+ * /system/priv-app/app/app.apk:classes5.dex,218490184,39,13,49,1
**/
bool OfflineProfilingInfo::Serialize(const std::string& filename,
const DexFileToMethodsMap& info) const {
@@ -167,7 +152,7 @@
const DexFile* dex_file = it.first;
const std::set<uint32_t>& method_dex_ids = it.second;
- os << DexFile::GetMultiDexSuffix(dex_file->GetLocation())
+ os << dex_file->GetLocation()
<< kFieldSeparator
<< dex_file->GetLocationChecksum();
for (auto method_it : method_dex_ids) {
@@ -214,7 +199,7 @@
return false;
}
- const std::string& multidex_suffix = parts[0];
+ const std::string& dex_location = parts[0];
uint32_t checksum;
if (!ParseInt(parts[1].c_str(), &checksum)) {
return false;
@@ -222,7 +207,7 @@
const DexFile* current_dex_file = nullptr;
for (auto dex_file : dex_files) {
- if (DexFile::GetMultiDexSuffix(dex_file->GetLocation()) == multidex_suffix) {
+ if (dex_file->GetLocation() == dex_location) {
if (checksum != dex_file->GetLocationChecksum()) {
LOG(WARNING) << "Checksum mismatch for "
<< dex_file->GetLocation() << " when parsing " << filename_;
@@ -284,15 +269,15 @@
return true;
}
if (kIsDebugBuild) {
- // In debug builds verify that the multidex suffixes are unique.
- std::set<std::string> suffixes;
+ // In debug builds verify that the locations are unique.
+ std::set<std::string> locations;
for (auto dex_file : dex_files) {
- std::string multidex_suffix = DexFile::GetMultiDexSuffix(dex_file->GetLocation());
- DCHECK(suffixes.find(multidex_suffix) == suffixes.end())
+ const std::string& location = dex_file->GetLocation();
+ DCHECK(locations.find(location) == locations.end())
<< "DexFiles appear to belong to different apks."
- << " There are multiple dex files with the same multidex suffix: "
- << multidex_suffix;
- suffixes.insert(multidex_suffix);
+ << " There are multiple dex files with the same location: "
+ << location;
+ locations.insert(location);
}
}
info_.clear();
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index 90bda60..32d4c5b 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_JIT_OFFLINE_PROFILING_INFO_H_
#include <set>
+#include <vector>
#include "atomic.h"
#include "dex_file.h"
@@ -36,10 +37,7 @@
*/
class OfflineProfilingInfo {
public:
- bool NeedsSaving(uint64_t last_update_time_ns) const;
- void SaveProfilingInfo(const std::string& filename,
- uint64_t last_update_time_ns,
- const std::set<ArtMethod*>& methods);
+ void SaveProfilingInfo(const std::string& filename, const std::vector<ArtMethod*>& methods);
private:
// Map identifying the location of the profiled methods.
@@ -49,10 +47,6 @@
void AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info)
SHARED_REQUIRES(Locks::mutator_lock_);
bool Serialize(const std::string& filename, const DexFileToMethodsMap& info) const;
-
- // TODO(calin): Verify if Atomic is really needed (are we sure to be called from a
- // single thread?)
- Atomic<uint64_t> last_update_time_ns_;
};
/**
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
new file mode 100644
index 0000000..0278138
--- /dev/null
+++ b/runtime/jit/profile_saver.cc
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profile_saver.h"
+
+#include "art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "oat_file_manager.h"
+
+namespace art {
+
+// An arbitrary value to throttle save requests. Set to 500ms for now.
+static constexpr const uint64_t kMillisecondsToNanos = 1000000;
+static constexpr const uint64_t kMinimumTimeBetweenCodeCacheUpdatesNs = 500 * kMillisecondsToNanos;
+
+// TODO: read the constants from ProfileOptions,
+// Add a random delay each time we go to sleep so that we don't hammer the CPU
+// with all profile savers running at the same time.
+static constexpr const uint64_t kRandomDelayMaxMs = 10 * 1000; // 10 seconds
+static constexpr const uint64_t kMaxBackoffMs = 4 * 60 * 1000; // 4 minutes
+static constexpr const uint64_t kSavePeriodMs = 4 * 1000; // 4 seconds
+static constexpr const double kBackoffCoef = 1.5;
+
+static constexpr const uint32_t kMinimumNrOrMethodsToSave = 10;
+
+ProfileSaver* ProfileSaver::instance_ = nullptr;
+pthread_t ProfileSaver::profiler_pthread_ = 0U;
+
+ProfileSaver::ProfileSaver(const std::string& output_filename,
+ jit::JitCodeCache* jit_code_cache,
+ const std::vector<std::string>& code_paths)
+ : output_filename_(output_filename),
+ jit_code_cache_(jit_code_cache),
+ tracked_dex_base_locations_(code_paths.begin(), code_paths.end()),
+ code_cache_last_update_time_ns_(0),
+ shutting_down_(false),
+ wait_lock_("ProfileSaver wait lock"),
+ period_condition_("ProfileSaver period condition", wait_lock_) {
+}
+
+void ProfileSaver::Run() {
+ srand(MicroTime() * getpid());
+ Thread* self = Thread::Current();
+
+ uint64_t save_period_ms = kSavePeriodMs;
+ VLOG(profiler) << "Save profiling information every " << save_period_ms << " ms";
+ while (true) {
+ if (ShuttingDown(self)) {
+ break;
+ }
+
+ uint64_t random_sleep_delay_ms = rand() % kRandomDelayMaxMs;
+ uint64_t sleep_time_ms = save_period_ms + random_sleep_delay_ms;
+ {
+ MutexLock mu(self, wait_lock_);
+ period_condition_.TimedWait(self, sleep_time_ms, 0);
+ }
+
+ if (ShuttingDown(self)) {
+ break;
+ }
+
+ if (!ProcessProfilingInfo() && save_period_ms < kMaxBackoffMs) {
+ // If we don't need to save now it is less likely that we will need to do
+ // so in the future. Increase the time between saves according to the
+ // kBackoffCoef, but make it no larger than kMaxBackoffMs.
+ save_period_ms = static_cast<uint64_t>(kBackoffCoef * save_period_ms);
+ } else {
+ // Reset the period to the initial value as it's highly likely to JIT again.
+ save_period_ms = kSavePeriodMs;
+ }
+ }
+}
+
+bool ProfileSaver::ProcessProfilingInfo() {
+ VLOG(profiler) << "Initiating save profiling information to: " << output_filename_;
+
+ uint64_t last_update_time_ns = jit_code_cache_->GetLastUpdateTimeNs();
+ if (last_update_time_ns - code_cache_last_update_time_ns_
+ < kMinimumTimeBetweenCodeCacheUpdatesNs) {
+ VLOG(profiler) << "Not enough time has passed since the last code cache update.";
+ return false;
+ }
+
+ uint64_t start = NanoTime();
+ code_cache_last_update_time_ns_ = last_update_time_ns;
+ std::vector<ArtMethod*> methods;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ jit_code_cache_->GetCompiledArtMethods(tracked_dex_base_locations_, methods);
+ }
+ if (methods.size() < kMinimumNrOrMethodsToSave) {
+ VLOG(profiler) << "Not enough information to save. Nr of methods: " << methods.size();
+ return false;
+ }
+ offline_profiling_info_.SaveProfilingInfo(output_filename_, methods);
+
+ VLOG(profiler) << "Saved profile time: " << PrettyDuration(NanoTime() - start);
+
+ return true;
+}
+
+void* ProfileSaver::RunProfileSaverThread(void* arg) {
+ Runtime* runtime = Runtime::Current();
+ ProfileSaver* profile_saver = reinterpret_cast<ProfileSaver*>(arg);
+
+ CHECK(runtime->AttachCurrentThread("Profile Saver",
+ /*as_daemon*/true,
+ runtime->GetSystemThreadGroup(),
+ /*create_peer*/true));
+ profile_saver->Run();
+
+ runtime->DetachCurrentThread();
+ VLOG(profiler) << "Profile saver shutdown";
+ return nullptr;
+}
+
+void ProfileSaver::Start(const std::string& output_filename,
+ jit::JitCodeCache* jit_code_cache,
+ const std::vector<std::string>& code_paths) {
+ DCHECK(Runtime::Current()->UseJit());
+ DCHECK(!output_filename.empty());
+ DCHECK(jit_code_cache != nullptr);
+
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ // Don't start two profile saver threads.
+ if (instance_ != nullptr) {
+ DCHECK(false) << "Tried to start two profile savers";
+ return;
+ }
+
+ VLOG(profiler) << "Starting profile saver using output file: " << output_filename
+ << ". Tracking: " << Join(code_paths, ':');
+
+ instance_ = new ProfileSaver(output_filename, jit_code_cache, code_paths);
+
+ // Create a new thread which does the saving.
+ CHECK_PTHREAD_CALL(
+ pthread_create,
+ (&profiler_pthread_, nullptr, &RunProfileSaverThread, reinterpret_cast<void*>(instance_)),
+ "Profile saver thread");
+}
+
+void ProfileSaver::Stop() {
+ ProfileSaver* profile_saver = nullptr;
+ pthread_t profiler_pthread = 0U;
+
+ {
+ MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_);
+ if (instance_ == nullptr) {
+ DCHECK(false) << "Tried to stop a profile saver which was not started";
+ return;
+ }
+ VLOG(profiler) << "Stopping profile saver thread for file: " << instance_->output_filename_;
+ profile_saver = instance_;
+ profiler_pthread = profiler_pthread_;
+ if (instance_->shutting_down_) {
+ DCHECK(false) << "Tried to stop the profile saver twice";
+ return;
+ }
+ instance_->shutting_down_ = true;
+ }
+
+ {
+ // Wake up the saver thread if it is sleeping to allow for a clean exit.
+ MutexLock wait_mutex(Thread::Current(), profile_saver->wait_lock_);
+ profile_saver->period_condition_.Signal(Thread::Current());
+ }
+
+ // Wait for the saver thread to stop.
+ CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
+
+ {
+ MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_);
+ instance_ = nullptr;
+ profiler_pthread_ = 0U;
+ }
+ delete profile_saver;
+}
+
+bool ProfileSaver::ShuttingDown(Thread* self) {
+ MutexLock mu(self, *Locks::profiler_lock_);
+ return shutting_down_;
+}
+
+bool ProfileSaver::IsStarted() {
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ return instance_ != nullptr;
+}
+
+} // namespace art
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
new file mode 100644
index 0000000..88efd41
--- /dev/null
+++ b/runtime/jit/profile_saver.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_PROFILE_SAVER_H_
+#define ART_RUNTIME_JIT_PROFILE_SAVER_H_
+
+#include "base/mutex.h"
+#include "jit_code_cache.h"
+#include "offline_profiling_info.h"
+
+namespace art {
+
+class ProfileSaver {
+ public:
+ // Starts the profile saver thread.
+ static void Start(const std::string& output_filename,
+ jit::JitCodeCache* jit_code_cache,
+ const std::vector<std::string>& code_paths)
+ REQUIRES(!Locks::profiler_lock_, !wait_lock_);
+
+ // Stops the profile saver thread.
+ // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
+ static void Stop()
+ REQUIRES(!Locks::profiler_lock_, !wait_lock_)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ // Returns true if the profile saver is started.
+ static bool IsStarted() REQUIRES(!Locks::profiler_lock_);
+
+ private:
+ ProfileSaver(const std::string& output_filename,
+ jit::JitCodeCache* jit_code_cache,
+ const std::vector<std::string>& code_paths);
+
+ // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
+ static void* RunProfileSaverThread(void* arg)
+ REQUIRES(!Locks::profiler_lock_, !wait_lock_)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ // The run loop for the saver.
+ void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_);
+ // Processes the existing profiling info from the jit code cache and returns
+ // true if it needed to be saved to disk.
+ bool ProcessProfilingInfo();
+ // Returns true if the saver is shutting down (ProfileSaver::Stop() has been called).
+ bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
+
+ // The only instance of the saver.
+ static ProfileSaver* instance_ GUARDED_BY(Locks::profiler_lock_);
+ // Profile saver thread.
+ static pthread_t profiler_pthread_ GUARDED_BY(Locks::profiler_lock_);
+
+ const std::string output_filename_;
+ jit::JitCodeCache* jit_code_cache_;
+ const std::set<std::string> tracked_dex_base_locations_;
+ OfflineProfilingInfo offline_profiling_info_;
+ uint64_t code_cache_last_update_time_ns_;
+ bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
+
+ // Save period condition support.
+ Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ConditionVariable period_condition_ GUARDED_BY(wait_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(ProfileSaver);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_PROFILE_SAVER_H_
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index dcb346c..3820592 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -71,12 +71,12 @@
break;
}
}
- DCHECK(cache != nullptr);
return cache;
}
void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) {
InlineCache* cache = GetInlineCache(dex_pc);
+ CHECK(cache != nullptr) << PrettyMethod(method_) << "@" << dex_pc;
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
mirror::Class* existing = cache->classes_[i].Read();
if (existing == cls) {
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 5e3fa19..cb67ee3 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -316,12 +316,7 @@
static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig)
SHARED_REQUIRES(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& method : c->GetDirectMethods(pointer_size)) {
- if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
- return &method;
- }
- }
- for (auto& method : c->GetVirtualMethods(pointer_size)) {
+ for (auto& method : c->GetMethods(pointer_size)) {
if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
return &method;
}
@@ -2220,13 +2215,7 @@
size_t unregistered_count = 0;
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : c->GetDirectMethods(pointer_size)) {
- if (m.IsNative()) {
- m.UnregisterNative();
- unregistered_count++;
- }
- }
- for (auto& m : c->GetVirtualMethods(pointer_size)) {
+ for (auto& m : c->GetMethods(pointer_size)) {
if (m.IsNative()) {
m.UnregisterNative();
unregistered_count++;
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index b41d16b..c718466 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -2091,8 +2091,7 @@
MakeExecutable(nullptr, "java.lang.Class");
MakeExecutable(nullptr, "java.lang.Object");
MakeExecutable(nullptr, "java.nio.DirectByteBuffer");
- MakeExecutable(nullptr, "java.nio.MemoryBlock");
- MakeExecutable(nullptr, "java.nio.MemoryBlock$UnmanagedBlock");
+ MakeExecutable(nullptr, "java.nio.Bits");
MakeExecutable(nullptr, "java.nio.MappedByteBuffer");
MakeExecutable(nullptr, "java.nio.ByteBuffer");
MakeExecutable(nullptr, "java.nio.Buffer");
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 9e416dc..ef4fe15 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -22,13 +22,14 @@
#include "art_field-inl.h"
#include "art_method.h"
#include "art_method-inl.h"
+#include "base/array_slice.h"
+#include "base/length_prefixed_array.h"
#include "class_loader.h"
#include "common_throws.h"
#include "dex_cache.h"
#include "dex_file.h"
#include "gc/heap-inl.h"
#include "iftable.h"
-#include "length_prefixed_array.h"
#include "object_array-inl.h"
#include "read_barrier-inl.h"
#include "reference-inl.h"
@@ -62,61 +63,148 @@
return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_));
}
-inline LengthPrefixedArray<ArtMethod>* Class::GetDirectMethodsPtr() {
+inline uint32_t Class::GetCopiedMethodsStartOffset() {
+ return GetFieldShort(OFFSET_OF_OBJECT_MEMBER(Class, copied_methods_offset_));
+}
+
+inline uint32_t Class::GetDirectMethodsStartOffset() {
+ return 0;
+}
+
+inline uint32_t Class::GetVirtualMethodsStartOffset() {
+ return GetFieldShort(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_offset_));
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(size_t pointer_size) {
DCHECK(IsLoaded() || IsErroneous());
- return GetDirectMethodsPtrUnchecked();
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ return GetDirectMethodsSliceUnchecked(pointer_size);
}
-inline LengthPrefixedArray<ArtMethod>* Class::GetDirectMethodsPtrUnchecked() {
+inline ArraySlice<ArtMethod> Class::GetDirectMethodsSliceUnchecked(size_t pointer_size) {
+ return ArraySlice<ArtMethod>(GetMethodsPtr(),
+ GetDirectMethodsStartOffset(),
+ GetVirtualMethodsStartOffset(),
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(size_t pointer_size) {
+ DCHECK(IsLoaded() || IsErroneous());
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ return GetDeclaredMethodsSliceUnchecked(pointer_size);
+}
+
+inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSliceUnchecked(size_t pointer_size) {
+ return ArraySlice<ArtMethod>(GetMethodsPtr(),
+ GetDirectMethodsStartOffset(),
+ GetCopiedMethodsStartOffset(),
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
+}
+template<VerifyObjectFlags kVerifyFlags>
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(size_t pointer_size) {
+ DCHECK(IsLoaded() || IsErroneous());
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size);
+}
+
+inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSliceUnchecked(size_t pointer_size) {
+ return ArraySlice<ArtMethod>(GetMethodsPtr(),
+ GetVirtualMethodsStartOffset(),
+ GetCopiedMethodsStartOffset(),
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSlice(size_t pointer_size) {
+ DCHECK(IsLoaded() || IsErroneous());
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ return GetVirtualMethodsSliceUnchecked(pointer_size);
+}
+
+inline ArraySlice<ArtMethod> Class::GetVirtualMethodsSliceUnchecked(size_t pointer_size) {
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ return ArraySlice<ArtMethod>(methods,
+ GetVirtualMethodsStartOffset(),
+ NumMethods(),
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSlice(size_t pointer_size) {
+ DCHECK(IsLoaded() || IsErroneous());
+ DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
+ return GetCopiedMethodsSliceUnchecked(pointer_size);
+}
+
+inline ArraySlice<ArtMethod> Class::GetCopiedMethodsSliceUnchecked(size_t pointer_size) {
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ return ArraySlice<ArtMethod>(methods,
+ GetCopiedMethodsStartOffset(),
+ NumMethods(),
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
+}
+
+inline LengthPrefixedArray<ArtMethod>* Class::GetMethodsPtr() {
return reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- GetField64(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
+ GetField64(OFFSET_OF_OBJECT_MEMBER(Class, methods_)));
}
-inline LengthPrefixedArray<ArtMethod>* Class::GetVirtualMethodsPtrUnchecked() {
- return reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- GetField64(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_)));
+template<VerifyObjectFlags kVerifyFlags>
+inline ArraySlice<ArtMethod> Class::GetMethodsSlice(size_t pointer_size) {
+ DCHECK(IsLoaded() || IsErroneous());
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ return ArraySlice<ArtMethod>(methods,
+ 0,
+ NumMethods(),
+ ArtMethod::Size(pointer_size),
+ ArtMethod::Alignment(pointer_size));
}
-inline void Class::SetDirectMethodsPtr(LengthPrefixedArray<ArtMethod>* new_direct_methods) {
- DCHECK(GetDirectMethodsPtrUnchecked() == nullptr);
- SetDirectMethodsPtrUnchecked(new_direct_methods);
-}
-inline void Class::SetDirectMethodsPtrUnchecked(
- LengthPrefixedArray<ArtMethod>* new_direct_methods) {
- SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_),
- reinterpret_cast<uint64_t>(new_direct_methods));
+inline uint32_t Class::NumMethods() {
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ return (methods == nullptr) ? 0 : methods->size();
}
inline ArtMethod* Class::GetDirectMethodUnchecked(size_t i, size_t pointer_size) {
CheckPointerSize(pointer_size);
- auto* methods = GetDirectMethodsPtrUnchecked();
- DCHECK(methods != nullptr);
- return &methods->At(i,
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return &GetDirectMethodsSliceUnchecked(pointer_size).At(i);
}
inline ArtMethod* Class::GetDirectMethod(size_t i, size_t pointer_size) {
CheckPointerSize(pointer_size);
- auto* methods = GetDirectMethodsPtr();
- DCHECK(methods != nullptr);
- return &methods->At(i,
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return &GetDirectMethodsSlice(pointer_size).At(i);
}
-template<VerifyObjectFlags kVerifyFlags>
-inline LengthPrefixedArray<ArtMethod>* Class::GetVirtualMethodsPtr() {
- DCHECK(IsLoaded<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
- return GetVirtualMethodsPtrUnchecked();
+inline void Class::SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
+ uint32_t num_direct,
+ uint32_t num_virtual) {
+ DCHECK(GetMethodsPtr() == nullptr);
+ SetMethodsPtrUnchecked(new_methods, num_direct, num_virtual);
}
-inline void Class::SetVirtualMethodsPtr(LengthPrefixedArray<ArtMethod>* new_virtual_methods) {
- // TODO: we reassign virtual methods to grow the table for miranda
- // methods.. they should really just be assigned once.
- SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_),
- reinterpret_cast<uint64_t>(new_virtual_methods));
+
+inline void Class::SetMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_methods,
+ uint32_t num_direct,
+ uint32_t num_virtual) {
+ DCHECK_LE(num_direct + num_virtual, (new_methods == nullptr) ? 0 : new_methods->size());
+ SetMethodsPtrInternal(new_methods);
+ SetFieldShort<false>(OFFSET_OF_OBJECT_MEMBER(Class, copied_methods_offset_),
+ dchecked_integral_cast<uint16_t>(num_direct + num_virtual));
+ SetFieldShort<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_offset_),
+ dchecked_integral_cast<uint16_t>(num_direct));
+}
+
+inline void Class::SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods) {
+ SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, methods_),
+ reinterpret_cast<uint64_t>(new_methods));
}
template<VerifyObjectFlags kVerifyFlags>
@@ -135,11 +223,7 @@
inline ArtMethod* Class::GetVirtualMethodUnchecked(size_t i, size_t pointer_size) {
CheckPointerSize(pointer_size);
- LengthPrefixedArray<ArtMethod>* methods = GetVirtualMethodsPtrUnchecked();
- DCHECK(methods != nullptr);
- return &methods->At(i,
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return &GetVirtualMethodsSliceUnchecked(pointer_size).At(i);
}
inline PointerArray* Class::GetVTable() {
@@ -833,24 +917,42 @@
CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus();
}
}
- for (ArtMethod& method : GetDirectMethods(pointer_size)) {
- method.VisitRoots(visitor, pointer_size);
- }
- for (ArtMethod& method : GetVirtualMethods(pointer_size)) {
+ for (ArtMethod& method : GetMethods(pointer_size)) {
method.VisitRoots(visitor, pointer_size);
}
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
- return MakeIterationRangeFromLengthPrefixedArray(GetDirectMethodsPtrUnchecked(),
- ArtMethod::Size(pointer_size),
- ArtMethod::Alignment(pointer_size));
+ return GetDirectMethodsSliceUnchecked(pointer_size).AsRange();
+}
+
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredMethods(
+ size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ return GetDeclaredMethodsSliceUnchecked(pointer_size).AsRange();
+}
+
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetDeclaredVirtualMethods(
+ size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size).AsRange();
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetVirtualMethods(size_t pointer_size) {
CheckPointerSize(pointer_size);
- return MakeIterationRangeFromLengthPrefixedArray(GetVirtualMethodsPtrUnchecked(),
+ return GetVirtualMethodsSliceUnchecked(pointer_size).AsRange();
+}
+
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetCopiedMethods(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ return GetCopiedMethodsSliceUnchecked(pointer_size).AsRange();
+}
+
+
+inline IterationRange<StrideIterator<ArtMethod>> Class::GetMethods(size_t pointer_size) {
+ CheckPointerSize(pointer_size);
+ return MakeIterationRangeFromLengthPrefixedArray(GetMethodsPtr(),
ArtMethod::Size(pointer_size),
ArtMethod::Alignment(pointer_size));
}
@@ -918,13 +1020,15 @@
}
inline uint32_t Class::NumDirectMethods() {
- LengthPrefixedArray<ArtMethod>* arr = GetDirectMethodsPtrUnchecked();
- return arr != nullptr ? arr->size() : 0u;
+ return GetVirtualMethodsStartOffset();
+}
+
+inline uint32_t Class::NumDeclaredVirtualMethods() {
+ return GetCopiedMethodsStartOffset() - GetVirtualMethodsStartOffset();
}
inline uint32_t Class::NumVirtualMethods() {
- LengthPrefixedArray<ArtMethod>* arr = GetVirtualMethodsPtrUnchecked();
- return arr != nullptr ? arr->size() : 0u;
+ return NumMethods() - GetVirtualMethodsStartOffset();
}
inline uint32_t Class::NumInstanceFields() {
@@ -952,16 +1056,11 @@
if (ifields != new_ifields) {
dest->SetIFieldsPtrUnchecked(new_ifields);
}
- // Update direct and virtual method arrays.
- LengthPrefixedArray<ArtMethod>* direct_methods = GetDirectMethodsPtr();
- LengthPrefixedArray<ArtMethod>* new_direct_methods = visitor(direct_methods);
- if (direct_methods != new_direct_methods) {
- dest->SetDirectMethodsPtrUnchecked(new_direct_methods);
- }
- LengthPrefixedArray<ArtMethod>* virtual_methods = GetVirtualMethodsPtr();
- LengthPrefixedArray<ArtMethod>* new_virtual_methods = visitor(virtual_methods);
- if (virtual_methods != new_virtual_methods) {
- dest->SetVirtualMethodsPtr(new_virtual_methods);
+ // Update method array.
+ LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
+ LengthPrefixedArray<ArtMethod>* new_methods = visitor(methods);
+ if (methods != new_methods) {
+ dest->SetMethodsPtrInternal(new_methods);
}
// Update dex cache strings.
GcRoot<mirror::String>* strings = GetDexCacheStrings();
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 05a9039..66060f2 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -457,6 +457,10 @@
return nullptr;
}
+// TODO These should maybe be changed to be named FindOwnedVirtualMethod or something similar
+// because they do not only find 'declared' methods and will return copied methods. This behavior is
+// desired and correct but the naming can lead to confusion because in the java language declared
+// excludes interface methods which might be found by this.
ArtMethod* Class::FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
size_t pointer_size) {
for (auto& method : GetVirtualMethods(pointer_size)) {
@@ -482,10 +486,8 @@
ArtMethod* Class::FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
size_t pointer_size) {
if (GetDexCache() == dex_cache) {
- for (auto& method : GetVirtualMethods(pointer_size)) {
- // A miranda method may have a different DexCache and is always created by linking,
- // never *declared* in the class.
- if (method.GetDexMethodIndex() == dex_method_idx && !method.IsMiranda()) {
+ for (auto& method : GetDeclaredVirtualMethods(pointer_size)) {
+ if (method.GetDexMethodIndex() == dex_method_idx) {
return &method;
}
}
@@ -725,12 +727,7 @@
void Class::SetPreverifiedFlagOnAllMethods(size_t pointer_size) {
DCHECK(IsVerified());
- for (auto& m : GetDirectMethods(pointer_size)) {
- if (!m.IsNative() && m.IsInvokable()) {
- m.SetPreverified();
- }
- }
- for (auto& m : GetVirtualMethods(pointer_size)) {
+ for (auto& m : GetMethods(pointer_size)) {
if (!m.IsNative() && m.IsInvokable()) {
m.SetPreverified();
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 0ab5b97..489c269 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -44,6 +44,7 @@
struct ClassOffsets;
template<class T> class Handle;
template<typename T> class LengthPrefixedArray;
+template<typename T> class ArraySlice;
class Signature;
class StringPiece;
template<size_t kNumReferences> class PACKED(4) StackHandleScope;
@@ -559,8 +560,8 @@
// The size of java.lang.Class.class.
static uint32_t ClassClassSize(size_t pointer_size) {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 65;
- return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0, pointer_size);
+ uint32_t vtable_entries = Object::kVTableLength + 69;
+ return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
// The size of a java.lang.Class representing a primitive such as int.class.
@@ -702,12 +703,24 @@
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- LengthPrefixedArray<ArtMethod>* GetDirectMethodsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
+ SHARED_REQUIRES(Locks::mutator_lock_);
- void SetDirectMethodsPtr(LengthPrefixedArray<ArtMethod>* new_direct_methods)
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
+ uint32_t num_direct,
+ uint32_t num_virtual)
SHARED_REQUIRES(Locks::mutator_lock_);
// Used by image writer.
- void SetDirectMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_direct_methods)
+ void SetMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_methods,
+ uint32_t num_direct,
+ uint32_t num_virtual)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, size_t pointer_size)
@@ -723,18 +736,50 @@
ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetVirtualMethodsPtr()
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
+ size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
+ size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- void SetVirtualMethodsPtr(LengthPrefixedArray<ArtMethod>* new_virtual_methods)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Returns the number of non-inherited virtual methods.
+ // Returns the number of non-inherited virtual methods (sum of declared and copied methods).
ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Returns the number of copied virtual methods.
+ ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Returns the number of declared virtual methods.
+ ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE uint32_t NumMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(size_t i, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1155,10 +1200,19 @@
return pointer_size;
}
- ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetDirectMethodsPtrUnchecked()
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
- ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetVirtualMethodsPtrUnchecked()
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(size_t pointer_size)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
// Fix up all of the native pointers in the class by running them through the visitor. Only sets
@@ -1169,6 +1223,9 @@
SHARED_REQUIRES(Locks::mutator_lock_);
private:
+ ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void SetVerifyError(Object* klass) SHARED_REQUIRES(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
@@ -1194,6 +1251,15 @@
IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked()
SHARED_REQUIRES(Locks::mutator_lock_);
+ // The index in the methods_ array where the first declared virtual method is.
+ ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // The index in the methods_ array where the first direct method is.
+ ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // The index in the methods_ array where the first copied method is.
+ ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+
bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
@@ -1206,6 +1272,11 @@
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // 'Class' Object Fields
+ // Order governed by java field ordering. See art::ClassLinker::LinkFields.
+
+ HeapReference<Object> annotation_type_;
+
// Defining class loader, or null for the "bootstrap" system loader.
HeapReference<ClassLoader> class_loader_;
@@ -1251,12 +1322,12 @@
// virtual_ methods_ for miranda methods.
HeapReference<PointerArray> vtable_;
+ // Access flags; low 16 bits are defined by VM spec.
+ uint32_t access_flags_;
+
// Short cuts to dex_cache_ member for fast compiled code access.
uint64_t dex_cache_strings_;
- // static, private, and <init> methods. Pointer to an ArtMethod length-prefixed array.
- uint64_t direct_methods_;
-
// instance fields
//
// These describe the layout of the contents of an Object.
@@ -1268,16 +1339,24 @@
// ArtFields.
uint64_t ifields_;
+ // Pointer to an ArtMethod length-prefixed array. All the methods where this class is the place
+ // where they are logically defined. This includes all private, static, final and virtual methods
+ // as well as inherited default methods and miranda methods.
+ //
+ // The slice methods_ [0, virtual_methods_offset_) are the direct (static, private, init) methods
+ // declared by this class.
+ //
+ // The slice methods_ [virtual_methods_offset_, copied_methods_offset_) are the virtual methods
+ // declared by this class.
+ //
+ // The slice methods_ [copied_methods_offset_, |methods_|) are the methods that are copied from
+ // interfaces such as miranda or default methods. These are copied for resolution purposes as this
+ // class is where they are (logically) declared as far as the virtual dispatch is concerned.
+ uint64_t methods_;
+
// Static fields length-prefixed array.
uint64_t sfields_;
- // Virtual methods defined in this class; invoked through vtable. Pointer to an ArtMethod
- // length-prefixed array.
- uint64_t virtual_methods_;
-
- // Access flags; low 16 bits are defined by VM spec.
- uint32_t access_flags_;
-
// Class flags to help speed up visiting object references.
uint32_t class_flags_;
@@ -1317,6 +1396,14 @@
// State of class initialization.
Status status_;
+ // The offset of the first virtual method that is copied from an interface. This includes miranda,
+ // default, and default-conflict methods. Having a hard limit of ((2 << 16) - 1) for methods
+ // defined on a single class is well established in Java so we will use only uint16_t's here.
+ uint16_t copied_methods_offset_;
+
+ // The offset of the first declared virtual methods in the methods_ array.
+ uint16_t virtual_methods_offset_;
+
// TODO: ?
// initiating class loader list
// NOTE: for classes with low serialNumber, these are unused, and the
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 01e99b9..bd4a9c1 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -23,7 +23,7 @@
namespace mirror {
inline uint32_t Reference::ClassSize(size_t pointer_size) {
- uint32_t vtable_entries = Object::kVTableLength + 5;
+ uint32_t vtable_entries = Object::kVTableLength + 4;
return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0, pointer_size);
}
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 28a830d..cdf468c 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -33,8 +33,8 @@
namespace mirror {
inline uint32_t String::ClassSize(size_t pointer_size) {
- uint32_t vtable_entries = Object::kVTableLength + 52;
- return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2, pointer_size);
+ uint32_t vtable_entries = Object::kVTableLength + 53;
+ return Class::ComputeClassSize(true, vtable_entries, 0, 2, 0, 1, 2, pointer_size);
}
// Sets string count in the allocation code path to ensure it is guarded by a CAS.
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index e215994..f068b3e 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -56,9 +56,9 @@
void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
+ SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
} else {
- SetFieldObjectVolatile<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), state);
+ SetFieldObjectVolatile<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
}
}
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 0f488dc..6aacc8d 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -60,16 +60,16 @@
private:
Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_));
+ return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
}
Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_trace_));
+ return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
}
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+ HeapReference<Object> backtrace_; // Note this is Java volatile:
HeapReference<Throwable> cause_;
HeapReference<String> detail_message_;
- HeapReference<Object> stack_state_; // Note this is Java volatile:
HeapReference<Object> stack_trace_;
HeapReference<Object> suppressed_exceptions_;
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index b49d68f..4b24f82 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -224,7 +224,6 @@
static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
Runtime* runtime = Runtime::Current();
runtime->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
- runtime->UpdateProfilerState(process_state);
}
static void VMRuntime_trimHeap(JNIEnv* env, jobject) {
@@ -566,17 +565,25 @@
*/
static void VMRuntime_registerAppInfo(JNIEnv* env,
jclass clazz ATTRIBUTE_UNUSED,
- jstring pkgName,
- jstring appDir,
- jstring procName ATTRIBUTE_UNUSED) {
- const char* appDirChars = env->GetStringUTFChars(appDir, nullptr);
- const char* pkgNameChars = env->GetStringUTFChars(pkgName, nullptr);
- std::string profileFile = StringPrintf("%s/code_cache/%s.prof", appDirChars, pkgNameChars);
+ jstring pkg_name,
+ jstring app_dir,
+ jobjectArray code_paths) {
+ std::vector<std::string> code_paths_vec;
+ int code_paths_length = env->GetArrayLength(code_paths);
+ for (int i = 0; i < code_paths_length; i++) {
+ jstring code_path = reinterpret_cast<jstring>(env->GetObjectArrayElement(code_paths, i));
+ const char* raw_code_path = env->GetStringUTFChars(code_path, nullptr);
+ code_paths_vec.push_back(raw_code_path);
+ env->ReleaseStringUTFChars(code_path, raw_code_path);
+ }
- Runtime::Current()->SetJitProfilingFilename(profileFile.c_str());
+ const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr);
+ const char* raw_pkg_name = env->GetStringUTFChars(pkg_name, nullptr);
+ std::string profile_file = StringPrintf("%s/code_cache/%s.prof", raw_app_dir, raw_pkg_name);
+ env->ReleaseStringUTFChars(pkg_name, raw_pkg_name);
+ env->ReleaseStringUTFChars(app_dir, raw_app_dir);
- env->ReleaseStringUTFChars(appDir, appDirChars);
- env->ReleaseStringUTFChars(pkgName, pkgNameChars);
+ Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file);
}
static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
@@ -633,7 +640,7 @@
NATIVE_METHOD(VMRuntime, isCheckJniEnabled, "!()Z"),
NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
NATIVE_METHOD(VMRuntime, registerAppInfo,
- "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V"),
+ "(Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;)V"),
NATIVE_METHOD(VMRuntime, isBootClassPathOnDisk, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(VMRuntime, getCurrentInstructionSet, "()Ljava/lang/String;"),
};
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index ae1a4d7..67d825e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -129,7 +129,11 @@
return reinterpret_cast<jlong>(ThreadForEnv(env));
}
-static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, jint debug_flags,
+static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
+ jclass,
+ jlong token,
+ jint debug_flags,
+ jboolean is_system_server,
jstring instruction_set) {
Thread* thread = reinterpret_cast<Thread*>(token);
// Our system thread ID, etc, has changed so reset Thread state.
@@ -174,22 +178,24 @@
}
}
- if (instruction_set != nullptr) {
+ if (instruction_set != nullptr && !is_system_server) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
Runtime::NativeBridgeAction action = Runtime::NativeBridgeAction::kUnload;
if (isa != kNone && isa != kRuntimeISA) {
action = Runtime::NativeBridgeAction::kInitialize;
}
- Runtime::Current()->InitNonZygoteOrPostFork(env, action, isa_string.c_str());
+ Runtime::Current()->InitNonZygoteOrPostFork(
+ env, is_system_server, action, isa_string.c_str());
} else {
- Runtime::Current()->InitNonZygoteOrPostFork(env, Runtime::NativeBridgeAction::kUnload, nullptr);
+ Runtime::Current()->InitNonZygoteOrPostFork(
+ env, is_system_server, Runtime::NativeBridgeAction::kUnload, nullptr);
}
}
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
- NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JILjava/lang/String;)V"),
+ NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZLjava/lang/String;)V"),
};
void register_dalvik_system_ZygoteHooks(JNIEnv* env) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5e42392..1977481 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -439,16 +439,9 @@
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
- for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : klass->GetDeclaredMethods(sizeof(void*))) {
auto modifiers = m.GetAccessFlags();
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccMiranda) == 0) {
- ++num_methods;
- }
- }
- for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
- auto modifiers = m.GetAccessFlags();
- // Add non-constructor direct/static methods.
+ // Add non-constructor declared methods.
if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
(modifiers & kAccConstructor) == 0) {
++num_methods;
@@ -457,22 +450,9 @@
auto ret = hs.NewHandle(mirror::ObjectArray<mirror::Method>::Alloc(
soa.Self(), mirror::Method::ArrayClass(), num_methods));
num_methods = 0;
- for (auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : klass->GetDeclaredMethods(sizeof(void*))) {
auto modifiers = m.GetAccessFlags();
if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccMiranda) == 0) {
- auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m);
- if (method == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- ret->SetWithoutChecks<false>(num_methods++, method);
- }
- }
- for (auto& m : klass->GetDirectMethods(sizeof(void*))) {
- auto modifiers = m.GetAccessFlags();
- // Add non-constructor direct/static methods.
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
(modifiers & kAccConstructor) == 0) {
auto* method = mirror::Method::CreateFromArtMethod(soa.Self(), &m);
if (method == nullptr) {
@@ -673,7 +653,8 @@
}
}
auto* constructor = klass->GetDeclaredConstructor(
- soa.Self(), NullHandle<mirror::ObjectArray<mirror::Class>>());
+ soa.Self(),
+ ScopedNullHandle<mirror::ObjectArray<mirror::Class>>());
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"%s has no zero argument constructor",
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 4a1e6c2..f42a17d 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -68,7 +68,8 @@
}
static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
- jstring javaLdLibraryPath, jstring javaIsolationPath) {
+ jboolean isSharedNamespace, jstring javaLibrarySearchPath,
+ jstring javaLibraryPermittedPath) {
ScopedUtfChars filename(env, javaFilename);
if (filename.c_str() == nullptr) {
return nullptr;
@@ -80,14 +81,19 @@
// linker namespace instead of global LD_LIBRARY_PATH
// (23 is Marshmallow)
if (target_sdk_version <= INT_MAX) {
- SetLdLibraryPath(env, javaLdLibraryPath);
+ SetLdLibraryPath(env, javaLibrarySearchPath);
}
std::string error_msg;
{
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- bool success = vm->LoadNativeLibrary(env, filename.c_str(), javaLoader,
- javaLdLibraryPath, javaIsolationPath, &error_msg);
+ bool success = vm->LoadNativeLibrary(env,
+ filename.c_str(),
+ javaLoader,
+ isSharedNamespace == JNI_TRUE,
+ javaLibrarySearchPath,
+ javaLibraryPermittedPath,
+ &error_msg);
if (success) {
return nullptr;
}
@@ -115,7 +121,7 @@
NATIVE_METHOD(Runtime, gc, "()V"),
NATIVE_METHOD(Runtime, maxMemory, "!()J"),
NATIVE_METHOD(Runtime, nativeExit, "(I)V"),
- NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
+ NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;ZLjava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
NATIVE_METHOD(Runtime, totalMemory, "!()J"),
};
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index c75ff78..13edd67 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -89,7 +89,7 @@
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
- case kWaitingWeakGcRootRead: return kJavaWaiting;
+ case kWaitingWeakGcRootRead: return kJavaRunnable;
case kWaitingForGcThreadFlip: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 45b9484..ddcaade 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -86,7 +86,7 @@
* with an interface, array, or primitive class. If this is coming from
* native, it is OK to avoid access checks since JNI does not enforce them.
*/
-static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
+static jobject Constructor_newInstance0(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
mirror::Constructor* m = soa.Decode<mirror::Constructor*>(javaMethod);
StackHandleScope<1> hs(soa.Self());
@@ -99,7 +99,9 @@
}
// Verify that we can access the class.
if (!m->IsAccessible() && !c->IsPublic()) {
- auto* caller = GetCallingClass(soa.Self(), 1);
+ // Go 2 frames back, this method is always called from newInstance0, which is called from
+ // Constructor.newInstance(Object... args).
+ auto* caller = GetCallingClass(soa.Self(), 2);
// If caller is null, then we called from JNI, just avoid the check since JNI avoids most
// access checks anyways. TODO: Investigate if this the correct behavior.
if (caller != nullptr && !caller->CanAccess(c.Get())) {
@@ -127,7 +129,7 @@
// String constructor is replaced by a StringFactory method in InvokeMethod.
if (c->IsStringClass()) {
- return InvokeMethod(soa, javaMethod, nullptr, javaArgs, 1);
+ return InvokeMethod(soa, javaMethod, nullptr, javaArgs, 2);
}
mirror::Object* receiver =
@@ -136,11 +138,18 @@
return nullptr;
}
jobject javaReceiver = soa.AddLocalReference<jobject>(receiver);
- InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, 1);
+ InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, 2);
// Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod.
return javaReceiver;
}
+static jobject Constructor_newInstanceFromSerialization(JNIEnv* env, jclass unused ATTRIBUTE_UNUSED,
+ jclass ctorClass, jclass allocClass) {
+ jmethodID ctor = env->GetMethodID(ctorClass, "<init>", "()V");
+ DCHECK(ctor != NULL);
+ return env->NewObject(allocClass, ctor);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Constructor, getAnnotationNative,
"!(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
@@ -149,7 +158,8 @@
NATIVE_METHOD(Constructor, getParameterAnnotationsNative,
"!()[[Ljava/lang/annotation/Annotation;"),
NATIVE_METHOD(Constructor, isAnnotationPresentNative, "!(Ljava/lang/Class;)Z"),
- NATIVE_METHOD(Constructor, newInstance, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Constructor, newInstance0, "!([Ljava/lang/Object;)Ljava/lang/Object;"),
+ NATIVE_METHOD(Constructor, newInstanceFromSerialization, "!(Ljava/lang/Class;Ljava/lang/Class;)Ljava/lang/Object;"),
};
void register_java_lang_reflect_Constructor(JNIEnv* env) {
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index caacba6..d7cf62e 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -71,7 +71,7 @@
mirror::Class* klass = method->GetDeclaringClass();
int throws_index = -1;
size_t i = 0;
- for (const auto& m : klass->GetVirtualMethods(sizeof(void*))) {
+ for (const auto& m : klass->GetDeclaredVirtualMethods(sizeof(void*))) {
if (&m == method) {
throws_index = i;
break;
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 770644c..8a2c7e4 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -15,7 +15,7 @@
*/
#include "sun_misc_Unsafe.h"
-
+#include "common_throws.h"
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
#include "mirror/array.h"
@@ -23,6 +23,10 @@
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+
namespace art {
static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -52,6 +56,17 @@
mirror::Object* expectedValue = soa.Decode<mirror::Object*>(javaExpectedValue);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
// JNI must use non transactional mode.
+ if (kUseReadBarrier) {
+ // Need to make sure the reference stored in the field is a to-space one before attempting the
+ // CAS or the CAS could fail incorrectly.
+ mirror::HeapReference<mirror::Object>* field_addr =
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+ reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+ ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /*kAlwaysUpdateField*/true>(
+ obj,
+ MemberOffset(offset),
+ field_addr);
+ }
bool success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
expectedValue, newValue);
return success ? JNI_TRUE : JNI_FALSE;
@@ -185,6 +200,279 @@
return Primitive::ComponentSize(primitive_type);
}
+static jint Unsafe_addressSize(JNIEnv* env ATTRIBUTE_UNUSED, jobject ob ATTRIBUTE_UNUSED) {
+ return sizeof(void*);
+}
+
+static jint Unsafe_pageSize(JNIEnv* env ATTRIBUTE_UNUSED, jobject ob ATTRIBUTE_UNUSED) {
+ return sysconf(_SC_PAGESIZE);
+}
+
+static jlong Unsafe_allocateMemory(JNIEnv* env, jobject, jlong bytes) {
+ ScopedFastNativeObjectAccess soa(env);
+ // bytes is nonnegative and fits into size_t
+ if (bytes < 0 || bytes != (jlong)(size_t) bytes) {
+ ThrowIllegalAccessException("wrong number of bytes");
+ return 0;
+ }
+ void* mem = malloc(bytes);
+ if (mem == nullptr) {
+ soa.Self()->ThrowOutOfMemoryError("native alloc");
+ return 0;
+ }
+ return (uintptr_t) mem;
+}
+
+static void Unsafe_freeMemory(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ free(reinterpret_cast<void*>(static_cast<uintptr_t>(address)));
+}
+
+static void Unsafe_setMemory(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jlong bytes, jbyte value) {
+ memset(reinterpret_cast<void*>(static_cast<uintptr_t>(address)), value, bytes);
+}
+
+static jbyte Unsafe_getByte$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jbyte*>(address);
+}
+
+static void Unsafe_putByte$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jbyte value) {
+ *reinterpret_cast<jbyte*>(address) = value;
+}
+
+static jshort Unsafe_getShort$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jshort*>(address);
+}
+
+static void Unsafe_putShort$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jshort value) {
+ *reinterpret_cast<jshort*>(address) = value;
+}
+
+static jchar Unsafe_getChar$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jchar*>(address);
+}
+
+static void Unsafe_putChar$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jchar value) {
+ *reinterpret_cast<jchar*>(address) = value;
+}
+
+static jint Unsafe_getInt$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jint*>(address);
+}
+
+static void Unsafe_putInt$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jint value) {
+ *reinterpret_cast<jint*>(address) = value;
+}
+
+static jlong Unsafe_getLong$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jlong*>(address);
+}
+
+static void Unsafe_putLong$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jlong value) {
+ *reinterpret_cast<jlong*>(address) = value;
+}
+
+static jfloat Unsafe_getFloat$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jfloat*>(address);
+}
+
+static void Unsafe_putFloat$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jfloat value) {
+ *reinterpret_cast<jfloat*>(address) = value;
+}
+static jdouble Unsafe_getDouble$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+ return *reinterpret_cast<jdouble*>(address);
+}
+
+static void Unsafe_putDouble$(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jdouble value) {
+ *reinterpret_cast<jdouble*>(address) = value;
+}
+
+static void Unsafe_copyMemory(JNIEnv *env, jobject unsafe ATTRIBUTE_UNUSED, jlong src,
+ jlong dst, jlong size) {
+ if (size == 0) {
+ return;
+ }
+ // size is nonnegative and fits into size_t
+ if (size < 0 || size != (jlong)(size_t) size) {
+ ScopedFastNativeObjectAccess soa(env);
+ ThrowIllegalAccessException("wrong number of bytes");
+ }
+ size_t sz = (size_t)size;
+ memcpy(reinterpret_cast<void *>(dst), reinterpret_cast<void *>(src), sz);
+}
+
+template<typename T>
+static void copyToArray(jlong srcAddr, mirror::PrimitiveArray<T>* array,
+ size_t array_offset,
+ size_t size)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const T* src = reinterpret_cast<T*>(srcAddr);
+ size_t sz = size / sizeof(T);
+ size_t of = array_offset / sizeof(T);
+ for (size_t i = 0; i < sz; ++i) {
+ array->Set(i + of, *(src + i));
+ }
+}
+
+template<typename T>
+static void copyFromArray(jlong dstAddr, mirror::PrimitiveArray<T>* array,
+ size_t array_offset,
+ size_t size)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* dst = reinterpret_cast<T*>(dstAddr);
+ size_t sz = size / sizeof(T);
+ size_t of = array_offset / sizeof(T);
+ for (size_t i = 0; i < sz; ++i) {
+ *(dst + i) = array->Get(i + of);
+ }
+}
+
+static void Unsafe_copyMemoryToPrimitiveArray(JNIEnv *env,
+ jobject unsafe ATTRIBUTE_UNUSED,
+ jlong srcAddr,
+ jobject dstObj,
+ jlong dstOffset,
+ jlong size) {
+ ScopedObjectAccess soa(env);
+ if (size == 0) {
+ return;
+ }
+ // size is nonnegative and fits into size_t
+ if (size < 0 || size != (jlong)(size_t) size) {
+ ThrowIllegalAccessException("wrong number of bytes");
+ }
+ size_t sz = (size_t)size;
+ size_t dst_offset = (size_t)dstOffset;
+ mirror::Object* dst = soa.Decode<mirror::Object*>(dstObj);
+ mirror::Class* component_type = dst->GetClass()->GetComponentType();
+ if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
+ copyToArray(srcAddr, dst->AsByteSizedArray(), dst_offset, sz);
+ } else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
+ copyToArray(srcAddr, dst->AsShortSizedArray(), dst_offset, sz);
+ } else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
+ copyToArray(srcAddr, dst->AsIntArray(), dst_offset, sz);
+ } else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
+ copyToArray(srcAddr, dst->AsLongArray(), dst_offset, sz);
+ } else {
+ ThrowIllegalAccessException("not a primitive array");
+ }
+}
+
+static void Unsafe_copyMemoryFromPrimitiveArray(JNIEnv *env,
+ jobject unsafe ATTRIBUTE_UNUSED,
+ jobject srcObj,
+ jlong srcOffset,
+ jlong dstAddr,
+ jlong size) {
+ ScopedObjectAccess soa(env);
+ if (size == 0) {
+ return;
+ }
+ // size is nonnegative and fits into size_t
+ if (size < 0 || size != (jlong)(size_t) size) {
+ ThrowIllegalAccessException("wrong number of bytes");
+ }
+ size_t sz = (size_t)size;
+ size_t src_offset = (size_t)srcOffset;
+ mirror::Object* src = soa.Decode<mirror::Object*>(srcObj);
+ mirror::Class* component_type = src->GetClass()->GetComponentType();
+ if (component_type->IsPrimitiveByte() || component_type->IsPrimitiveBoolean()) {
+ copyFromArray(dstAddr, src->AsByteSizedArray(), src_offset, sz);
+ } else if (component_type->IsPrimitiveShort() || component_type->IsPrimitiveChar()) {
+ copyFromArray(dstAddr, src->AsShortSizedArray(), src_offset, sz);
+ } else if (component_type->IsPrimitiveInt() || component_type->IsPrimitiveFloat()) {
+ copyFromArray(dstAddr, src->AsIntArray(), src_offset, sz);
+ } else if (component_type->IsPrimitiveLong() || component_type->IsPrimitiveDouble()) {
+ copyFromArray(dstAddr, src->AsLongArray(), src_offset, sz);
+ } else {
+ ThrowIllegalAccessException("not a primitive array");
+ }
+}
+static jboolean Unsafe_getBoolean(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ return obj->GetFieldBoolean(MemberOffset(offset));
+}
+
+static void Unsafe_putBoolean(JNIEnv* env, jobject, jobject javaObj, jlong offset, jboolean newValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ // JNI must use non transactional mode (SetField8 is non-transactional).
+ obj->SetFieldBoolean<false>(MemberOffset(offset), newValue);
+}
+
+static jbyte Unsafe_getByte(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ return obj->GetFieldByte(MemberOffset(offset));
+}
+
+static void Unsafe_putByte(JNIEnv* env, jobject, jobject javaObj, jlong offset, jbyte newValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ // JNI must use non transactional mode.
+ obj->SetFieldByte<false>(MemberOffset(offset), newValue);
+}
+
+static jchar Unsafe_getChar(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ return obj->GetFieldChar(MemberOffset(offset));
+}
+
+static void Unsafe_putChar(JNIEnv* env, jobject, jobject javaObj, jlong offset, jchar newValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ // JNI must use non transactional mode.
+ obj->SetFieldChar<false>(MemberOffset(offset), newValue);
+}
+
+static jshort Unsafe_getShort(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ return obj->GetFieldShort(MemberOffset(offset));
+}
+
+static void Unsafe_putShort(JNIEnv* env, jobject, jobject javaObj, jlong offset, jshort newValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ // JNI must use non transactional mode.
+ obj->SetFieldShort<false>(MemberOffset(offset), newValue);
+}
+
+static jfloat Unsafe_getFloat(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ union {int32_t val; jfloat converted;} conv;
+ conv.val = obj->GetField32(MemberOffset(offset));
+ return conv.converted;
+}
+
+static void Unsafe_putFloat(JNIEnv* env, jobject, jobject javaObj, jlong offset, jfloat newValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ union {int32_t converted; jfloat val;} conv;
+ conv.val = newValue;
+ // JNI must use non transactional mode.
+ obj->SetField32<false>(MemberOffset(offset), conv.converted);
+}
+
+static jdouble Unsafe_getDouble(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ union {int64_t val; jdouble converted;} conv;
+ conv.val = obj->GetField64(MemberOffset(offset));
+ return conv.converted;
+}
+
+static void Unsafe_putDouble(JNIEnv* env, jobject, jobject javaObj, jlong offset, jdouble newValue) {
+ ScopedFastNativeObjectAccess soa(env);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
+ union {int64_t converted; jdouble val;} conv;
+ conv.val = newValue;
+ // JNI must use non transactional mode.
+ obj->SetField64<false>(MemberOffset(offset), conv.converted);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(Unsafe, compareAndSwapInt, "!(Ljava/lang/Object;JII)Z"),
NATIVE_METHOD(Unsafe, compareAndSwapLong, "!(Ljava/lang/Object;JJJ)Z"),
@@ -206,6 +494,40 @@
NATIVE_METHOD(Unsafe, putOrderedObject, "!(Ljava/lang/Object;JLjava/lang/Object;)V"),
NATIVE_METHOD(Unsafe, getArrayBaseOffsetForComponentType, "!(Ljava/lang/Class;)I"),
NATIVE_METHOD(Unsafe, getArrayIndexScaleForComponentType, "!(Ljava/lang/Class;)I"),
+ NATIVE_METHOD(Unsafe, addressSize, "!()I"),
+ NATIVE_METHOD(Unsafe, pageSize, "!()I"),
+ NATIVE_METHOD(Unsafe, allocateMemory, "!(J)J"),
+ NATIVE_METHOD(Unsafe, freeMemory, "!(J)V"),
+ NATIVE_METHOD(Unsafe, setMemory, "!(JJB)V"),
+ NATIVE_METHOD(Unsafe, getByte$, "!(J)B"),
+ NATIVE_METHOD(Unsafe, putByte$, "!(JB)V"),
+ NATIVE_METHOD(Unsafe, getShort$, "!(J)S"),
+ NATIVE_METHOD(Unsafe, putShort$, "!(JS)V"),
+ NATIVE_METHOD(Unsafe, getChar$, "!(J)C"),
+ NATIVE_METHOD(Unsafe, putChar$, "!(JC)V"),
+ NATIVE_METHOD(Unsafe, getInt$, "!(J)I"),
+ NATIVE_METHOD(Unsafe, putInt$, "!(JI)V"),
+ NATIVE_METHOD(Unsafe, getLong$, "!(J)J"),
+ NATIVE_METHOD(Unsafe, putLong$, "!(JJ)V"),
+ NATIVE_METHOD(Unsafe, getFloat$, "!(J)F"),
+ NATIVE_METHOD(Unsafe, putFloat$, "!(JF)V"),
+ NATIVE_METHOD(Unsafe, getDouble$, "!(J)D"),
+ NATIVE_METHOD(Unsafe, putDouble$, "!(JD)V"),
+ NATIVE_METHOD(Unsafe, copyMemory, "!(JJJ)V"),
+ NATIVE_METHOD(Unsafe, copyMemoryToPrimitiveArray, "!(JLjava/lang/Object;JJ)V"),
+ NATIVE_METHOD(Unsafe, copyMemoryFromPrimitiveArray, "!(Ljava/lang/Object;JJJ)V"),
+ NATIVE_METHOD(Unsafe, getBoolean, "!(Ljava/lang/Object;J)Z"),
+ NATIVE_METHOD(Unsafe, getByte, "!(Ljava/lang/Object;J)B"),
+ NATIVE_METHOD(Unsafe, getChar, "!(Ljava/lang/Object;J)C"),
+ NATIVE_METHOD(Unsafe, getShort, "!(Ljava/lang/Object;J)S"),
+ NATIVE_METHOD(Unsafe, getFloat, "!(Ljava/lang/Object;J)F"),
+ NATIVE_METHOD(Unsafe, getDouble, "!(Ljava/lang/Object;J)D"),
+ NATIVE_METHOD(Unsafe, putBoolean, "!(Ljava/lang/Object;JZ)V"),
+ NATIVE_METHOD(Unsafe, putByte, "!(Ljava/lang/Object;JB)V"),
+ NATIVE_METHOD(Unsafe, putChar, "!(Ljava/lang/Object;JC)V"),
+ NATIVE_METHOD(Unsafe, putShort, "!(Ljava/lang/Object;JS)V"),
+ NATIVE_METHOD(Unsafe, putFloat, "!(Ljava/lang/Object;JF)V"),
+ NATIVE_METHOD(Unsafe, putDouble, "!(Ljava/lang/Object;JD)V"),
};
void register_sun_misc_Unsafe(JNIEnv* env) {
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 46cc5aa..61a1085 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -45,10 +45,7 @@
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t native_method_count = 0;
- for (auto& m : c->GetDirectMethods(sizeof(void*))) {
- native_method_count += m.IsNative() ? 1u : 0u;
- }
- for (auto& m : c->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(sizeof(void*))) {
native_method_count += m.IsNative() ? 1u : 0u;
}
return native_method_count;
@@ -63,19 +60,7 @@
mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
uint32_t count = 0;
- for (auto& m : c->GetDirectMethods(sizeof(void*))) {
- if (m.IsNative()) {
- if (count < method_count) {
- methods[count].name = m.GetName();
- methods[count].signature = m.GetShorty();
- methods[count].fnPtr = m.GetEntryPointFromJni();
- count++;
- } else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(&m);
- }
- }
- }
- for (auto& m : c->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : c->GetMethods(sizeof(void*))) {
if (m.IsNative()) {
if (count < method_count) {
methods[count].name = m.GetName();
diff --git a/runtime/oat.h b/runtime/oat.h
index 5ed1977..13fd6a4 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -39,6 +39,7 @@
static constexpr const char* kPicKey = "pic";
static constexpr const char* kDebuggableKey = "debuggable";
static constexpr const char* kClassPathKey = "classpath";
+ static constexpr const char* kBootClassPath = "bootclasspath";
static constexpr const char kTrueValue[] = "true";
static constexpr const char kFalseValue[] = "false";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 83e594b..e3de14b 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -983,6 +983,7 @@
LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
<< " ( canonical path " << dex_canonical_location << ")"
<< " with checksum " << checksum << " in OatFile " << GetLocation();
+ /* TODO: Modify for multi-image support and reenable. b/26317072
if (kIsDebugBuild) {
for (const OatDexFile* odf : oat_dex_files_storage_) {
LOG(WARNING) << "OatFile " << GetLocation()
@@ -991,6 +992,7 @@
<< " with checksum 0x" << std::hex << odf->GetDexFileLocationChecksum();
}
}
+ */
}
return nullptr;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 0f3a013..8543ff4 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -846,11 +846,12 @@
std::string OatFileAssistant::ImageLocation() {
Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
- if (image_space == nullptr) {
+ const std::vector<gc::space::ImageSpace*>& image_spaces =
+ runtime->GetHeap()->GetBootImageSpaces();
+ if (image_spaces.empty()) {
return "";
}
- return image_space->GetImageLocation();
+ return image_spaces[0]->GetImageLocation();
}
const uint32_t* OatFileAssistant::GetRequiredDexChecksum() {
@@ -949,12 +950,13 @@
image_info_load_attempted_ = true;
Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
- if (image_space != nullptr) {
- cached_image_info_.location = image_space->GetImageLocation();
+ std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+ if (!image_spaces.empty()) {
+ // TODO: Better support multi-images? b/26317072
+ cached_image_info_.location = image_spaces[0]->GetImageLocation();
if (isa_ == kRuntimeISA) {
- const ImageHeader& image_header = image_space->GetImageHeader();
+ const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
cached_image_info_.oat_checksum = image_header.GetOatChecksum();
cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
image_header.GetOatDataBegin());
@@ -969,7 +971,7 @@
cached_image_info_.patch_delta = image_header->GetPatchDelta();
}
}
- image_info_load_succeeded_ = (image_space != nullptr);
+ image_info_load_succeeded_ = (!image_spaces.empty());
}
return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
}
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 8c7efb2..f994f0c 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -223,9 +223,10 @@
false, dex_location.c_str(), &error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetBootImageSpace();
- ASSERT_TRUE(image_space != nullptr);
- const ImageHeader& image_header = image_space->GetImageHeader();
+ const std::vector<gc::space::ImageSpace*> image_spaces =
+ runtime->GetHeap()->GetBootImageSpaces();
+ ASSERT_TRUE(!image_spaces.empty() && image_spaces[0] != nullptr);
+ const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
const OatHeader& oat_header = odex_file->GetOatHeader();
EXPECT_FALSE(odex_file->IsPic());
EXPECT_EQ(image_header.GetOatChecksum(), oat_header.GetImageFileLocationOatChecksum());
@@ -1025,7 +1026,7 @@
// We use the lib core dex file, because it's large, and hopefully should
// take a while to generate.
- Copy(GetLibCoreDexFileName(), dex_location);
+ Copy(GetLibCoreDexFileNames()[0], dex_location);
const int kNumThreads = 32;
Thread* self = Thread::Current();
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index ea6d3ff..36a967f 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -78,17 +78,23 @@
return nullptr;
}
-const OatFile* OatFileManager::GetBootOatFile() const {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- return (image_space == nullptr) ? nullptr : image_space->GetOatFile();
+std::vector<const OatFile*> OatFileManager::GetBootOatFiles() const {
+ std::vector<const OatFile*> oat_files;
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ for (gc::space::ImageSpace* image_space : image_spaces) {
+ oat_files.push_back(image_space->GetOatFile());
+ }
+ return oat_files;
}
const OatFile* OatFileManager::GetPrimaryOatFile() const {
ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
- const OatFile* boot_oat_file = GetBootOatFile();
- if (boot_oat_file != nullptr) {
+ std::vector<const OatFile*> boot_oat_files = GetBootOatFiles();
+ if (!boot_oat_files.empty()) {
for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
- if (oat_file.get() != boot_oat_file) {
+ if (std::find(boot_oat_files.begin(), boot_oat_files.end(), oat_file.get()) ==
+ boot_oat_files.end()) {
return oat_file.get();
}
}
@@ -102,8 +108,13 @@
oat_files_.clear();
}
-const OatFile* OatFileManager::RegisterImageOatFile(gc::space::ImageSpace* space) {
- return RegisterOatFile(space->ReleaseOatFile());
+std::vector<const OatFile*> OatFileManager::RegisterImageOatFiles(
+ std::vector<gc::space::ImageSpace*> spaces) {
+ std::vector<const OatFile*> oat_files;
+ for (gc::space::ImageSpace* space : spaces) {
+ oat_files.push_back(RegisterOatFile(space->ReleaseOatFile()));
+ }
+ return oat_files;
}
class DexFileAndClassPair : ValueObject {
@@ -213,7 +224,7 @@
std::priority_queue<DexFileAndClassPair> queue;
// Add dex files from already loaded oat files, but skip boot.
- const OatFile* boot_oat = GetBootOatFile();
+ std::vector<const OatFile*> boot_oat_files = GetBootOatFiles();
// The same OatFile can be loaded multiple times at different addresses. In this case, we don't
// need to check both against each other since they would have resolved the same way at compile
// time.
@@ -221,8 +232,8 @@
for (const std::unique_ptr<const OatFile>& loaded_oat_file : oat_files_) {
DCHECK_NE(loaded_oat_file.get(), oat_file);
const std::string& location = loaded_oat_file->GetLocation();
- if (loaded_oat_file.get() != boot_oat &&
- location != oat_file->GetLocation() &&
+ if (std::find(boot_oat_files.begin(), boot_oat_files.end(), loaded_oat_file.get()) ==
+ boot_oat_files.end() && location != oat_file->GetLocation() &&
unique_locations.find(location) == unique_locations.end()) {
unique_locations.insert(location);
AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue);
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index af7efb4..4690e45 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -73,15 +73,15 @@
return have_non_pic_oat_file_;
}
- // Returns the boot image oat file.
- const OatFile* GetBootOatFile() const;
+ // Returns the boot image oat files.
+ std::vector<const OatFile*> GetBootOatFiles() const;
// Returns the first non-image oat file in the class path.
const OatFile* GetPrimaryOatFile() const REQUIRES(!Locks::oat_file_manager_lock_);
- // Return the oat file for an image, registers the oat file. Takes ownership of the imagespace's
- // underlying oat file.
- const OatFile* RegisterImageOatFile(gc::space::ImageSpace* space)
+ // Returns the oat files for the images, registers the oat files.
+ // Takes ownership of the imagespace's underlying oat files.
+ std::vector<const OatFile*> RegisterImageOatFiles(std::vector<gc::space::ImageSpace*> spaces)
REQUIRES(!Locks::oat_file_manager_lock_);
// Finds or creates the oat file holding dex_location. Then loads and returns
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 03cad08..5643739 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -44,7 +44,8 @@
uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
DCHECK(IsAlignedParam(code, GetInstructionSetAlignment(kRuntimeISA)) ||
- IsAlignedParam(header, GetInstructionSetAlignment(kRuntimeISA)));
+ IsAlignedParam(header, GetInstructionSetAlignment(kRuntimeISA)))
+ << std::hex << code << " " << std::hex << header;
return reinterpret_cast<OatQuickMethodHeader*>(header);
}
diff --git a/runtime/openjdkjvm/NOTICE b/runtime/openjdkjvm/NOTICE
new file mode 100644
index 0000000..700a206
--- /dev/null
+++ b/runtime/openjdkjvm/NOTICE
@@ -0,0 +1,29 @@
+Copyright (C) 2014 The Android Open Source Project
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This file implements interfaces from the file jvm.h. This implementation
+is licensed under the same terms as the file jvm.h. The
+copyright and license information for the file jvm.h follows.
+
+Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License version 2 only, as
+published by the Free Software Foundation. Oracle designates this
+particular file as subject to the "Classpath" exception as provided
+by Oracle in the LICENSE file that accompanied this code.
+
+This code is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+version 2 for more details (a copy is included in the LICENSE file that
+accompanied this code).
+
+You should have received a copy of the GNU General Public License version
+2 along with this work; if not, write to the Free Software Foundation,
+Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+or visit www.oracle.com if you need additional information or have any
+questions.
diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc
new file mode 100644
index 0000000..ab0d934
--- /dev/null
+++ b/runtime/openjdkjvm/OpenjdkJvm.cc
@@ -0,0 +1,540 @@
+/* Copyright (C) 2014 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvm.h. This implementation
+ * is licensed under the same terms as the file jvm.h. The
+ * copyright and license information for the file jvm.h follows.
+ *
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * Services that OpenJDK expects the VM to provide.
+ */
+#include<stdio.h>
+#include <dlfcn.h>
+#include <limits.h>
+#include <unistd.h>
+
+#include "common_throws.h"
+#include "gc/heap.h"
+#include "thread.h"
+#include "thread_list.h"
+#include "runtime.h"
+#include "handle_scope-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedUtfChars.h"
+#include "mirror/class_loader.h"
+#include "verify_object-inl.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "../../libcore/ojluni/src/main/native/jvm.h" // TODO(narayan): fix it
+#include "jni_internal.h"
+#include "mirror/string-inl.h"
+#include "native/scoped_fast_native_object_access.h"
+#include "ScopedLocalRef.h"
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+
+#ifdef __ANDROID__
+// This function is provided by android linker.
+extern "C" void android_update_LD_LIBRARY_PATH(const char* ld_library_path);
+#endif // __ANDROID__
+
+#undef LOG_TAG
+#define LOG_TAG "artopenjdk"
+
+using art::DEBUG;
+using art::WARNING;
+using art::VERBOSE;
+using art::INFO;
+using art::ERROR;
+using art::FATAL;
+
+/* posix open() with extensions; used by e.g. ZipFile */
+JNIEXPORT jint JVM_Open(const char* fname, jint flags, jint mode) {
+ LOG(DEBUG) << "JVM_Open fname='" << fname << "', flags=" << flags << ", mode=" << mode;
+
+ /*
+ * The call is expected to handle JVM_O_DELETE, which causes the file
+ * to be removed after it is opened. Also, some code seems to
+ * want the special return value JVM_EEXIST if the file open fails
+ * due to O_EXCL.
+ */
+ int fd = TEMP_FAILURE_RETRY(open(fname, flags & ~JVM_O_DELETE, mode));
+ if (fd < 0) {
+ int err = errno;
+ LOG(DEBUG) << "open(" << fname << ") failed: " << strerror(errno);
+ if (err == EEXIST) {
+ return JVM_EEXIST;
+ } else {
+ return -1;
+ }
+ }
+
+ if (flags & JVM_O_DELETE) {
+ LOG(DEBUG) << "Deleting '" << fname << "' after open\n";
+ if (unlink(fname) != 0) {
+ LOG(WARNING) << "Post-open deletion of '" << fname << "' failed: " << strerror(errno);
+ }
+ /* ignore */
+ }
+
+ LOG(VERBOSE) << "open(" << fname << ") --> " << fd;
+ return fd;
+}
+
+/* posix close() */
+JNIEXPORT jint JVM_Close(jint fd) {
+ LOG(DEBUG) << "JVM_Close fd=" << fd;
+ // don't want TEMP_FAILURE_RETRY here -- file is closed even if EINTR
+ return close(fd);
+}
+
+/* posix read() */
+JNIEXPORT jint JVM_Read(jint fd, char* buf, jint nbytes) {
+ LOG(DEBUG) << "JVM_Read fd=" << fd << ", buf='" << buf << "', nbytes=" << nbytes;
+ return TEMP_FAILURE_RETRY(read(fd, buf, nbytes));
+}
+
+/* posix write(); is used to write messages to stderr */
+JNIEXPORT jint JVM_Write(jint fd, char* buf, jint nbytes) {
+ LOG(DEBUG) << "JVM_Write fd=" << fd << ", buf='" << buf << "', nbytes=" << nbytes;
+ return TEMP_FAILURE_RETRY(write(fd, buf, nbytes));
+}
+
+/* posix lseek() */
+JNIEXPORT jlong JVM_Lseek(jint fd, jlong offset, jint whence) {
+ LOG(DEBUG) << "JVM_Lseek fd=" << fd << ", offset=" << offset << ", whence=" << whence;
+ return TEMP_FAILURE_RETRY(lseek(fd, offset, whence));
+}
+
+/*
+ * "raw monitors" seem to be expected to behave like non-recursive pthread
+ * mutexes. They're used by ZipFile.
+ */
+JNIEXPORT void* JVM_RawMonitorCreate(void) {
+ LOG(DEBUG) << "JVM_RawMonitorCreate";
+ pthread_mutex_t* newMutex =
+ reinterpret_cast<pthread_mutex_t*>(malloc(sizeof(pthread_mutex_t)));
+ pthread_mutex_init(newMutex, NULL);
+ return newMutex;
+}
+
+JNIEXPORT void JVM_RawMonitorDestroy(void* mon) {
+ LOG(DEBUG) << "JVM_RawMonitorDestroy mon=" << mon;
+ pthread_mutex_destroy(reinterpret_cast<pthread_mutex_t*>(mon));
+}
+
+JNIEXPORT jint JVM_RawMonitorEnter(void* mon) {
+ LOG(DEBUG) << "JVM_RawMonitorEnter mon=" << mon;
+ return pthread_mutex_lock(reinterpret_cast<pthread_mutex_t*>(mon));
+}
+
+JNIEXPORT void JVM_RawMonitorExit(void* mon) {
+ LOG(DEBUG) << "JVM_RawMonitorExit mon=" << mon;
+ pthread_mutex_unlock(reinterpret_cast<pthread_mutex_t*>(mon));
+}
+
+JNIEXPORT char* JVM_NativePath(char* path) {
+ LOG(DEBUG) << "JVM_NativePath path='" << path << "'";
+ return path;
+}
+
+JNIEXPORT jint JVM_GetLastErrorString(char* buf, int len) {
+#if defined(__GLIBC__) || defined(__BIONIC__)
+ int err = errno; // grab before JVM_TRACE can trash it
+ LOG(DEBUG) << "JVM_GetLastErrorString buf=" << buf << ", len=" << len;
+
+ if (len == 0) {
+ return 0;
+ }
+
+ char* result = strerror_r(err, buf, len);
+ if (result != buf) {
+ strncpy(buf, result, len);
+ buf[len - 1] = '\0';
+ }
+
+ return strlen(buf);
+#else
+ UNUSED(buf);
+ UNUSED(len);
+ return -1;
+#endif
+}
+
+JNIEXPORT int jio_fprintf(FILE* fp, const char* fmt, ...) {
+ va_list args;
+
+ va_start(args, fmt);
+ int len = jio_vfprintf(fp, fmt, args);
+ va_end(args);
+
+ return len;
+}
+
+JNIEXPORT int jio_vfprintf(FILE* fp, const char* fmt, va_list args) {
+ assert(fp != NULL);
+ return vfprintf(fp, fmt, args);
+}
+
+/* posix fsync() */
+JNIEXPORT jint JVM_Sync(jint fd) {
+ LOG(DEBUG) << "JVM_Sync fd=" << fd;
+ return TEMP_FAILURE_RETRY(fsync(fd));
+}
+
+JNIEXPORT void* JVM_FindLibraryEntry(void* handle, const char* name) {
+ LOG(DEBUG) << "JVM_FindLibraryEntry handle=" << handle << " name=" << name;
+ return dlsym(handle, name);
+}
+
+JNIEXPORT jlong JVM_CurrentTimeMillis(JNIEnv* env, jclass clazz ATTRIBUTE_UNUSED) {
+ LOG(DEBUG) << "JVM_CurrentTimeMillis env=" << env;
+ struct timeval tv;
+
+ gettimeofday(&tv, (struct timezone *) NULL);
+ jlong when = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
+ return when;
+}
+
+JNIEXPORT jint JVM_Socket(jint domain, jint type, jint protocol) {
+ LOG(DEBUG) << "JVM_Socket domain=" << domain << ", type=" << type << ", protocol=" << protocol;
+
+ return TEMP_FAILURE_RETRY(socket(domain, type, protocol));
+}
+
+JNIEXPORT jint JVM_InitializeSocketLibrary() {
+ return 0;
+}
+
+int jio_vsnprintf(char *str, size_t count, const char *fmt, va_list args) {
+ if ((intptr_t)count <= 0) return -1;
+ return vsnprintf(str, count, fmt, args);
+}
+
+int jio_snprintf(char *str, size_t count, const char *fmt, ...) {
+ va_list args;
+ int len;
+ va_start(args, fmt);
+ len = jio_vsnprintf(str, count, fmt, args);
+ va_end(args);
+ return len;
+}
+
+JNIEXPORT jint JVM_SetSockOpt(jint fd, int level, int optname,
+ const char* optval, int optlen) {
+ LOG(DEBUG) << "JVM_SetSockOpt fd=" << fd << ", level=" << level << ", optname=" << optname
+ << ", optval=" << optval << ", optlen=" << optlen;
+ return TEMP_FAILURE_RETRY(setsockopt(fd, level, optname, optval, optlen));
+}
+
+JNIEXPORT jint JVM_SocketShutdown(jint fd, jint howto) {
+ LOG(DEBUG) << "JVM_SocketShutdown fd=" << fd << ", howto=" << howto;
+ return TEMP_FAILURE_RETRY(shutdown(fd, howto));
+}
+
+JNIEXPORT jint JVM_GetSockOpt(jint fd, int level, int optname, char* optval,
+ int* optlen) {
+ LOG(DEBUG) << "JVM_GetSockOpt fd=" << fd << ", level=" << level << ", optname=" << optname
+ << ", optval=" << optval << ", optlen=" << optlen;
+
+ socklen_t len = *optlen;
+ int cc = TEMP_FAILURE_RETRY(getsockopt(fd, level, optname, optval, &len));
+ *optlen = len;
+ return cc;
+}
+
+JNIEXPORT jint JVM_GetSockName(jint fd, struct sockaddr* addr, int* addrlen) {
+ LOG(DEBUG) << "JVM_GetSockName fd=" << fd << ", addr=" << addr << ", addrlen=" << addrlen;
+
+ socklen_t len = *addrlen;
+ int cc = TEMP_FAILURE_RETRY(getsockname(fd, addr, &len));
+ *addrlen = len;
+ return cc;
+}
+
+JNIEXPORT jint JVM_SocketAvailable(jint fd, jint* result) {
+ LOG(DEBUG) << "JVM_SocketAvailable fd=" << fd << ", result=" << result;
+
+ if (TEMP_FAILURE_RETRY(ioctl(fd, FIONREAD, result)) < 0) {
+ LOG(DEBUG) << "ioctl(" << fd << ", FIONREAD) failed: " << strerror(errno);
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+JNIEXPORT jint JVM_Send(jint fd, char* buf, jint nBytes, jint flags) {
+ LOG(DEBUG) << "JVM_Send fd=" << fd << ", buf=" << buf << ", nBytes="
+ << nBytes << ", flags=" << flags;
+
+ return TEMP_FAILURE_RETRY(send(fd, buf, nBytes, flags));
+}
+
+JNIEXPORT jint JVM_SocketClose(jint fd) {
+ LOG(DEBUG) << "JVM_SocketClose fd=" << fd;
+
+ // don't want TEMP_FAILURE_RETRY here -- file is closed even if EINTR
+ return close(fd);
+}
+
+JNIEXPORT jint JVM_Listen(jint fd, jint count) {
+ LOG(DEBUG) << "JVM_Listen fd=" << fd << ", count=" << count;
+
+ return TEMP_FAILURE_RETRY(listen(fd, count));
+}
+
+JNIEXPORT jint JVM_Connect(jint fd, struct sockaddr* addr, jint addrlen) {
+ LOG(DEBUG) << "JVM_Connect fd=" << fd << ", addr=" << addr << ", addrlen=" << addrlen;
+
+ return TEMP_FAILURE_RETRY(connect(fd, addr, addrlen));
+}
+
+JNIEXPORT int JVM_GetHostName(char* name, int namelen) {
+ LOG(DEBUG) << "JVM_GetHostName name=" << name << ", namelen=" << namelen;
+
+ return TEMP_FAILURE_RETRY(gethostname(name, namelen));
+}
+
+JNIEXPORT jstring JVM_InternString(JNIEnv* env, jstring jstr) {
+ LOG(DEBUG) << "JVM_InternString env=" << env << ", jstr=" << jstr;
+ art::ScopedFastNativeObjectAccess soa(env);
+ art::mirror::String* s = soa.Decode<art::mirror::String*>(jstr);
+ art::mirror::String* result = s->Intern();
+ return soa.AddLocalReference<jstring>(result);
+}
+
+JNIEXPORT jlong JVM_FreeMemory(void) {
+ return art::Runtime::Current()->GetHeap()->GetFreeMemory();
+}
+
+JNIEXPORT jlong JVM_TotalMemory(void) {
+ return art::Runtime::Current()->GetHeap()->GetTotalMemory();
+}
+
+JNIEXPORT jlong JVM_MaxMemory(void) {
+ return art::Runtime::Current()->GetHeap()->GetMaxMemory();
+}
+
+JNIEXPORT void JVM_GC(void) {
+ if (art::Runtime::Current()->IsExplicitGcDisabled()) {
+ LOG(INFO) << "Explicit GC skipped.";
+ return;
+ }
+ art::Runtime::Current()->GetHeap()->CollectGarbage(false);
+}
+
+JNIEXPORT __attribute__((noreturn)) void JVM_Exit(jint status) {
+ LOG(INFO) << "System.exit called, status: " << status;
+ art::Runtime::Current()->CallExitHook(status);
+ exit(status);
+}
+
+static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPath) {
+#ifdef __ANDROID__
+ if (javaLdLibraryPath != nullptr) {
+ ScopedUtfChars ldLibraryPath(env, javaLdLibraryPath);
+ if (ldLibraryPath.c_str() != nullptr) {
+ android_update_LD_LIBRARY_PATH(ldLibraryPath.c_str());
+ }
+ }
+
+#else
+ LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
+ UNUSED(javaLdLibraryPath, env);
+#endif
+}
+
+
+JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env, jstring javaFilename, jobject javaLoader,
+ jboolean isSharedNamespace, jstring javaLibrarySearchPath,
+ jstring javaLibraryPermittedPath) {
+ ScopedUtfChars filename(env, javaFilename);
+ if (filename.c_str() == NULL) {
+ return NULL;
+ }
+
+ int32_t target_sdk_version = art::Runtime::Current()->GetTargetSdkVersion();
+
+ // Starting with N nativeLoad uses classloader local
+ // linker namespace instead of global LD_LIBRARY_PATH
+ // (23 is Marshmallow)
+ if (target_sdk_version <= 23) {
+ SetLdLibraryPath(env, javaLibrarySearchPath);
+ }
+
+ std::string error_msg;
+ {
+ art::ScopedObjectAccess soa(env);
+ art::StackHandleScope<1> hs(soa.Self());
+ art::JavaVMExt* vm = art::Runtime::Current()->GetJavaVM();
+ bool success = vm->LoadNativeLibrary(env,
+ filename.c_str(),
+ javaLoader,
+ isSharedNamespace == JNI_TRUE,
+ javaLibrarySearchPath,
+ javaLibraryPermittedPath,
+ &error_msg);
+ if (success) {
+ return nullptr;
+ }
+ }
+
+ // Don't let a pending exception from JNI_OnLoad cause a CheckJNI issue with NewStringUTF.
+ env->ExceptionClear();
+ return env->NewStringUTF(error_msg.c_str());
+}
+
+JNIEXPORT void JVM_StartThread(JNIEnv* env, jobject jthread, jlong stack_size, jboolean daemon) {
+ art::Thread::CreateNativeThread(env, jthread, stack_size, daemon == JNI_TRUE);
+}
+
+JNIEXPORT void JVM_SetThreadPriority(JNIEnv* env, jobject jthread, jint prio) {
+ art::ScopedObjectAccess soa(env);
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+ if (thread != NULL) {
+ thread->SetNativePriority(prio);
+ }
+}
+
+JNIEXPORT void JVM_Yield(JNIEnv* env ATTRIBUTE_UNUSED, jclass threadClass ATTRIBUTE_UNUSED) {
+ sched_yield();
+}
+
+JNIEXPORT void JVM_Sleep(JNIEnv* env, jclass threadClass ATTRIBUTE_UNUSED,
+ jobject java_lock, jlong millis) {
+ art::ScopedFastNativeObjectAccess soa(env);
+ art::mirror::Object* lock = soa.Decode<art::mirror::Object*>(java_lock);
+ art::Monitor::Wait(art::Thread::Current(), lock, millis, 0, true, art::kSleeping);
+}
+
+JNIEXPORT jobject JVM_CurrentThread(JNIEnv* env, jclass unused ATTRIBUTE_UNUSED) {
+ art::ScopedFastNativeObjectAccess soa(env);
+ return soa.AddLocalReference<jobject>(soa.Self()->GetPeer());
+}
+
+JNIEXPORT void JVM_Interrupt(JNIEnv* env, jobject jthread) {
+ art::ScopedFastNativeObjectAccess soa(env);
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+ if (thread != nullptr) {
+ thread->Interrupt(soa.Self());
+ }
+}
+
+JNIEXPORT jboolean JVM_IsInterrupted(JNIEnv* env, jobject jthread, jboolean clearInterrupted) {
+ if (clearInterrupted) {
+ return static_cast<art::JNIEnvExt*>(env)->self->Interrupted() ? JNI_TRUE : JNI_FALSE;
+ } else {
+ art::ScopedFastNativeObjectAccess soa(env);
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(soa, jthread);
+ return (thread != nullptr) ? thread->IsInterrupted() : JNI_FALSE;
+ }
+}
+
+JNIEXPORT jboolean JVM_HoldsLock(JNIEnv* env, jclass unused ATTRIBUTE_UNUSED, jobject jobj) {
+ art::ScopedObjectAccess soa(env);
+ art::mirror::Object* object = soa.Decode<art::mirror::Object*>(jobj);
+ if (object == NULL) {
+ art::ThrowNullPointerException("object == null");
+ return JNI_FALSE;
+ }
+ return soa.Self()->HoldsLock(object);
+}
+
+JNIEXPORT void JVM_SetNativeThreadName(JNIEnv* env, jobject jthread, jstring java_name) {
+ ScopedUtfChars name(env, java_name);
+ {
+ art::ScopedObjectAccess soa(env);
+ if (soa.Decode<art::mirror::Object*>(jthread) == soa.Self()->GetPeer()) {
+ soa.Self()->SetThreadName(name.c_str());
+ return;
+ }
+ }
+ // Suspend thread to avoid it from killing itself while we set its name. We don't just hold the
+ // thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock
+ // in the DDMS send code.
+ art::ThreadList* thread_list = art::Runtime::Current()->GetThreadList();
+ bool timed_out;
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ art::Thread* thread;
+ {
+ thread = thread_list->SuspendThreadByPeer(jthread, true, false, &timed_out);
+ }
+ if (thread != NULL) {
+ {
+ art::ScopedObjectAccess soa(env);
+ thread->SetThreadName(name.c_str());
+ }
+ thread_list->Resume(thread, false);
+ } else if (timed_out) {
+ LOG(ERROR) << "Trying to set thread name to '" << name.c_str() << "' failed as the thread "
+ "failed to suspend within a generous timeout.";
+ }
+}
+
+JNIEXPORT jint JVM_IHashCode(JNIEnv* env ATTRIBUTE_UNUSED,
+ jobject javaObject ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "JVM_IHashCode is not implemented";
+ return 0;
+}
+
+JNIEXPORT jlong JVM_NanoTime(JNIEnv* env ATTRIBUTE_UNUSED, jclass unused ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "JVM_NanoTime is not implemented";
+ return 0L;
+}
+
+JNIEXPORT void JVM_ArrayCopy(JNIEnv* /* env */, jclass /* unused */, jobject /* javaSrc */,
+ jint /* srcPos */, jobject /* javaDst */, jint /* dstPos */,
+ jint /* length */) {
+ UNIMPLEMENTED(FATAL) << "JVM_ArrayCopy is not implemented";
+}
+
+JNIEXPORT jint JVM_FindSignal(const char* name ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "JVM_FindSignal is not implemented";
+ return 0;
+}
+
+JNIEXPORT void* JVM_RegisterSignal(jint signum ATTRIBUTE_UNUSED, void* handler ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "JVM_RegisterSignal is not implemented";
+ return nullptr;
+}
+
+JNIEXPORT jboolean JVM_RaiseSignal(jint signum ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "JVM_RaiseSignal is not implemented";
+ return JNI_FALSE;
+}
+
+JNIEXPORT __attribute__((noreturn)) void JVM_Halt(jint code) {
+ exit(code);
+}
+
+JNIEXPORT jboolean JVM_IsNaN(jdouble d) {
+ return isnan(d);
+}
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 06b40fd..5b90c6a 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -18,6 +18,8 @@
#include <memory>
+#include "arch/instruction_set.h"
+#include "base/stringprintf.h"
#include "common_runtime_test.h"
namespace art {
@@ -34,18 +36,28 @@
void* test_abort = reinterpret_cast<void*>(0xb);
void* test_exit = reinterpret_cast<void*>(0xc);
- std::string lib_core(CommonRuntimeTest::GetLibCoreDexFileName());
-
std::string boot_class_path;
+ std::string class_path;
boot_class_path += "-Xbootclasspath:";
- boot_class_path += lib_core;
+
+ bool first_dex_file = true;
+ for (const std::string &dex_file_name :
+ CommonRuntimeTest::GetLibCoreDexFileNames()) {
+ if (!first_dex_file) {
+ class_path += ":";
+ } else {
+ first_dex_file = false;
+ }
+ class_path += dex_file_name;
+ }
+ boot_class_path += class_path;
RuntimeOptions options;
options.push_back(std::make_pair(boot_class_path.c_str(), nullptr));
options.push_back(std::make_pair("-classpath", nullptr));
- options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair(class_path.c_str(), nullptr));
options.push_back(std::make_pair("-cp", nullptr));
- options.push_back(std::make_pair(lib_core.c_str(), nullptr));
+ options.push_back(std::make_pair(class_path.c_str(), nullptr));
options.push_back(std::make_pair("-Ximage:boot_image", nullptr));
options.push_back(std::make_pair("-Xcheck:jni", nullptr));
options.push_back(std::make_pair("-Xms2048", nullptr));
@@ -69,8 +81,8 @@
#define EXPECT_PARSED_EQ(expected, actual_key) EXPECT_EQ(expected, map.GetOrDefault(actual_key))
#define EXPECT_PARSED_EXISTS(actual_key) EXPECT_TRUE(map.Exists(actual_key))
- EXPECT_PARSED_EQ(lib_core, Opt::BootClassPath);
- EXPECT_PARSED_EQ(lib_core, Opt::ClassPath);
+ EXPECT_PARSED_EQ(class_path, Opt::BootClassPath);
+ EXPECT_PARSED_EQ(class_path, Opt::ClassPath);
EXPECT_PARSED_EQ(std::string("boot_image"), Opt::Image);
EXPECT_PARSED_EXISTS(Opt::CheckJni);
EXPECT_PARSED_EQ(2048U, Opt::MemoryInitialSize);
@@ -113,6 +125,40 @@
EXPECT_TRUE(map.Exists(Opt::GcOption));
XGcOption xgc = map.GetOrDefault(Opt::GcOption);
- EXPECT_EQ(gc::kCollectorTypeMC, xgc.collector_type_);}
+ EXPECT_EQ(gc::kCollectorTypeMC, xgc.collector_type_);
+}
+
+TEST_F(ParsedOptionsTest, ParsedOptionsInstructionSet) {
+ using Opt = RuntimeArgumentMap;
+
+ {
+ // Nothing set, should be kRuntimeISA.
+ RuntimeOptions options;
+ RuntimeArgumentMap map;
+ bool parsed = ParsedOptions::Parse(options, false, &map);
+ ASSERT_TRUE(parsed);
+ InstructionSet isa = map.GetOrDefault(Opt::ImageInstructionSet);
+ EXPECT_EQ(kRuntimeISA, isa);
+ }
+
+ const char* isa_strings[] = { "arm", "arm64", "x86", "x86_64", "mips", "mips64" };
+ InstructionSet ISAs[] = { InstructionSet::kArm,
+ InstructionSet::kArm64,
+ InstructionSet::kX86,
+ InstructionSet::kX86_64,
+ InstructionSet::kMips,
+ InstructionSet::kMips64 };
+ static_assert(arraysize(isa_strings) == arraysize(ISAs), "Need same amount.");
+
+ for (size_t i = 0; i < arraysize(isa_strings); ++i) {
+ RuntimeOptions options;
+ options.push_back(std::make_pair("imageinstructionset", isa_strings[i]));
+ RuntimeArgumentMap map;
+ bool parsed = ParsedOptions::Parse(options, false, &map);
+ ASSERT_TRUE(parsed);
+ InstructionSet isa = map.GetOrDefault(Opt::ImageInstructionSet);
+ EXPECT_EQ(ISAs[i], isa);
+ }
+}
} // namespace art
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 57472ad..4d9ca6d 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -79,7 +79,7 @@
mirror::Method::CreateFromArtMethod(soa.Self(), method)));
// Now adds all interfaces virtual methods.
for (mirror::Class* interface : interfaces) {
- for (auto& m : interface->GetVirtualMethods(sizeof(void*))) {
+ for (auto& m : interface->GetDeclaredVirtualMethods(sizeof(void*))) {
soa.Env()->SetObjectArrayElement(
proxyClassMethods, array_index++, soa.AddLocalReference<jobject>(
mirror::Method::CreateFromArtMethod(soa.Self(), &m)));
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index ea193d7..19cf759 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -28,7 +28,7 @@
namespace art {
-template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
@@ -42,9 +42,16 @@
ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
MirrorType* ref = ref_addr->AsMirrorPtr();
+ MirrorType* old_ref = ref;
if (is_gray) {
// Slow-path.
ref = reinterpret_cast<MirrorType*>(Mark(ref));
+ // If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
+ // updates before us, but it's ok.
+ if (kAlwaysUpdateField && ref != old_ref) {
+ obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
+ offset, old_ref, ref);
+ }
}
if (kEnableReadBarrierInvariantChecks) {
CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
@@ -75,7 +82,7 @@
}
}
-template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source) {
MirrorType* ref = *root;
@@ -112,7 +119,7 @@
}
// TODO: Reduce copy paste
-template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
GcRootSource* gc_root_source) {
MirrorType* ref = root->AsMirrorPtr();
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 600b7f9..3169a8b 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -43,26 +43,24 @@
// Enable the read barrier checks.
static constexpr bool kEnableReadBarrierInvariantChecks = true;
- // It's up to the implementation whether the given field gets
- // updated whereas the return value must be an updated reference.
+ // It's up to the implementation whether the given field gets updated whereas the return value
+ // must be an updated reference unless kAlwaysUpdateField is true.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
- bool kMaybeDuringStartup = false>
+ bool kAlwaysUpdateField = false>
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
SHARED_REQUIRES(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
- template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
- bool kMaybeDuringStartup = false>
+ template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source = nullptr)
SHARED_REQUIRES(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
- template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
- bool kMaybeDuringStartup = false>
+ template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
GcRootSource* gc_root_source = nullptr)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 93ca347..5c72629 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -190,7 +190,6 @@
abort_(nullptr),
stats_enabled_(false),
is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
- profiler_started_(false),
instrumentation_(),
main_thread_group_(nullptr),
system_thread_group_(nullptr),
@@ -258,11 +257,6 @@
self = nullptr;
}
- // Shut down background profiler before the runtime exits.
- if (profiler_started_) {
- BackgroundMethodSamplingProfiler::Shutdown();
- }
-
// Make sure to let the GC complete if it is running.
heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
@@ -271,6 +265,8 @@
// Delete thread pool before the thread list since we don't want to wait forever on the
// JIT compiler threads.
jit_->DeleteThreadPool();
+ // Similarly, stop the profile saver thread before deleting the thread list.
+ jit_->StopProfileSaver();
}
// Make sure our internal threads are dead before we start tearing down things they're using.
@@ -547,15 +543,15 @@
// Use !IsAotCompiler so that we get test coverage, tests are never the zygote.
if (!IsAotCompiler()) {
ScopedObjectAccess soa(self);
- gc::space::ImageSpace* image_space = heap_->GetBootImageSpace();
- if (image_space != nullptr) {
+ std::vector<gc::space::ImageSpace*> image_spaces = heap_->GetBootImageSpaces();
+ for (gc::space::ImageSpace* image_space : image_spaces) {
ATRACE_BEGIN("AddImageStringsToTable");
GetInternTable()->AddImageStringsToTable(image_space);
ATRACE_END();
- ATRACE_BEGIN("MoveImageClassesToClassTable");
- GetClassLinker()->AddBootImageClassesToClassTable();
- ATRACE_END();
}
+ ATRACE_BEGIN("MoveImageClassesToClassTable");
+ GetClassLinker()->AddBootImageClassesToClassTable();
+ ATRACE_END();
}
// If we are the zygote then we need to wait until after forking to create the code cache
@@ -564,7 +560,7 @@
CreateJit();
}
- if (!IsImageDex2OatEnabled() || !GetHeap()->HasImageSpace()) {
+ if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
@@ -593,6 +589,7 @@
PreInitializeNativeBridge(".");
}
InitNonZygoteOrPostFork(self->GetJniEnv(),
+ /* is_system_server */ false,
NativeBridgeAction::kInitialize,
GetInstructionSetString(kRuntimeISA));
}
@@ -616,8 +613,7 @@
if (fd >= 0) {
close(fd);
} else if (errno != EEXIST) {
- LOG(INFO) << "Failed to access the profile file. Profiler disabled.";
- return true;
+ LOG(WARNING) << "Failed to access the profile file. Profiler disabled.";
}
}
@@ -682,7 +678,8 @@
#endif
}
-void Runtime::InitNonZygoteOrPostFork(JNIEnv* env, NativeBridgeAction action, const char* isa) {
+void Runtime::InitNonZygoteOrPostFork(
+ JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
is_zygote_ = false;
if (is_native_bridge_loaded_) {
@@ -704,7 +701,7 @@
// before fork aren't attributed to an app.
heap_->ResetGcPerformanceInfo();
- if (!safe_mode_ && jit_options_->UseJIT() && jit_.get() == nullptr) {
+ if (!is_system_server && !safe_mode_ && jit_options_->UseJIT() && jit_.get() == nullptr) {
// Note that when running ART standalone (not zygote, nor zygote fork),
// the jit may have already been created.
CreateJit();
@@ -752,61 +749,92 @@
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
+// Attempts to open dex files from image(s). Given the image location, try to find the oat file
+// and open it to get the stored dex file. If the image is the first for a multi-image boot
+// classpath, go on and also open the other images.
static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files,
size_t* failures) {
DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
- std::string system_filename;
- bool has_system = false;
- std::string cache_filename_unused;
- bool dalvik_cache_exists_unused;
- bool has_cache_unused;
- bool is_global_cache_unused;
- bool found_image = gc::space::ImageSpace::FindImageFilename(image_location.c_str(),
- kRuntimeISA,
- &system_filename,
- &has_system,
- &cache_filename_unused,
- &dalvik_cache_exists_unused,
- &has_cache_unused,
- &is_global_cache_unused);
- *failures = 0;
- if (!found_image || !has_system) {
- return false;
- }
- std::string error_msg;
- // We are falling back to non-executable use of the oat file because patching failed, presumably
- // due to lack of space.
- std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
- std::string oat_location = ImageHeader::GetOatLocationFromImageLocation(image_location.c_str());
- std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
- if (file.get() == nullptr) {
- return false;
- }
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), false, false, &error_msg));
- if (elf_file.get() == nullptr) {
- return false;
- }
- std::unique_ptr<const OatFile> oat_file(
- OatFile::OpenWithElfFile(elf_file.release(), oat_location, nullptr, &error_msg));
- if (oat_file == nullptr) {
- LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
- return false;
- }
- for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
- if (oat_dex_file == nullptr) {
- *failures += 1;
- continue;
+ // Use a work-list approach, so that we can easily reuse the opening code.
+ std::vector<std::string> image_locations;
+ image_locations.push_back(image_location);
+
+ for (size_t index = 0; index < image_locations.size(); ++index) {
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename_unused;
+ bool dalvik_cache_exists_unused;
+ bool has_cache_unused;
+ bool is_global_cache_unused;
+ bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
+ kRuntimeISA,
+ &system_filename,
+ &has_system,
+ &cache_filename_unused,
+ &dalvik_cache_exists_unused,
+ &has_cache_unused,
+ &is_global_cache_unused);
+
+ if (!found_image || !has_system) {
+ return false;
}
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
- *failures += 1;
- } else {
- dex_files->push_back(std::move(dex_file));
+
+ // We are falling back to non-executable use of the oat file because patching failed, presumably
+ // due to lack of space.
+ std::string oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
+ std::string oat_location =
+ ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
+ // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
+ // that here.
+ if (EndsWith(oat_location, ".jar")) {
+ oat_location.replace(oat_location.length() - 3, 3, "oat");
}
+
+ std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
+ if (file.get() == nullptr) {
+ return false;
+ }
+ std::string error_msg;
+ std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), false, false, &error_msg));
+ if (elf_file.get() == nullptr) {
+ return false;
+ }
+ std::unique_ptr<const OatFile> oat_file(
+ OatFile::OpenWithElfFile(elf_file.release(), oat_location, nullptr, &error_msg));
+ if (oat_file == nullptr) {
+ LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
+ return false;
+ }
+
+ for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ if (oat_dex_file == nullptr) {
+ *failures += 1;
+ continue;
+ }
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ if (dex_file.get() == nullptr) {
+ *failures += 1;
+ } else {
+ dex_files->push_back(std::move(dex_file));
+ }
+ }
+
+ if (index == 0) {
+ // First file. See if this is a multi-image environment, and if so, enqueue the other images.
+ const OatHeader& boot_oat_header = oat_file->GetOatHeader();
+ const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPath);
+ if (boot_cp != nullptr) {
+ gc::space::ImageSpace::CreateMultiImageLocations(image_locations[0],
+ boot_cp,
+ &image_locations);
+ }
+ }
+
+ Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
}
- Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
return true;
}
@@ -944,7 +972,7 @@
runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
ATRACE_END();
- if (heap_->GetBootImageSpace() == nullptr && !allow_dex_file_fallback_) {
+ if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
ATRACE_END();
return false;
@@ -1052,7 +1080,7 @@
CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
class_linker_ = new ClassLinker(intern_table_);
- if (GetHeap()->HasImageSpace()) {
+ if (GetHeap()->HasBootImageSpace()) {
ATRACE_BEGIN("InitFromImage");
std::string error_msg;
bool result = class_linker_->InitFromImage(&error_msg);
@@ -1061,9 +1089,13 @@
LOG(ERROR) << "Could not initialize from image: " << error_msg;
return false;
}
+ /* TODO: Modify check to support multiple image spaces and reenable. b/26317072
if (kIsDebugBuild) {
- GetHeap()->GetBootImageSpace()->VerifyImageAllocations();
+ for (auto image_space : GetHeap()->GetBootImageSpaces()) {
+ image_space->VerifyImageAllocations();
+ }
}
+ */
if (boot_class_path_string_.empty()) {
// The bootclasspath is not explicitly specified: construct it from the loaded dex files.
const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
@@ -1209,20 +1241,33 @@
// First set up JniConstants, which is used by both the runtime's built-in native
// methods and libcore.
JniConstants::init(env);
- WellKnownClasses::Init(env);
// Then set up the native methods provided by the runtime itself.
RegisterRuntimeNativeMethods(env);
- // Then set up libcore, which is just a regular JNI library with a regular JNI_OnLoad.
- // Most JNI libraries can just use System.loadLibrary, but libcore can't because it's
- // the library that implements System.loadLibrary!
+ // Initialize classes used in JNI. The initialization requires runtime native
+ // methods to be loaded first.
+ WellKnownClasses::Init(env);
+
+ // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
+ // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
+ // libcore can't because it's the library that implements System.loadLibrary!
{
std::string error_msg;
- if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, nullptr, nullptr, &error_msg)) {
+ if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr,
+ /* is_shared_namespace */ false,
+ nullptr, nullptr, &error_msg)) {
LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
}
}
+ {
+ std::string error_msg;
+ if (!java_vm_->LoadNativeLibrary(env, "libopenjdk.so", nullptr,
+ /* is_shared_namespace */ false,
+ nullptr, nullptr, &error_msg)) {
+ LOG(FATAL) << "LoadNativeLibrary failed for \"libopenjdk.so\": " << error_msg;
+ }
+ }
// Initialize well known classes that may invoke runtime native methods.
WellKnownClasses::LateInit(env);
@@ -1300,6 +1345,11 @@
GetInternTable()->DumpForSigQuit(os);
GetJavaVM()->DumpForSigQuit(os);
GetHeap()->DumpForSigQuit(os);
+ if (GetJit() != nullptr) {
+ GetJit()->DumpForSigQuit(os);
+ } else {
+ os << "Running non JIT\n";
+ }
TrackedAllocators::Dump(os);
os << "\n";
@@ -1633,8 +1683,15 @@
callee_save_methods_[type] = reinterpret_cast<uintptr_t>(method);
}
-void Runtime::SetJitProfilingFilename(const char* profile_output_filename) {
+void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
+ const std::string& profile_output_filename) {
+ VLOG(profiler) << "Register app with " << profile_output_filename_
+ << " " << Join(code_paths, ':');
+ DCHECK(!profile_output_filename.empty());
profile_output_filename_ = profile_output_filename;
+ if (jit_.get() != nullptr && !profile_output_filename.empty() && !code_paths.empty()) {
+ jit_->StartProfileSaver(profile_output_filename, code_paths);
+ }
}
// Transaction support.
@@ -1780,18 +1837,6 @@
argv->push_back(feature_string);
}
-void Runtime::MaybeSaveJitProfilingInfo() {
- if (jit_.get() != nullptr && !profile_output_filename_.empty()) {
- jit_->SaveProfilingInfo(profile_output_filename_);
- }
-}
-
-void Runtime::UpdateProfilerState(int state) {
- if (state == kProfileBackground) {
- MaybeSaveJitProfilingInfo();
- }
-}
-
void Runtime::CreateJit() {
CHECK(!IsAotCompiler());
if (GetInstrumentation()->IsForcedInterpretOnly()) {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 93d8fcf..20acffb 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -457,7 +457,8 @@
void PreZygoteFork();
bool InitZygote();
- void InitNonZygoteOrPostFork(JNIEnv* env, NativeBridgeAction action, const char* isa);
+ void InitNonZygoteOrPostFork(
+ JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
const instrumentation::Instrumentation* GetInstrumentation() const {
return &instrumentation_;
@@ -467,8 +468,8 @@
return &instrumentation_;
}
- void SetJitProfilingFilename(const char* profile_output_filename);
- void UpdateProfilerState(int state);
+ void RegisterAppInfo(const std::vector<std::string>& code_paths,
+ const std::string& profile_output_filename);
// Transaction support.
bool IsActiveTransaction() const {
@@ -733,7 +734,6 @@
std::string profile_output_filename_;
ProfilerOptions profiler_options_;
- bool profiler_started_;
std::unique_ptr<TraceConfig> trace_config_;
diff --git a/runtime/utils.cc b/runtime/utils.cc
index eddc3a4..ff6b4c0 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1860,4 +1860,10 @@
*parsed_value = value;
}
+int64_t GetFileSizeBytes(const std::string& filename) {
+ struct stat stat_buf;
+ int rc = stat(filename.c_str(), &stat_buf);
+ return rc == 0 ? stat_buf.st_size : -1;
+}
+
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index 5b9e963..a07e74c 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -367,6 +367,9 @@
return dist(rng);
}
+// Return the file size in bytes or -1 if the file does not exists.
+int64_t GetFileSizeBytes(const std::string& filename);
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 2b778d9..8300921 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -140,6 +140,7 @@
jfieldID WellKnownClasses::java_lang_Thread_uncaughtHandler;
jfieldID WellKnownClasses::java_lang_Thread_nativePeer;
jfieldID WellKnownClasses::java_lang_ThreadGroup_groups;
+jfieldID WellKnownClasses::java_lang_ThreadGroup_ngroups;
jfieldID WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup;
jfieldID WellKnownClasses::java_lang_ThreadGroup_name;
jfieldID WellKnownClasses::java_lang_ThreadGroup_parent;
@@ -268,7 +269,7 @@
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
java_lang_Thread__UncaughtExceptionHandler_uncaughtException = CacheMethod(env, java_lang_Thread__UncaughtExceptionHandler, false, "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");
- java_lang_ThreadGroup_removeThread = CacheMethod(env, java_lang_ThreadGroup, false, "removeThread", "(Ljava/lang/Thread;)V");
+ java_lang_ThreadGroup_removeThread = CacheMethod(env, java_lang_ThreadGroup, false, "threadTerminated", "(Ljava/lang/Thread;)V");
java_nio_DirectByteBuffer_init = CacheMethod(env, java_nio_DirectByteBuffer, false, "<init>", "(JI)V");
libcore_reflect_AnnotationFactory_createAnnotation = CacheMethod(env, libcore_reflect_AnnotationFactory, true, "createAnnotation", "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)Ljava/lang/annotation/Annotation;");
libcore_reflect_AnnotationMember_init = CacheMethod(env, libcore_reflect_AnnotationMember, false, "<init>", "(Ljava/lang/String;Ljava/lang/Object;Ljava/lang/Class;Ljava/lang/reflect/Method;)V");
@@ -340,9 +341,10 @@
java_lang_Thread_lock = CacheField(env, java_lang_Thread, false, "lock", "Ljava/lang/Object;");
java_lang_Thread_name = CacheField(env, java_lang_Thread, false, "name", "Ljava/lang/String;");
java_lang_Thread_priority = CacheField(env, java_lang_Thread, false, "priority", "I");
- java_lang_Thread_uncaughtHandler = CacheField(env, java_lang_Thread, false, "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
+ java_lang_Thread_uncaughtHandler = CacheField(env, java_lang_Thread, false, "uncaughtExceptionHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;");
java_lang_Thread_nativePeer = CacheField(env, java_lang_Thread, false, "nativePeer", "J");
- java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "Ljava/util/List;");
+ java_lang_ThreadGroup_groups = CacheField(env, java_lang_ThreadGroup, false, "groups", "[Ljava/lang/ThreadGroup;");
+ java_lang_ThreadGroup_ngroups = CacheField(env, java_lang_ThreadGroup, false, "ngroups", "I");
java_lang_ThreadGroup_mainThreadGroup = CacheField(env, java_lang_ThreadGroup, true, "mainThreadGroup", "Ljava/lang/ThreadGroup;");
java_lang_ThreadGroup_name = CacheField(env, java_lang_ThreadGroup, false, "name", "Ljava/lang/String;");
java_lang_ThreadGroup_parent = CacheField(env, java_lang_ThreadGroup, false, "parent", "Ljava/lang/ThreadGroup;");
@@ -350,13 +352,13 @@
java_lang_Throwable_cause = CacheField(env, java_lang_Throwable, false, "cause", "Ljava/lang/Throwable;");
java_lang_Throwable_detailMessage = CacheField(env, java_lang_Throwable, false, "detailMessage", "Ljava/lang/String;");
java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", "[Ljava/lang/StackTraceElement;");
- java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "stackState", "Ljava/lang/Object;");
+ java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "backtrace", "Ljava/lang/Object;");
java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;");
java_lang_reflect_AbstractMethod_artMethod = CacheField(env, java_lang_reflect_AbstractMethod, false, "artMethod", "J");
java_lang_reflect_Proxy_h = CacheField(env, java_lang_reflect_Proxy, false, "h", "Ljava/lang/reflect/InvocationHandler;");
java_nio_DirectByteBuffer_capacity = CacheField(env, java_nio_DirectByteBuffer, false, "capacity", "I");
- java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "effectiveDirectAddress", "J");
- java_util_ArrayList_array = CacheField(env, java_util_ArrayList, false, "array", "[Ljava/lang/Object;");
+ java_nio_DirectByteBuffer_effectiveDirectAddress = CacheField(env, java_nio_DirectByteBuffer, false, "address", "J");
+ java_util_ArrayList_array = CacheField(env, java_util_ArrayList, false, "elementData", "[Ljava/lang/Object;");
java_util_ArrayList_size = CacheField(env, java_util_ArrayList, false, "size", "I");
java_util_Collections_EMPTY_LIST = CacheField(env, java_util_Collections, true, "EMPTY_LIST", "Ljava/util/List;");
libcore_util_EmptyArray_STACK_TRACE_ELEMENT = CacheField(env, libcore_util_EmptyArray, true, "STACK_TRACE_ELEMENT", "[Ljava/lang/StackTraceElement;");
@@ -381,7 +383,7 @@
ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime"));
java_lang_Runtime_nativeLoad =
CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad",
- "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;Ljava/lang/String;)"
+ "(Ljava/lang/String;Ljava/lang/ClassLoader;ZLjava/lang/String;Ljava/lang/String;)"
"Ljava/lang/String;");
}
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index c856291..55158a7 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -153,6 +153,7 @@
static jfieldID java_lang_Thread_uncaughtHandler;
static jfieldID java_lang_Thread_nativePeer;
static jfieldID java_lang_ThreadGroup_groups;
+ static jfieldID java_lang_ThreadGroup_ngroups;
static jfieldID java_lang_ThreadGroup_mainThreadGroup;
static jfieldID java_lang_ThreadGroup_name;
static jfieldID java_lang_ThreadGroup_parent;
diff --git a/runtime/zip_archive_test.cc b/runtime/zip_archive_test.cc
index aded30c..4fc7ee2 100644
--- a/runtime/zip_archive_test.cc
+++ b/runtime/zip_archive_test.cc
@@ -32,7 +32,7 @@
TEST_F(ZipArchiveTest, FindAndExtract) {
std::string error_msg;
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(GetLibCoreDexFileName().c_str(), &error_msg));
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(GetLibCoreDexFileNames()[0].c_str(), &error_msg));
ASSERT_TRUE(zip_archive.get() != nullptr) << error_msg;
ASSERT_TRUE(error_msg.empty());
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find("classes.dex", &error_msg));
diff --git a/test/003-omnibus-opcodes/expected.txt b/test/003-omnibus-opcodes/expected.txt
index b591a7a..ee25ec1 100644
--- a/test/003-omnibus-opcodes/expected.txt
+++ b/test/003-omnibus-opcodes/expected.txt
@@ -31,15 +31,7 @@
FloatMath.checkConvI
FloatMath.checkConvL
FloatMath.checkConvF
- 0: -2.0054409E9
- 1: -8.613303E18
- 2: -3.1415927
--2.0054409E9, -8.6133031E18, -3.1415927
FloatMath.checkConvD
- 0: -2.005440939E9
- 1: -8.613303245920329E18
- 2: 123.45600128173828
--2.005440939E9, -8.6133032459203287E18, 123.4560012817382
FloatMath.checkConsts
FloatMath.jlmTests
IntMath.testIntCompare
diff --git a/test/003-omnibus-opcodes/src/FloatMath.java b/test/003-omnibus-opcodes/src/FloatMath.java
index a0bc9f4..96befe9 100644
--- a/test/003-omnibus-opcodes/src/FloatMath.java
+++ b/test/003-omnibus-opcodes/src/FloatMath.java
@@ -245,10 +245,9 @@
}
static void checkConvF(float[] results) {
System.out.println("FloatMath.checkConvF");
- // TODO: Main.assertTrue values
- for (int i = 0; i < results.length; i++)
- System.out.println(" " + i + ": " + results[i]);
- System.out.println("-2.0054409E9, -8.6133031E18, -3.1415927");
+ Main.assertTrue(results[0] == -2.0054409E9f);
+ Main.assertTrue(results[1] == -8.613303E18f);
+ Main.assertTrue(results[2] == -3.1415927f);
}
static double[] convD(int i, long l, float f) {
@@ -260,10 +259,9 @@
}
static void checkConvD(double[] results) {
System.out.println("FloatMath.checkConvD");
- // TODO: Main.assertTrue values
- for (int i = 0; i < results.length; i++)
- System.out.println(" " + i + ": " + results[i]);
- System.out.println("-2.005440939E9, -8.6133032459203287E18, 123.4560012817382");
+ Main.assertTrue(results[0] == -2.005440939E9);
+ Main.assertTrue(results[1] == -8.6133032459203287E18);
+ Main.assertTrue(results[2] == 123.45600128173828);
}
static void checkConsts() {
diff --git a/test/031-class-attributes/expected.txt b/test/031-class-attributes/expected.txt
index de99872..72656ae 100644
--- a/test/031-class-attributes/expected.txt
+++ b/test/031-class-attributes/expected.txt
@@ -84,7 +84,7 @@
enclosingCon: null
enclosingMeth: null
modifiers: 1
- package: package otherpackage
+ package: package otherpackage, Unknown, version 0.0
declaredClasses: [0]
member classes: [0]
isAnnotation: false
diff --git a/test/034-call-null/expected.txt b/test/034-call-null/expected.txt
index 343226f..4e0281e 100644
--- a/test/034-call-null/expected.txt
+++ b/test/034-call-null/expected.txt
@@ -1,2 +1,2 @@
-java.lang.NullPointerException: Attempt to invoke direct method 'void Main.doStuff(int, int[][], java.lang.String, java.lang.String[][])' on a null object reference
+Exception in thread "main" java.lang.NullPointerException: Attempt to invoke direct method 'void Main.doStuff(int, int[][], java.lang.String, java.lang.String[][])' on a null object reference
at Main.main(Main.java:26)
diff --git a/test/038-inner-null/expected.txt b/test/038-inner-null/expected.txt
index ba411f0..2e92564 100644
--- a/test/038-inner-null/expected.txt
+++ b/test/038-inner-null/expected.txt
@@ -1,4 +1,4 @@
new Special()
-java.lang.NullPointerException: Attempt to invoke virtual method 'void Main$Blort.repaint()' on a null object reference
+Exception in thread "main" java.lang.NullPointerException: Attempt to invoke virtual method 'void Main$Blort.repaint()' on a null object reference
at Main$Special.callInner(Main.java:31)
at Main.main(Main.java:20)
diff --git a/test/042-new-instance/expected.txt b/test/042-new-instance/expected.txt
index 7d843d1..c5de313 100644
--- a/test/042-new-instance/expected.txt
+++ b/test/042-new-instance/expected.txt
@@ -9,3 +9,4 @@
Cons got expected PackageAccess complaint
Cons got expected InstantationException
Cons got expected PackageAccess2 complaint
+Cons ConstructorAccess succeeded
diff --git a/test/042-new-instance/src/Main.java b/test/042-new-instance/src/Main.java
index b0a5fd4..8cd6b2e 100644
--- a/test/042-new-instance/src/Main.java
+++ b/test/042-new-instance/src/Main.java
@@ -156,6 +156,14 @@
ex.printStackTrace();
}
+ // should succeed
+ try {
+ otherpackage.ConstructorAccess.newConstructorInstance();
+ System.out.println("Cons ConstructorAccess succeeded");
+ } catch (Exception ex) {
+ System.err.println("Cons ConstructorAccess failed");
+ ex.printStackTrace();
+ }
}
class InnerClass {
@@ -173,7 +181,6 @@
public LocalClass2() {}
}
-
class LocalClass3 {
public static void main() {
try {
diff --git a/test/042-new-instance/src/otherpackage/ConstructorAccess.java b/test/042-new-instance/src/otherpackage/ConstructorAccess.java
new file mode 100644
index 0000000..a74e9a0
--- /dev/null
+++ b/test/042-new-instance/src/otherpackage/ConstructorAccess.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package otherpackage;
+
+import java.lang.reflect.Constructor;
+
+public class ConstructorAccess {
+
+ static class Inner {
+ Inner() {}
+ }
+
+ // Test for regression in b/25817515. Inner class constructor should
+ // be accessible from this static method, but if we over-shoot and check
+ // accessibility using the frame below (in Main class), we will see an
+ // IllegalAccessException from #newInstance
+ static public void newConstructorInstance() throws Exception {
+ Class c = Inner.class;
+ Constructor cons = c.getDeclaredConstructor((Class[]) null);
+ Object obj = cons.newInstance();
+ }
+}
diff --git a/test/046-reflect/expected.txt b/test/046-reflect/expected.txt
index d657d44..06932b9 100644
--- a/test/046-reflect/expected.txt
+++ b/test/046-reflect/expected.txt
@@ -96,8 +96,8 @@
got expected exception for Constructor.newInstance
ReflectTest done!
public method
-static java.lang.Object java.util.Collections.checkType(java.lang.Object,java.lang.Class) accessible=false
-static java.lang.Object java.util.Collections.checkType(java.lang.Object,java.lang.Class) accessible=true
+private static void java.util.Collections.swap(java.lang.Object[],int,int) accessible=false
+private static void java.util.Collections.swap(java.lang.Object[],int,int) accessible=true
checkType invoking null
checkType got expected exception
calling const-class FieldNoisyInitUser.class
diff --git a/test/046-reflect/src/Main.java b/test/046-reflect/src/Main.java
index 0c90109..67a0d11 100644
--- a/test/046-reflect/src/Main.java
+++ b/test/046-reflect/src/Main.java
@@ -407,12 +407,13 @@
System.out.println("ReflectTest done!");
}
- public static void checkType() {
+ public static void checkSwap() {
Method m;
+ final Object[] objects = new Object[2];
try {
- m = Collections.class.getDeclaredMethod("checkType",
- Object.class, Class.class);
+ m = Collections.class.getDeclaredMethod("swap",
+ Object[].class, int.class, int.class);
} catch (NoSuchMethodException nsme) {
nsme.printStackTrace();
return;
@@ -421,7 +422,7 @@
m.setAccessible(true);
System.out.println(m + " accessible=" + m.isAccessible());
try {
- m.invoke(null, new Object(), Object.class);
+ m.invoke(null, objects, 0, 1);
} catch (IllegalAccessException iae) {
iae.printStackTrace();
return;
@@ -432,7 +433,7 @@
try {
String s = "Should be ignored";
- m.invoke(s, new Object(), Object.class);
+ m.invoke(s, objects, 0, 1);
} catch (IllegalAccessException iae) {
iae.printStackTrace();
return;
@@ -443,7 +444,8 @@
try {
System.out.println("checkType invoking null");
- m.invoke(null, new Object(), int.class);
+ // Trigger an NPE at the target.
+ m.invoke(null, null, 0, 1);
System.out.println("ERROR: should throw InvocationTargetException");
} catch (InvocationTargetException ite) {
System.out.println("checkType got expected exception");
@@ -710,27 +712,27 @@
private static void checkGetDeclaredConstructor() {
try {
Method.class.getDeclaredConstructor().setAccessible(true);
- System.out.print("Didn't get an exception from Method.class.getDeclaredConstructor().setAccessible");
+ System.out.println("Didn't get an exception from Method.class.getDeclaredConstructor().setAccessible");
} catch (SecurityException e) {
} catch (NoSuchMethodException e) {
} catch (Exception e) {
- System.out.print(e);
+ System.out.println(e);
}
try {
Field.class.getDeclaredConstructor().setAccessible(true);
- System.out.print("Didn't get an exception from Field.class.getDeclaredConstructor().setAccessible");
+ System.out.println("Didn't get an exception from Field.class.getDeclaredConstructor().setAccessible");
} catch (SecurityException e) {
} catch (NoSuchMethodException e) {
} catch (Exception e) {
- System.out.print(e);
+ System.out.println(e);
}
try {
Class.class.getDeclaredConstructor().setAccessible(true);
- System.out.print("Didn't get an exception from Class.class.getDeclaredConstructor().setAccessible");
+ System.out.println("Didn't get an exception from Class.class.getDeclaredConstructor().setAccessible");
} catch (SecurityException e) {
} catch (NoSuchMethodException e) {
} catch (Exception e) {
- System.out.print(e);
+ System.out.println(e);
}
}
@@ -744,7 +746,7 @@
checkGetDeclaredConstructor();
checkAccess();
- checkType();
+ checkSwap();
checkClinitForFields();
checkClinitForMethods();
checkGeneric();
diff --git a/test/055-enum-performance/src/Main.java b/test/055-enum-performance/src/Main.java
index d5903af..d6bb211 100644
--- a/test/055-enum-performance/src/Main.java
+++ b/test/055-enum-performance/src/Main.java
@@ -20,7 +20,7 @@
throw new AssertionError();
} catch (InvocationTargetException expected) {
IllegalArgumentException iae = (IllegalArgumentException) expected.getCause();
- if (!iae.getMessage().equals("class java.lang.String is not an enum type")) {
+ if (!iae.getMessage().equals("class java.lang.String is not an enum type.")) {
throw new AssertionError();
}
}
diff --git a/test/063-process-manager/expected.txt b/test/063-process-manager/expected.txt
index 8360239..8c01bf0 100644
--- a/test/063-process-manager/expected.txt
+++ b/test/063-process-manager/expected.txt
@@ -4,12 +4,12 @@
spawning child
process manager: RUNNABLE
child died
-process manager: WAITING
+process manager: TIMED_WAITING
spawning child #2
spawning child
process manager: RUNNABLE
child died
-process manager: WAITING
+process manager: TIMED_WAITING
done!
diff --git a/test/063-process-manager/src/Main.java b/test/063-process-manager/src/Main.java
index 68bf878..e9e522c 100644
--- a/test/063-process-manager/src/Main.java
+++ b/test/063-process-manager/src/Main.java
@@ -30,7 +30,7 @@
traces.entrySet()) {
Thread t = entry.getKey();
String name = t.getName();
- if (name.equals("java.lang.ProcessManager")) {
+ if (name.indexOf("process reaper") >= 0) {
System.out.println("process manager: " + t.getState());
found = true;
}
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 5913c40..af25d9b 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -803,7 +803,7 @@
Assert.assertEquals(Math.round(-2.5d), -2l);
Assert.assertEquals(Math.round(-2.9d), -3l);
Assert.assertEquals(Math.round(-3.0d), -3l);
- Assert.assertEquals(Math.round(0.49999999999999994d), 1l);
+ Assert.assertEquals(Math.round(0.49999999999999994d), 0l);
Assert.assertEquals(Math.round(Double.NaN), (long)+0.0d);
Assert.assertEquals(Math.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
Assert.assertEquals(Math.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
@@ -1034,7 +1034,7 @@
Assert.assertEquals(StrictMath.round(-2.5d), -2l);
Assert.assertEquals(StrictMath.round(-2.9d), -3l);
Assert.assertEquals(StrictMath.round(-3.0d), -3l);
- Assert.assertEquals(StrictMath.round(0.49999999999999994d), 1l);
+ Assert.assertEquals(StrictMath.round(0.49999999999999994d), 0l);
Assert.assertEquals(StrictMath.round(Double.NaN), (long)+0.0d);
Assert.assertEquals(StrictMath.round(Long.MAX_VALUE + 1.0d), Long.MAX_VALUE);
Assert.assertEquals(StrictMath.round(Long.MIN_VALUE - 1.0d), Long.MIN_VALUE);
diff --git a/test/100-reflect2/expected.txt b/test/100-reflect2/expected.txt
index 0b87a4f..e4988c9 100644
--- a/test/100-reflect2/expected.txt
+++ b/test/100-reflect2/expected.txt
@@ -32,8 +32,8 @@
62 (class java.lang.Long)
14 (class java.lang.Short)
[java.lang.String(int,int,char[]), public java.lang.String(), public java.lang.String(byte[]), public java.lang.String(byte[],int), public java.lang.String(byte[],int,int), public java.lang.String(byte[],int,int,int), public java.lang.String(byte[],int,int,java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],int,int,java.nio.charset.Charset), public java.lang.String(byte[],java.lang.String) throws java.io.UnsupportedEncodingException, public java.lang.String(byte[],java.nio.charset.Charset), public java.lang.String(char[]), public java.lang.String(char[],int,int), public java.lang.String(int[],int,int), public java.lang.String(java.lang.String), public java.lang.String(java.lang.StringBuffer), public java.lang.String(java.lang.StringBuilder)]
-[private final int java.lang.String.count, private int java.lang.String.hashCode, private static final char java.lang.String.REPLACEMENT_CHAR, private static final char[] java.lang.String.ASCII, private static final long java.lang.String.serialVersionUID, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
-[native void java.lang.String.getCharsNoCheck(int,int,char[],int), native void java.lang.String.setCharAt(int,char), private char java.lang.String.foldCase(char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.failedBoundsCheck(int,int,int), private java.lang.StringIndexOutOfBoundsException java.lang.String.indexAndLength(int), private java.lang.StringIndexOutOfBoundsException java.lang.String.startEndAndLength(int,int), private native int java.lang.String.fastIndexOf(int,int), private native java.lang.String java.lang.String.fastSubstring(int,int), private static int java.lang.String.indexOf(java.lang.String,java.lang.String,int,int,char), public [B java.lang.String.getBytes(), public [B java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public [B java.lang.String.getBytes(java.nio.charset.Charset), public [Ljava.lang.String; java.lang.String.split(java.lang.String), public [Ljava.lang.String; java.lang.String.split(java.lang.String,int), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public int java.lang.String.codePointAt(int), public int 
java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public native [C java.lang.String.toCharArray(), public native char java.lang.String.charAt(int), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String 
java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int)]
+[private final int java.lang.String.count, private int java.lang.String.hash, private static final java.io.ObjectStreamField[] java.lang.String.serialPersistentFields, private static final long java.lang.String.serialVersionUID, private static int java.lang.String.HASHING_SEED, public static final java.util.Comparator java.lang.String.CASE_INSENSITIVE_ORDER]
+[int java.lang.String.hash32(), native void java.lang.String.getCharsNoCheck(int,int,char[],int), native void java.lang.String.setCharAt(int,char), private int java.lang.String.indexOfSupplementary(int,int), private int java.lang.String.lastIndexOfSupplementary(int,int), private native int java.lang.String.fastIndexOf(int,int), private native java.lang.String java.lang.String.fastSubstring(int,int), private static int java.lang.String.getHashingSeed(), public boolean java.lang.String.contains(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.CharSequence), public boolean java.lang.String.contentEquals(java.lang.StringBuffer), public boolean java.lang.String.endsWith(java.lang.String), public boolean java.lang.String.equals(java.lang.Object), public boolean java.lang.String.equalsIgnoreCase(java.lang.String), public boolean java.lang.String.isEmpty(), public boolean java.lang.String.matches(java.lang.String), public boolean java.lang.String.regionMatches(boolean,int,java.lang.String,int,int), public boolean java.lang.String.regionMatches(int,java.lang.String,int,int), public boolean java.lang.String.startsWith(java.lang.String), public boolean java.lang.String.startsWith(java.lang.String,int), public byte[] java.lang.String.getBytes(), public byte[] java.lang.String.getBytes(java.lang.String) throws java.io.UnsupportedEncodingException, public byte[] java.lang.String.getBytes(java.nio.charset.Charset), public int java.lang.String.codePointAt(int), public int java.lang.String.codePointBefore(int), public int java.lang.String.codePointCount(int,int), public int java.lang.String.compareTo(java.lang.Object), public int java.lang.String.compareToIgnoreCase(java.lang.String), public int java.lang.String.hashCode(), public int java.lang.String.indexOf(int), public int java.lang.String.indexOf(int,int), public int java.lang.String.indexOf(java.lang.String), public int java.lang.String.indexOf(java.lang.String,int), public int 
java.lang.String.lastIndexOf(int), public int java.lang.String.lastIndexOf(int,int), public int java.lang.String.lastIndexOf(java.lang.String), public int java.lang.String.lastIndexOf(java.lang.String,int), public int java.lang.String.length(), public int java.lang.String.offsetByCodePoints(int,int), public java.lang.CharSequence java.lang.String.subSequence(int,int), public java.lang.String java.lang.String.replace(char,char), public java.lang.String java.lang.String.replace(java.lang.CharSequence,java.lang.CharSequence), public java.lang.String java.lang.String.replaceAll(java.lang.String,java.lang.String), public java.lang.String java.lang.String.replaceFirst(java.lang.String,java.lang.String), public java.lang.String java.lang.String.substring(int), public java.lang.String java.lang.String.substring(int,int), public java.lang.String java.lang.String.toLowerCase(), public java.lang.String java.lang.String.toLowerCase(java.util.Locale), public java.lang.String java.lang.String.toString(), public java.lang.String java.lang.String.toUpperCase(), public java.lang.String java.lang.String.toUpperCase(java.util.Locale), public java.lang.String java.lang.String.trim(), public java.lang.String[] java.lang.String.split(java.lang.String), public java.lang.String[] java.lang.String.split(java.lang.String,int), public native char java.lang.String.charAt(int), public native char[] java.lang.String.toCharArray(), public native int java.lang.String.compareTo(java.lang.String), public native java.lang.String java.lang.String.concat(java.lang.String), public native java.lang.String java.lang.String.intern(), public static java.lang.String java.lang.String.copyValueOf(char[]), public static java.lang.String java.lang.String.copyValueOf(char[],int,int), public static java.lang.String java.lang.String.format(java.lang.String,java.lang.Object[]), public static java.lang.String java.lang.String.format(java.util.Locale,java.lang.String,java.lang.Object[]), public static 
java.lang.String java.lang.String.valueOf(boolean), public static java.lang.String java.lang.String.valueOf(char), public static java.lang.String java.lang.String.valueOf(char[]), public static java.lang.String java.lang.String.valueOf(char[],int,int), public static java.lang.String java.lang.String.valueOf(double), public static java.lang.String java.lang.String.valueOf(float), public static java.lang.String java.lang.String.valueOf(int), public static java.lang.String java.lang.String.valueOf(java.lang.Object), public static java.lang.String java.lang.String.valueOf(long), public void java.lang.String.getBytes(int,int,byte[],int), public void java.lang.String.getChars(int,int,char[],int), static int java.lang.String.indexOf(char[],int,int,char[],int,int,int), static int java.lang.String.indexOf(java.lang.String,java.lang.String,int), static int java.lang.String.lastIndexOf(char[],int,int,char[],int,int,int), static int java.lang.String.lastIndexOf(java.lang.String,java.lang.String,int)]
[]
[interface java.io.Serializable, interface java.lang.Comparable, interface java.lang.CharSequence]
0
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index b6b1c43..1337442 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -35,8 +35,9 @@
}
static bool isRelocationDeltaZero() {
- gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- return space != nullptr && space->GetImageHeader().GetPatchDelta() == 0;
+ std::vector<gc::space::ImageSpace*> spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ return !spaces.empty() && spaces[0]->GetImageHeader().GetPatchDelta() == 0;
}
static bool hasExecutableOat(jclass cls) {
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 7762b2d..9bfe429 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -92,9 +92,10 @@
// detecting this.
#if __linux__
static bool IsPicImage() {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetBootImageSpace();
- CHECK(image_space != nullptr); // We should be running with an image.
- const OatFile* oat_file = image_space->GetOatFile();
+ std::vector<gc::space::ImageSpace*> image_spaces =
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
+ CHECK(!image_spaces.empty()); // We should be running with an image.
+ const OatFile* oat_file = image_spaces[0]->GetOatFile();
CHECK(oat_file != nullptr); // We should have an oat file to go with the image.
return oat_file->IsPic();
}
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index dc3ef7e..5474c9b 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -117,7 +117,7 @@
// Could do reflection for the private pid field, but String parsing is easier.
String s = p.toString();
if (s.startsWith("Process[pid=")) {
- return Integer.parseInt(s.substring("Process[pid=".length(), s.length() - 1));
+ return Integer.parseInt(s.substring("Process[pid=".length(), s.indexOf(",")));
} else {
return -1;
}
diff --git a/test/444-checker-nce/src/Main.java b/test/444-checker-nce/src/Main.java
index 32122e4..865355c 100644
--- a/test/444-checker-nce/src/Main.java
+++ b/test/444-checker-nce/src/Main.java
@@ -16,11 +16,11 @@
public class Main {
- /// CHECK-START: Main Main.keepTest(Main) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.keepTest(Main) instruction_simplifier (before)
/// CHECK: NullCheck
/// CHECK: InvokeStaticOrDirect
- /// CHECK-START: Main Main.keepTest(Main) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.keepTest(Main) instruction_simplifier (after)
/// CHECK: NullCheck
/// CHECK: InvokeStaticOrDirect
public Main keepTest(Main m) {
@@ -31,7 +31,7 @@
/// CHECK: NullCheck
/// CHECK: InvokeStaticOrDirect
- /// CHECK-START: Main Main.thisTest() instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.thisTest() instruction_simplifier (after)
/// CHECK-NOT: NullCheck
/// CHECK: InvokeStaticOrDirect
public Main thisTest() {
@@ -45,7 +45,7 @@
/// CHECK: NullCheck
/// CHECK: InvokeStaticOrDirect
- /// CHECK-START: Main Main.newInstanceRemoveTest() instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.newInstanceRemoveTest() instruction_simplifier (after)
/// CHECK-NOT: NullCheck
public Main newInstanceRemoveTest() {
Main m = new Main();
@@ -57,7 +57,7 @@
/// CHECK: NullCheck
/// CHECK: ArrayGet
- /// CHECK-START: Main Main.newArrayRemoveTest() instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.newArrayRemoveTest() instruction_simplifier (after)
/// CHECK: NewArray
/// CHECK-NOT: NullCheck
/// CHECK: ArrayGet
@@ -66,11 +66,11 @@
return ms[0];
}
- /// CHECK-START: Main Main.ifRemoveTest(boolean) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.ifRemoveTest(boolean) instruction_simplifier (before)
/// CHECK: NewInstance
/// CHECK: NullCheck
- /// CHECK-START: Main Main.ifRemoveTest(boolean) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.ifRemoveTest(boolean) instruction_simplifier (after)
/// CHECK: NewInstance
/// CHECK-NOT: NullCheck
public Main ifRemoveTest(boolean flag) {
@@ -83,11 +83,11 @@
return m.g();
}
- /// CHECK-START: Main Main.ifKeepTest(boolean) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.ifKeepTest(boolean) instruction_simplifier (before)
/// CHECK: NewInstance
/// CHECK: NullCheck
- /// CHECK-START: Main Main.ifKeepTest(boolean) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.ifKeepTest(boolean) instruction_simplifier (after)
/// CHECK: NewInstance
/// CHECK: NullCheck
public Main ifKeepTest(boolean flag) {
@@ -98,10 +98,10 @@
return m.g();
}
- /// CHECK-START: Main Main.forRemoveTest(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.forRemoveTest(int) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.forRemoveTest(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.forRemoveTest(int) instruction_simplifier (after)
/// CHECK-NOT: NullCheck
public Main forRemoveTest(int count) {
Main a = new Main();
@@ -114,10 +114,10 @@
return m.g();
}
- /// CHECK-START: Main Main.forKeepTest(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.forKeepTest(int) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.forKeepTest(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.forKeepTest(int) instruction_simplifier (after)
/// CHECK: NullCheck
public Main forKeepTest(int count) {
Main a = new Main();
@@ -132,10 +132,10 @@
return m.g();
}
- /// CHECK-START: Main Main.phiFlowRemoveTest(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.phiFlowRemoveTest(int) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.phiFlowRemoveTest(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.phiFlowRemoveTest(int) instruction_simplifier (after)
/// CHECK-NOT: NullCheck
public Main phiFlowRemoveTest(int count) {
Main a = new Main();
@@ -154,10 +154,10 @@
return n.g();
}
- /// CHECK-START: Main Main.phiFlowKeepTest(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.phiFlowKeepTest(int) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.phiFlowKeepTest(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.phiFlowKeepTest(int) instruction_simplifier (after)
/// CHECK: NullCheck
public Main phiFlowKeepTest(int count) {
Main a = new Main();
@@ -181,7 +181,7 @@
/// CHECK-START: Main Main.scopeRemoveTest(int, Main) ssa_builder (after)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.scopeRemoveTest(int, Main) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.scopeRemoveTest(int, Main) instruction_simplifier (after)
/// CHECK-NOT: NullCheck
public Main scopeRemoveTest(int count, Main a) {
Main m = null;
@@ -196,10 +196,10 @@
return m;
}
- /// CHECK-START: Main Main.scopeKeepTest(int, Main) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.scopeKeepTest(int, Main) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.scopeKeepTest(int, Main) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.scopeKeepTest(int, Main) instruction_simplifier (after)
/// CHECK: NullCheck
public Main scopeKeepTest(int count, Main a) {
Main m = new Main();
@@ -214,10 +214,10 @@
return m;
}
- /// CHECK-START: Main Main.scopeIfNotNullRemove(Main) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.scopeIfNotNullRemove(Main) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.scopeIfNotNullRemove(Main) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.scopeIfNotNullRemove(Main) instruction_simplifier (after)
/// CHECK-NOT: NullCheck
public Main scopeIfNotNullRemove(Main m) {
if (m != null) {
@@ -226,10 +226,10 @@
return m;
}
- /// CHECK-START: Main Main.scopeIfKeep(Main) instruction_simplifier_after_types (before)
+ /// CHECK-START: Main Main.scopeIfKeep(Main) instruction_simplifier (before)
/// CHECK: NullCheck
- /// CHECK-START: Main Main.scopeIfKeep(Main) instruction_simplifier_after_types (after)
+ /// CHECK-START: Main Main.scopeIfKeep(Main) instruction_simplifier (after)
/// CHECK: NullCheck
public Main scopeIfKeep(Main m) {
if (m == null) {
@@ -258,11 +258,11 @@
class ListElement {
private ListElement next;
- /// CHECK-START: boolean ListElement.isShorter(ListElement, ListElement) instruction_simplifier_after_types (before)
+ /// CHECK-START: boolean ListElement.isShorter(ListElement, ListElement) instruction_simplifier (before)
/// CHECK: NullCheck
/// CHECK: NullCheck
- /// CHECK-START: boolean ListElement.isShorter(ListElement, ListElement) instruction_simplifier_after_types (after)
+ /// CHECK-START: boolean ListElement.isShorter(ListElement, ListElement) instruction_simplifier (after)
/// CHECK-NOT: NullCheck
static boolean isShorter(ListElement x, ListElement y) {
ListElement xTail = x;
diff --git a/test/445-checker-licm/src/Main.java b/test/445-checker-licm/src/Main.java
index 6ee8a4d..061fe6e 100644
--- a/test/445-checker-licm/src/Main.java
+++ b/test/445-checker-licm/src/Main.java
@@ -52,13 +52,13 @@
return result;
}
- /// CHECK-START: int Main.innerDiv2() licm (before)
+ /// CHECK-START: int Main.innerMul() licm (before)
/// CHECK-DAG: Mul loop:B4
- /// CHECK-START: int Main.innerDiv2() licm (after)
+ /// CHECK-START: int Main.innerMul() licm (after)
/// CHECK-DAG: Mul loop:B2
- public static int innerDiv2() {
+ public static int innerMul() {
int result = 0;
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
@@ -71,13 +71,13 @@
return result;
}
- /// CHECK-START: int Main.innerDiv3(int, int) licm (before)
+ /// CHECK-START: int Main.divByA(int, int) licm (before)
/// CHECK-DAG: Div loop:{{B\d+}}
- /// CHECK-START: int Main.innerDiv3(int, int) licm (after)
+ /// CHECK-START: int Main.divByA(int, int) licm (after)
/// CHECK-DAG: Div loop:{{B\d+}}
- public static int innerDiv3(int a, int b) {
+ public static int divByA(int a, int b) {
int result = 0;
while (b < 5) {
// a might be null, so we can't hoist the operation.
@@ -107,6 +107,63 @@
return result;
}
+ /// CHECK-START: int Main.divAndIntrinsic(int[]) licm (before)
+ /// CHECK-DAG: Div loop:{{B\d+}}
+
+ /// CHECK-START: int Main.divAndIntrinsic(int[]) licm (after)
+ /// CHECK-NOT: Div loop:{{B\d+}}
+
+ /// CHECK-START: int Main.divAndIntrinsic(int[]) licm (after)
+ /// CHECK-DAG: Div loop:none
+
+ public static int divAndIntrinsic(int[] array) {
+ int result = 0;
+ for (int i = 0; i < array.length; i++) {
+ // An intrinsic call, unlike a general method call, cannot modify the field value.
+ // As a result, the invariant division on the field can be moved out of the loop.
+ result += (staticField / 42) + Math.abs(array[i]);
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.invariantBoundIntrinsic(int) licm (before)
+ /// CHECK-DAG: InvokeStaticOrDirect loop:{{B\d+}}
+
+ /// CHECK-START: int Main.invariantBoundIntrinsic(int) licm (after)
+ /// CHECK-NOT: InvokeStaticOrDirect loop:{{B\d+}}
+
+ /// CHECK-START: int Main.invariantBoundIntrinsic(int) licm (after)
+ /// CHECK-DAG: InvokeStaticOrDirect loop:none
+
+ public static int invariantBoundIntrinsic(int x) {
+ int result = 0;
+ // The intrinsic call to abs used as loop bound is invariant.
+ // As a result, the call itself can be moved out of the loop header.
+ for (int i = 0; i < Math.abs(x); i++) {
+ result += i;
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (before)
+ /// CHECK-DAG: InvokeStaticOrDirect loop:{{B\d+}}
+
+ /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (after)
+ /// CHECK-NOT: InvokeStaticOrDirect loop:{{B\d+}}
+
+ /// CHECK-START: int Main.invariantBodyIntrinsic(int, int) licm (after)
+ /// CHECK-DAG: InvokeStaticOrDirect loop:none
+
+ public static int invariantBodyIntrinsic(int x, int y) {
+ int result = 0;
+ for (int i = 0; i < 10; i++) {
+ // The intrinsic call to max used inside the loop is invariant.
+ // As a result, the call itself can be moved out of the loop body.
+ result += Math.max(x, y);
+ }
+ return result;
+ }
+
public static int staticField = 42;
public static void assertEquals(int expected, int actual) {
@@ -118,6 +175,11 @@
public static void main(String[] args) {
assertEquals(10, div());
assertEquals(100, innerDiv());
+ assertEquals(18900, innerMul());
+ assertEquals(105, divByA(2, 0));
assertEquals(12, arrayLength(new int[] { 4, 8 }));
+ assertEquals(21, divAndIntrinsic(new int[] { 4, -2, 8, -3 }));
+ assertEquals(45, invariantBoundIntrinsic(-10));
+ assertEquals(30, invariantBodyIntrinsic(2, 3));
}
}
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index c3d2759..6e7ba40 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -616,6 +616,40 @@
}
}
+ static int[][] mA;
+
+ /// CHECK-START: void Main.dynamicBCEAndIntrinsic(int) BCE (before)
+ /// CHECK-DAG: NullCheck
+ /// CHECK-DAG: ArrayLength
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: ArrayGet
+ /// CHECK-DAG: NullCheck
+ /// CHECK-DAG: ArrayLength
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: ArrayGet
+ /// CHECK-DAG: InvokeStaticOrDirect
+ /// CHECK-DAG: ArraySet
+
+ /// CHECK-START: void Main.dynamicBCEAndIntrinsic(int) BCE (after)
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: ArrayLength
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-DAG: ArrayGet
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-DAG: InvokeStaticOrDirect
+ /// CHECK-DAG: ArraySet
+ /// CHECK-DAG: Exit
+ /// CHECK-DAG: Deoptimize
+
+ static void dynamicBCEAndIntrinsic(int n) {
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < n; j++) {
+ // Since intrinsic call cannot modify fields or arrays,
+ // dynamic BCE and hoisting can be applied to the inner loop.
+ mA[i][j] = Math.abs(mA[i][j]);
+ }
+ }
+ }
static int foo() {
try {
@@ -1225,6 +1259,21 @@
}
}
+ mA = new int[4][4];
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ mA[i][j] = -1;
+ }
+ }
+ dynamicBCEAndIntrinsic(4);
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ if (mA[i][j] != 1) {
+ System.out.println("dynamic bce failed!");
+ }
+ }
+ }
+
array = new int[7];
pyramid1(array);
if (!isPyramid(array)) {
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index f1f80ca..fd4dd5e 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -72,49 +72,49 @@
public class Main {
- /// CHECK-START: void Main.testSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testSimpleRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testSimpleRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testSimpleRemove() {
Super s = new SubclassA();
((SubclassA)s).$noinline$g();
}
- /// CHECK-START: void Main.testSimpleKeep(Super) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testSimpleKeep(Super) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testSimpleKeep(Super) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testSimpleKeep(Super) instruction_simplifier (after)
/// CHECK: CheckCast
public void testSimpleKeep(Super s) {
((SubclassA)s).$noinline$f();
}
- /// CHECK-START: java.lang.String Main.testClassRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: java.lang.String Main.testClassRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: java.lang.String Main.testClassRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: java.lang.String Main.testClassRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public String testClassRemove() {
Object s = SubclassA.class;
return ((Class)s).getName();
}
- /// CHECK-START: java.lang.String Main.testClassKeep() instruction_simplifier_after_types (before)
+ /// CHECK-START: java.lang.String Main.testClassKeep() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: java.lang.String Main.testClassKeep() instruction_simplifier_after_types (after)
+ /// CHECK-START: java.lang.String Main.testClassKeep() instruction_simplifier (after)
/// CHECK: CheckCast
public String testClassKeep() {
Object s = SubclassA.class;
return ((SubclassA)s).$noinline$h();
}
- /// CHECK-START: void Main.testIfRemove(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testIfRemove(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testIfRemove(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testIfRemove(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testIfRemove(int x) {
Super s;
@@ -126,10 +126,10 @@
((SubclassA)s).$noinline$g();
}
- /// CHECK-START: void Main.testIfKeep(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testIfKeep(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testIfKeep(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testIfKeep(int) instruction_simplifier (after)
/// CHECK: CheckCast
public void testIfKeep(int x) {
Super s;
@@ -141,10 +141,10 @@
((SubclassA)s).$noinline$g();
}
- /// CHECK-START: void Main.testForRemove(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testForRemove(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testForRemove(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testForRemove(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testForRemove(int x) {
Super s = new SubclassA();
@@ -156,10 +156,10 @@
((SubclassA)s).$noinline$g();
}
- /// CHECK-START: void Main.testForKeep(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testForKeep(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testForKeep(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testForKeep(int) instruction_simplifier (after)
/// CHECK: CheckCast
public void testForKeep(int x) {
Super s = new SubclassA();
@@ -171,10 +171,10 @@
((SubclassC)s).$noinline$g();
}
- /// CHECK-START: void Main.testPhiFromCall(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testPhiFromCall(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testPhiFromCall(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testPhiFromCall(int) instruction_simplifier (after)
/// CHECK: CheckCast
public void testPhiFromCall(int i) {
Object x;
@@ -186,11 +186,12 @@
((SubclassC)x).$noinline$g();
}
- /// CHECK-START: void Main.testInstanceOf(java.lang.Object) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOf(java.lang.Object) instruction_simplifier (before)
/// CHECK: CheckCast
/// CHECK: CheckCast
+ /// CHECK-NOT: CheckCast
- /// CHECK-START: void Main.testInstanceOf(java.lang.Object) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOf(java.lang.Object) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOf(Object o) {
if (o instanceof SubclassC) {
@@ -201,11 +202,101 @@
}
}
- /// CHECK-START: void Main.testInstanceOfKeep(java.lang.Object) instruction_simplifier_after_types (before)
+ public static boolean $inline$InstanceofSubclassB(Object o) { return o instanceof SubclassB; }
+ public static boolean $inline$InstanceofSubclassC(Object o) { return o instanceof SubclassC; }
+
+ /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) ssa_builder (after)
+ /// CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<IOf1:z\d+>> InstanceOf
+ /// CHECK-DAG: NotEqual [<<IOf1>>,<<Cst1>>]
+ /// CHECK-DAG: <<IOf2:z\d+>> InstanceOf
+ /// CHECK-DAG: Equal [<<IOf2>>,<<Cst0>>]
+
+ /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (before)
+ /// CHECK: CheckCast
+ /// CHECK: CheckCast
+ /// CHECK-NOT: CheckCast
+
+ /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (after)
+ /// CHECK-NOT: CheckCast
+ public void testInstanceOf_NotInlined(Object o) {
+ if ((o instanceof SubclassC) == true) {
+ ((SubclassC)o).$noinline$g();
+ }
+ if ((o instanceof SubclassB) != false) {
+ ((SubclassB)o).$noinline$g();
+ }
+ }
+
+ /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) ssa_builder (after)
+ /// CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<IOf1:z\d+>> InstanceOf
+ /// CHECK-DAG: Equal [<<IOf1>>,<<Cst1>>]
+ /// CHECK-DAG: <<IOf2:z\d+>> InstanceOf
+ /// CHECK-DAG: NotEqual [<<IOf2>>,<<Cst0>>]
+
+ /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (before)
+ /// CHECK: CheckCast
+ /// CHECK: CheckCast
+ /// CHECK-NOT: CheckCast
+
+ /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) instruction_simplifier (after)
+ /// CHECK-NOT: CheckCast
+ public void testNotInstanceOf_NotInlined(Object o) {
+ if ((o instanceof SubclassC) != true) {
+ // Empty branch to flip the condition.
+ } else {
+ ((SubclassC)o).$noinline$g();
+ }
+ if ((o instanceof SubclassB) == false) {
+ // Empty branch to flip the condition.
+ } else {
+ ((SubclassB)o).$noinline$g();
+ }
+ }
+
+ /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) inliner (after)
+ /// CHECK-DAG: <<IOf:z\d+>> InstanceOf
+ /// CHECK-DAG: If [<<IOf>>]
+
+ /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) instruction_simplifier_after_bce (before)
+ /// CHECK: CheckCast
+ /// CHECK-NOT: CheckCast
+
+ /// CHECK-START: void Main.testInstanceOf_Inlined(java.lang.Object) instruction_simplifier_after_bce (after)
+ /// CHECK-NOT: CheckCast
+ public void testInstanceOf_Inlined(Object o) {
+ if (!$inline$InstanceofSubclassC(o)) {
+ // Empty branch to flip the condition.
+ } else {
+ ((SubclassC)o).$noinline$g();
+ }
+ }
+
+ /// CHECK-START: void Main.testNotInstanceOf_Inlined(java.lang.Object) inliner (after)
+ /// CHECK-DAG: <<IOf:z\d+>> InstanceOf
+ /// CHECK-DAG: <<Not:z\d+>> BooleanNot [<<IOf>>]
+ /// CHECK-DAG: If [<<Not>>]
+
+ /// CHECK-START: void Main.testNotInstanceOf_Inlined(java.lang.Object) instruction_simplifier_after_bce (before)
+ /// CHECK: CheckCast
+ /// CHECK-NOT: CheckCast
+
+ /// CHECK-START: void Main.testNotInstanceOf_Inlined(java.lang.Object) instruction_simplifier_after_bce (after)
+ /// CHECK-NOT: CheckCast
+ public void testNotInstanceOf_Inlined(Object o) {
+ if ($inline$InstanceofSubclassC(o)) {
+ ((SubclassC)o).$noinline$g();
+ }
+ }
+
+ /// CHECK-START: void Main.testInstanceOfKeep(java.lang.Object) instruction_simplifier (before)
/// CHECK: CheckCast
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfKeep(java.lang.Object) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfKeep(java.lang.Object) instruction_simplifier (after)
/// CHECK: CheckCast
/// CHECK: CheckCast
public void testInstanceOfKeep(Object o) {
@@ -217,11 +308,11 @@
}
}
- /// CHECK-START: void Main.testInstanceOfNested(java.lang.Object) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfNested(java.lang.Object) instruction_simplifier (before)
/// CHECK: CheckCast
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfNested(java.lang.Object) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfNested(java.lang.Object) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfNested(Object o) {
if (o instanceof SubclassC) {
@@ -233,10 +324,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfWithPhi(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfWithPhi(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfWithPhi(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfWithPhi(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfWithPhi(int i) {
Object o;
@@ -251,10 +342,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfInFor(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfInFor(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfInFor(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfInFor(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfInFor(int n) {
Object o = new SubclassA();
@@ -268,10 +359,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfSubclass() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfSubclass() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfSubclass() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfSubclass() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfSubclass() {
Object o = new SubclassA();
@@ -280,10 +371,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfWithPhiSubclass(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfWithPhiSubclass(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfWithPhiSubclass(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfWithPhiSubclass(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfWithPhiSubclass(int i) {
Object o;
@@ -298,10 +389,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfWithPhiTop(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfWithPhiTop(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfWithPhiTop(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfWithPhiTop(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfWithPhiTop(int i) {
Object o;
@@ -316,10 +407,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfSubclassInFor(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfSubclassInFor(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfSubclassInFor(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfSubclassInFor(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfSubclassInFor(int n) {
Object o = new SubclassA();
@@ -333,10 +424,10 @@
}
}
- /// CHECK-START: void Main.testInstanceOfTopInFor(int) instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceOfTopInFor(int) instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceOfTopInFor(int) instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceOfTopInFor(int) instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceOfTopInFor(int n) {
Object o = new SubclassA();
@@ -361,10 +452,10 @@
public SubclassA a = new SubclassA();
public static SubclassA b = new SubclassA();
- /// CHECK-START: void Main.testInstanceFieldGetSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInstanceFieldGetSimpleRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInstanceFieldGetSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInstanceFieldGetSimpleRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInstanceFieldGetSimpleRemove() {
Main m = new Main();
@@ -372,10 +463,10 @@
((SubclassA)a).$noinline$g();
}
- /// CHECK-START: void Main.testStaticFieldGetSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testStaticFieldGetSimpleRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testStaticFieldGetSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testStaticFieldGetSimpleRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testStaticFieldGetSimpleRemove() {
Super b = Main.b;
@@ -384,36 +475,36 @@
public SubclassA $noinline$getSubclass() { throw new RuntimeException(); }
- /// CHECK-START: void Main.testArraySimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testArraySimpleRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testArraySimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testArraySimpleRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testArraySimpleRemove() {
Super[] b = new SubclassA[10];
SubclassA[] c = (SubclassA[])b;
}
- /// CHECK-START: void Main.testInvokeSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testInvokeSimpleRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testInvokeSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testInvokeSimpleRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testInvokeSimpleRemove() {
Super b = $noinline$getSubclass();
((SubclassA)b).$noinline$g();
}
- /// CHECK-START: void Main.testArrayGetSimpleRemove() instruction_simplifier_after_types (before)
+ /// CHECK-START: void Main.testArrayGetSimpleRemove() instruction_simplifier (before)
/// CHECK: CheckCast
- /// CHECK-START: void Main.testArrayGetSimpleRemove() instruction_simplifier_after_types (after)
+ /// CHECK-START: void Main.testArrayGetSimpleRemove() instruction_simplifier (after)
/// CHECK-NOT: CheckCast
public void testArrayGetSimpleRemove() {
Super[] a = new SubclassA[10];
((SubclassA)a[0]).$noinline$g();
}
- /// CHECK-START: int Main.testLoadExceptionInCatchNonExact(int, int) reference_type_propagation (after)
+ /// CHECK-START: int Main.testLoadExceptionInCatchNonExact(int, int) ssa_builder (after)
/// CHECK: LoadException klass:java.lang.ArithmeticException can_be_null:false exact:false
public int testLoadExceptionInCatchNonExact(int x, int y) {
try {
@@ -423,7 +514,7 @@
}
}
- /// CHECK-START: int Main.testLoadExceptionInCatchExact(int) reference_type_propagation (after)
+ /// CHECK-START: int Main.testLoadExceptionInCatchExact(int) ssa_builder (after)
/// CHECK: LoadException klass:FinalException can_be_null:false exact:true
public int testLoadExceptionInCatchExact(int x) {
try {
@@ -437,7 +528,7 @@
}
}
- /// CHECK-START: int Main.testLoadExceptionInCatchAll(int, int) reference_type_propagation (after)
+ /// CHECK-START: int Main.testLoadExceptionInCatchAll(int, int) ssa_builder (after)
/// CHECK: LoadException klass:java.lang.Throwable can_be_null:false exact:false
public int testLoadExceptionInCatchAll(int x, int y) {
try {
@@ -458,7 +549,7 @@
return genericFinal.get();
}
- /// CHECK-START: SubclassC Main.inlineGenerics() reference_type_propagation (after)
+ /// CHECK-START: SubclassC Main.inlineGenerics() ssa_builder (after)
/// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:SubclassC exact:false
/// CHECK-NEXT: Return [<<Invoke>>]
@@ -470,7 +561,7 @@
return c;
}
- /// CHECK-START: Final Main.inlineGenericsFinal() reference_type_propagation (after)
+ /// CHECK-START: Final Main.inlineGenericsFinal() ssa_builder (after)
/// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:Final exact:true
/// CHECK-NEXT: Return [<<Invoke>>]
@@ -512,7 +603,7 @@
return new SubclassA();
}
- /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) reference_type_propagation (after)
+ /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:Super
/// CHECK: NullCheck [<<Phi>>] klass:Super
@@ -534,7 +625,7 @@
/// CHECK: CheckCast [<<Param>>,<<Clazz>>]
/// CHECK: BoundType [<<Param>>] can_be_null:true
- /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) instruction_simplifier_after_types (after)
+ /// CHECK-START: java.lang.String Main.checkcastPreserveNullCheck(java.lang.Object) instruction_simplifier (after)
/// CHECK: <<This:l\d+>> ParameterValue
/// CHECK: <<Param:l\d+>> ParameterValue
/// CHECK: <<Clazz:l\d+>> LoadClass
@@ -546,7 +637,7 @@
}
- /// CHECK-START: void Main.argumentCheck(Super, double, SubclassA, Final) reference_type_propagation (after)
+ /// CHECK-START: void Main.argumentCheck(Super, double, SubclassA, Final) ssa_builder (after)
/// CHECK: ParameterValue klass:Main can_be_null:false exact:false
/// CHECK: ParameterValue klass:Super can_be_null:true exact:false
/// CHECK: ParameterValue
@@ -562,7 +653,7 @@
private int mainField = 0;
- /// CHECK-START: SuperInterface Main.getWiderType(boolean, Interface, OtherInterface) reference_type_propagation (after)
+ /// CHECK-START: SuperInterface Main.getWiderType(boolean, Interface, OtherInterface) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private SuperInterface getWiderType(boolean cond, Interface a, OtherInterface b) {
@@ -618,7 +709,7 @@
getSuper();
}
- /// CHECK-START: void Main.testLoopPhiWithNullFirstInput(boolean) reference_type_propagation (after)
+ /// CHECK-START: void Main.testLoopPhiWithNullFirstInput(boolean) ssa_builder (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<Main:l\d+>> NewInstance klass:Main exact:true
/// CHECK-DAG: <<LoopPhi:l\d+>> Phi [<<Null>>,<<LoopPhi>>,<<Main>>] klass:Main exact:true
@@ -631,7 +722,7 @@
}
}
- /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) reference_type_propagation (after)
+ /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) ssa_builder (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<PhiA:l\d+>> Phi [<<Null>>,<<PhiB:l\d+>>,<<PhiA>>] klass:java.lang.Object exact:false
/// CHECK-DAG: <<PhiB>> Phi [<<Null>>,<<PhiB>>,<<PhiA>>] klass:java.lang.Object exact:false
@@ -647,7 +738,7 @@
}
}
- /// CHECK-START: java.lang.Object[] Main.testInstructionsWithUntypedParent() reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object[] Main.testInstructionsWithUntypedParent() ssa_builder (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<LoopPhi:l\d+>> Phi [<<Null>>,<<Phi:l\d+>>] klass:java.lang.Object[] exact:true
/// CHECK-DAG: <<Array:l\d+>> NewArray klass:java.lang.Object[] exact:true
diff --git a/test/455-checker-gvn/expected.txt b/test/455-checker-gvn/expected.txt
index 8351c19..c1679c7 100644
--- a/test/455-checker-gvn/expected.txt
+++ b/test/455-checker-gvn/expected.txt
@@ -1 +1,3 @@
14
+0
+10
diff --git a/test/455-checker-gvn/src/Main.java b/test/455-checker-gvn/src/Main.java
index 9824f27..cea0959 100644
--- a/test/455-checker-gvn/src/Main.java
+++ b/test/455-checker-gvn/src/Main.java
@@ -15,8 +15,14 @@
*/
public class Main {
+
+ private static int mX = 2;
+ private static int mY = -3;
+
public static void main(String[] args) {
System.out.println(foo(3, 4));
+ System.out.println(mulAndIntrinsic());
+ System.out.println(directIntrinsic(-5));
}
/// CHECK-START: int Main.foo(int, int) GVN (before)
@@ -35,7 +41,50 @@
return sum1 + sum2;
}
- public static long bar(int i) {
- return i;
+ /// CHECK-START: int Main.mulAndIntrinsic() GVN (before)
+ /// CHECK: StaticFieldGet
+ /// CHECK: StaticFieldGet
+ /// CHECK: Mul
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK: StaticFieldGet
+ /// CHECK: StaticFieldGet
+ /// CHECK: Mul
+ /// CHECK: Add
+
+ /// CHECK-START: int Main.mulAndIntrinsic() GVN (after)
+ /// CHECK: StaticFieldGet
+ /// CHECK: StaticFieldGet
+ /// CHECK: Mul
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK-NOT: StaticFieldGet
+ /// CHECK-NOT: StaticFieldGet
+ /// CHECK-NOT: Mul
+ /// CHECK: Add
+
+ public static int mulAndIntrinsic() {
+ // The intermediate call to abs() does not kill
+ // the common subexpression on the multiplication.
+ int mul1 = mX * mY;
+ int abs = Math.abs(mul1);
+ int mul2 = mY * mX;
+ return abs + mul2;
}
+
+ /// CHECK-START: int Main.directIntrinsic(int) GVN (before)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK: Add
+
+ /// CHECK-START: int Main.directIntrinsic(int) GVN (after)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK: Add
+
+ public static int directIntrinsic(int x) {
+ // Here, the two calls to abs() themselves can be replaced with just one.
+ int abs1 = Math.abs(x);
+ int abs2 = Math.abs(x);
+ return abs1 + abs2;
+ }
+
}
diff --git a/test/464-checker-inline-sharpen-calls/src/Main.java b/test/464-checker-inline-sharpen-calls/src/Main.java
index 5080f142..2222e0f 100644
--- a/test/464-checker-inline-sharpen-calls/src/Main.java
+++ b/test/464-checker-inline-sharpen-calls/src/Main.java
@@ -16,6 +16,14 @@
public final class Main {
+ public final static class Helper {
+ private int foo = 3;
+
+ public int getFoo() {
+ return foo;
+ }
+ }
+
public void invokeVirtual() {
}
@@ -31,25 +39,25 @@
m.invokeVirtual();
}
- /// CHECK-START: int Main.inlineSharpenStringInvoke() ssa_builder (after)
- /// CHECK-DAG: <<Invoke:i\d+>> InvokeVirtual
+ /// CHECK-START: int Main.inlineSharpenHelperInvoke() ssa_builder (after)
+ /// CHECK-DAG: <<Invoke:i\d+>> InvokeVirtual {{.*\.getFoo.*}}
/// CHECK-DAG: Return [<<Invoke>>]
- /// CHECK-START: int Main.inlineSharpenStringInvoke() inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: int Main.inlineSharpenHelperInvoke() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect {{.*\.getFoo.*}}
+ /// CHECK-NOT: InvokeVirtual {{.*\.getFoo.*}}
- /// CHECK-START: int Main.inlineSharpenStringInvoke() inliner (after)
+ /// CHECK-START: int Main.inlineSharpenHelperInvoke() inliner (after)
/// CHECK-DAG: <<Field:i\d+>> InstanceFieldGet
/// CHECK-DAG: Return [<<Field>>]
- public static int inlineSharpenStringInvoke() {
- return "Foo".length();
+ public static int inlineSharpenHelperInvoke() {
+ return new Helper().getFoo();
}
public static void main(String[] args) {
inlineSharpenInvokeVirtual(new Main());
- if (inlineSharpenStringInvoke() != 3) {
+ if (inlineSharpenHelperInvoke() != 3) {
throw new Error("Expected 3");
}
}
diff --git a/test/477-checker-bound-type/src/Main.java b/test/477-checker-bound-type/src/Main.java
index c873702..0f65e44 100644
--- a/test/477-checker-bound-type/src/Main.java
+++ b/test/477-checker-bound-type/src/Main.java
@@ -17,7 +17,7 @@
public class Main {
- /// CHECK-START: java.lang.Object Main.boundTypeForIf(java.lang.Object) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.boundTypeForIf(java.lang.Object) ssa_builder (after)
/// CHECK: BoundType
public static Object boundTypeForIf(Object a) {
if (a != null) {
@@ -27,7 +27,7 @@
}
}
- /// CHECK-START: java.lang.Object Main.boundTypeForInstanceOf(java.lang.Object) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.boundTypeForInstanceOf(java.lang.Object) ssa_builder (after)
/// CHECK: BoundType
public static Object boundTypeForInstanceOf(Object a) {
if (a instanceof Main) {
@@ -37,7 +37,7 @@
}
}
- /// CHECK-START: java.lang.Object Main.noBoundTypeForIf(java.lang.Object) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.noBoundTypeForIf(java.lang.Object) ssa_builder (after)
/// CHECK-NOT: BoundType
public static Object noBoundTypeForIf(Object a) {
if (a == null) {
@@ -47,7 +47,7 @@
}
}
- /// CHECK-START: java.lang.Object Main.noBoundTypeForInstanceOf(java.lang.Object) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.noBoundTypeForInstanceOf(java.lang.Object) ssa_builder (after)
/// CHECK-NOT: BoundType
public static Object noBoundTypeForInstanceOf(Object a) {
if (a instanceof Main) {
diff --git a/test/490-checker-inline/src/Main.java b/test/490-checker-inline/src/Main.java
index 21a0189..2e2deea 100644
--- a/test/490-checker-inline/src/Main.java
+++ b/test/490-checker-inline/src/Main.java
@@ -39,7 +39,7 @@
/// CHECK-DAG: InvokeInterface
/// CHECK-START: void Main.testMethod() inliner (after)
- /// CHECK-NOT: Invoke{{.*}}
+ /// CHECK-NOT: Invoke{{.*Object\.<init>.*}}
public static void testMethod() {
createMain().invokeVirtual();
diff --git a/test/492-checker-inline-invoke-interface/src/Main.java b/test/492-checker-inline-invoke-interface/src/Main.java
index a8b6307..3106ce4 100644
--- a/test/492-checker-inline-invoke-interface/src/Main.java
+++ b/test/492-checker-inline-invoke-interface/src/Main.java
@@ -32,14 +32,14 @@
}
/// CHECK-START: void Main.main(java.lang.String[]) ssa_builder (after)
- /// CHECK: InvokeStaticOrDirect
+ /// CHECK: InvokeStaticOrDirect {{.*Main.<init>.*}}
/// CHECK: InvokeInterface
/// CHECK-START: void Main.main(java.lang.String[]) inliner (before)
/// CHECK-NOT: ClinitCheck
/// CHECK-START: void Main.main(java.lang.String[]) inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: InvokeStaticOrDirect {{.*Main.<init>.*}}
/// CHECK-NOT: InvokeVirtual
/// CHECK-NOT: InvokeInterface
diff --git a/test/493-checker-inline-invoke-interface/src/Main.java b/test/493-checker-inline-invoke-interface/src/Main.java
index 44b727f..171405c 100644
--- a/test/493-checker-inline-invoke-interface/src/Main.java
+++ b/test/493-checker-inline-invoke-interface/src/Main.java
@@ -36,7 +36,7 @@
/// CHECK: InvokeInterface
/// CHECK-START: void Main.main(java.lang.String[]) inliner (after)
- /// CHECK-NOT: Invoke{{.*}}
+ /// CHECK-NOT: Invoke{{.*Object\.<init>.*}}
public static void main(String[] args) {
Itf itf = bar();
itf.foo();
diff --git a/test/530-checker-loops/src/Main.java b/test/530-checker-loops/src/Main.java
index e827b1e..f1d9a37 100644
--- a/test/530-checker-loops/src/Main.java
+++ b/test/530-checker-loops/src/Main.java
@@ -26,7 +26,7 @@
//
/// CHECK-START: int Main.linear(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linear(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -40,7 +40,7 @@
}
/// CHECK-START: int Main.linearDown(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearDown(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -54,7 +54,7 @@
}
/// CHECK-START: int Main.linearObscure(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearObscure(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -69,7 +69,7 @@
}
/// CHECK-START: int Main.linearVeryObscure(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearVeryObscure(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -84,7 +84,7 @@
}
/// CHECK-START: int Main.hiddenStride(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.hiddenStride(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -101,7 +101,7 @@
}
/// CHECK-START: int Main.linearWhile(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearWhile(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -116,7 +116,7 @@
}
/// CHECK-START: int Main.linearThreeWayPhi(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearThreeWayPhi(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -134,7 +134,7 @@
}
/// CHECK-START: int Main.linearFourWayPhi(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearFourWayPhi(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -156,7 +156,7 @@
}
/// CHECK-START: int Main.wrapAroundThenLinear(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.wrapAroundThenLinear(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -173,7 +173,7 @@
}
/// CHECK-START: int Main.wrapAroundThenLinearThreeWayPhi(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.wrapAroundThenLinearThreeWayPhi(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -194,7 +194,7 @@
}
/// CHECK-START: int[] Main.linearWithParameter(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int[] Main.linearWithParameter(int) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -208,7 +208,7 @@
}
/// CHECK-START: int[] Main.linearCopy(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int[] Main.linearCopy(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -223,8 +223,8 @@
}
/// CHECK-START: int Main.linearByTwo(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearByTwo(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -241,7 +241,7 @@
}
/// CHECK-START: int Main.linearByTwoSkip1(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearByTwoSkip1(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -255,10 +255,10 @@
}
/// CHECK-START: int Main.linearByTwoSkip2(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearByTwoSkip2(int[]) BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static int linearByTwoSkip2(int x[]) {
int result = 0;
@@ -270,7 +270,7 @@
}
/// CHECK-START: int Main.linearWithCompoundStride() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearWithCompoundStride() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -287,7 +287,7 @@
}
/// CHECK-START: int Main.linearWithLargePositiveStride() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearWithLargePositiveStride() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -305,10 +305,10 @@
}
/// CHECK-START: int Main.linearWithVeryLargePositiveStride() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearWithVeryLargePositiveStride() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static int linearWithVeryLargePositiveStride() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
@@ -323,7 +323,7 @@
}
/// CHECK-START: int Main.linearWithLargeNegativeStride() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearWithLargeNegativeStride() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -341,10 +341,10 @@
}
/// CHECK-START: int Main.linearWithVeryLargeNegativeStride() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearWithVeryLargeNegativeStride() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static int linearWithVeryLargeNegativeStride() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
@@ -359,7 +359,7 @@
}
/// CHECK-START: int Main.linearForNEUp() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearForNEUp() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -374,7 +374,7 @@
}
/// CHECK-START: int Main.linearForNEDown() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearForNEDown() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -389,7 +389,7 @@
}
/// CHECK-START: int Main.linearDoWhileUp() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearDoWhileUp() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -405,7 +405,7 @@
}
/// CHECK-START: int Main.linearDoWhileDown() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearDoWhileDown() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -421,10 +421,10 @@
}
/// CHECK-START: int Main.linearShort() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.linearShort() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static int linearShort() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
@@ -437,7 +437,7 @@
}
/// CHECK-START: int Main.invariantFromPreLoop(int[], int) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.invariantFromPreLoop(int[], int) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -458,20 +458,20 @@
}
/// CHECK-START: void Main.linearTriangularOnTwoArrayLengths(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
//
/// CHECK-START: void Main.linearTriangularOnTwoArrayLengths(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: Deoptimize
private static void linearTriangularOnTwoArrayLengths(int n) {
int[] a = new int[n];
@@ -488,20 +488,20 @@
}
/// CHECK-START: void Main.linearTriangularOnOneArrayLength(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
//
/// CHECK-START: void Main.linearTriangularOnOneArrayLength(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: Deoptimize
private static void linearTriangularOnOneArrayLength(int n) {
int[] a = new int[n];
@@ -518,20 +518,20 @@
}
/// CHECK-START: void Main.linearTriangularOnParameter(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
//
/// CHECK-START: void Main.linearTriangularOnParameter(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: Deoptimize
private static void linearTriangularOnParameter(int n) {
int[] a = new int[n];
@@ -548,32 +548,32 @@
}
/// CHECK-START: void Main.linearTriangularVariations(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
//
/// CHECK-START: void Main.linearTriangularVariations(int) BCE (after)
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: Deoptimize
private static void linearTriangularVariations(int n) {
int[] a = new int[n];
@@ -616,22 +616,22 @@
}
/// CHECK-START: void Main.bubble(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: If
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: If
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
//
/// CHECK-START: void Main.bubble(int[]) BCE (after)
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: ArrayGet
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: If
- /// CHECK-DAG: ArraySet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: If
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
/// CHECK-NOT: Deoptimize
private static void bubble(int[] a) {
for (int i = a.length; --i >= 0;) {
@@ -646,7 +646,7 @@
}
/// CHECK-START: int Main.periodicIdiom(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.periodicIdiom(int) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -664,7 +664,7 @@
}
/// CHECK-START: int Main.periodicSequence2(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.periodicSequence2(int) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -685,10 +685,10 @@
}
/// CHECK-START: int Main.periodicSequence4(int) BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.periodicSequence4(int) BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -713,7 +713,7 @@
}
/// CHECK-START: int Main.justRightUp1() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justRightUp1() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -728,7 +728,7 @@
}
/// CHECK-START: int Main.justRightUp2() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justRightUp2() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -743,7 +743,7 @@
}
/// CHECK-START: int Main.justRightUp3() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justRightUp3() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -758,10 +758,10 @@
}
/// CHECK-START: int Main.justOOBUp() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justOOBUp() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static int justOOBUp() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
@@ -774,7 +774,7 @@
}
/// CHECK-START: int Main.justRightDown1() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justRightDown1() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -789,7 +789,7 @@
}
/// CHECK-START: int Main.justRightDown2() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justRightDown2() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -804,7 +804,7 @@
}
/// CHECK-START: int Main.justRightDown3() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justRightDown3() BCE (after)
/// CHECK-NOT: BoundsCheck
@@ -819,10 +819,10 @@
}
/// CHECK-START: int Main.justOOBDown() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.justOOBDown() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static int justOOBDown() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
@@ -835,10 +835,10 @@
}
/// CHECK-START: void Main.lowerOOB(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: void Main.lowerOOB(int[]) BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static void lowerOOB(int[] x) {
for (int i = -1; i < x.length; i++) {
@@ -847,10 +847,10 @@
}
/// CHECK-START: void Main.upperOOB(int[]) BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: void Main.upperOOB(int[]) BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static void upperOOB(int[] x) {
for (int i = 0; i <= x.length; i++) {
@@ -859,10 +859,10 @@
}
/// CHECK-START: void Main.doWhileUpOOB() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: void Main.doWhileUpOOB() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static void doWhileUpOOB() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
@@ -873,10 +873,10 @@
}
/// CHECK-START: void Main.doWhileDownOOB() BCE (before)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: void Main.doWhileDownOOB() BCE (after)
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: Deoptimize
private static void doWhileDownOOB() {
int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
@@ -887,14 +887,14 @@
}
/// CHECK-START: int[] Main.multiply1() BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
//
/// CHECK-START: int[] Main.multiply1() BCE (after)
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
/// CHECK-NOT: Deoptimize
private static int[] multiply1() {
int[] a = new int[10];
@@ -912,14 +912,14 @@
}
/// CHECK-START: int[] Main.multiply2() BCE (before)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
//
/// CHECK-START: int[] Main.multiply2() BCE (after)
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: ArraySet
static int[] multiply2() {
int[] a = new int[10];
try {
@@ -936,24 +936,24 @@
}
/// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (before)
- /// CHECK-DAG: StaticFieldGet
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: StaticFieldSet
+ /// CHECK: StaticFieldGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: StaticFieldSet
//
/// CHECK-START: int Main.linearDynamicBCE1(int[], int, int) BCE (after)
- /// CHECK-DAG: StaticFieldGet
+ /// CHECK: StaticFieldGet
/// CHECK-NOT: NullCheck
/// CHECK-NOT: ArrayLength
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: StaticFieldSet
- /// CHECK-DAG: Exit
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
+ /// CHECK: ArrayGet
+ /// CHECK: StaticFieldSet
+ /// CHECK: Exit
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
private static int linearDynamicBCE1(int[] x, int lo, int hi) {
int result = 0;
for (int i = lo; i < hi; i++) {
@@ -963,24 +963,24 @@
}
/// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (before)
- /// CHECK-DAG: StaticFieldGet
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: StaticFieldSet
+ /// CHECK: StaticFieldGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: StaticFieldSet
//
/// CHECK-START: int Main.linearDynamicBCE2(int[], int, int, int) BCE (after)
- /// CHECK-DAG: StaticFieldGet
+ /// CHECK: StaticFieldGet
/// CHECK-NOT: NullCheck
/// CHECK-NOT: ArrayLength
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: StaticFieldSet
- /// CHECK-DAG: Exit
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
+ /// CHECK: ArrayGet
+ /// CHECK: StaticFieldSet
+ /// CHECK: Exit
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
private static int linearDynamicBCE2(int[] x, int lo, int hi, int offset) {
int result = 0;
for (int i = lo; i < hi; i++) {
@@ -990,19 +990,19 @@
}
/// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (before)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
//
/// CHECK-START: int Main.wrapAroundDynamicBCE(int[]) BCE (after)
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
/// CHECK-NOT: NullCheck
/// CHECK-NOT: ArrayLength
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: ArrayGet
private static int wrapAroundDynamicBCE(int[] x) {
int w = 9;
int result = 0;
@@ -1014,19 +1014,19 @@
}
/// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (before)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
//
/// CHECK-START: int Main.periodicDynamicBCE(int[]) BCE (after)
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
/// CHECK-NOT: NullCheck
/// CHECK-NOT: ArrayLength
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: ArrayGet
private static int periodicDynamicBCE(int[] x) {
int k = 0;
int result = 0;
@@ -1038,20 +1038,20 @@
}
/// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (before)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
//
/// CHECK-START: int Main.dynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
/// CHECK-NOT: NullCheck
/// CHECK-NOT: ArrayLength
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: Exit
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
+ /// CHECK: ArrayGet
+ /// CHECK: Exit
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
static int dynamicBCEPossiblyInfiniteLoop(int[] x, int lo, int hi) {
// This loop could be infinite for hi = max int. Since i is also used
// as subscript, however, dynamic bce can proceed.
@@ -1063,16 +1063,16 @@
}
/// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (before)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
//
/// CHECK-START: int Main.noDynamicBCEPossiblyInfiniteLoop(int[], int, int) BCE (after)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
/// CHECK-NOT: Deoptimize
static int noDynamicBCEPossiblyInfiniteLoop(int[] x, int lo, int hi) {
// As above, but now the index is not used as subscript,
@@ -1085,16 +1085,16 @@
}
/// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (before)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
//
/// CHECK-START: int Main.noDynamicBCEMixedInductionTypes(int[], long, long) BCE (after)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
/// CHECK-NOT: Deoptimize
static int noDynamicBCEMixedInductionTypes(int[] x, long lo, long hi) {
int result = 0;
@@ -1107,41 +1107,41 @@
}
/// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (before)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: NotEqual
- /// CHECK-DAG: If
- /// CHECK-DAG: If
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: If
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: NotEqual
+ /// CHECK: If
+ /// CHECK: If
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: If
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
//
/// CHECK-START: int Main.dynamicBCEAndConstantIndices(int[], int[][], int, int) BCE (after)
- /// CHECK-DAG: NullCheck
- /// CHECK-DAG: ArrayLength
- /// CHECK-DAG: NotEqual
- /// CHECK-DAG: If
- /// CHECK-DAG: If
+ /// CHECK: NullCheck
+ /// CHECK: ArrayLength
+ /// CHECK: NotEqual
+ /// CHECK: If
+ /// CHECK: If
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: If
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: If
+ /// CHECK: Deoptimize
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK-NOT: BoundsCheck
- /// CHECK-DAG: Exit
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
- /// CHECK-DAG: Deoptimize
+ /// CHECK: Exit
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
/// CHECK-NOT: ArrayGet
static int dynamicBCEAndConstantIndices(int[] x, int[][] a, int lo, int hi) {
// Deliberately test array length on a before the loop so that only bounds checks
@@ -1167,27 +1167,27 @@
}
/// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], java.lang.Integer[], int, int) BCE (before)
- /// CHECK-DAG: If
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
- /// CHECK-DAG: BoundsCheck
- /// CHECK-DAG: ArrayGet
+ /// CHECK: If
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArrayGet
//
/// CHECK-START: int Main.dynamicBCEAndConstantIndicesAllTypes(int[], boolean[], byte[], char[], short[], int[], long[], float[], double[], java.lang.Integer[], int, int) BCE (after)
/// CHECK-DAG: If
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 98251e4..baee7b3 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -52,6 +52,11 @@
int j;
}
+class TestClass3 {
+ float floatField = 8.0f;
+ boolean test1 = true;
+}
+
class Finalizable {
static boolean sVisited = false;
static final int VALUE = 0xbeef;
@@ -453,16 +458,14 @@
}
/// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (before)
- /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
- /// CHECK: ArraySet
- /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+ /// CHECK: {{f\d+}} ArrayGet
+ /// CHECK: {{f\d+}} ArrayGet
/// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (after)
- /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
- /// CHECK: ArraySet
- /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+ /// CHECK: {{f\d+}} ArrayGet
+ /// CHECK-NOT: {{f\d+}} ArrayGet
- // I/F, J/D aliasing should keep the load/store.
+ // I/F, J/D aliasing should not happen any more and LSE should eliminate the load.
static float test19(float[] fa1, float[] fa2) {
fa1[0] = fa2[0];
return fa1[0];
@@ -484,27 +487,32 @@
return obj;
}
- /// CHECK-START: void Main.test21() load_store_elimination (before)
+ /// CHECK-START: void Main.test21(TestClass) load_store_elimination (before)
/// CHECK: NewInstance
/// CHECK: InstanceFieldSet
- /// CHECK: StaticFieldSet
- /// CHECK: StaticFieldGet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldGet
- /// CHECK-START: void Main.test21() load_store_elimination (after)
+ /// CHECK-START: void Main.test21(TestClass) load_store_elimination (after)
/// CHECK: NewInstance
/// CHECK: InstanceFieldSet
- /// CHECK: StaticFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
/// CHECK: InstanceFieldGet
// Loop side effects can kill heap values, stores need to be kept in that case.
- static void test21() {
+ static void test21(TestClass obj0) {
TestClass obj = new TestClass();
+ obj0.str = "abc";
obj.str = "abc";
for (int i = 0; i < 2; i++) {
- // Generate some loop side effect that does write.
- obj.si = 1;
+ // Generate some loop side effect that writes into obj.
+ obj.str = "def";
}
- System.out.print(obj.str.substring(0, 0));
+ System.out.print(obj0.str.substring(0, 0) + obj.str.substring(0, 0));
}
/// CHECK-START: int Main.test22() load_store_elimination (before)
@@ -520,27 +528,29 @@
/// CHECK-START: int Main.test22() load_store_elimination (after)
/// CHECK: NewInstance
- /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
/// CHECK: NewInstance
/// CHECK-NOT: InstanceFieldSet
/// CHECK-NOT: InstanceFieldGet
/// CHECK: NewInstance
/// CHECK-NOT: InstanceFieldSet
- /// CHECK: InstanceFieldGet
+ /// CHECK-NOT: InstanceFieldGet
/// CHECK-NOT: InstanceFieldGet
- // Loop side effects only affects stores into singletons that dominiates the loop header.
+ // For a singleton, loop side effects can kill its field values only if:
+ // (1) it dominiates the loop header, and
+ // (2) its fields are stored into inside a loop.
static int test22() {
int sum = 0;
TestClass obj1 = new TestClass();
- obj1.i = 2; // This store can't be eliminated since it can be killed by loop side effects.
+ obj1.i = 2; // This store can be eliminated since obj1 is never stored into inside a loop.
for (int i = 0; i < 2; i++) {
TestClass obj2 = new TestClass();
- obj2.i = 3; // This store can be eliminated since the singleton is inside the loop.
+ obj2.i = 3; // This store can be eliminated since the singleton is inside the loop.
sum += obj2.i;
}
TestClass obj3 = new TestClass();
- obj3.i = 5; // This store can be eliminated since the singleton is created after the loop.
+ obj3.i = 5; // This store can be eliminated since the singleton is created after the loop.
sum += obj1.i + obj3.i;
return sum;
}
@@ -577,6 +587,37 @@
return obj.i;
}
+ /// CHECK-START: float Main.test24() load_store_elimination (before)
+ /// CHECK-DAG: <<True:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Float8:f\d+>> FloatConstant 8
+ /// CHECK-DAG: <<Float42:f\d+>> FloatConstant 42
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<True>>]
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Float8>>]
+ /// CHECK-DAG: <<GetTest:z\d+>> InstanceFieldGet [<<Obj>>]
+ /// CHECK-DAG: If [<<GetTest>>]
+ /// CHECK-DAG: <<GetField:f\d+>> InstanceFieldGet [<<Obj>>]
+ /// CHECK-DAG: <<Phi:f\d+>> Phi [<<Float42>>,<<GetField>>]
+ /// CHECK-DAG: Return [<<Phi>>]
+
+ /// CHECK-START: float Main.test24() load_store_elimination (after)
+ /// CHECK-DAG: <<True:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Float8:f\d+>> FloatConstant 8
+ /// CHECK-DAG: <<Float42:f\d+>> FloatConstant 42
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: If [<<True>>]
+ /// CHECK-DAG: <<Phi:f\d+>> Phi [<<Float42>>,<<Float8>>]
+ /// CHECK-DAG: Return [<<Phi>>]
+
+ static float test24() {
+ float a = 42.0f;
+ TestClass3 obj = new TestClass3();
+ if (obj.test1) {
+ a = obj.floatField;
+ }
+ return a;
+ }
+
/// CHECK-START: void Main.testFinalizable() load_store_elimination (before)
/// CHECK: NewInstance
/// CHECK: InstanceFieldSet
@@ -679,10 +720,11 @@
float[] fa2 = { 1.8f };
assertFloatEquals(test19(fa1, fa2), 1.8f);
assertFloatEquals(test20().i, 0);
- test21();
+ test21(new TestClass());
assertIntEquals(test22(), 13);
assertIntEquals(test23(true), 4);
assertIntEquals(test23(false), 5);
+ assertFloatEquals(test24(), 8.0f);
testFinalizableByForcingGc();
}
}
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java
index 3f65d5a..be666e9 100644
--- a/test/536-checker-intrinsic-optimization/src/Main.java
+++ b/test/536-checker-intrinsic-optimization/src/Main.java
@@ -47,7 +47,7 @@
}
/// CHECK-START-X86: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after)
- /// CHECK: InvokeVirtual
+ /// CHECK: InvokeVirtual {{.*\.equals.*}}
/// CHECK-NOT: test
public static boolean stringArgumentNotNull(Object obj) {
obj.getClass();
diff --git a/test/540-checker-rtp-bug/src/Main.java b/test/540-checker-rtp-bug/src/Main.java
index e9f16c0..9a9f0b6 100644
--- a/test/540-checker-rtp-bug/src/Main.java
+++ b/test/540-checker-rtp-bug/src/Main.java
@@ -21,14 +21,14 @@
}
public class Main {
- /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: CheckCast [<<Phi>>,<<Class>>]
/// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>] klass:Final
/// CHECK: Return [<<Ret>>]
- /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) instruction_simplifier_after_types (after)
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) instruction_simplifier (after)
/// CHECK: <<Phi:l\d+>> Phi
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: CheckCast [<<Phi>>,<<Class>>]
@@ -43,7 +43,7 @@
return (Final) x;
}
- /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: InstanceOf [<<Phi>>,<<Class>>]
@@ -65,7 +65,7 @@
}
}
- /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
/// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
diff --git a/test/549-checker-types-merge/src/Main.java b/test/549-checker-types-merge/src/Main.java
index dc27f10..917073b 100644
--- a/test/549-checker-types-merge/src/Main.java
+++ b/test/549-checker-types-merge/src/Main.java
@@ -38,14 +38,14 @@
public class Main {
- /// CHECK-START: java.lang.Object Main.testMergeNullContant(boolean) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeNullContant(boolean) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:Main
/// CHECK: Return [<<Phi>>]
private Object testMergeNullContant(boolean cond) {
return cond ? null : new Main();
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassExtendsB) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassExtendsB) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:ClassSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassExtendsA a, ClassExtendsB b) {
@@ -53,7 +53,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:ClassSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassExtendsA a, ClassSuper b) {
@@ -61,7 +61,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassSuper, ClassSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassSuper, ClassSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:ClassSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassSuper a, ClassSuper b) {
@@ -69,7 +69,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassOtherSuper, ClassSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassOtherSuper, ClassSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassOtherSuper a, ClassSuper b) {
@@ -77,7 +77,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassImplementsInterfaceA, InterfaceSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassImplementsInterfaceA, InterfaceSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:InterfaceSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClassWithInterface(boolean cond, ClassImplementsInterfaceA a, InterfaceSuper b) {
@@ -85,7 +85,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassSuper, InterfaceSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassSuper, InterfaceSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeClassWithInterface(boolean cond, ClassSuper a, InterfaceSuper b) {
@@ -93,7 +93,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:InterfaceSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceExtendsA a, InterfaceSuper b) {
@@ -101,7 +101,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:InterfaceSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceSuper a, InterfaceSuper b) {
@@ -109,7 +109,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceExtendsB) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceExtendsB) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceExtendsA a, InterfaceExtendsB b) {
@@ -117,7 +117,7 @@
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceOtherSuper) reference_type_propagation (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceOtherSuper) ssa_builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceSuper a, InterfaceOtherSuper b) {
diff --git a/test/552-checker-primitive-typeprop/expected.txt b/test/552-checker-primitive-typeprop/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/expected.txt
diff --git a/test/552-checker-primitive-typeprop/info.txt b/test/552-checker-primitive-typeprop/info.txt
new file mode 100644
index 0000000..9d69056
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/info.txt
@@ -0,0 +1,2 @@
+Test that phis with environment uses that can be properly typed are kept
+in --debuggable mode.
diff --git a/test/552-checker-primitive-typeprop/smali/ArrayGet.smali b/test/552-checker-primitive-typeprop/smali/ArrayGet.smali
new file mode 100644
index 0000000..042fa0c
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/smali/ArrayGet.smali
@@ -0,0 +1,245 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LArrayGet;
+.super Ljava/lang/Object;
+
+
+# Test phi with fixed-type ArrayGet as an input and a matching second input.
+# The phi should be typed accordingly.
+
+## CHECK-START: void ArrayGet.matchingFixedType(float[], float) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: void ArrayGet.matchingFixedType(float[], float) ssa_builder (after)
+## CHECK-DAG: <<Arg1:f\d+>> ParameterValue
+## CHECK-DAG: <<Aget:f\d+>> ArrayGet
+## CHECK-DAG: {{f\d+}} Phi [<<Aget>>,<<Arg1>>] reg:0
+.method public static matchingFixedType([FF)V
+ .registers 8
+
+ const v0, 0x0
+ const v1, 0x1
+
+ aget v0, p0, v0 # read value
+ add-float v2, v0, v1 # float use fixes type
+
+ float-to-int v2, p1
+ if-eqz v2, :after
+ move v0, p1
+ :after
+ # v0 = Phi [ArrayGet, Arg1] => float
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+
+# Test phi with fixed-type ArrayGet as an input and a conflicting second input.
+# The phi should be eliminated due to the conflict.
+
+## CHECK-START: void ArrayGet.conflictingFixedType(float[], int) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType(float[], int) ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static conflictingFixedType([FI)V
+ .registers 8
+
+ const v0, 0x0
+ const v1, 0x1
+
+ aget v0, p0, v0 # read value
+ add-float v2, v0, v1 # float use fixes type
+
+ if-eqz p1, :after
+ move v0, p1
+ :after
+ # v0 = Phi [ArrayGet, Arg1] => conflict
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+
+# Same test as the one above, only this time tests that the type of the
+# ArrayGet is not changed.
+
+## CHECK-START: void ArrayGet.conflictingFixedType2(int[], float) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType2(int[], float) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType2(int[], float) ssa_builder (after)
+## CHECK: {{i\d+}} ArrayGet
+.method public static conflictingFixedType2([IF)V
+ .registers 8
+
+ const v0, 0x0
+ const v1, 0x1
+
+ aget v0, p0, v0 # read value
+ add-int v2, v0, v1 # int use fixes type
+
+ float-to-int v2, p1
+ if-eqz v2, :after
+ move v0, p1
+ :after
+ # v0 = Phi [ArrayGet, Arg1] => conflict
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+
+# Test phi with free-type ArrayGet as an input and a matching second input.
+# The phi should be typed accordingly.
+
+## CHECK-START: void ArrayGet.matchingFreeType(float[], float) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: void ArrayGet.matchingFreeType(float[], float) ssa_builder (after)
+## CHECK-DAG: <<Arg1:f\d+>> ParameterValue
+## CHECK-DAG: <<Aget:f\d+>> ArrayGet
+## CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Aget>>]
+## CHECK-DAG: {{f\d+}} Phi [<<Aget>>,<<Arg1>>] reg:0
+.method public static matchingFreeType([FF)V
+ .registers 8
+
+ const v0, 0x0
+ const v1, 0x1
+
+ aget v0, p0, v0 # read value, should be float but has no typed use
+ aput v0, p0, v1 # aput does not disambiguate the type
+
+ float-to-int v2, p1
+ if-eqz v2, :after
+ move v0, p1
+ :after
+ # v0 = Phi [ArrayGet, Arg1] => float
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+
+# Test phi with free-type ArrayGet as an input and a conflicting second input.
+# The checks below expect no Phi to survive (even in --debuggable mode), i.e.
+# the phi is not preserved despite the conflict. NOTE(review): confirm intent.
+
+## CHECK-START: void ArrayGet.conflictingFreeType(int[], float) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFreeType(int[], float) ssa_builder (after)
+## CHECK-NOT: Phi
+
+.method public static conflictingFreeType([IF)V
+ .registers 8
+
+ const v0, 0x0
+ const v1, 0x1
+
+ aget v0, p0, v0 # read value, should be int but has no typed use
+ aput v0, p0, v1
+
+ float-to-int v2, p1
+ if-eqz v2, :after
+ move v0, p1
+ :after
+ # v0 = Phi [ArrayGet, Arg1] => float
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+
+# Test that real use of ArrayGet is propagated through phis. The following test
+# case uses ArrayGet indirectly through two phis. It also creates an unused
+# conflicting phi which should not be preserved.
+
+## CHECK-START: void ArrayGet.conflictingPhiUses(int[], float, boolean, boolean, boolean) ssa_builder (after)
+## CHECK: InvokeStaticOrDirect env:[[{{i\d+}},{{i\d+}},_,{{i\d+}},{{.*}}
+
+.method public static conflictingPhiUses([IFZZZ)V
+ .registers 10
+
+ const v0, 0x0
+
+ # Create v1 = Phi [0x0, int ArrayGet]
+ move v1, v0
+ if-eqz p2, :else1
+ aget v1, p0, v0
+ :else1
+
+ # Create v2 = Phi [v1, float]
+ move v2, v1
+ if-eqz p3, :else2
+ move v2, p1
+ :else2
+
+ # Create v3 = Phi [v1, int]
+ move v3, v1
+ if-eqz p4, :else3
+ move v3, v0
+ :else3
+
+ # Use v3 as int.
+ add-int/lit8 v4, v3, 0x2a
+
+ # Create env uses.
+ invoke-static {}, Ljava/lang/System;->nanoTime()J
+
+ return-void
+.end method
+
+# Test that the right ArrayGet equivalent is always selected. The following test
+# case uses ArrayGet as float through one phi and as an indeterminate type through
+# another. The situation needs to be resolved so that only one instruction
+# remains.
+
+## CHECK-START: void ArrayGet.typedVsUntypedPhiUse(float[], float, boolean, boolean) ssa_builder (after)
+## CHECK: {{f\d+}} ArrayGet
+
+## CHECK-START: void ArrayGet.typedVsUntypedPhiUse(float[], float, boolean, boolean) ssa_builder (after)
+## CHECK-NOT: {{i\d+}} ArrayGet
+
+.method public static typedVsUntypedPhiUse([FFZZ)V
+ .registers 10
+
+ const v0, 0x0
+
+ # v1 = float ArrayGet
+ aget v1, p0, v0
+
+ # Create v2 = Phi [v1, 0.0f]
+ move v2, v1
+ if-eqz p2, :else1
+ move v2, v0
+ :else1
+
+ # Use v2 as float
+ cmpl-float v2, v2, p1
+
+ # Create v3 = Phi [v1, 0.0f]
+ move v3, v1
+ if-eqz p3, :else2
+ move v3, v0
+ :else2
+
+ # Use v3 without a determinate type.
+ aput v3, p0, v0
+
+ return-void
+.end method
diff --git a/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali b/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali
new file mode 100644
index 0000000..395feaa
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali
@@ -0,0 +1,52 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LSsaBuilder;
+.super Ljava/lang/Object;
+
+# Check that a dead phi with a live equivalent is replaced in an environment. The
+# following test case throws an exception and uses v0 afterwards. However, v0
+# contains a phi that is interpreted as int for the environment, and as float for
+# instruction use. SsaBuilder must substitute the int variant before removing it,
+# otherwise running the code with an array short enough to throw will crash at
+# runtime because v0 is undefined.
+
+## CHECK-START: int SsaBuilder.environmentPhi(boolean, int[]) ssa_builder (after)
+## CHECK-DAG: <<Cst0:f\d+>> FloatConstant 0
+## CHECK-DAG: <<Cst2:f\d+>> FloatConstant 2
+## CHECK-DAG: <<Phi:f\d+>> Phi [<<Cst0>>,<<Cst2>>]
+## CHECK-DAG: BoundsCheck env:[[<<Phi>>,{{i\d+}},{{z\d+}},{{l\d+}}]]
+
+.method public static environmentPhi(Z[I)I
+ .registers 4
+
+ const v0, 0x0
+ if-eqz p0, :else
+ const v0, 0x40000000
+ :else
+ # v0 = phi that can be both int and float
+
+ :try_start
+ const v1, 0x3
+ aput v1, p1, v1
+ const v0, 0x1 # generate catch phi for v0
+ const v1, 0x4
+ aput v1, p1, v1
+ :try_end
+ .catchall {:try_start .. :try_end} :use_as_float
+
+ :use_as_float
+ float-to-int v0, v0
+ return v0
+.end method
diff --git a/test/552-checker-primitive-typeprop/smali/TypePropagation.smali b/test/552-checker-primitive-typeprop/smali/TypePropagation.smali
new file mode 100644
index 0000000..58682a1
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/smali/TypePropagation.smali
@@ -0,0 +1,136 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTypePropagation;
+.super Ljava/lang/Object;
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeDeadPhi(boolean, boolean, int, float, float) ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static mergeDeadPhi(ZZIFF)V
+ .registers 8
+
+ if-eqz p0, :after1
+ move p2, p3
+ :after1
+ # p2 = merge(int,float) = conflict
+
+ if-eqz p1, :after2
+ move p2, p4
+ :after2
+ # p2 = merge(conflict,float) = conflict
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeSameType(boolean, int, int) ssa_builder (after)
+## CHECK: {{i\d+}} Phi
+## CHECK-NOT: Phi
+.method public static mergeSameType(ZII)V
+ .registers 8
+ if-eqz p0, :after
+ move p1, p2
+ :after
+ # p1 = merge(int,int) = int
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeVoidInput(boolean, boolean, int, int) ssa_builder (after)
+## CHECK: {{i\d+}} Phi
+## CHECK: {{i\d+}} Phi
+## CHECK-NOT: Phi
+.method public static mergeVoidInput(ZZII)V
+ .registers 8
+ :loop
+ # p2 = void (loop phi) => p2 = merge(int,int) = int
+ if-eqz p0, :after
+ move p2, p3
+ :after
+ # p2 = merge(void,int) = int
+ if-eqz p1, :loop
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeDifferentSize(boolean, int, long) ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static mergeDifferentSize(ZIJ)V
+ .registers 8
+ if-eqz p0, :after
+ move-wide p1, p2
+ :after
+ # p1 = merge(int,long) = conflict
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeRefFloat(boolean, float, java.lang.Object) ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static mergeRefFloat(ZFLjava/lang/Object;)V
+ .registers 8
+ if-eqz p0, :after
+ move-object p1, p2
+ :after
+ # p1 = merge(float,reference) = conflict
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeIntFloat_Success(boolean, float) ssa_builder (after)
+## CHECK: {{f\d+}} Phi
+## CHECK-NOT: Phi
+.method public static mergeIntFloat_Success(ZF)V
+ .registers 8
+ if-eqz p0, :after
+ const/4 p1, 0x0
+ :after
+ # p1 = merge(float,0x0) = float
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeIntFloat_Fail(boolean, int, float) ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static mergeIntFloat_Fail(ZIF)V
+ .registers 8
+ if-eqz p0, :after
+ move p1, p2
+ :after
+ # p1 = merge(int,float) = conflict
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
+
+## CHECK-START-DEBUGGABLE: void TypePropagation.updateAllUsersOnConflict(boolean, boolean, int, float, int) ssa_builder (after)
+## CHECK-NOT: Phi
+.method public static updateAllUsersOnConflict(ZZIFI)V
+ .registers 8
+
+ :loop1
+ # loop phis for all args
+ # p2 = merge(int,float) = float? => conflict
+ move p2, p3
+ if-eqz p0, :loop1
+
+ :loop2
+ # loop phis for all args
+ # requests float equivalent of p4 phi in loop1 => conflict
+ # propagates conflict to loop2's phis
+ move p2, p4
+ if-eqz p1, :loop2
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+ return-void
+.end method
diff --git a/test/552-checker-primitive-typeprop/src/Main.java b/test/552-checker-primitive-typeprop/src/Main.java
new file mode 100644
index 0000000..fe2343e
--- /dev/null
+++ b/test/552-checker-primitive-typeprop/src/Main.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ private static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Wrong result, expected=" + expected + ", actual=" + actual);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("SsaBuilder");
+ Method m = c.getMethod("environmentPhi", new Class[] { boolean.class, int[].class });
+
+ int[] array = new int[3];
+ int result;
+
+ result = (Integer) m.invoke(null, new Object[] { true, array } );
+ assertEquals(2, result);
+
+ result = (Integer) m.invoke(null, new Object[] { false, array } );
+ assertEquals(0, result);
+ }
+}
diff --git a/test/559-bce-ssa/expected.txt b/test/559-bce-ssa/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/559-bce-ssa/expected.txt
diff --git a/test/559-bce-ssa/info.txt b/test/559-bce-ssa/info.txt
new file mode 100644
index 0000000..e104515
--- /dev/null
+++ b/test/559-bce-ssa/info.txt
@@ -0,0 +1,2 @@
+Regression test for the optimizing compiler which used
+to hit a bogus DCHECK on the test case.
diff --git a/test/559-bce-ssa/src/Main.java b/test/559-bce-ssa/src/Main.java
new file mode 100644
index 0000000..88f06b4
--- /dev/null
+++ b/test/559-bce-ssa/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+
+ public static void foo(int[] array, int[] array2, int start, int end) {
+ for (int i = start; i < end; ++i) {
+ array[i] = array2[array.length] + 1;
+ }
+ }
+
+ public static void main(String[] args) {
+ int[]a = new int[1];
+ foo(a, new int[2], 0, 1);
+ if (a[0] != 1) {
+ throw new Error("test failed");
+ }
+ }
+}
diff --git a/test/560-packed-switch/expected.txt b/test/560-packed-switch/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/560-packed-switch/expected.txt
diff --git a/test/560-packed-switch/info.txt b/test/560-packed-switch/info.txt
new file mode 100644
index 0000000..41d4562
--- /dev/null
+++ b/test/560-packed-switch/info.txt
@@ -0,0 +1,2 @@
+Regression test for optimizing that used to emit wrong code
+for a HPackedSwitch.
diff --git a/test/560-packed-switch/src/Main.java b/test/560-packed-switch/src/Main.java
new file mode 100644
index 0000000..3b0b425
--- /dev/null
+++ b/test/560-packed-switch/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ switch (staticField) {
+ case -1:
+ return;
+ case -4:
+ // We add this case to make it an odd number of case/default.
+ // The code generation for it used to be bogus.
+ throw new Error("Cannot happen");
+ default:
+ throw new Error("Cannot happen");
+ }
+ }
+ static int staticField = -1;
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 7589f8f..81cfb70 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -38,11 +38,13 @@
$(HOST_OUT_EXECUTABLES)/jasmin \
$(HOST_OUT_EXECUTABLES)/smali \
$(HOST_OUT_EXECUTABLES)/dexmerger
+TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES :=
ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
TEST_ART_RUN_TEST_DEPENDENCIES += \
$(JACK) \
$(JILL_JAR)
+ TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES += setup-jack-server
endif
ifeq ($(ART_TEST_DEBUG_GC),true)
@@ -63,7 +65,7 @@
run_test_options += --quiet
endif
$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
-$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
+$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
@@ -153,8 +155,14 @@
ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
IMAGE_TYPES += no-image
endif
+ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true)
+ IMAGE_TYPES := multiimage
+endif
ifeq ($(ART_TEST_PIC_IMAGE),true)
IMAGE_TYPES += picimage
+ ifeq ($(ART_TEST_RUN_TEST_MULTI_IMAGE),true)
+ IMAGE_TYPES := multipicimage
+ endif
endif
PICTEST_TYPES := npictest
ifeq ($(ART_TEST_PIC_TEST),true)
@@ -579,6 +587,19 @@
TEST_ART_BROKEN_DEFAULT_HEAP_POISONING_RUN_TESTS :=
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
+
+# Tests broken by multi-image. b/26317072
+TEST_ART_BROKEN_MULTI_IMAGE_RUN_TESTS := \
+ 476-checker-ctor-memory-barrier \
+ 530-checker-lse
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), \
+ $(TEST_ART_BROKEN_MULTI_IMAGE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_MULTI_IMAGE_RUN_TESTS :=
+
# Clear variables ahead of appending to them when defining tests.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
$(foreach target, $(TARGET_TYPES), \
@@ -640,14 +661,16 @@
$(ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
- $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
+ $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION)
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttest$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libarttestd$(ART_HOST_SHLIB_EXTENSION) \
$(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libnativebridgetest$(ART_HOST_SHLIB_EXTENSION) \
- $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION)
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
+ $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION)
endif
# Create a rule to build and run a tests following the form:
@@ -835,7 +858,27 @@
prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_$(13))
endif
else
- $$(error found $(9) expected $(IMAGE_TYPES))
+ ifeq ($(9),multiimage)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
+ run_test_options += --multi-image
+ ifeq ($(1),host)
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
+ else
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_no-pic_multi_$(13))
+ endif
+ else
+ ifeq ($(9),multipicimage)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
+ run_test_options += --pic-image --multi-image
+ ifeq ($(1),host)
+ prereq_rule += $$(HOST_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
+ else
+ prereq_rule += $$(TARGET_CORE_IMAGE_$$(image_suffix)_pic_multi_$(13))
+ endif
+ else
+ $$(error found $(9) expected $(IMAGE_TYPES))
+ endif
+ endif
endif
endif
endif
@@ -896,7 +939,7 @@
$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
$$(run_test_rule_name): PRIVATE_JACK_CLASSPATH := $$(jack_classpath)
.PHONY: $$(run_test_rule_name)
-$$(run_test_rule_name): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule)
+$$(run_test_rule_name): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(HOST_OUT_EXECUTABLES)/hprof-conv $$(prereq_rule) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
DX=$(abspath $(DX)) \
JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 082c9b3..fd41fd2 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -56,7 +56,7 @@
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv* env ATTRIBUTE_UNUSED,
jclass cls ATTRIBUTE_UNUSED) {
- return Runtime::Current()->GetHeap()->HasImageSpace();
+ return Runtime::Current()->GetHeap()->HasBootImageSpace();
}
// public static native boolean isImageDex2OatEnabled();
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 3efa6ff..dacb7b9 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -361,13 +361,17 @@
dex2oat_cmdline="true"
mkdir_cmdline="mkdir -p ${DEX_LOCATION}/dalvik-cache/$ISA"
+# TODO: allow app-image to work with multi-image. b/26317072
+app_image=""
+# app_image="--app-image-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.art | cut -d/ -f 2- | sed "s:/:@:g")"
+
if [ "$PREBUILD" = "y" ]; then
dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/dex2oatd \
$COMPILE_FLAGS \
--boot-image=${BOOT_IMAGE} \
--dex-file=$DEX_LOCATION/$TEST_NAME.jar \
--oat-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g") \
- --app-image-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.art | cut -d/ -f 2- | sed "s:/:@:g") \
+ ${app_image} \
--instruction-set=$ISA"
if [ "x$INSTRUCTION_SET_FEATURES" != "x" ] ; then
dex2oat_cmdline="${dex2oat_cmdline} --instruction-set-features=${INSTRUCTION_SET_FEATURES}"
@@ -390,6 +394,12 @@
DALVIKVM_ISA_FEATURES_ARGS="-Xcompiler-option --instruction-set-features=${INSTRUCTION_SET_FEATURES}"
fi
+# java.io.tmpdir can only be set at launch time.
+TMP_DIR_OPTION=""
+if [ "$HOST" = "n" ]; then
+ TMP_DIR_OPTION="-Djava.io.tmpdir=/data/local/tmp"
+fi
+
dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$GDB_ARGS \
$FLAGS \
@@ -403,6 +413,7 @@
$INT_OPTS \
$DEBUGGER_OPTS \
$DALVIKVM_BOOT_OPT \
+ $TMP_DIR_OPTION \
-cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN $ARGS"
# Remove whitespace.
diff --git a/test/run-test b/test/run-test
index 60e008c..d076687 100755
--- a/test/run-test
+++ b/test/run-test
@@ -85,7 +85,7 @@
# If JACK_CLASSPATH is not set, assume it only contains core-libart.
if [ -z "$JACK_CLASSPATH" ]; then
- export JACK_CLASSPATH="${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
+ export JACK_CLASSPATH="${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack:${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/common/obj/JAVA_LIBRARIES/core-oj-hostdex_intermediates/classes.jack"
fi
# If JILL_JAR is not set, assume it is located in the prebuilts directory.
@@ -135,6 +135,7 @@
have_image="yes"
image_suffix=""
pic_image_suffix=""
+multi_image_suffix=""
android_root="/system"
while true; do
@@ -184,6 +185,9 @@
elif [ "x$1" = "x--pic-image" ]; then
pic_image_suffix="-pic"
shift
+ elif [ "x$1" = "x--multi-image" ]; then
+ multi_image_suffix="-multi"
+ shift
elif [ "x$1" = "x--pic-test" ]; then
run_args="${run_args} --pic-test"
shift
@@ -458,7 +462,7 @@
if [ "$runtime" = "dalvik" ]; then
if [ "$target_mode" = "no" ]; then
framework="${ANDROID_PRODUCT_OUT}/system/framework"
- bpath="${framework}/core.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
+ bpath="${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
run_args="${run_args} --boot -Xbootclasspath:${bpath}"
else
true # defaults to using target BOOTCLASSPATH
@@ -470,12 +474,12 @@
export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out/}host/linux-x86
fi
guess_host_arch_name
- run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${pic_image_suffix}.art"
+ run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art"
run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}"
else
guess_target_arch_name
run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}"
- run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}.art"
+ run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art"
fi
if [ "$relocate" = "yes" ]; then
run_args="${run_args} --relocate"
@@ -502,6 +506,7 @@
# TODO If the target was compiled WITH_DEXPREOPT=true then these tests will
# fail since these jar files will be stripped.
bpath="${framework}/core-libart${bpath_suffix}.jar"
+ bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar"
bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar"
@@ -611,6 +616,8 @@
echo " Set instruction-set-features for compilation."
echo " --pic-image Use an image compiled with position independent code for the"
echo " boot class path."
+ echo " --multi-image Use a set of images compiled with dex2oat multi-image for"
+ echo " the boot class path."
echo " --pic-test Compile the test code position independent."
echo " --quiet Don't print anything except failure messages"
) 1>&2 # Direct to stderr so usage is not printed if --quiet is set.
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index a2d2f23..880be26 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -66,8 +66,7 @@
"libcore.java.text.SimpleDateFormatTest#testDstZoneNameWithNonDstTimestamp",
"libcore.java.text.SimpleDateFormatTest#testDstZoneWithNonDstTimestampForNonHourDstZone",
"libcore.java.text.SimpleDateFormatTest#testNonDstZoneNameWithDstTimestamp",
- "libcore.java.text.SimpleDateFormatTest#testNonDstZoneWithDstTimestampForNonHourDstZone",
- "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition"]
+ "libcore.java.text.SimpleDateFormatTest#testNonDstZoneWithDstTimestampForNonHourDstZone"]
},
{
description: "Failing due to missing localhost on hammerhead and volantis.",
@@ -172,10 +171,83 @@
bug: 25437292
},
{
- description: "JSR166TestCase.waitForThreadToEnterWaitState seems to time out; needs investigation.",
+ description: "Assertion failing on the concurrent collector configuration.",
result: EXEC_FAILED,
- names: ["jsr166.LinkedTransferQueueTest#testTransfer2",
- "jsr166.LinkedTransferQueueTest#testWaitingConsumer"],
+ names: ["jsr166.LinkedTransferQueueTest#testTransfer2"],
bug: 25883050
+},
+{
+ description: "Failing tests after enso move.",
+ result: EXEC_FAILED,
+ bug: 26326992,
+ names: ["libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeDateTimeStringDST",
+ "libcore.java.lang.OldSystemTest#test_load",
+ "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits",
+ "libcore.java.text.NumberFormatTest#test_customCurrencySymbol",
+ "libcore.java.text.NumberFormatTest#test_setCurrency",
+ "libcore.java.text.OldNumberFormatTest#test_getIntegerInstanceLjava_util_Locale",
+ "libcore.java.util.CalendarTest#testAddOneDayAndOneDayOver30MinuteDstForwardAdds48Hours",
+ "libcore.java.util.CalendarTest#testNewCalendarKoreaIsSelfConsistent",
+ "libcore.java.util.CalendarTest#testSetTimeInZoneWhereDstIsNoLongerUsed",
+ "libcore.java.util.CalendarTest#test_nullLocale",
+ "libcore.java.util.FormatterTest#test_numberLocalization",
+ "libcore.java.util.FormatterTest#test_uppercaseConversions",
+ "libcore.java.util.TimeZoneTest#testTimeZoneIDLocalization",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGet",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetBoolean",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetByteArray",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetDouble",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetFloat",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetInt",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetLong",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testKeys",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testNodeExists",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPut",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutBoolean",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutByteArray",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutDouble",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutFloat",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutInt",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutLong",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemove",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemoveNode",
+ "libcore.java.util.prefs.OldAbstractPreferencesTest#testSync",
+ "libcore.java.util.prefs.PreferencesTest#testHtmlEncoding",
+ "libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles",
+ "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure",
+ "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString",
+ "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs",
+ "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate",
+ "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration",
+ "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException",
+ "org.apache.harmony.tests.java.lang.Character_UnicodeBlockTest#test_forNameLjava_lang_StringExceptions",
+ "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261",
+ "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon",
+ "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String",
+ "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible",
+ "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition",
+ "org.apache.harmony.tests.java.util.CalendarTest#test_getDisplayNamesIILjava_util_Locale",
+ "org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String",
+ "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
+ "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
+ "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet",
+ "libcore.java.util.CalendarTest#test_clear_45877",
+ "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat",
+ "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"]
+},
+{
+ description: "Failing tests after enso move, only on arm32",
+ result: EXEC_FAILED,
+ bug: 26353151,
+ modes_variants: [[device, X32]],
+ names: ["org.apache.harmony.tests.java.text.DecimalFormatTest#test_formatDouble_withFieldPosition",
+ "org.apache.harmony.tests.java.text.DecimalFormatTest#test_formatToCharacterIterator_original"]
}
+
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index c79f4b9..f29e51f 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -65,7 +65,7 @@
art="bash ${OUT_DIR-out}/host/linux-x86/bin/art"
art_debugee="bash ${OUT_DIR-out}/host/linux-x86/bin/art"
# We force generation of a new image to avoid build-time and run-time classpath differences.
- image="-Ximage:/system/non/existent"
+ image="-Ximage:/system/non/existent/vogar.art"
# We do not need a device directory on host.
device_dir=""
# Vogar knows which VM to use on host.
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 4b5a5ca..11ed8b9 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -83,7 +83,7 @@
# will create a boot image with the default compiler. Note that
# giving an existing image on host does not work because of
# classpath/resources differences when compiling the boot image.
- vogar_args="$vogar_args --vm-arg -Ximage:/non/existent"
+ vogar_args="$vogar_args --vm-arg -Ximage:/non/existent/vogar.art"
shift
elif [[ "$1" == "--debug" ]]; then
# Remove the --debug from the arguments.