summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.mk7
-rw-r--r--build/Android.common_test.mk5
-rw-r--r--build/Android.gtest.mk12
-rw-r--r--build/Android.oat.mk4
-rw-r--r--compiler/common_compiler_test.h4
-rw-r--r--compiler/dex/frontend.cc4
-rw-r--r--compiler/dex/quick/arm64/assemble_arm64.cc4
-rw-r--r--compiler/dex/quick/arm64/codegen_arm64.h7
-rw-r--r--compiler/dex/quick/arm64/fp_arm64.cc10
-rw-r--r--compiler/dex/quick/arm64/int_arm64.cc42
-rw-r--r--compiler/dex/quick/arm64/target_arm64.cc14
-rw-r--r--compiler/dex/quick/arm64/utility_arm64.cc32
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.cc69
-rw-r--r--compiler/dex/quick/dex_file_method_inliner.h4
-rwxr-xr-xcompiler/dex/quick/gen_invoke.cc57
-rw-r--r--compiler/dex/quick/mir_to_lir.h11
-rw-r--r--compiler/dex/quick/x86/assemble_x86.cc7
-rw-r--r--compiler/dex/quick/x86/codegen_x86.h1
-rwxr-xr-xcompiler/dex/quick/x86/int_x86.cc2
-rwxr-xr-xcompiler/dex/quick/x86/target_x86.cc117
-rw-r--r--compiler/dex/quick/x86/x86_lir.h4
-rw-r--r--compiler/driver/compiler_driver.cc27
-rw-r--r--compiler/driver/compiler_options.h9
-rw-r--r--compiler/elf_fixup.cc92
-rw-r--r--compiler/elf_writer_quick.cc26
-rw-r--r--compiler/elf_writer_quick.h1
-rw-r--r--compiler/image_writer.cc81
-rw-r--r--compiler/image_writer.h5
-rw-r--r--compiler/jni/jni_compiler_test.cc4
-rw-r--r--dex2oat/dex2oat.cc36
-rw-r--r--disassembler/disassembler_x86.cc27
-rw-r--r--patchoat/Android.mk45
-rw-r--r--patchoat/patchoat.cc1020
-rw-r--r--patchoat/patchoat.h124
-rw-r--r--runtime/arch/x86/jni_entrypoints_x86.S8
-rw-r--r--runtime/arch/x86_64/jni_entrypoints_x86_64.S6
-rw-r--r--runtime/debugger.cc255
-rw-r--r--runtime/debugger.h59
-rw-r--r--runtime/elf_file.cc19
-rw-r--r--runtime/elf_file.h1
-rw-r--r--runtime/elf_utils.h97
-rw-r--r--runtime/entrypoints/jni/jni_entrypoints.cc5
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc8
-rw-r--r--runtime/image.cc33
-rw-r--r--runtime/image.h13
-rw-r--r--runtime/jdwp/jdwp_event.cc20
-rw-r--r--runtime/oat_file.h12
-rw-r--r--runtime/quick/inline_method_analyser.h1
-rw-r--r--runtime/runtime.cc1
-rw-r--r--runtime/thread.cc3
-rw-r--r--runtime/utils.h29
-rw-r--r--test/Android.oat.mk10
52 files changed, 2132 insertions, 362 deletions
diff --git a/Android.mk b/Android.mk
index aef731f594..9c206400fc 100644
--- a/Android.mk
+++ b/Android.mk
@@ -99,6 +99,7 @@ include $(art_path)/compiler/Android.mk
include $(art_path)/dex2oat/Android.mk
include $(art_path)/disassembler/Android.mk
include $(art_path)/oatdump/Android.mk
+include $(art_path)/patchoat/Android.mk
include $(art_path)/dalvikvm/Android.mk
include $(art_path)/tools/Android.mk
include $(art_path)/build/Android.oat.mk
@@ -364,13 +365,13 @@ build-art-target: $(ART_TARGET_EXECUTABLES) $(ART_TARGET_GTEST_EXECUTABLES) $(TA
# "m art-host" for just building the files needed to run the art script
.PHONY: art-host
ifeq ($(HOST_PREFER_32_BIT),true)
-art-host: $(HOST_OUT_EXECUTABLES)/art $(HOST_OUT)/bin/dalvikvm32 $(HOST_OUT)/lib/libart.so $(HOST_OUT)/bin/dex2oat $(HOST_CORE_IMG_OUT) $(HOST_OUT)/lib/libjavacore.so $(HOST_OUT)/bin/dalvikvm
+art-host: $(HOST_OUT_EXECUTABLES)/art $(HOST_OUT)/bin/dalvikvm32 $(HOST_OUT)/lib/libart.so $(HOST_OUT)/bin/dex2oat $(HOST_OUT)/bin/patchoat $(HOST_CORE_IMG_OUT) $(HOST_OUT)/lib/libjavacore.so $(HOST_OUT)/bin/dalvikvm
else
-art-host: $(HOST_OUT_EXECUTABLES)/art $(HOST_OUT)/bin/dalvikvm64 $(HOST_OUT)/bin/dalvikvm32 $(HOST_OUT)/lib/libart.so $(HOST_OUT)/bin/dex2oat $(HOST_CORE_IMG_OUT) $(HOST_OUT)/lib/libjavacore.so $(HOST_OUT)/lib64/libjavacore.so $(HOST_OUT)/bin/dalvikvm
+art-host: $(HOST_OUT_EXECUTABLES)/art $(HOST_OUT)/bin/dalvikvm64 $(HOST_OUT)/bin/dalvikvm32 $(HOST_OUT)/lib/libart.so $(HOST_OUT)/bin/dex2oat $(HOST_OUT)/bin/patchoat $(HOST_CORE_IMG_OUT) $(HOST_OUT)/lib/libjavacore.so $(HOST_OUT)/lib64/libjavacore.so $(HOST_OUT)/bin/dalvikvm
endif
.PHONY: art-host-debug
-art-host-debug: art-host $(HOST_OUT)/lib/libartd.so $(HOST_OUT)/bin/dex2oatd
+art-host-debug: art-host $(HOST_OUT)/lib/libartd.so $(HOST_OUT)/bin/dex2oatd $(HOST_OUT)/bin/patchoatd
########################################################################
# targets to switch back and forth from libdvm to libart
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 542e888cfa..ed07129786 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -87,7 +87,8 @@ endef
# $(2): input test directory in art/test, e.g. HelloWorld
# $(3): target output module path (default module path is used on host)
# $(4): additional dependencies
-# $(5): a make variable used to collate dependencies
+# $(5): a make variable used to collate target dependencies, e.g ART_TEST_TARGET_OAT_HelloWorld_DEX
+# $(6): a make variable used to collate host dependencies, e.g ART_TEST_HOST_OAT_HelloWorld_DEX
define build-art-test-dex
ifeq ($(ART_BUILD_TARGET),true)
include $(CLEAR_VARS)
@@ -113,7 +114,7 @@ define build-art-test-dex
LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
- $(5)-host := $$(LOCAL_INSTALLED_MODULE)
+ $(6) := $$(LOCAL_INSTALLED_MODULE)
endif
endef
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index cdfcfc46dd..10cd1cc65a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -42,7 +42,8 @@ GTEST_DEX_DIRECTORIES := \
# Create build rules for each dex file recording the dependency.
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-gtest,$(dir), \
- $(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_GTEST_$(dir)_DEX)))
+ $(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_TEST_TARGET_GTEST_$(dir)_DEX, \
+ ART_TEST_HOST_GTEST_$(dir)_DEX)))
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
@@ -197,7 +198,7 @@ define define-art-gtest-rule-target
# to ensure files are pushed to the device.
TEST_ART_TARGET_SYNC_DEPS += \
$$(ART_GTEST_$(1)_TARGET_DEPS) \
- $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_GTEST_$(file)_DEX)) \
+ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
$$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so
@@ -233,7 +234,7 @@ define define-art-gtest-rule-host
.PHONY: $$(gtest_rule)
-$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_GTEST_$(file)_DEX-host)) $$(gtest_deps)
+$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps)
$(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \
|| $$(call ART_TEST_FAILED,$$@)
@@ -242,7 +243,7 @@ $$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_
ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule)
.PHONY: valgrind-$$(gtest_rule)
-valgrind-$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_GTEST_$(file)_DEX-host)) $$(gtest_deps)
+valgrind-$$(gtest_rule): $$(gtest_exe) $$(ART_GTEST_$(1)_HOST_DEPS) $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) $$(gtest_deps)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
valgrind --leak-check=full --error-exitcode=1 $$< && $$(call ART_TEST_PASSED,$$@) \
|| $$(call ART_TEST_FAILED,$$@)
@@ -460,6 +461,7 @@ ART_GTEST_proxy_test_DEX_DEPS :=
ART_GTEST_reflection_test_DEX_DEPS :=
ART_GTEST_stub_test_DEX_DEPS :=
ART_GTEST_transaction_test_DEX_DEPS :=
-$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_GTEST_TEST_$(dir)_DEX :=))
+$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=))
+$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=))
GTEST_DEX_DIRECTORIES :=
LOCAL_PATH :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 916fd58046..d3c8d7ee40 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -35,7 +35,7 @@ $$($(1)HOST_CORE_IMG_OUT): $$(HOST_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENCY)
--oat-location=$$($(1)HOST_CORE_OAT) --image=$$($(1)HOST_CORE_IMG_OUT) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(1)ART_HOST_ARCH) \
--instruction-set-features=$$($(1)HOST_INSTRUCTION_SET_FEATURES) \
- --host --android-root=$$(HOST_OUT)
+ --host --android-root=$$(HOST_OUT) --include-patch-information
# This "renaming" eases declaration in art/Android.mk
HOST_CORE_IMG_OUT$($(1)ART_PHONY_TEST_HOST_SUFFIX) := $($(1)HOST_CORE_IMG_OUT)
@@ -64,7 +64,7 @@ $$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENC
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(1)TARGET_ARCH) \
--instruction-set-features=$$($(1)TARGET_INSTRUCTION_SET_FEATURES) \
--implicit-checks=$(IMPLICIT_CHECKS_$($(1)TARGET_ARCH)) \
- --android-root=$$(PRODUCT_OUT)/system
+ --android-root=$$(PRODUCT_OUT)/system --include-patch-information
# This "renaming" eases declaration in art/Android.mk
TARGET_CORE_IMG_OUT$($(1)ART_PHONY_TEST_TARGET_SUFFIX) := $($(1)TARGET_CORE_IMG_OUT)
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 45cf2fba7f..e11f61a285 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -293,16 +293,12 @@ class CommonCompilerTest : public CommonRuntimeTest {
ASSERT_LE(instruction_set_features, runtime_features);
#elif defined(__aarch64__)
instruction_set = kArm64;
- // TODO: arm64 compilation support.
- compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
#elif defined(__mips__)
instruction_set = kMips;
#elif defined(__i386__)
instruction_set = kX86;
#elif defined(__x86_64__)
instruction_set = kX86_64;
- // TODO: x86_64 compilation support.
- compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
#endif
runtime_->SetInstructionSet(instruction_set);
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 07bbf15e00..dc6043dd65 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -868,7 +868,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
cu.disable_opt |= (1 << kLoadStoreElimination);
} else if (cu.instruction_set == kArm64) {
// TODO(Arm64): enable optimizations once backend is mature enough.
- cu.disable_opt = ~(uint32_t)0;
+ cu.disable_opt = ~((1 << kSuppressMethodInlining) |
+ (1 << kNullCheckElimination) |
+ (1 << kPromoteRegs));
}
cu.StartTimingSplit("BuildMIRGraph");
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 284593bf2c..e10f7cfe67 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -170,7 +170,7 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
ENCODING_MAP(WIDE(kA64Cbz2rt), SF_VARIANTS(0x34000000),
kFmtRegR, 4, 0, kFmtBitBlt, 23, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1,
- IS_BINARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP,
+ IS_BINARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP,
"cbz", "!0r, !1t", kFixupCBxZ),
ENCODING_MAP(WIDE(kA64Cmn3rro), SF_VARIANTS(0x2b00001f),
kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
@@ -288,7 +288,7 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
kFmtRegW, 4, 0, kFmtRegS, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"fmov", "!0w, !1s", kFixupNone),
- ENCODING_MAP(kA64Fmov2xS, NO_VARIANTS(0x9e6e0000),
+ ENCODING_MAP(kA64Fmov2xS, NO_VARIANTS(0x9e660000),
kFmtRegX, 4, 0, kFmtRegD, 9, 5, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"fmov", "!0x, !1S", kFixupNone),
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index b070c8a289..7d75da91d8 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -78,7 +78,8 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
- LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) OVERRIDE;
+ LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
+ OVERRIDE;
LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_dest, OpSize size) OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
@@ -89,7 +90,8 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) OVERRIDE;
+ LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale)
+ OVERRIDE;
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size) OVERRIDE;
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
@@ -177,6 +179,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
RegLocation rl_src2);
void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
bool GenInlinedReverseBits(CallInfo* info, OpSize size);
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index 0f9de5b604..6594c4b7a7 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -323,6 +323,16 @@ void Arm64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
+bool Arm64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
+ RegLocation rl_src = info->args[0];
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_dest = InlineTargetWide(info);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR4(WIDE(kA64Ubfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 62);
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+}
+
bool Arm64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
RegLocation rl_src = info->args[0];
RegLocation rl_dest = InlineTargetWide(info); // double place for result
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index bab549955c..e8f5cb9f09 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -451,9 +451,8 @@ bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1] ?
- RegLocation rl_dest = InlineTarget(info);
- RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); // kRefReg
+ RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
+ RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
@@ -468,9 +467,8 @@ bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
RegLocation rl_src_value = info->args[2]; // [size] value
- RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); // kRefReg
+ RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
RegLocation rl_value;
if (size == k64) {
@@ -497,11 +495,9 @@ void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
DCHECK_EQ(cu_->instruction_set, kArm64);
- ArmOpcode wide = is_long ? WIDE(0) : UNWIDE(0);
// Unused - RegLocation rl_src_unsafe = info->args[0];
RegLocation rl_src_obj = info->args[1]; // Object - known non-null
RegLocation rl_src_offset = info->args[2]; // long low
- rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3] //TODO: do we really need this
RegLocation rl_src_expected = info->args[4]; // int, long or Object
// If is_long, high half is in info->args[5]
RegLocation rl_src_new_value = info->args[is_long ? 6 : 5]; // int, long or Object
@@ -510,7 +506,7 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
// Load Object and offset
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
- RegLocation rl_offset = LoadValue(rl_src_offset, kRefReg);
+ RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
RegLocation rl_new_value;
RegLocation rl_expected;
@@ -542,28 +538,38 @@ bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
// result = tmp != 0;
RegStorage r_tmp;
+ RegStorage r_tmp_stored;
+ RegStorage rl_new_value_stored = rl_new_value.reg;
+ ArmOpcode wide = UNWIDE(0);
if (is_long) {
- r_tmp = AllocTempWide();
+ r_tmp_stored = r_tmp = AllocTempWide();
+ wide = WIDE(0);
} else if (is_object) {
+ // References use 64-bit registers, but are stored as compressed 32-bit values.
+ // This means r_tmp_stored != r_tmp.
r_tmp = AllocTempRef();
+ r_tmp_stored = As32BitReg(r_tmp);
+ rl_new_value_stored = As32BitReg(rl_new_value_stored);
} else {
- r_tmp = AllocTemp();
+ r_tmp_stored = r_tmp = AllocTemp();
}
+ RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
LIR* loop = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kA64Ldaxr2rX | wide, r_tmp.GetReg(), r_ptr.GetReg());
+ NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
LIR* early_exit = OpCondBranch(kCondNe, NULL);
-
- NewLIR3(kA64Stlxr3wrX | wide, As32BitReg(r_tmp).GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg());
- NewLIR3(kA64Cmp3RdT, As32BitReg(r_tmp).GetReg(), 0, ENCODE_NO_SHIFT);
+ NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
+ NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
OpCondBranch(kCondNe, loop);
+ LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
+ early_exit->target = exit_loop;
+
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LIR* exit = NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);
- early_exit->target = exit;
+ NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);
FreeTemp(r_tmp); // Now unneeded.
FreeTemp(r_ptr); // Now unneeded.
@@ -817,7 +823,7 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
if (rl_result.ref) {
- LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg);
+ LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
} else {
LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
}
@@ -914,7 +920,7 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
FreeTemp(reg_len);
}
if (rl_src.ref) {
- StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg);
+ StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
} else {
StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 6985de6574..ef9dbddbde 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -1056,8 +1056,8 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + 1);
- // Fisrt of all, check whether it make sense to use bulk copying
- // Optimization is aplicable only for range case
+ // First of all, check whether it makes sense to use bulk copying.
+ // Bulk copying is done only for the range case.
// TODO: make a constant instead of 2
if (info->is_range && regs_left_to_pass_via_stack >= 2) {
// Scan the rest of the args - if in phys_reg flush to memory
@@ -1141,7 +1141,6 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
LoadValueDirectWideFixed(rl_arg, regWide);
StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
}
- i++;
} else {
if (rl_arg.location == kLocPhysReg) {
if (rl_arg.ref) {
@@ -1163,6 +1162,9 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
call_state = next_call_insn(cu_, info, call_state, target_method,
vtable_idx, direct_code, direct_method, type);
}
+ if (rl_arg.wide) {
+ i++;
+ }
}
}
@@ -1174,12 +1176,14 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
if (reg.Valid()) {
if (rl_arg.wide) {
LoadValueDirectWideFixed(rl_arg, reg);
- i++;
} else {
LoadValueDirectFixed(rl_arg, reg);
}
call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
- direct_code, direct_method, type);
+ direct_code, direct_method, type);
+ }
+ if (rl_arg.wide) {
+ i++;
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index aaee91b817..22a4ec4d49 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -893,7 +893,14 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
int expected_scale = 0;
ArmOpcode opcode = kA64Brk1d;
r_base = Check64BitReg(r_base);
- r_index = Check64BitReg(r_index);
+
+ // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
+ // register offset load (rather than doing the sign extension in a separate instruction).
+ if (r_index.Is32Bit()) {
+ // Assemble: ``sxtw xN, wN''.
+ r_index = As64BitReg(r_index);
+ NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
+ }
if (r_dest.IsFloat()) {
if (r_dest.IsDouble()) {
@@ -920,8 +927,8 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
opcode = WIDE(kA64Ldr4rXxG);
expected_scale = 3;
break;
- case kSingle:
- case k32:
+ case kSingle: // Intentional fall-through.
+ case k32: // Intentional fall-through.
case kReference:
r_dest = Check32BitReg(r_dest);
opcode = kA64Ldr4rXxG;
@@ -963,8 +970,9 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
return load;
}
-LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) {
- return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), 2, kReference);
+LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ int scale) {
+ return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), scale, kReference);
}
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
@@ -973,7 +981,14 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
int expected_scale = 0;
ArmOpcode opcode = kA64Brk1d;
r_base = Check64BitReg(r_base);
- r_index = Check64BitReg(r_index);
+
+ // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
+ // register offset store (rather than doing the sign extension in a separate instruction).
+ if (r_index.Is32Bit()) {
+ // Assemble: ``sxtw xN, wN''.
+ r_index = As64BitReg(r_index);
+ NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
+ }
if (r_src.IsFloat()) {
if (r_src.IsDouble()) {
@@ -1035,8 +1050,9 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
return store;
}
-LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) {
- return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), 2, kReference);
+LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+ int scale) {
+ return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), scale, kReference);
}
/*
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index b699bd3bf2..36456bc4b0 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -34,6 +34,59 @@ namespace art {
namespace { // anonymous namespace
+static constexpr bool kIntrinsicIsStatic[] = {
+ true, // kIntrinsicDoubleCvt
+ true, // kIntrinsicFloatCvt
+ true, // kIntrinsicReverseBits
+ true, // kIntrinsicReverseBytes
+ true, // kIntrinsicAbsInt
+ true, // kIntrinsicAbsLong
+ true, // kIntrinsicAbsFloat
+ true, // kIntrinsicAbsDouble
+ true, // kIntrinsicMinMaxInt
+ true, // kIntrinsicMinMaxLong
+ true, // kIntrinsicMinMaxFloat
+ true, // kIntrinsicMinMaxDouble
+ true, // kIntrinsicSqrt
+ false, // kIntrinsicCharAt
+ false, // kIntrinsicCompareTo
+ false, // kIntrinsicIsEmptyOrLength
+ false, // kIntrinsicIndexOf
+ true, // kIntrinsicCurrentThread
+ true, // kIntrinsicPeek
+ true, // kIntrinsicPoke
+ false, // kIntrinsicCas
+ false, // kIntrinsicUnsafeGet
+ false, // kIntrinsicUnsafePut
+ true, // kIntrinsicSystemArrayCopyCharArray
+};
+COMPILE_ASSERT(arraysize(kIntrinsicIsStatic) == kInlineOpNop, check_arraysize_kIntrinsicIsStatic);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicDoubleCvt], DoubleCvt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloatCvt], FloatCvt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBits], ReverseBits_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicReverseBytes], ReverseBytes_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsInt], AbsInt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsLong], AbsLong_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsFloat], AbsFloat_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicAbsDouble], AbsDouble_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxInt], MinMaxInt_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxLong], MinMaxLong_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], MinMaxFloat_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], MinMaxDouble_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSqrt], Sqrt_must_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIndexOf], IndexOf_must_not_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicCurrentThread], CurrentThread_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPeek], Peek_must_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicPoke], Poke_must_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCas], Cas_must_not_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], UnsafeGet_must_not_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafePut], UnsafePut_must_not_be_static);
+COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
+ SystemArrayCopyCharArray_must_not_be_static);
+
MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
MIR* insn = mir_graph->NewMIR();
insn->offset = invoke->offset;
@@ -84,6 +137,8 @@ const char* const DexFileMethodInliner::kClassCacheNames[] = {
"Ljava/lang/Thread;", // kClassCacheJavaLangThread
"Llibcore/io/Memory;", // kClassCacheLibcoreIoMemory
"Lsun/misc/Unsafe;", // kClassCacheSunMiscUnsafe
+ "Ljava/lang/System;", // kClassCacheJavaLangSystem
+ "[C" // kClassCacheJavaLangCharArray
};
const char* const DexFileMethodInliner::kNameCacheNames[] = {
@@ -129,6 +184,7 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = {
"putObject", // kNameCachePutObject
"putObjectVolatile", // kNameCachePutObjectVolatile
"putOrderedObject", // kNameCachePutOrderedObject
+ "arraycopy", // kNameCacheArrayCopy
};
const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
@@ -204,6 +260,9 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
// kProtoCacheObjectJObject_V
{ kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong,
kClassCacheJavaLangObject } },
+ // kProtoCacheCharArrayICharArrayII_V
+ { kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt,
+ kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt}}
};
const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods[] = {
@@ -291,6 +350,10 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject),
#undef UNSAFE_GET_PUT
+ INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
+ 0),
+
+
#undef INTRINSIC
};
@@ -334,6 +397,10 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
}
intrinsic = it->second;
}
+ if (kIntrinsicIsStatic[intrinsic.opcode] != (info->type == kStatic)) {
+ // Invoke type mismatch.
+ return false;
+ }
switch (intrinsic.opcode) {
case kIntrinsicDoubleCvt:
return backend->GenInlinedDoubleCvt(info);
@@ -387,6 +454,8 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
intrinsic.d.data & kIntrinsicFlagIsObject,
intrinsic.d.data & kIntrinsicFlagIsVolatile,
intrinsic.d.data & kIntrinsicFlagIsOrdered);
+ case kIntrinsicSystemArrayCopyCharArray:
+ return backend->GenInlinedArrayCopyCharArray(info);
default:
LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
return false; // avoid warning "control reaches end of non-void function"
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index c7a3b83260..5b3b104150 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -118,6 +118,8 @@ class DexFileMethodInliner {
kClassCacheJavaLangThread,
kClassCacheLibcoreIoMemory,
kClassCacheSunMiscUnsafe,
+ kClassCacheJavaLangSystem,
+ kClassCacheJavaLangCharArray,
kClassCacheLast
};
@@ -170,6 +172,7 @@ class DexFileMethodInliner {
kNameCachePutObject,
kNameCachePutObjectVolatile,
kNameCachePutOrderedObject,
+ kNameCacheArrayCopy,
kNameCacheLast
};
@@ -214,6 +217,7 @@ class DexFileMethodInliner {
kProtoCacheObjectJJ_V,
kProtoCacheObjectJ_Object,
kProtoCacheObjectJObject_V,
+ kProtoCacheCharArrayICharArrayII_V,
kProtoCacheLast
};
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 6c670cdeba..02f39ac180 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -1280,7 +1280,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
}
Load32Disp(rl_obj.reg, offset_offset, reg_off);
MarkPossibleNullPointerException(info->opt_flags);
- Load32Disp(rl_obj.reg, value_offset, reg_ptr);
+ LoadRefDisp(rl_obj.reg, value_offset, reg_ptr, kNotVolatile);
if (range_check) {
// Set up a slow path to allow retry in case of bounds violation */
OpRegReg(kOpCmp, rl_idx.reg, reg_max);
@@ -1367,8 +1367,8 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
}
bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+ // TODO - add Mips implementation; Enable Arm64.
return false;
}
RegLocation rl_src_i = info->args[0];
@@ -1499,14 +1499,8 @@ bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- if (cu_->instruction_set == kArm64) {
- // TODO - Can ecode ? UBXF otherwise
- // OpRegRegImm(kOpAnd, rl_result.reg, 0x7fffffffffffffff);
- return false;
- } else {
- OpRegCopyWide(rl_result.reg, rl_src.reg);
- OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
- }
+ OpRegCopyWide(rl_result.reg, rl_src.reg);
+ OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
StoreValueWide(rl_dest, rl_result);
return true;
}
@@ -1533,6 +1527,11 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
return true;
}
+bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ return false;
+}
+
+
/*
* Fast String.indexOf(I) & (II). Tests for simple case of char <= 0xFFFF,
* otherwise bails to standard library code.
@@ -1651,7 +1650,8 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
break;
case kArm64:
- Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg);
+ LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
+ kNotVolatile);
break;
case kX86:
@@ -1685,10 +1685,11 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
+ RegLocation rl_result = EvalLoc(rl_dest, LocToRegClass(rl_dest), true);
if (is_long) {
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64);
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
+ || cu_->instruction_set == kArm64) {
+ LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
@@ -1696,7 +1697,11 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
FreeTemp(rl_temp_offset);
}
} else {
- LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
+ if (rl_result.ref) {
+ LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
+ } else {
+ LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
+ }
}
if (is_volatile) {
@@ -1734,8 +1739,9 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
RegLocation rl_value;
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
- if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
- StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg, k64);
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
+ || cu_->instruction_set == kArm64) {
+ StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
} else {
RegStorage rl_temp_offset = AllocTemp();
OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
@@ -1744,7 +1750,11 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
}
} else {
rl_value = LoadValue(rl_src_value);
- StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
+ if (rl_value.ref) {
+ StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
+ } else {
+ StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
+ }
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
@@ -1772,12 +1782,9 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
return;
}
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
- // Temporary disable intrinsics for Arm64. We will enable them later step by step.
- if (cu_->instruction_set != kArm64) {
- if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
- ->GenIntrinsic(this, info)) {
- return;
- }
+ if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
+ ->GenIntrinsic(this, info)) {
+ return;
}
GenInvokeNoInline(info);
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6768790d19..48855012c3 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -992,6 +992,7 @@ class Mir2Lir : public Backend {
virtual bool GenInlinedAbsDouble(CallInfo* info);
bool GenInlinedFloatCvt(CallInfo* info);
bool GenInlinedDoubleCvt(CallInfo* info);
+ virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
bool GenInlinedStringCompareTo(CallInfo* info);
bool GenInlinedCurrentThread(CallInfo* info);
@@ -1023,8 +1024,9 @@ class Mir2Lir : public Backend {
return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
}
// Load a reference at base + index and decompress into register.
- virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest) {
- return LoadBaseIndexed(r_base, r_index, r_dest, 2, kReference);
+ virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ int scale) {
+ return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
}
// Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
@@ -1050,8 +1052,9 @@ class Mir2Lir : public Backend {
return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
}
// Store an uncompressed reference into a compressed 32-bit container by index.
- virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src) {
- return StoreBaseIndexed(r_base, r_index, r_src, 2, kReference);
+ virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+ int scale) {
+ return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
}
// Store 32 bits, regardless of target.
virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 879cf93bf1..7baf2d9663 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -402,6 +402,8 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
EXT_0F_ENCODING_MAP(Pxor, 0x66, 0xEF, REG_DEF0_USE0),
EXT_0F_ENCODING2_MAP(Phaddw, 0x66, 0x38, 0x01, REG_DEF0_USE0),
EXT_0F_ENCODING2_MAP(Phaddd, 0x66, 0x38, 0x02, REG_DEF0_USE0),
+ EXT_0F_ENCODING_MAP(Haddpd, 0x66, 0x7C, REG_DEF0_USE0),
+ EXT_0F_ENCODING_MAP(Haddps, 0xF2, 0x7C, REG_DEF0_USE0),
{ kX86PextrbRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x3A, 0x14, 0, 0, 1, false }, "PextbRRI", "!0r,!1r,!2d" },
{ kX86PextrwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC5, 0x00, 0, 0, 1, false }, "PextwRRI", "!0r,!1r,!2d" },
@@ -410,6 +412,9 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86PshuflwRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0xF2, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuflwRRI", "!0r,!1r,!2d" },
{ kX86PshufdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0x70, 0, 0, 0, 1, false }, "PshuffRRI", "!0r,!1r,!2d" },
+ { kX86ShufpsRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x00, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "kX86ShufpsRRI", "!0r,!1r,!2d" },
+ { kX86ShufpdRRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0 | REG_USE1, { 0x66, 0, 0x0F, 0xC6, 0, 0, 0, 1, false }, "kX86ShufpdRRI", "!0r,!1r,!2d" },
+
{ kX86PsrawRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 4, 0, 1, false }, "PsrawRI", "!0r,!1d" },
{ kX86PsradRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x72, 0, 4, 0, 1, false }, "PsradRI", "!0r,!1d" },
{ kX86PsrlwRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x71, 0, 2, 0, 1, false }, "PsrlwRI", "!0r,!1d" },
@@ -429,7 +434,7 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86Fst64M, kMem, IS_STORE | IS_UNARY_OP | REG_USE0 | USE_FP_STACK, { 0x0, 0, 0xDD, 0x00, 0, 2, 0, 0, false }, "Fstd64M", "[!0r,!1d]" },
{ kX86Fprem, kNullary, NO_OPERAND | USE_FP_STACK, { 0xD9, 0, 0xF8, 0, 0, 0, 0, 0, false }, "Fprem64", "" },
{ kX86Fucompp, kNullary, NO_OPERAND | USE_FP_STACK, { 0xDA, 0, 0xE9, 0, 0, 0, 0, 0, false }, "Fucompp", "" },
- { kX86Fstsw16R, kNullary, NO_OPERAND | USE_FP_STACK, { 0x9B, 0xDF, 0xE0, 0, 0, 0, 0, 0, false }, "Fstsw16R", "ax" },
+ { kX86Fstsw16R, kNullary, NO_OPERAND | REG_DEFA | USE_FP_STACK, { 0x9B, 0xDF, 0xE0, 0, 0, 0, 0, 0, false }, "Fstsw16R", "ax" },
EXT_0F_ENCODING_MAP(Mova128, 0x66, 0x6F, REG_DEF0),
{ kX86Mova128MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x0F, 0x6F, 0, 0, 0, 0, false }, "Mova128MR", "[!0r+!1d],!2r" },
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 123fe90d03..55e5993dce 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -517,6 +517,7 @@ class X86Mir2Lir : public Mir2Lir {
* @returns true if a register is byte addressable.
*/
bool IsByteRegister(RegStorage reg);
+ bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
/*
* @brief generate inline code for fast case of Strng.indexOf.
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 5372512589..cf29e52bb8 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -2321,7 +2321,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// For 32-bit, SETcc only works with EAX..EDX.
RegStorage object_32reg = object.reg.Is64Bit() ? As32BitReg(object.reg) : object.reg;
- if (result_reg == object_32reg || !IsByteRegister(result_reg)) {
+ if (result_reg.GetRegNum() == object_32reg.GetRegNum() || !IsByteRegister(result_reg)) {
result_reg = AllocateByteRegister();
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 72e47d06b1..43882c2e02 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1023,6 +1023,123 @@ void X86Mir2Lir::InstallLiteralPools() {
Mir2Lir::InstallLiteralPools();
}
+bool X86Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ if (cu_->target64) {
+ // TODO: Implement ArrayCopy intrinsic for x86_64
+ return false;
+ }
+
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_srcPos = info->args[1];
+ RegLocation rl_dst = info->args[2];
+ RegLocation rl_dstPos = info->args[3];
+ RegLocation rl_length = info->args[4];
+ if (rl_srcPos.is_const && (mir_graph_->ConstantValue(rl_srcPos) < 0)) {
+ return false;
+ }
+ if (rl_dstPos.is_const && (mir_graph_->ConstantValue(rl_dstPos) < 0)) {
+ return false;
+ }
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers
+ LoadValueDirectFixed(rl_src , rs_rAX);
+ LoadValueDirectFixed(rl_dst , rs_rCX);
+ LIR* src_dst_same = OpCmpBranch(kCondEq, rs_rAX , rs_rCX, nullptr);
+ LIR* src_null_branch = OpCmpImmBranch(kCondEq, rs_rAX , 0, nullptr);
+ LIR* dst_null_branch = OpCmpImmBranch(kCondEq, rs_rCX , 0, nullptr);
+ LoadValueDirectFixed(rl_length , rs_rDX);
+ LIR* len_negative = OpCmpImmBranch(kCondLt, rs_rDX , 0, nullptr);
+ LIR* len_too_big = OpCmpImmBranch(kCondGt, rs_rDX , 128, nullptr);
+ LoadValueDirectFixed(rl_src , rs_rAX);
+ LoadWordDisp(rs_rAX , mirror::Array::LengthOffset().Int32Value(), rs_rAX);
+ LIR* src_bad_len = nullptr;
+ LIR* srcPos_negative = nullptr;
+ if (!rl_srcPos.is_const) {
+ LoadValueDirectFixed(rl_srcPos , rs_rBX);
+ srcPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr);
+ OpRegReg(kOpAdd, rs_rBX, rs_rDX);
+ src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ } else {
+ int pos_val = mir_graph_->ConstantValue(rl_srcPos.orig_sreg);
+ if (pos_val == 0) {
+ src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr);
+ } else {
+ OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
+ src_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ }
+ }
+ LIR* dstPos_negative = nullptr;
+ LIR* dst_bad_len = nullptr;
+ LoadValueDirectFixed(rl_dst, rs_rAX);
+ LoadWordDisp(rs_rAX, mirror::Array::LengthOffset().Int32Value(), rs_rAX);
+ if (!rl_dstPos.is_const) {
+ LoadValueDirectFixed(rl_dstPos , rs_rBX);
+ dstPos_negative = OpCmpImmBranch(kCondLt, rs_rBX , 0, nullptr);
+ OpRegRegReg(kOpAdd, rs_rBX, rs_rBX, rs_rDX);
+ dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ } else {
+ int pos_val = mir_graph_->ConstantValue(rl_dstPos.orig_sreg);
+ if (pos_val == 0) {
+ dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rDX, nullptr);
+ } else {
+ OpRegRegImm(kOpAdd, rs_rBX, rs_rDX, pos_val);
+ dst_bad_len = OpCmpBranch(kCondLt, rs_rAX , rs_rBX, nullptr);
+ }
+ }
+ // everything is checked now
+ LoadValueDirectFixed(rl_src , rs_rAX);
+ LoadValueDirectFixed(rl_dst , rs_rBX);
+ LoadValueDirectFixed(rl_srcPos , rs_rCX);
+ NewLIR5(kX86Lea32RA, rs_rAX.GetReg(), rs_rAX.GetReg(),
+ rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value());
+ // RAX now holds the address of the first src element to be copied
+
+ LoadValueDirectFixed(rl_dstPos , rs_rCX);
+ NewLIR5(kX86Lea32RA, rs_rBX.GetReg(), rs_rBX.GetReg(),
+ rs_rCX.GetReg() , 1, mirror::Array::DataOffset(2).Int32Value() );
+ // RBX now holds the address of the first dst element to be copied
+
+ // check if the number of elements to be copied is odd or even. If odd
+ // then copy the first element (so that the remaining number of elements
+ // is even).
+ LoadValueDirectFixed(rl_length , rs_rCX);
+ OpRegImm(kOpAnd, rs_rCX, 1);
+ LIR* jmp_to_begin_loop = OpCmpImmBranch(kCondEq, rs_rCX, 0, nullptr);
+ OpRegImm(kOpSub, rs_rDX, 1);
+ LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
+ StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSignedHalf);
+
+ // since the remaining number of elements is even, we will copy by
+ // two elements at a time.
+ LIR *beginLoop = NewLIR0(kPseudoTargetLabel);
+ LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_rDX , 0, nullptr);
+ OpRegImm(kOpSub, rs_rDX, 2);
+ LoadBaseIndexedDisp(rs_rAX, rs_rDX, 1, 0, rs_rCX, kSingle);
+ StoreBaseIndexedDisp(rs_rBX, rs_rDX, 1, 0, rs_rCX, kSingle);
+ OpUnconditionalBranch(beginLoop);
+ LIR *check_failed = NewLIR0(kPseudoTargetLabel);
+ LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
+ LIR *return_point = NewLIR0(kPseudoTargetLabel);
+ jmp_to_ret->target = return_point;
+ jmp_to_begin_loop->target = beginLoop;
+ src_dst_same->target = check_failed;
+ len_negative->target = check_failed;
+ len_too_big->target = check_failed;
+ src_null_branch->target = check_failed;
+ if (srcPos_negative != nullptr)
+ srcPos_negative ->target = check_failed;
+ if (src_bad_len != nullptr)
+ src_bad_len->target = check_failed;
+ dst_null_branch->target = check_failed;
+ if (dstPos_negative != nullptr)
+ dstPos_negative->target = check_failed;
+ if (dst_bad_len != nullptr)
+ dst_bad_len->target = check_failed;
+ AddIntrinsicSlowPath(info, launchpad_branch, return_point);
+ return true;
+}
+
+
/*
* Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff,
* otherwise bails to standard library code.
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index ff243ce149..e271e9d100 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -564,11 +564,15 @@ enum X86OpCode {
Binary0fOpCode(kX86Pxor), // parallel XOR 128 bits x 1
Binary0fOpCode(kX86Phaddw), // parallel horizontal addition 16 bits x 8
Binary0fOpCode(kX86Phaddd), // parallel horizontal addition 32 bits x 4
+ Binary0fOpCode(kX86Haddpd), // parallel FP horizontal addition 64 bits x 2
+ Binary0fOpCode(kX86Haddps), // parallel FP horizontal addition 32 bits x 4
kX86PextrbRRI, // Extract 8 bits from XMM into GPR
kX86PextrwRRI, // Extract 16 bits from XMM into GPR
kX86PextrdRRI, // Extract 32 bits from XMM into GPR
kX86PshuflwRRI, // Shuffle 16 bits in lower 64 bits of XMM.
kX86PshufdRRI, // Shuffle 32 bits in XMM.
+ kX86ShufpsRRI, // FP Shuffle 32 bits in XMM.
+ kX86ShufpdRRI, // FP Shuffle 64 bits in XMM.
kX86PsrawRI, // signed right shift of floating point registers 16 bits x 8
kX86PsradRI, // signed right shift of floating point registers 32 bits x 4
kX86PsrlwRI, // logical right shift of floating point registers 16 bits x 8
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 96625c5dac..770ae89ca2 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1461,6 +1461,18 @@ static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Cla
return false;
}
+static void CheckAndClearResolveException(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(self->IsExceptionPending());
+ mirror::Throwable* exception = self->GetException(nullptr);
+ std::string descriptor = exception->GetClass()->GetDescriptor();
+ if (descriptor != "Ljava/lang/IncompatibleClassChangeError;" &&
+ descriptor != "Ljava/lang/NoClassDefFoundError;") {
+ LOG(FATAL) << "Unexpected exception " << exception->Dump();
+ }
+ self->ClearException();
+}
+
static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manager,
size_t class_def_index)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
@@ -1496,8 +1508,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
if (klass == NULL) {
// Class couldn't be resolved, for example, super-class is in a different dex file. Don't
// attempt to resolve methods and fields when there is no declaring class.
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+ CheckAndClearResolveException(soa.Self());
resolve_fields_and_methods = false;
} else {
resolve_fields_and_methods = manager->GetCompiler()->IsImage();
@@ -1516,8 +1527,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
dex_cache, class_loader, true);
if (field == NULL) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+ CheckAndClearResolveException(soa.Self());
}
}
it.Next();
@@ -1532,8 +1542,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
mirror::ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(),
dex_cache, class_loader, false);
if (field == NULL) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+ CheckAndClearResolveException(soa.Self());
}
}
it.Next();
@@ -1545,8 +1554,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
NullHandle<mirror::ArtMethod>(),
it.GetMethodInvokeType(class_def));
if (method == NULL) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+ CheckAndClearResolveException(soa.Self());
}
it.Next();
}
@@ -1556,8 +1564,7 @@ static void ResolveClassFieldsAndMethods(const ParallelCompilationManager* manag
NullHandle<mirror::ArtMethod>(),
it.GetMethodInvokeType(class_def));
if (method == NULL) {
- CHECK(soa.Self()->IsExceptionPending());
- soa.Self()->ClearException();
+ CheckAndClearResolveException(soa.Self());
}
it.Next();
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fb3341ba71..92b2feeb7f 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -43,6 +43,7 @@ class CompilerOptions {
static const size_t kDefaultNumDexMethodsThreshold = 900;
static constexpr double kDefaultTopKProfileThreshold = 90.0;
static const bool kDefaultIncludeDebugSymbols = kIsDebugBuild;
+ static const bool kDefaultIncludePatchInformation = false;
CompilerOptions() :
compiler_filter_(kDefaultCompilerFilter),
@@ -52,6 +53,7 @@ class CompilerOptions {
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
generate_gdb_information_(false),
+ include_patch_information_(kDefaultIncludePatchInformation),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
include_debug_symbols_(kDefaultIncludeDebugSymbols),
explicit_null_checks_(true),
@@ -69,6 +71,7 @@ class CompilerOptions {
size_t tiny_method_threshold,
size_t num_dex_methods_threshold,
bool generate_gdb_information,
+ bool include_patch_information,
double top_k_profile_threshold,
bool include_debug_symbols,
bool explicit_null_checks,
@@ -85,6 +88,7 @@ class CompilerOptions {
tiny_method_threshold_(tiny_method_threshold),
num_dex_methods_threshold_(num_dex_methods_threshold),
generate_gdb_information_(generate_gdb_information),
+ include_patch_information_(include_patch_information),
top_k_profile_threshold_(top_k_profile_threshold),
include_debug_symbols_(include_debug_symbols),
explicit_null_checks_(explicit_null_checks),
@@ -188,6 +192,10 @@ class CompilerOptions {
return generate_gdb_information_;
}
+ bool GetIncludePatchInformation() const {
+ return include_patch_information_;
+ }
+
private:
CompilerFilter compiler_filter_;
size_t huge_method_threshold_;
@@ -196,6 +204,7 @@ class CompilerOptions {
size_t tiny_method_threshold_;
size_t num_dex_methods_threshold_;
bool generate_gdb_information_;
+ bool include_patch_information_;
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
bool include_debug_symbols_;
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 404e3f8692..60f76efed0 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -69,97 +69,7 @@ bool ElfFixup::FixupDynamic(ElfFile& elf_file, uintptr_t base_address) {
for (Elf32_Word i = 0; i < elf_file.GetDynamicNum(); i++) {
Elf32_Dyn& elf_dyn = elf_file.GetDynamic(i);
Elf32_Word d_tag = elf_dyn.d_tag;
- bool elf_dyn_needs_fixup = false;
- switch (d_tag) {
- // case 1: well known d_tag values that imply Elf32_Dyn.d_un contains an address in d_ptr
- case DT_PLTGOT:
- case DT_HASH:
- case DT_STRTAB:
- case DT_SYMTAB:
- case DT_RELA:
- case DT_INIT:
- case DT_FINI:
- case DT_REL:
- case DT_DEBUG:
- case DT_JMPREL: {
- elf_dyn_needs_fixup = true;
- break;
- }
- // d_val or ignored values
- case DT_NULL:
- case DT_NEEDED:
- case DT_PLTRELSZ:
- case DT_RELASZ:
- case DT_RELAENT:
- case DT_STRSZ:
- case DT_SYMENT:
- case DT_SONAME:
- case DT_RPATH:
- case DT_SYMBOLIC:
- case DT_RELSZ:
- case DT_RELENT:
- case DT_PLTREL:
- case DT_TEXTREL:
- case DT_BIND_NOW:
- case DT_INIT_ARRAYSZ:
- case DT_FINI_ARRAYSZ:
- case DT_RUNPATH:
- case DT_FLAGS: {
- break;
- }
- // boundary values that should not be used
- case DT_ENCODING:
- case DT_LOOS:
- case DT_HIOS:
- case DT_LOPROC:
- case DT_HIPROC: {
- LOG(FATAL) << "Illegal d_tag value 0x" << std::hex << d_tag;
- break;
- }
- default: {
- // case 2: "regular" DT_* ranges where even d_tag values imply an address in d_ptr
- if ((DT_ENCODING < d_tag && d_tag < DT_LOOS)
- || (DT_LOOS < d_tag && d_tag < DT_HIOS)
- || (DT_LOPROC < d_tag && d_tag < DT_HIPROC)) {
- // Special case for MIPS which breaks the regular rules between DT_LOPROC and DT_HIPROC
- if (elf_file.GetHeader().e_machine == EM_MIPS) {
- switch (d_tag) {
- case DT_MIPS_RLD_VERSION:
- case DT_MIPS_TIME_STAMP:
- case DT_MIPS_ICHECKSUM:
- case DT_MIPS_IVERSION:
- case DT_MIPS_FLAGS:
- case DT_MIPS_LOCAL_GOTNO:
- case DT_MIPS_CONFLICTNO:
- case DT_MIPS_LIBLISTNO:
- case DT_MIPS_SYMTABNO:
- case DT_MIPS_UNREFEXTNO:
- case DT_MIPS_GOTSYM:
- case DT_MIPS_HIPAGENO: {
- break;
- }
- case DT_MIPS_BASE_ADDRESS:
- case DT_MIPS_CONFLICT:
- case DT_MIPS_LIBLIST:
- case DT_MIPS_RLD_MAP: {
- elf_dyn_needs_fixup = true;
- break;
- }
- default: {
- LOG(FATAL) << "Unknown MIPS d_tag value 0x" << std::hex << d_tag;
- break;
- }
- }
- } else if ((elf_dyn.d_tag % 2) == 0) {
- elf_dyn_needs_fixup = true;
- }
- } else {
- LOG(FATAL) << "Unknown d_tag value 0x" << std::hex << d_tag;
- }
- break;
- }
- }
- if (elf_dyn_needs_fixup) {
+ if (IsDynamicSectionPointer(d_tag, elf_file.GetHeader().e_machine)) {
uint32_t d_ptr = elf_dyn.d_un.d_ptr;
if (DEBUG_FIXUP) {
LOG(INFO) << StringPrintf("In %s moving Elf32_Dyn[%d] from 0x%08x to 0x%08" PRIxPTR,
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e4dcaa7426..42743862fe 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -803,6 +803,25 @@ bool ElfWriterQuick::Create(File* elf_file,
return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
}
+// Add patch information to this section. Each patch is an Elf32_Word that
+// identifies an offset from the start of the text section
+void ElfWriterQuick::ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug) {
+ size_t size =
+ compiler_driver_->GetCodeToPatch().size() +
+ compiler_driver_->GetMethodsToPatch().size() +
+ compiler_driver_->GetClassesToPatch().size();
+ if (size == 0) {
+ if (debug) {
+ LOG(INFO) << "No patches to record";
+ }
+ return;
+ }
+ buffer->resize(size * sizeof(uintptr_t));
+ if (debug) {
+ LOG(INFO) << "Patches reserved for " << size;
+ }
+}
+
bool ElfWriterQuick::Write(OatWriter* oat_writer,
const std::vector<const DexFile*>& dex_files_unused,
const std::string& android_root_unused,
@@ -836,6 +855,13 @@ bool ElfWriterQuick::Write(OatWriter* oat_writer,
builder.RegisterRawSection(debug_str);
}
+ if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {
+ ElfRawSectionBuilder oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0,
+ sizeof(size_t), sizeof(size_t));
+ ReservePatchSpace(oat_patches.GetBuffer(), debug);
+ builder.RegisterRawSection(oat_patches);
+ }
+
return builder.Write();
}
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index 6eb5d68817..a0d36df471 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -51,6 +51,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
void AddDebugSymbols(ElfBuilder& builder,
OatWriter* oat_writer,
bool debug);
+ void ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug);
class ElfSectionBuilder {
public:
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 6e5f19a8c5..acfa607f39 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -27,6 +27,8 @@
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
+#include "elf_file.h"
+#include "elf_utils.h"
#include "elf_writer.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
@@ -138,7 +140,8 @@ bool ImageWriter::Write(const std::string& image_filename,
ElfWriter::GetOatElfInformation(oat_file.get(), oat_loaded_size, oat_data_offset);
CalculateNewObjectOffsets(oat_loaded_size, oat_data_offset);
CopyAndFixupObjects();
- PatchOatCodeAndMethods();
+
+ PatchOatCodeAndMethods(oat_file.get());
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
@@ -782,7 +785,25 @@ static Class* GetTargetType(const CompilerDriver::TypePatchInformation* patch)
return klass;
}
-void ImageWriter::PatchOatCodeAndMethods() {
+void ImageWriter::PatchOatCodeAndMethods(File* elf_file) {
+ std::vector<uintptr_t> patches;
+ std::set<uintptr_t> patches_set;
+ auto maybe_push = [&patches, &patches_set] (uintptr_t p) {
+ if (patches_set.find(p) == patches_set.end()) {
+ patches.push_back(p);
+ patches_set.insert(p);
+ }
+ };
+ const bool add_patches = compiler_driver_.GetCompilerOptions().GetIncludePatchInformation();
+ if (add_patches) {
+ // TODO if we are adding patches the resulting ELF file might have a potentially rather large
+ // amount of free space where patches might have been placed. We should adjust the ELF file to
+ // get rid of this excess space.
+ patches.reserve(compiler_driver_.GetCodeToPatch().size() +
+ compiler_driver_.GetMethodsToPatch().size() +
+ compiler_driver_.GetClassesToPatch().size());
+ }
+ uintptr_t loc = 0;
Thread* self = Thread::Current();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
@@ -828,14 +849,20 @@ void ImageWriter::PatchOatCodeAndMethods() {
} else {
value = PointerToLowMemUInt32(GetOatAddress(code_offset));
}
- SetPatchLocation(patch, value);
+ SetPatchLocation(patch, value, &loc);
+ if (add_patches && !patch->AsCall()->IsRelative()) {
+ maybe_push(loc);
+ }
}
const CallPatches& methods_to_patch = compiler_driver_.GetMethodsToPatch();
for (size_t i = 0; i < methods_to_patch.size(); i++) {
const CompilerDriver::CallPatchInformation* patch = methods_to_patch[i];
ArtMethod* target = GetTargetMethod(patch);
- SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)));
+ SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)), &loc);
+ if (add_patches && !patch->AsCall()->IsRelative()) {
+ maybe_push(loc);
+ }
}
const std::vector<const CompilerDriver::TypePatchInformation*>& classes_to_patch =
@@ -843,16 +870,50 @@ void ImageWriter::PatchOatCodeAndMethods() {
for (size_t i = 0; i < classes_to_patch.size(); i++) {
const CompilerDriver::TypePatchInformation* patch = classes_to_patch[i];
Class* target = GetTargetType(patch);
- SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)));
+ SetPatchLocation(patch, PointerToLowMemUInt32(GetImageAddress(target)), &loc);
+ if (add_patches) {
+ maybe_push(loc);
+ }
}
// Update the image header with the new checksum after patching
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
image_header->SetOatChecksum(oat_file_->GetOatHeader().GetChecksum());
self->EndAssertNoThreadSuspension(old_cause);
+
+ // Update the ElfFiles SHT_OAT_PATCH section to include the patches.
+ if (add_patches) {
+ std::string err;
+ // TODO we are mapping in the contents of this file twice. We should be able
+ // to do it only once, which would be better.
+ std::unique_ptr<ElfFile> file(ElfFile::Open(elf_file, true, false, &err));
+ if (file == nullptr) {
+ LOG(ERROR) << err;
+ }
+ Elf32_Shdr* shdr = file->FindSectionByName(".oat_patches");
+ if (shdr != nullptr) {
+ CHECK_EQ(shdr, file->FindSectionByType(SHT_OAT_PATCH))
+ << "Incorrect type for .oat_patches section";
+ CHECK_LE(patches.size() * sizeof(uintptr_t), shdr->sh_size)
+ << "We got more patches than anticipated";
+ CHECK_LE(reinterpret_cast<uintptr_t>(file->Begin()) + shdr->sh_offset + shdr->sh_size,
+ reinterpret_cast<uintptr_t>(file->End())) << "section is too large";
+ CHECK(shdr == &file->GetSectionHeader(file->GetSectionHeaderNum() - 1) ||
+ shdr->sh_offset + shdr->sh_size <= (shdr + 1)->sh_offset)
+ << "Section overlaps onto next section";
+ // It's mmap'd so we can just memcpy.
+ memcpy(file->Begin() + shdr->sh_offset, patches.data(), patches.size()*sizeof(uintptr_t));
+ // TODO We should fill in the newly empty space between the last patch and the start of the
+ // next section by moving the following sections down if possible.
+ shdr->sh_size = patches.size() * sizeof(uintptr_t);
+ } else {
+ LOG(ERROR) << "Unable to find section header for SHT_OAT_PATCH";
+ }
+ }
}
-void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value) {
+void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value,
+ uintptr_t* patched_ptr) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const void* quick_oat_code = class_linker->GetQuickOatCodeFor(patch->GetDexFile(),
patch->GetReferrerClassDefIdx(),
@@ -885,6 +946,14 @@ void ImageWriter::SetPatchLocation(const CompilerDriver::PatchInformation* patch
}
*patch_location = value;
oat_header.UpdateChecksum(patch_location, sizeof(value));
+
+ uintptr_t loc = reinterpret_cast<uintptr_t>(patch_location) -
+ (reinterpret_cast<uintptr_t>(oat_file_->Begin()) + oat_header.GetExecutableOffset());
+ CHECK_GT(reinterpret_cast<uintptr_t>(patch_location),
+ reinterpret_cast<uintptr_t>(oat_file_->Begin()) + oat_header.GetExecutableOffset());
+ CHECK_LT(loc, oat_file_->Size() - oat_header.GetExecutableOffset());
+
+ *patched_ptr = loc;
}
} // namespace art
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index aff155affc..2bcb41e3fe 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -150,9 +150,10 @@ class ImageWriter {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Patches references in OatFile to expect runtime addresses.
- void PatchOatCodeAndMethods()
+ void PatchOatCodeAndMethods(File* elf_file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value)
+ void SetPatchLocation(const CompilerDriver::PatchInformation* patch, uint32_t value,
+ uintptr_t* patched_location)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const CompilerDriver& compiler_driver_;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 25b489ba79..b4d863bf32 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -61,8 +61,8 @@ class JniCompilerTest : public CommonCompilerTest {
method = c->FindVirtualMethod(method_name, method_sig);
}
ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
- if (method->GetEntryPointFromQuickCompiledCode() == nullptr) {
- ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() == nullptr);
+ if (method->GetEntryPointFromQuickCompiledCode() == nullptr ||
+ method->GetEntryPointFromQuickCompiledCode() == class_linker_->GetQuickGenericJniTrampoline()) {
CompileMethod(method);
ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
<< method_name << " " << method_sig;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 3387f914b6..80e77245fc 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -60,6 +60,7 @@
#include "runtime.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
+#include "utils.h"
#include "vector_output_stream.h"
#include "well_known_classes.h"
#include "zip_archive.h"
@@ -144,8 +145,8 @@ static void Usage(const char* fmt, ...) {
UsageError(" Example: --android-root=out/host/linux-x86");
UsageError(" Default: $ANDROID_ROOT");
UsageError("");
- UsageError(" --instruction-set=(arm|arm64|mips|x86|x86_64): compile for a particular instruction");
- UsageError(" set.");
+ UsageError(" --instruction-set=(arm|arm64|mips|x86|x86_64): compile for a particular");
+ UsageError(" instruction set.");
UsageError(" Example: --instruction-set=x86");
UsageError(" Default: arm");
UsageError("");
@@ -203,6 +204,11 @@ static void Usage(const char* fmt, ...) {
UsageError("");
UsageError(" --dump-timing: display a breakdown of where time was spent");
UsageError("");
+ UsageError(" --include-patch-information: Include patching information so the generated code");
+ UsageError(" can have its base address moved without full recompilation.");
+ UsageError("");
+ UsageError(" --no-include-patch-information: Do not include patching information.");
+ UsageError("");
UsageError(" --include-debug-symbols: Include ELF symbols in this oat file");
UsageError("");
UsageError(" --no-include-debug-symbols: Do not include ELF symbols in this oat file");
@@ -271,7 +277,7 @@ class Dex2Oat {
LOG(ERROR) << "Failed to open image classes file " << image_classes_filename;
return nullptr;
}
- std::unique_ptr<CompilerDriver::DescriptorSet> result(ReadImageClasses(*image_classes_file.get()));
+ std::unique_ptr<CompilerDriver::DescriptorSet> result(ReadImageClasses(*image_classes_file));
image_classes_file->close();
return result.release();
}
@@ -510,16 +516,6 @@ class Dex2Oat {
DISALLOW_IMPLICIT_CONSTRUCTORS(Dex2Oat);
};
-static bool ParseInt(const char* in, int* out) {
- char* end;
- int result = strtol(in, &end, 10);
- if (in == end || *end != '\0') {
- return false;
- }
- *out = result;
- return true;
-}
-
static size_t OpenDexFiles(const std::vector<const char*>& dex_filenames,
const std::vector<const char*>& dex_locations,
std::vector<const DexFile*>& dex_files) {
@@ -827,6 +823,8 @@ static int dex2oat(int argc, char** argv) {
bool dump_stats = false;
bool dump_timing = false;
bool dump_passes = false;
+ bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
+ bool explicit_include_patch_information = false;
bool include_debug_symbols = kIsDebugBuild;
bool dump_slow_timing = kIsDebugBuild;
bool watch_dog_enabled = !kIsTargetBuild;
@@ -1037,6 +1035,12 @@ static int dex2oat(int argc, char** argv) {
}
}
has_explicit_checks_options = true;
+ } else if (option == "--include-patch-information") {
+ include_patch_information = true;
+ explicit_include_patch_information = true;
+ } else if (option == "--no-include-patch-information") {
+ include_patch_information = false;
+ explicit_include_patch_information = true;
} else {
Usage("Unknown argument %s", option.data());
}
@@ -1166,6 +1170,11 @@ static int dex2oat(int argc, char** argv) {
CheckExplicitCheckOptions(instruction_set, &explicit_null_checks, &explicit_so_checks,
&explicit_suspend_checks);
+ if (!explicit_include_patch_information) {
+ include_patch_information =
+ (compiler_kind == Compiler::kQuick && CompilerOptions::kDefaultIncludePatchInformation);
+ }
+
CompilerOptions compiler_options(compiler_filter,
huge_method_threshold,
large_method_threshold,
@@ -1173,6 +1182,7 @@ static int dex2oat(int argc, char** argv) {
tiny_method_threshold,
num_dex_methods_threshold,
generate_gdb_information,
+ include_patch_information,
top_k_profile_threshold,
include_debug_symbols,
explicit_null_checks,
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 14a5b5f2bf..e6cbf05744 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -641,6 +641,21 @@ DISASSEMBLER_ENTRY(cmp,
store = true;
immediate_bytes = 1;
break;
+ case 0x7C:
+ if (prefix[0] == 0xF2) {
+ opcode << "haddps";
+ prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
+ } else if (prefix[2] == 0x66) {
+ opcode << "haddpd";
+ prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
+ } else {
+ opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+ break;
+ }
+ src_reg_file = dst_reg_file = SSE;
+ has_modrm = true;
+ load = true;
+ break;
case 0x7E:
if (prefix[2] == 0x66) {
src_reg_file = SSE;
@@ -733,6 +748,18 @@ DISASSEMBLER_ENTRY(cmp,
opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
}
break;
+ case 0xC6:
+ if (prefix[2] == 0x66) {
+ opcode << "shufpd";
+ prefix[2] = 0;
+ } else {
+ opcode << "shufps";
+ }
+ has_modrm = true;
+ store = true;
+ src_reg_file = dst_reg_file = SSE;
+ immediate_bytes = 1;
+ break;
case 0xC7:
static const char* x0FxC7_opcodes[] = { "unknown-0f-c7", "cmpxchg8b", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7", "unknown-0f-c7" };
modrm_opcodes = x0FxC7_opcodes;
diff --git a/patchoat/Android.mk b/patchoat/Android.mk
new file mode 100644
index 0000000000..8b6b9ad773
--- /dev/null
+++ b/patchoat/Android.mk
@@ -0,0 +1,45 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH := $(call my-dir)
+
+include art/build/Android.executable.mk
+
+PATCHOAT_SRC_FILES := \
+ patchoat.cc
+
+# TODO: Remove this when the framework (installd) supports pushing the
+# right instruction-set parameter for the primary architecture.
+ifneq ($(filter ro.zygote=zygote64,$(PRODUCT_DEFAULT_PROPERTY_OVERRIDES)),)
+ patchoat_arch := 64
+else
+ patchoat_arch := 32
+endif
+
+ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
+ $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),libcutils,art/compiler,target,ndebug,$(patchoat_arch)))
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),true)
+ $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),libcutils,art/compiler,target,debug,$(patchoat_arch)))
+endif
+
+# We always build patchoat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
+ifeq ($(ART_BUILD_NDEBUG),true)
+ $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,ndebug))
+endif
+ifeq ($(ART_BUILD_DEBUG),true)
+ $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,debug))
+endif
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
new file mode 100644
index 0000000000..dcf8c70501
--- /dev/null
+++ b/patchoat/patchoat.cc
@@ -0,0 +1,1020 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "patchoat.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+
+#include <string>
+#include <vector>
+
+#include "base/stringpiece.h"
+#include "base/stringprintf.h"
+#include "elf_utils.h"
+#include "elf_file.h"
+#include "image.h"
+#include "instruction_set.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/object.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference.h"
+#include "noop_compiler_callbacks.h"
+#include "offsets.h"
+#include "os.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+#include "utils.h"
+
+namespace art {
+
+static InstructionSet ElfISAToInstructionSet(Elf32_Word isa) {
+ switch (isa) {
+ case EM_ARM:
+ return kArm;
+ case EM_AARCH64:
+ return kArm64;
+ case EM_386:
+ return kX86;
+ case EM_X86_64:
+ return kX86_64;
+ case EM_MIPS:
+ return kMips;
+ default:
+ return kNone;
+ }
+}
+
+bool PatchOat::Patch(const std::string& image_location, off_t delta,
+ File* output_image, InstructionSet isa,
+ TimingLogger* timings) {
+ CHECK(Runtime::Current() == nullptr);
+ CHECK(output_image != nullptr);
+ CHECK_GE(output_image->Fd(), 0);
+ CHECK(!image_location.empty()) << "image file must have a filename.";
+ CHECK_NE(isa, kNone);
+
+ TimingLogger::ScopedTiming t("Runtime Setup", timings);
+ const char *isa_name = GetInstructionSetString(isa);
+ std::string image_filename(GetSystemImageFilename(image_location.c_str(), isa));
+ std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str()));
+ if (input_image.get() == nullptr) {
+ LOG(ERROR) << "unable to open input image file.";
+ return false;
+ }
+ int64_t image_len = input_image->GetLength();
+ if (image_len < 0) {
+ LOG(ERROR) << "Error while getting image length";
+ return false;
+ }
+ ImageHeader image_header;
+ if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
+ sizeof(image_header), 0)) {
+ LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
+ return false;
+ }
+
+ // Set up the runtime
+ Runtime::Options options;
+ NoopCompilerCallbacks callbacks;
+ options.push_back(std::make_pair("compilercallbacks", &callbacks));
+ std::string img = "-Ximage:" + image_location;
+ options.push_back(std::make_pair(img.c_str(), nullptr));
+ options.push_back(std::make_pair("imageinstructionset", reinterpret_cast<const void*>(isa_name)));
+ if (!Runtime::Create(options, false)) {
+ LOG(ERROR) << "Unable to initialize runtime";
+ return false;
+ }
+ // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
+ // give it away now and then switch to a more manageable ScopedObjectAccess.
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ ScopedObjectAccess soa(Thread::Current());
+
+ t.NewTiming("Image and oat Patching setup");
+ // Create the map where we will write the image patches to.
+ std::string error_msg;
+ std::unique_ptr<MemMap> image(MemMap::MapFile(image_len, PROT_READ | PROT_WRITE, MAP_PRIVATE,
+ input_image->Fd(), 0,
+ input_image->GetPath().c_str(),
+ &error_msg));
+ if (image.get() == nullptr) {
+ LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
+ return false;
+ }
+ gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
+
+ PatchOat p(image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ delta, timings);
+ t.NewTiming("Patching files");
+ if (!p.PatchImage()) {
+ LOG(INFO) << "Failed to patch image file " << input_image->GetPath();
+ return false;
+ }
+
+ t.NewTiming("Writing files");
+ if (!p.WriteImage(output_image)) {
+ return false;
+ }
+ return true;
+}
+
+bool PatchOat::Patch(const File* input_oat, const std::string& image_location, off_t delta,
+ File* output_oat, File* output_image, InstructionSet isa,
+ TimingLogger* timings) {
+ CHECK(Runtime::Current() == nullptr);
+ CHECK(output_image != nullptr);
+ CHECK_GE(output_image->Fd(), 0);
+ CHECK(input_oat != nullptr);
+ CHECK(output_oat != nullptr);
+ CHECK_GE(input_oat->Fd(), 0);
+ CHECK_GE(output_oat->Fd(), 0);
+ CHECK(!image_location.empty()) << "image file must have a filename.";
+
+ TimingLogger::ScopedTiming t("Runtime Setup", timings);
+
+ if (isa == kNone) {
+ Elf32_Ehdr elf_hdr;
+ if (sizeof(elf_hdr) != input_oat->Read(reinterpret_cast<char*>(&elf_hdr), sizeof(elf_hdr), 0)) {
+ LOG(ERROR) << "unable to read elf header";
+ return false;
+ }
+ isa = ElfISAToInstructionSet(elf_hdr.e_machine);
+ }
+ const char* isa_name = GetInstructionSetString(isa);
+ std::string image_filename(GetSystemImageFilename(image_location.c_str(), isa));
+ std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str()));
+ if (input_image.get() == nullptr) {
+ LOG(ERROR) << "unable to open input image file.";
+ return false;
+ }
+ int64_t image_len = input_image->GetLength();
+ if (image_len < 0) {
+ LOG(ERROR) << "Error while getting image length";
+ return false;
+ }
+ ImageHeader image_header;
+ if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
+ sizeof(image_header), 0)) {
+ LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
+ }
+
+ // Set up the runtime
+ Runtime::Options options;
+ NoopCompilerCallbacks callbacks;
+ options.push_back(std::make_pair("compilercallbacks", &callbacks));
+ std::string img = "-Ximage:" + image_location;
+ options.push_back(std::make_pair(img.c_str(), nullptr));
+ options.push_back(std::make_pair("imageinstructionset", reinterpret_cast<const void*>(isa_name)));
+ if (!Runtime::Create(options, false)) {
+ LOG(ERROR) << "Unable to initialize runtime";
+ return false;
+ }
+ // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
+ // give it away now and then switch to a more manageable ScopedObjectAccess.
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ ScopedObjectAccess soa(Thread::Current());
+
+ t.NewTiming("Image and oat Patching setup");
+ // Create the map where we will write the image patches to.
+ std::string error_msg;
+ std::unique_ptr<MemMap> image(MemMap::MapFile(image_len, PROT_READ | PROT_WRITE, MAP_PRIVATE,
+ input_image->Fd(), 0,
+ input_image->GetPath().c_str(),
+ &error_msg));
+ if (image.get() == nullptr) {
+ LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg;
+ return false;
+ }
+ gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
+
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(const_cast<File*>(input_oat),
+ PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
+ if (elf.get() == nullptr) {
+ LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg;
+ return false;
+ }
+
+ PatchOat p(elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+ delta, timings);
+ t.NewTiming("Patching files");
+ if (!p.PatchElf()) {
+ LOG(INFO) << "Failed to patch oat file " << input_oat->GetPath();
+ return false;
+ }
+ if (!p.PatchImage()) {
+ LOG(INFO) << "Failed to patch image file " << input_image->GetPath();
+ return false;
+ }
+
+ t.NewTiming("Writing files");
+ if (!p.WriteElf(output_oat)) {
+ return false;
+ }
+ if (!p.WriteImage(output_image)) {
+ return false;
+ }
+ return true;
+}
+
+bool PatchOat::WriteElf(File* out) {
+ TimingLogger::ScopedTiming t("Writing Elf File", timings_);
+ CHECK(oat_file_.get() != nullptr);
+ CHECK(out != nullptr);
+ size_t expect = oat_file_->Size();
+ if (out->WriteFully(reinterpret_cast<char*>(oat_file_->Begin()), expect) &&
+ out->SetLength(expect) == 0) {
+ return true;
+ } else {
+ LOG(ERROR) << "Writing to oat file " << out->GetPath() << " failed.";
+ return false;
+ }
+}
+
+bool PatchOat::WriteImage(File* out) {
+ TimingLogger::ScopedTiming t("Writing image File", timings_);
+ CHECK(image_ != nullptr);
+ CHECK(out != nullptr);
+ size_t expect = image_->Size();
+ if (out->WriteFully(reinterpret_cast<char*>(image_->Begin()), expect) &&
+ out->SetLength(expect) == 0) {
+ return true;
+ } else {
+ LOG(ERROR) << "Writing to image file " << out->GetPath() << " failed.";
+ return false;
+ }
+}
+
+bool PatchOat::PatchImage() {
+ ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
+ CHECK_GT(image_->Size(), sizeof(ImageHeader));
+ // These are the roots from the original file.
+ mirror::Object* img_roots = image_header->GetImageRoots();
+ image_header->RelocateImage(delta_);
+
+ VisitObject(img_roots);
+ if (!image_header->IsValid()) {
+ LOG(ERROR) << "reloction renders image header invalid";
+ return false;
+ }
+
+ {
+ TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
+ // Walk the bitmap.
+ WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ bitmap_->Walk(PatchOat::BitmapCallback, this);
+ }
+ return true;
+}
+
+bool PatchOat::InHeap(mirror::Object* o) {
+ uintptr_t begin = reinterpret_cast<uintptr_t>(heap_->Begin());
+ uintptr_t end = reinterpret_cast<uintptr_t>(heap_->End());
+ uintptr_t obj = reinterpret_cast<uintptr_t>(o);
+ return o == nullptr || (begin <= obj && obj < end);
+}
+
+void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
+ bool is_static_unused) const {
+ mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
+ DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
+ mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
+ copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
+}
+
+void PatchOat::PatchVisitor::operator() (mirror::Class* cls, mirror::Reference* ref) const {
+ MemberOffset off = mirror::Reference::ReferentOffset();
+ mirror::Object* referent = ref->GetReferent();
+ DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
+ mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
+ copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
+}
+
+mirror::Object* PatchOat::RelocatedCopyOf(mirror::Object* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
+ DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
+ uintptr_t heap_off =
+ reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
+ DCHECK_LT(heap_off, image_->Size());
+ return reinterpret_cast<mirror::Object*>(image_->Begin() + heap_off);
+}
+
+mirror::Object* PatchOat::RelocatedAddressOf(mirror::Object* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ } else {
+ return reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj) + delta_);
+ }
+}
+
+// Called by BitmapCallback
+void PatchOat::VisitObject(mirror::Object* object) {
+ mirror::Object* copy = RelocatedCopyOf(object);
+ CHECK(copy != nullptr);
+ if (kUseBakerOrBrooksReadBarrier) {
+ object->AssertReadBarrierPointer();
+ if (kUseBrooksReadBarrier) {
+ mirror::Object* moved_to = RelocatedAddressOf(object);
+ copy->SetReadBarrierPointer(moved_to);
+ DCHECK_EQ(copy->GetReadBarrierPointer(), moved_to);
+ }
+ }
+ PatchOat::PatchVisitor visitor(this, copy);
+ object->VisitReferences<true, kVerifyNone>(visitor, visitor);
+ if (object->IsArtMethod<kVerifyNone>()) {
+ FixupMethod(static_cast<mirror::ArtMethod*>(object),
+ static_cast<mirror::ArtMethod*>(copy));
+ }
+}
+
+void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
+ // Just update the entry points if it looks like we should.
+ // TODO: sanity check all the pointers' values
+ uintptr_t portable = reinterpret_cast<uintptr_t>(
+ object->GetEntryPointFromPortableCompiledCode<kVerifyNone>());
+ if (portable != 0) {
+ copy->SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(portable + delta_));
+ }
+ uintptr_t quick= reinterpret_cast<uintptr_t>(
+ object->GetEntryPointFromQuickCompiledCode<kVerifyNone>());
+ if (quick != 0) {
+ copy->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(quick + delta_));
+ }
+ uintptr_t interpreter = reinterpret_cast<uintptr_t>(
+ object->GetEntryPointFromInterpreter<kVerifyNone>());
+ if (interpreter != 0) {
+ copy->SetEntryPointFromInterpreter(
+ reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_));
+ }
+
+ uintptr_t native_method = reinterpret_cast<uintptr_t>(object->GetNativeMethod());
+ if (native_method != 0) {
+ copy->SetNativeMethod(reinterpret_cast<void*>(native_method + delta_));
+ }
+
+ uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(object->GetNativeGcMap());
+ if (native_gc_map != 0) {
+ copy->SetNativeGcMap(reinterpret_cast<uint8_t*>(native_gc_map + delta_));
+ }
+}
+
+bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogger* timings) {
+ CHECK(input_oat != nullptr);
+ CHECK(output_oat != nullptr);
+ CHECK_GE(input_oat->Fd(), 0);
+ CHECK_GE(output_oat->Fd(), 0);
+ TimingLogger::ScopedTiming t("Setup Oat File Patching", timings);
+
+ std::string error_msg;
+ std::unique_ptr<ElfFile> elf(ElfFile::Open(const_cast<File*>(input_oat),
+ PROT_READ | PROT_WRITE, MAP_PRIVATE, &error_msg));
+ if (elf.get() == nullptr) {
+ LOG(ERROR) << "unable to open oat file " << input_oat->GetPath() << " : " << error_msg;
+ return false;
+ }
+
+ PatchOat p(elf.release(), delta, timings);
+ t.NewTiming("Patch Oat file");
+ if (!p.PatchElf()) {
+ return false;
+ }
+
+ t.NewTiming("Writing oat file");
+ if (!p.WriteElf(output_oat)) {
+ return false;
+ }
+ return true;
+}
+
+bool PatchOat::CheckOatFile() {
+ Elf32_Shdr* patches_sec = oat_file_->FindSectionByName(".oat_patches");
+ if (patches_sec == nullptr) {
+ return false;
+ }
+ if (patches_sec->sh_type != SHT_OAT_PATCH) {
+ return false;
+ }
+ uintptr_t* patches = reinterpret_cast<uintptr_t*>(oat_file_->Begin() + patches_sec->sh_offset);
+ uintptr_t* patches_end = patches + (patches_sec->sh_size/sizeof(uintptr_t));
+ Elf32_Shdr* oat_data_sec = oat_file_->FindSectionByName(".rodata");
+ Elf32_Shdr* oat_text_sec = oat_file_->FindSectionByName(".text");
+ if (oat_data_sec == nullptr) {
+ return false;
+ }
+ if (oat_text_sec == nullptr) {
+ return false;
+ }
+ if (oat_text_sec->sh_offset <= oat_data_sec->sh_offset) {
+ return false;
+ }
+
+ for (; patches < patches_end; patches++) {
+ if (oat_text_sec->sh_size <= *patches) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool PatchOat::PatchElf() {
+ TimingLogger::ScopedTiming t("Fixup Elf Headers", timings_);
+ // Fixup Phdr's
+ for (unsigned int i = 0; i < oat_file_->GetProgramHeaderNum(); i++) {
+ Elf32_Phdr& hdr = oat_file_->GetProgramHeader(i);
+ if (hdr.p_vaddr != 0) {
+ hdr.p_vaddr += delta_;
+ }
+ if (hdr.p_paddr != 0) {
+ hdr.p_paddr += delta_;
+ }
+ }
+ // Fixup Shdr's
+ for (unsigned int i = 0; i < oat_file_->GetSectionHeaderNum(); i++) {
+ Elf32_Shdr& hdr = oat_file_->GetSectionHeader(i);
+ if (hdr.sh_addr != 0) {
+ hdr.sh_addr += delta_;
+ }
+ }
+
+ // Fixup Dynamics.
+ for (Elf32_Word i = 0; i < oat_file_->GetDynamicNum(); i++) {
+ Elf32_Dyn& dyn = oat_file_->GetDynamic(i);
+ if (IsDynamicSectionPointer(dyn.d_tag, oat_file_->GetHeader().e_machine)) {
+ dyn.d_un.d_ptr += delta_;
+ }
+ }
+
+ t.NewTiming("Fixup Elf Symbols");
+ // Fixup dynsym
+ Elf32_Shdr* dynsym_sec = oat_file_->FindSectionByName(".dynsym");
+ CHECK(dynsym_sec != nullptr);
+ if (!PatchSymbols(dynsym_sec)) {
+ return false;
+ }
+
+ // Fixup symtab
+ Elf32_Shdr* symtab_sec = oat_file_->FindSectionByName(".symtab");
+ if (symtab_sec != nullptr) {
+ if (!PatchSymbols(symtab_sec)) {
+ return false;
+ }
+ }
+
+ t.NewTiming("Fixup Elf Text Section");
+ // Fixup text
+ if (!PatchTextSection()) {
+ return false;
+ }
+
+ return true;
+}
+
+bool PatchOat::PatchSymbols(Elf32_Shdr* section) {
+ Elf32_Sym* syms = reinterpret_cast<Elf32_Sym*>(oat_file_->Begin() + section->sh_offset);
+ const Elf32_Sym* last_sym =
+ reinterpret_cast<Elf32_Sym*>(oat_file_->Begin() + section->sh_offset + section->sh_size);
+ CHECK_EQ(section->sh_size % sizeof(Elf32_Sym), 0u)
+ << "Symtab section size is not multiple of symbol size";
+ for (; syms < last_sym; syms++) {
+ uint8_t sttype = ELF32_ST_TYPE(syms->st_info);
+ Elf32_Word shndx = syms->st_shndx;
+ if (shndx != SHN_ABS && shndx != SHN_COMMON && shndx != SHN_UNDEF &&
+ (sttype == STT_FUNC || sttype == STT_OBJECT)) {
+ CHECK_NE(syms->st_value, 0u);
+ syms->st_value += delta_;
+ }
+ }
+ return true;
+}
+
+bool PatchOat::PatchTextSection() {
+ Elf32_Shdr* patches_sec = oat_file_->FindSectionByName(".oat_patches");
+ if (patches_sec == nullptr) {
+ LOG(INFO) << ".oat_patches section not found. Aborting patch";
+ return false;
+ }
+ DCHECK(CheckOatFile()) << "Oat file invalid";
+ CHECK_EQ(patches_sec->sh_type, SHT_OAT_PATCH) << "Unexpected type of .oat_patches";
+ uintptr_t* patches = reinterpret_cast<uintptr_t*>(oat_file_->Begin() + patches_sec->sh_offset);
+ uintptr_t* patches_end = patches + (patches_sec->sh_size/sizeof(uintptr_t));
+ Elf32_Shdr* oat_text_sec = oat_file_->FindSectionByName(".text");
+ CHECK(oat_text_sec != nullptr);
+ byte* to_patch = oat_file_->Begin() + oat_text_sec->sh_offset;
+ uintptr_t to_patch_end = reinterpret_cast<uintptr_t>(to_patch) + oat_text_sec->sh_size;
+
+ for (; patches < patches_end; patches++) {
+ CHECK_LT(*patches, oat_text_sec->sh_size) << "Bad Patch";
+ uint32_t* patch_loc = reinterpret_cast<uint32_t*>(to_patch + *patches);
+ CHECK_LT(reinterpret_cast<uintptr_t>(patch_loc), to_patch_end);
+ *patch_loc += delta_;
+ }
+
+ return true;
+}
+
+static int orig_argc;
+static char** orig_argv;
+
+static std::string CommandLine() {
+ std::vector<std::string> command;
+ for (int i = 0; i < orig_argc; ++i) {
+ command.push_back(orig_argv[i]);
+ }
+ return Join(command, ' ');
+}
+
+static void UsageErrorV(const char* fmt, va_list ap) {
+ std::string error;
+ StringAppendV(&error, fmt, ap);
+ LOG(ERROR) << error;
+}
+
+static void UsageError(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+}
+
+static void Usage(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+
+ UsageError("Command: %s", CommandLine().c_str());
+ UsageError("Usage: patchoat [options]...");
+ UsageError("");
+ UsageError(" --instruction-set=<isa>: Specifies the instruction set the patched code is");
+ UsageError(" compiled for. Required if you use --input-oat-location");
+ UsageError("");
+ UsageError(" --input-oat-file=<file.oat>: Specifies the exact filename of the oat file to be");
+ UsageError(" patched.");
+ UsageError("");
+ UsageError(" --input-oat-fd=<file-descriptor>: Specifies the file-descriptor of the oat file");
+ UsageError(" to be patched.");
+ UsageError("");
+ UsageError(" --input-oat-location=<file.oat>: Specifies the 'location' to read the patched");
+ UsageError(" oat file from. If used one must also supply the --instruction-set");
+ UsageError("");
+ UsageError(" --input-image-location=<file.art>: Specifies the 'location' of the image file to");
+ UsageError(" be patched. If --instruction-set is not given it will use the instruction set");
+ UsageError(" extracted from the --input-oat-file.");
+ UsageError("");
+ UsageError(" --output-oat-file=<file.oat>: Specifies the exact file to write the patched oat");
+ UsageError(" file to.");
+ UsageError("");
+ UsageError(" --output-oat-location=<file.oat>: Specifies the 'location' to write the patched");
+ UsageError(" oat file to. If used one must also specify the --instruction-set");
+ UsageError("");
+ UsageError(" --output-oat-fd=<file-descriptor>: Specifies the file-descriptor to write the");
+ UsageError(" the patched oat file to.");
+ UsageError("");
+ UsageError(" --output-image-file=<file.art>: Specifies the exact file to write the patched");
+ UsageError(" image file to.");
+ UsageError("");
+ UsageError(" --output-image-fd=<file-descriptor>: Specifies the file-descriptor to write the");
+ UsageError(" the patched image file to.");
+ UsageError("");
+ UsageError(" --output-image-location=<file.art>: Specifies the 'location' to write the patched");
+ UsageError(" image file to. If used one must also specify the --instruction-set");
+ UsageError("");
+ UsageError(" --orig-base-offset=<original-base-offset>: Specify the base offset the input file");
+ UsageError(" was compiled with. This is needed if one is specifying a --base-offset");
+ UsageError("");
+ UsageError(" --base-offset=<new-base-offset>: Specify the base offset we will repatch the");
+ UsageError(" given files to use. This requires that --orig-base-offset is also given.");
+ UsageError("");
+ UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
+ UsageError(" This value may be negative.");
+ UsageError("");
+ UsageError(" --patched-image-file=<file.art>: Use the same patch delta as was used to patch");
+ UsageError(" the given image file.");
+ UsageError("");
+ UsageError(" --patched-image-location=<file.art>: Use the same patch delta as was used to");
+ UsageError(" patch the given image location. If used one must also specify the");
+ UsageError(" --instruction-set flag.");
+ UsageError("");
+ UsageError(" --dump-timings: dump out patch timing information");
+ UsageError("");
+ UsageError(" --no-dump-timings: do not dump out patch timing information");
+ UsageError("");
+
+ exit(EXIT_FAILURE);
+}
+
+static bool ReadBaseDelta(const char* name, off_t* delta, std::string* error_msg) {
+ CHECK(name != nullptr);
+ CHECK(delta != nullptr);
+ std::unique_ptr<File> file;
+ if (OS::FileExists(name)) {
+ file.reset(OS::OpenFileForReading(name));
+ if (file.get() == nullptr) {
+ *error_msg = "Failed to open file %s for reading";
+ return false;
+ }
+ } else {
+ *error_msg = "File %s does not exist";
+ return false;
+ }
+ CHECK(file.get() != nullptr);
+ ImageHeader hdr;
+ if (sizeof(hdr) != file->Read(reinterpret_cast<char*>(&hdr), sizeof(hdr), 0)) {
+ *error_msg = "Failed to read file %s";
+ return false;
+ }
+ if (!hdr.IsValid()) {
+ *error_msg = "%s does not contain a valid image header.";
+ return false;
+ }
+ *delta = hdr.GetPatchDelta();
+ return true;
+}
+
+static File* CreateOrOpen(const char* name, bool* created) {
+ if (OS::FileExists(name)) {
+ *created = false;
+ return OS::OpenFileReadWrite(name);
+ } else {
+ *created = true;
+ return OS::CreateEmptyFile(name);
+ }
+}
+
+// Command-line entry point for the patchoat tool. Parses the arguments,
+// validates the input/output/offset combinations, resolves locations to file
+// names, computes the relocation delta, opens the requested files and
+// dispatches to the matching PatchOat::Patch overload.
+// Returns EXIT_SUCCESS on success, EXIT_FAILURE otherwise.
+static int patchoat(int argc, char **argv) {
+  InitLogging(argv);  // Must only be called once.
+  const bool debug = kIsDebugBuild;
+  orig_argc = argc;
+  orig_argv = argv;
+  TimingLogger timings("patcher", false, false);
+
+  // Skip over the command name.
+  argv++;
+  argc--;
+
+  if (argc == 0) {
+    Usage("No arguments specified");
+  }
+
+  timings.StartTiming("Patchoat");
+
+  // cmd line args
+  bool isa_set = false;
+  InstructionSet isa = kNone;
+  std::string input_oat_filename;
+  std::string input_oat_location;
+  int input_oat_fd = -1;
+  bool have_input_oat = false;
+  std::string input_image_location;
+  std::string output_oat_filename;
+  std::string output_oat_location;
+  int output_oat_fd = -1;
+  bool have_output_oat = false;
+  std::string output_image_filename;
+  std::string output_image_location;
+  int output_image_fd = -1;
+  bool have_output_image = false;
+  uintptr_t base_offset = 0;
+  bool base_offset_set = false;
+  uintptr_t orig_base_offset = 0;
+  bool orig_base_offset_set = false;
+  off_t base_delta = 0;
+  bool base_delta_set = false;
+  std::string patched_image_filename;
+  std::string patched_image_location;
+  bool dump_timings = kIsDebugBuild;
+
+  for (int i = 0; i < argc; i++) {
+    const StringPiece option(argv[i]);
+    const bool log_options = false;
+    if (log_options) {
+      LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
+    }
+    // TODO: GetInstructionSetFromString shouldn't LOG(FATAL).
+    if (option.starts_with("--instruction-set=")) {
+      isa_set = true;
+      const char* isa_str = option.substr(strlen("--instruction-set=")).data();
+      if (!strcmp("arm", isa_str)) {
+        isa = kArm;
+      } else if (!strcmp("arm64", isa_str)) {
+        isa = kArm64;
+      } else if (!strcmp("x86", isa_str)) {
+        isa = kX86;
+      } else if (!strcmp("x86_64", isa_str)) {
+        isa = kX86_64;
+      } else if (!strcmp("mips", isa_str)) {
+        isa = kMips;
+      } else {
+        Usage("Unknown instruction set %s", isa_str);
+      }
+    } else if (option.starts_with("--input-oat-location=")) {
+      if (have_input_oat) {
+        Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
+      }
+      have_input_oat = true;
+      input_oat_location = option.substr(strlen("--input-oat-location=")).data();
+    } else if (option.starts_with("--input-oat-file=")) {
+      if (have_input_oat) {
+        Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
+      }
+      have_input_oat = true;
+      input_oat_filename = option.substr(strlen("--input-oat-file=")).data();
+    } else if (option.starts_with("--input-oat-fd=")) {
+      if (have_input_oat) {
+        Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
+      }
+      have_input_oat = true;
+      const char* oat_fd_str = option.substr(strlen("--input-oat-fd=")).data();
+      if (!ParseInt(oat_fd_str, &input_oat_fd)) {
+        Usage("Failed to parse --input-oat-fd argument '%s' as an integer", oat_fd_str);
+      }
+      if (input_oat_fd < 0) {
+        Usage("--input-oat-fd passed a negative value %d", input_oat_fd);
+      }
+    } else if (option.starts_with("--input-image-location=")) {
+      input_image_location = option.substr(strlen("--input-image-location=")).data();
+    } else if (option.starts_with("--output-oat-location=")) {
+      if (have_output_oat) {
+        Usage("Only one of --output-oat-file, --output-oat-location and --output-oat-fd may "
+              "be used.");
+      }
+      have_output_oat = true;
+      output_oat_location = option.substr(strlen("--output-oat-location=")).data();
+    } else if (option.starts_with("--output-oat-file=")) {
+      if (have_output_oat) {
+        Usage("Only one of --output-oat-file, --output-oat-location and --output-oat-fd may "
+              "be used.");
+      }
+      have_output_oat = true;
+      output_oat_filename = option.substr(strlen("--output-oat-file=")).data();
+    } else if (option.starts_with("--output-oat-fd=")) {
+      if (have_output_oat) {
+        Usage("Only one of --output-oat-file, --output-oat-location and --output-oat-fd may "
+              "be used.");
+      }
+      have_output_oat = true;
+      const char* oat_fd_str = option.substr(strlen("--output-oat-fd=")).data();
+      if (!ParseInt(oat_fd_str, &output_oat_fd)) {
+        Usage("Failed to parse --output-oat-fd argument '%s' as an integer", oat_fd_str);
+      }
+      if (output_oat_fd < 0) {
+        Usage("--output-oat-fd passed a negative value %d", output_oat_fd);
+      }
+    } else if (option.starts_with("--output-image-location=")) {
+      if (have_output_image) {
+        Usage("Only one of --output-image-file, --output-image-location and --output-image-fd may "
+              "be used.");
+      }
+      have_output_image = true;
+      output_image_location = option.substr(strlen("--output-image-location=")).data();
+    } else if (option.starts_with("--output-image-file=")) {
+      if (have_output_image) {
+        Usage("Only one of --output-image-file, --output-image-location and --output-image-fd may "
+              "be used.");
+      }
+      have_output_image = true;
+      output_image_filename = option.substr(strlen("--output-image-file=")).data();
+    } else if (option.starts_with("--output-image-fd=")) {
+      if (have_output_image) {
+        Usage("Only one of --output-image-file, --output-image-location and --output-image-fd "
+              "may be used.");
+      }
+      have_output_image = true;
+      const char* image_fd_str = option.substr(strlen("--output-image-fd=")).data();
+      if (!ParseInt(image_fd_str, &output_image_fd)) {
+        Usage("Failed to parse --output-image-fd argument '%s' as an integer", image_fd_str);
+      }
+      if (output_image_fd < 0) {
+        Usage("--output-image-fd passed a negative value %d", output_image_fd);
+      }
+    } else if (option.starts_with("--orig-base-offset=")) {
+      const char* orig_base_offset_str = option.substr(strlen("--orig-base-offset=")).data();
+      orig_base_offset_set = true;
+      if (!ParseUint(orig_base_offset_str, &orig_base_offset)) {
+        Usage("Failed to parse --orig-base-offset argument '%s' as an uintptr_t",
+              orig_base_offset_str);
+      }
+    } else if (option.starts_with("--base-offset=")) {
+      const char* base_offset_str = option.substr(strlen("--base-offset=")).data();
+      base_offset_set = true;
+      if (!ParseUint(base_offset_str, &base_offset)) {
+        Usage("Failed to parse --base-offset argument '%s' as an uintptr_t", base_offset_str);
+      }
+    } else if (option.starts_with("--base-offset-delta=")) {
+      const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
+      base_delta_set = true;
+      if (!ParseInt(base_delta_str, &base_delta)) {
+        Usage("Failed to parse --base-offset-delta argument '%s' as an off_t", base_delta_str);
+      }
+    } else if (option.starts_with("--patched-image-location=")) {
+      patched_image_location = option.substr(strlen("--patched-image-location=")).data();
+    } else if (option.starts_with("--patched-image-file=")) {
+      patched_image_filename = option.substr(strlen("--patched-image-file=")).data();
+    } else if (option == "--dump-timings") {
+      dump_timings = true;
+    } else if (option == "--no-dump-timings") {
+      dump_timings = false;
+    } else {
+      Usage("Unknown argument %s", option.data());
+    }
+  }
+
+  {
+    // Only 1 of these may be set.
+    uint32_t cnt = 0;
+    cnt += (base_delta_set) ? 1 : 0;
+    cnt += (base_offset_set && orig_base_offset_set) ? 1 : 0;
+    cnt += (!patched_image_filename.empty()) ? 1 : 0;
+    cnt += (!patched_image_location.empty()) ? 1 : 0;
+    if (cnt > 1) {
+      Usage("Only one of --base-offset/--orig-base-offset, --base-offset-delta, "
+            "--patched-image-file or --patched-image-location may be used.");
+    } else if (cnt == 0) {
+      Usage("Must specify --base-offset-delta, --base-offset and --orig-base-offset, "
+            "--patched-image-location or --patched-image-file");
+    }
+  }
+
+  if (have_input_oat != have_output_oat) {
+    Usage("Either both input and output oat must be supplied or neither must be.");
+  }
+
+  if ((!input_image_location.empty()) != have_output_image) {
+    Usage("Either both input and output image must be supplied or neither must be.");
+  }
+
+  // We know we have both the input and output so rename for clarity.
+  bool have_image_files = have_output_image;
+  bool have_oat_files = have_output_oat;
+
+  if (!have_oat_files && !have_image_files) {
+    Usage("Must be patching either an oat or an image file or both.");
+  }
+
+  if (!have_oat_files && !isa_set) {
+    Usage("Must include ISA if patching an image file without an oat file.");
+  }
+
+  // Resolve any *-location arguments to concrete file names (requires the ISA).
+  if (!input_oat_location.empty()) {
+    if (!isa_set) {
+      Usage("specifying a location requires specifying an instruction set");
+    }
+    input_oat_filename = GetSystemImageFilename(input_oat_location.c_str(), isa);
+    if (debug) {
+      LOG(INFO) << "Using input-oat-file " << input_oat_filename;
+    }
+  }
+  if (!output_oat_location.empty()) {
+    if (!isa_set) {
+      Usage("specifying a location requires specifying an instruction set");
+    }
+    output_oat_filename = GetSystemImageFilename(output_oat_location.c_str(), isa);
+    if (debug) {
+      LOG(INFO) << "Using output-oat-file " << output_oat_filename;
+    }
+  }
+  if (!output_image_location.empty()) {
+    if (!isa_set) {
+      Usage("specifying a location requires specifying an instruction set");
+    }
+    output_image_filename = GetSystemImageFilename(output_image_location.c_str(), isa);
+    if (debug) {
+      LOG(INFO) << "Using output-image-file " << output_image_filename;
+    }
+  }
+  if (!patched_image_location.empty()) {
+    if (!isa_set) {
+      Usage("specifying a location requires specifying an instruction set");
+    }
+    patched_image_filename = GetSystemImageFilename(patched_image_location.c_str(), isa);
+    if (debug) {
+      LOG(INFO) << "Using patched-image-file " << patched_image_filename;
+    }
+  }
+
+  // Determine the delta from whichever of the mutually-exclusive options was given.
+  if (!base_delta_set) {
+    if (orig_base_offset_set && base_offset_set) {
+      base_delta_set = true;
+      base_delta = base_offset - orig_base_offset;
+    } else if (!patched_image_filename.empty()) {
+      base_delta_set = true;
+      std::string error_msg;
+      if (!ReadBaseDelta(patched_image_filename.c_str(), &base_delta, &error_msg)) {
+        Usage(error_msg.c_str(), patched_image_filename.c_str());
+      }
+    } else {
+      if (base_offset_set) {
+        Usage("Unable to determine original base offset.");
+      } else {
+        Usage("Must supply a desired new offset or delta.");
+      }
+    }
+  }
+
+  if (!IsAligned<kPageSize>(base_delta)) {
+    Usage("Base offset/delta must be aligned to a pagesize (0x%08x) boundary.", kPageSize);
+  }
+
+  // Do we need to cleanup output files if we fail?
+  bool new_image_out = false;
+  bool new_oat_out = false;
+
+  std::unique_ptr<File> input_oat;
+  std::unique_ptr<File> output_oat;
+  std::unique_ptr<File> output_image;
+
+  if (have_image_files) {
+    CHECK(!input_image_location.empty());
+
+    if (output_image_fd != -1) {
+      output_image.reset(new File(output_image_fd, output_image_filename));
+    } else {
+      CHECK(!output_image_filename.empty());
+      output_image.reset(CreateOrOpen(output_image_filename.c_str(), &new_image_out));
+    }
+  } else {
+    CHECK(output_image_filename.empty() && output_image_fd == -1 && input_image_location.empty());
+  }
+
+  if (have_oat_files) {
+    if (input_oat_fd != -1) {
+      input_oat.reset(new File(input_oat_fd, input_oat_filename));
+    } else {
+      CHECK(!input_oat_filename.empty());
+      input_oat.reset(OS::OpenFileForReading(input_oat_filename.c_str()));
+    }
+
+    if (output_oat_fd != -1) {
+      output_oat.reset(new File(output_oat_fd, output_oat_filename));
+    } else {
+      CHECK(!output_oat_filename.empty());
+      output_oat.reset(CreateOrOpen(output_oat_filename.c_str(), &new_oat_out));
+    }
+  }
+
+  // On failure, delete any output files this invocation created (but never
+  // pre-existing ones); always finish and optionally dump the timings.
+  auto cleanup = [&output_image_filename, &output_oat_filename,
+                  &new_oat_out, &new_image_out, &timings, &dump_timings](bool success) {
+    timings.EndTiming();
+    if (!success) {
+      if (new_oat_out) {
+        CHECK(!output_oat_filename.empty());
+        unlink(output_oat_filename.c_str());
+      }
+      if (new_image_out) {
+        CHECK(!output_image_filename.empty());
+        unlink(output_image_filename.c_str());
+      }
+    }
+    if (dump_timings) {
+      LOG(INFO) << Dumpable<TimingLogger>(timings);
+    }
+  };
+
+  if (debug) {
+    LOG(INFO) << "moving offset by " << base_delta << " (0x" << std::hex << base_delta << ") bytes";
+  }
+
+  bool ret;
+  if (have_image_files && have_oat_files) {
+    TimingLogger::ScopedTiming pt("patch image and oat", &timings);
+    ret = PatchOat::Patch(input_oat.get(), input_image_location, base_delta,
+                          output_oat.get(), output_image.get(), isa, &timings);
+  } else if (have_oat_files) {
+    TimingLogger::ScopedTiming pt("patch oat", &timings);
+    ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings);
+  } else {
+    TimingLogger::ScopedTiming pt("patch image", &timings);
+    CHECK(have_image_files);
+    ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings);
+  }
+  cleanup(ret);
+  return (ret) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+} // namespace art
+
+int main(int argc, char **argv) {  // Thin wrapper: all work happens in art::patchoat().
+  return art::patchoat(argc, argv);  // Propagates EXIT_SUCCESS/EXIT_FAILURE to the shell.
+}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
new file mode 100644
index 0000000000..a63e6f44b8
--- /dev/null
+++ b/patchoat/patchoat.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_PATCHOAT_PATCHOAT_H_
+#define ART_PATCHOAT_PATCHOAT_H_
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "instruction_set.h"
+#include "os.h"
+#include "elf_file.h"
+#include "elf_utils.h"
+#include "gc/accounting/space_bitmap.h"
+#include "gc/heap.h"
+#include "utils.h"
+
+namespace art {
+
+class ImageHeader;
+
+namespace mirror {
+class Object;
+class Reference;
+class Class;
+class ArtMethod;
+}; // namespace mirror
+
+class PatchOat {
+ public:
+  static bool Patch(File* oat_in, off_t delta, File* oat_out, TimingLogger* timings);  // Patch only an oat file.
+
+  static bool Patch(const std::string& art_location, off_t delta, File* art_out, InstructionSet isa,  // Patch only an image.
+                    TimingLogger* timings);
+
+  static bool Patch(const File* oat_in, const std::string& art_location,  // Patch an oat file and its image together.
+                    off_t delta, File* oat_out, File* art_out, InstructionSet isa,
+                    TimingLogger* timings);
+
+ private:
+  // Takes ownership only of the ElfFile. All other pointers are only borrowed.
+  PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings)  // Oat-only patching.
+      : oat_file_(oat_file), delta_(delta), timings_(timings) {}
+  PatchOat(MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,  // Image-only patching.
+           MemMap* heap, off_t delta, TimingLogger* timings)
+      : image_(image), bitmap_(bitmap), heap_(heap),
+        delta_(delta), timings_(timings) {}
+  PatchOat(ElfFile* oat_file, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,  // Combined oat+image patching.
+           MemMap* heap, off_t delta, TimingLogger* timings)
+      : oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap),
+        delta_(delta), timings_(timings) {}
+  ~PatchOat() {}
+
+  static void BitmapCallback(mirror::Object* obj, void* arg)  // Adapter for bitmap walks: forwards each object to VisitObject.
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
+  }
+
+  void VisitObject(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  bool InHeap(mirror::Object*);
+
+  bool CheckOatFile();
+
+  // Patches oat in place, modifying the oat_file given to the constructor.
+  bool PatchElf();
+  bool PatchTextSection();
+  bool PatchSymbols(Elf32_Shdr* section);
+
+  bool PatchImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool WriteElf(File* out);
+  bool WriteImage(File* out);
+
+  mirror::Object* RelocatedCopyOf(mirror::Object*);
+  mirror::Object* RelocatedAddressOf(mirror::Object* obj);
+
+  // Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
+  // change the heap.
+  class PatchVisitor {
+   public:
+    PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
+    ~PatchVisitor() {}
+    void operator() (mirror::Object* obj, MemberOffset off, bool b) const
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+    // For reference classes.
+    void operator() (mirror::Class* cls, mirror::Reference* ref) const
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+   private:
+    PatchOat* patcher_;
+    mirror::Object* copy_;
+  };
+
+  // The elf file we are patching.
+  std::unique_ptr<ElfFile> oat_file_;
+  // A mmap of the image we are patching. This is modified.
+  const MemMap* image_;
+  // Bitmap used to visit the objects of the image being patched. This is not modified.
+  gc::accounting::ContinuousSpaceBitmap* bitmap_;
+  // The heap we are patching. This is not modified.
+  const MemMap* heap_;
+  // The amount we are changing the offset by.
+  off_t delta_;
+  TimingLogger* timings_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
+};
+
+} // namespace art
+#endif // ART_PATCHOAT_PATCHOAT_H_
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index 45d7356f76..997a2594c0 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -26,9 +26,11 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call PLT_SYMBOL(artFindNativeMethod) // (Thread*)
- UNDO_SETUP_GOT
- addl LITERAL(8), %esp // restore the stack
- CFI_ADJUST_CFA_OFFSET(-12)
+ addl LITERAL(4), %esp // remove argument
+ CFI_ADJUST_CFA_OFFSET(-4)
+ UNDO_SETUP_GOT // pop ebx
+ addl LITERAL(4), %esp // remove padding
+ CFI_ADJUST_CFA_OFFSET(-4)
testl %eax, %eax // check if returned method code is null
jz .Lno_native_code_found // if null, jump to return to handle
jmp *%eax // otherwise, tail call to intended method
diff --git a/runtime/arch/x86_64/jni_entrypoints_x86_64.S b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
index 10f39b7500..d668797ba4 100644
--- a/runtime/arch/x86_64/jni_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
@@ -23,7 +23,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r9 // Arg.
PUSH r8 // Arg.
- PUSH rdi //
+ PUSH rdi // JniEnv.
PUSH rsi // Arg.
PUSH rdx // Arg.
PUSH rcx // Arg.
@@ -40,7 +40,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub
movq %xmm6, 48(%rsp)
movq %xmm7, 56(%rsp)
// prepare call
- movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
+ movq %gs:THREAD_SELF_OFFSET, %rdi // RDI := Thread::Current()
// call
call PLT_SYMBOL(artFindNativeMethod) // (Thread*)
// restore arguments
@@ -57,7 +57,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub
POP rcx // Arg.
POP rdx // Arg.
POP rsi // Arg.
- POP rdi //
+ POP rdi // JniEnv.
POP r8 // Arg.
POP r9 // Arg.
testq %rax, %rax // check if returned method code is null
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index f19c353f18..6161aff647 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -74,18 +74,13 @@ class AllocRecordStackTraceElement {
}
mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = reinterpret_cast<mirror::ArtMethod*>(
- Thread::Current()->DecodeJObject(method_));
- return method;
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ return soa.DecodeMethod(method_);
}
void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- JNIEnv* env = soa.Env();
- if (method_ != nullptr) {
- env->DeleteWeakGlobalRef(method_);
- }
- method_ = env->NewWeakGlobalRef(soa.AddLocalReference<jobject>(m));
+ method_ = soa.EncodeMethod(m);
}
uint32_t DexPc() const {
@@ -97,27 +92,46 @@ class AllocRecordStackTraceElement {
}
private:
- jobject method_; // This is a weak global.
+ jmethodID method_;
uint32_t dex_pc_;
};
+jobject Dbg::TypeCache::Add(mirror::Class* t) {  // Returns a weak global ref for t, reusing a cached one when possible.
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  int32_t hash_code = t->IdentityHashCode();  // Cache key; identity hashes may collide.
+  auto range = objects_.equal_range(hash_code);  // Scan the whole bucket for this hash.
+  for (auto it = range.first; it != range.second; ++it) {
+    if (soa.Decode<mirror::Class*>(it->second) == t) {
+      // Found a matching weak global, return it.
+      return it->second;
+    }
+  }
+  JNIEnv* env = soa.Env();
+  const jobject local_ref = soa.AddLocalReference<jobject>(t);
+  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
+  env->DeleteLocalRef(local_ref);  // Only the weak global survives this call.
+  objects_.insert(std::make_pair(hash_code, weak_global));  // Cache for future lookups.
+  return weak_global;
+}
+
+void Dbg::TypeCache::Clear() {  // Releases every cached weak global and empties the map.
+  ScopedObjectAccess soa(Thread::Current());
+  for (const auto& p : objects_) {
+    soa.Vm()->DeleteWeakGlobalRef(soa.Self(), p.second);  // Free the JNI weak global ref.
+  }
+  objects_.clear();
+}
+
class AllocRecord {
public:
AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}
mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Class* type = reinterpret_cast<mirror::Class*>(
- Thread::Current()->DecodeJObject(type_));
- return type;
+ return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
}
void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ScopedObjectAccessUnchecked soa(Thread::Current());
- JNIEnv* env = soa.Env();
- if (type_ != nullptr) {
- env->DeleteWeakGlobalRef(type_);
- }
- type_ = env->NewWeakGlobalRef(soa.AddLocalReference<jobject>(t));
+ type_ = Dbg::GetTypeCache().Add(t);
}
size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -156,27 +170,47 @@ class AllocRecord {
AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have NULL method.
};
-struct Breakpoint {
- // The location of this breakpoint.
- mirror::ArtMethod* method;
- uint32_t dex_pc;
+class Breakpoint {
+ public:
+ Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ method_ = soa.EncodeMethod(method);
+ }
- // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
- bool need_full_deoptimization;
+ Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : method_(nullptr), dex_pc_(other.dex_pc_),
+ need_full_deoptimization_(other.need_full_deoptimization_) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ method_ = soa.EncodeMethod(other.Method());
+ }
- Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
- : method(method), dex_pc(dex_pc), need_full_deoptimization(need_full_deoptimization) {}
+ mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ return soa.DecodeMethod(method_);
+ }
- void VisitRoots(RootCallback* callback, void* arg) {
- if (method != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
- }
+ uint32_t DexPc() const {
+ return dex_pc_;
}
+
+ bool NeedFullDeoptimization() const {
+ return need_full_deoptimization_;
+ }
+
+ private:
+ // The location of this breakpoint.
+ jmethodID method_;
+ uint32_t dex_pc_;
+
+ // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
+ bool need_full_deoptimization_;
};
-static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
+static std::ostream& operator<<(std::ostream& os, Breakpoint& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc);
+ os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
return os;
}
@@ -274,6 +308,7 @@ AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
+Dbg::TypeCache Dbg::type_cache_;
// Deoptimization support.
Mutex* Dbg::deoptimization_lock_ = nullptr;
@@ -334,18 +369,12 @@ void SingleStepControl::Clear() {
dex_pcs.clear();
}
-void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) {
- if (method != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
- }
-}
-
static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
LOCKS_EXCLUDED(Locks::breakpoint_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
- if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) {
+ if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
return true;
}
@@ -626,21 +655,6 @@ void Dbg::StartJdwp() {
}
}
-void Dbg::VisitRoots(RootCallback* callback, void* arg) {
- {
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
- for (Breakpoint& bp : gBreakpoints) {
- bp.VisitRoots(callback, arg);
- }
- }
- if (deoptimization_lock_ != nullptr) { // only true if the debugger is started.
- MutexLock mu(Thread::Current(), *deoptimization_lock_);
- for (DeoptimizationRequest& req : deoptimization_requests_) {
- req.VisitRoots(callback, arg);
- }
- }
-}
-
void Dbg::StopJdwp() {
// Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
Disposed();
@@ -2829,22 +2843,22 @@ size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
// Process request while all mutator threads are suspended.
void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- switch (request.kind) {
+ switch (request.GetKind()) {
case DeoptimizationRequest::kNothing:
LOG(WARNING) << "Ignoring empty deoptimization request.";
break;
case DeoptimizationRequest::kRegisterForEvent:
VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
- request.instrumentation_event);
- instrumentation->AddListener(&gDebugInstrumentationListener, request.instrumentation_event);
- instrumentation_events_ |= request.instrumentation_event;
+ request.InstrumentationEvent());
+ instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
+ instrumentation_events_ |= request.InstrumentationEvent();
break;
case DeoptimizationRequest::kUnregisterForEvent:
VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
- request.instrumentation_event);
+ request.InstrumentationEvent());
instrumentation->RemoveListener(&gDebugInstrumentationListener,
- request.instrumentation_event);
- instrumentation_events_ &= ~request.instrumentation_event;
+ request.InstrumentationEvent());
+ instrumentation_events_ &= ~request.InstrumentationEvent();
break;
case DeoptimizationRequest::kFullDeoptimization:
VLOG(jdwp) << "Deoptimize the world ...";
@@ -2857,17 +2871,17 @@ void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
VLOG(jdwp) << "Undeoptimize the world DONE";
break;
case DeoptimizationRequest::kSelectiveDeoptimization:
- VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " ...";
- instrumentation->Deoptimize(request.method);
- VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " DONE";
+ VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
+ instrumentation->Deoptimize(request.Method());
+ VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
break;
case DeoptimizationRequest::kSelectiveUndeoptimization:
- VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " ...";
- instrumentation->Undeoptimize(request.method);
- VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " DONE";
+ VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
+ instrumentation->Undeoptimize(request.Method());
+ VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
break;
default:
- LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
+ LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
break;
}
}
@@ -2884,8 +2898,8 @@ void Dbg::ProcessDelayedFullUndeoptimizations() {
MutexLock mu(Thread::Current(), *deoptimization_lock_);
while (delayed_full_undeoptimization_count_ > 0) {
DeoptimizationRequest req;
- req.kind = DeoptimizationRequest::kFullUndeoptimization;
- req.method = nullptr;
+ req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
+ req.SetMethod(nullptr);
RequestDeoptimizationLocked(req);
--delayed_full_undeoptimization_count_;
}
@@ -2894,7 +2908,7 @@ void Dbg::ProcessDelayedFullUndeoptimizations() {
}
void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
- if (req.kind == DeoptimizationRequest::kNothing) {
+ if (req.GetKind() == DeoptimizationRequest::kNothing) {
// Nothing to do.
return;
}
@@ -2903,35 +2917,35 @@ void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
}
void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
- switch (req.kind) {
+ switch (req.GetKind()) {
case DeoptimizationRequest::kRegisterForEvent: {
- DCHECK_NE(req.instrumentation_event, 0u);
- size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+ DCHECK_NE(req.InstrumentationEvent(), 0u);
+ size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
- req.instrumentation_event);
+ req.InstrumentationEvent());
if (*counter == 0) {
VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
- deoptimization_requests_.size(), req.instrumentation_event);
+ deoptimization_requests_.size(), req.InstrumentationEvent());
deoptimization_requests_.push_back(req);
}
*counter = *counter + 1;
break;
}
case DeoptimizationRequest::kUnregisterForEvent: {
- DCHECK_NE(req.instrumentation_event, 0u);
- size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+ DCHECK_NE(req.InstrumentationEvent(), 0u);
+ size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
- req.instrumentation_event);
+ req.InstrumentationEvent());
*counter = *counter - 1;
if (*counter == 0) {
VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
- deoptimization_requests_.size(), req.instrumentation_event);
+ deoptimization_requests_.size(), req.InstrumentationEvent());
deoptimization_requests_.push_back(req);
}
break;
}
case DeoptimizationRequest::kFullDeoptimization: {
- DCHECK(req.method == nullptr);
+ DCHECK(req.Method() == nullptr);
if (full_deoptimization_event_count_ == 0) {
VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
<< " for full deoptimization";
@@ -2941,7 +2955,7 @@ void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
break;
}
case DeoptimizationRequest::kFullUndeoptimization: {
- DCHECK(req.method == nullptr);
+ DCHECK(req.Method() == nullptr);
DCHECK_GT(full_deoptimization_event_count_, 0U);
--full_deoptimization_event_count_;
if (full_deoptimization_event_count_ == 0) {
@@ -2952,21 +2966,21 @@ void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
break;
}
case DeoptimizationRequest::kSelectiveDeoptimization: {
- DCHECK(req.method != nullptr);
+ DCHECK(req.Method() != nullptr);
VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
- << " for deoptimization of " << PrettyMethod(req.method);
+ << " for deoptimization of " << PrettyMethod(req.Method());
deoptimization_requests_.push_back(req);
break;
}
case DeoptimizationRequest::kSelectiveUndeoptimization: {
- DCHECK(req.method != nullptr);
+ DCHECK(req.Method() != nullptr);
VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
- << " for undeoptimization of " << PrettyMethod(req.method);
+ << " for undeoptimization of " << PrettyMethod(req.Method());
deoptimization_requests_.push_back(req);
break;
}
default: {
- LOG(FATAL) << "Unknown deoptimization request kind " << req.kind;
+ LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
break;
}
}
@@ -2990,7 +3004,7 @@ void Dbg::ManageDeoptimization() {
{
MutexLock mu(self, *deoptimization_lock_);
size_t req_index = 0;
- for (const DeoptimizationRequest& request : deoptimization_requests_) {
+ for (DeoptimizationRequest& request : deoptimization_requests_) {
VLOG(jdwp) << "Process deoptimization request #" << req_index++;
ProcessDeoptimizationRequest(request);
}
@@ -3021,9 +3035,9 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
}
static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
- for (const Breakpoint& breakpoint : gBreakpoints) {
- if (breakpoint.method == m) {
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (Breakpoint& breakpoint : gBreakpoints) {
+ if (breakpoint.Method() == m) {
return &breakpoint;
}
}
@@ -3035,7 +3049,7 @@ static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_
EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
if (kIsDebugBuild) {
for (const Breakpoint& breakpoint : gBreakpoints) {
- CHECK_EQ(need_full_deoptimization, breakpoint.need_full_deoptimization);
+ CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
}
if (need_full_deoptimization) {
// We should have deoptimized everything but not "selectively" deoptimized this method.
@@ -3065,18 +3079,18 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques
// inlined, we deoptimize everything; otherwise we deoptimize only this method.
need_full_deoptimization = IsMethodPossiblyInlined(self, m);
if (need_full_deoptimization) {
- req->kind = DeoptimizationRequest::kFullDeoptimization;
- req->method = nullptr;
+ req->SetKind(DeoptimizationRequest::kFullDeoptimization);
+ req->SetMethod(nullptr);
} else {
- req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
- req->method = m;
+ req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
+ req->SetMethod(m);
}
} else {
// There is at least one breakpoint for this method: we don't need to deoptimize.
- req->kind = DeoptimizationRequest::kNothing;
- req->method = nullptr;
+ req->SetKind(DeoptimizationRequest::kNothing);
+ req->SetMethod(nullptr);
- need_full_deoptimization = existing_breakpoint->need_full_deoptimization;
+ need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
SanityCheckExistingBreakpoints(m, need_full_deoptimization);
}
@@ -3088,15 +3102,14 @@ void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationReques
// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to undeoptimize.
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
+ MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
mirror::ArtMethod* m = FromMethodId(location->method_id);
DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-
- MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
bool need_full_deoptimization = false;
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
- if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
+ if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
- need_full_deoptimization = gBreakpoints[i].need_full_deoptimization;
+ need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
gBreakpoints.erase(gBreakpoints.begin() + i);
break;
@@ -3107,17 +3120,17 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ
// There is no more breakpoint on this method: we need to undeoptimize.
if (need_full_deoptimization) {
// This method required full deoptimization: we need to undeoptimize everything.
- req->kind = DeoptimizationRequest::kFullUndeoptimization;
- req->method = nullptr;
+ req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
+ req->SetMethod(nullptr);
} else {
// This method required selective deoptimization: we need to undeoptimize only that method.
- req->kind = DeoptimizationRequest::kSelectiveUndeoptimization;
- req->method = m;
+ req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
+ req->SetMethod(m);
}
} else {
// There is at least one breakpoint for this method: we don't need to undeoptimize.
- req->kind = DeoptimizationRequest::kNothing;
- req->method = nullptr;
+ req->SetKind(DeoptimizationRequest::kNothing);
+ req->SetMethod(nullptr);
SanityCheckExistingBreakpoints(m, need_full_deoptimization);
}
}
@@ -4253,8 +4266,10 @@ void Dbg::SetAllocTrackingEnabled(bool enabled) {
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
{
MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ LOG(INFO) << "Disabling alloc tracker";
delete[] recent_allocation_records_;
recent_allocation_records_ = NULL;
+ type_cache_.Clear();
}
}
}
@@ -4376,8 +4391,12 @@ class StringTable {
StringTable() {
}
- void Add(const char* s) {
- table_.insert(s);
+ void Add(const std::string& str) {
+ table_.insert(str);
+ }
+
+ void Add(const char* str) {
+ table_.insert(str);
}
size_t IndexOf(const char* s) const {
@@ -4476,9 +4495,7 @@ jbyteArray Dbg::GetRecentAllocations() {
int idx = HeadIndex();
while (count--) {
AllocRecord* record = &recent_allocation_records_[idx];
-
- class_names.Add(record->Type()->GetDescriptor().c_str());
-
+ class_names.Add(record->Type()->GetDescriptor());
for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
mirror::ArtMethod* m = record->StackElement(i)->Method();
if (m != NULL) {
@@ -4571,4 +4588,14 @@ jbyteArray Dbg::GetRecentAllocations() {
return result;
}
+mirror::ArtMethod* DeoptimizationRequest::Method() const {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ return soa.DecodeMethod(method_);
+}
+
+void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ method_ = soa.EncodeMethod(m);
+}
+
} // namespace art
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 1cf0b0c421..1d3668c1f6 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -23,6 +23,7 @@
#include <pthread.h>
+#include <map>
#include <set>
#include <string>
#include <vector>
@@ -130,7 +131,8 @@ struct SingleStepControl {
};
// TODO rename to InstrumentationRequest.
-struct DeoptimizationRequest {
+class DeoptimizationRequest {
+ public:
enum Kind {
kNothing, // no action.
kRegisterForEvent, // start listening for instrumentation event.
@@ -141,25 +143,63 @@ struct DeoptimizationRequest {
kSelectiveUndeoptimization // undeoptimize one method.
};
- DeoptimizationRequest() : kind(kNothing), instrumentation_event(0), method(nullptr) {}
+ DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
+
+ DeoptimizationRequest(const DeoptimizationRequest& other)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
+ // Create a new JNI global reference for the method.
+ SetMethod(other.Method());
+ }
+
+ mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Name 'Kind()' would collide with the above enum name.
+ Kind GetKind() const {
+ return kind_;
+ }
+
+ void SetKind(Kind kind) {
+ kind_ = kind;
+ }
+
+ uint32_t InstrumentationEvent() const {
+ return instrumentation_event_;
+ }
- void VisitRoots(RootCallback* callback, void* arg);
+ void SetInstrumentationEvent(uint32_t instrumentation_event) {
+ instrumentation_event_ = instrumentation_event;
+ }
- Kind kind;
+ private:
+ Kind kind_;
// TODO we could use a union to hold the instrumentation_event and the method since they
// respectively have sense only for kRegisterForEvent/kUnregisterForEvent and
// kSelectiveDeoptimization/kSelectiveUndeoptimization.
// Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
- uint32_t instrumentation_event;
+ uint32_t instrumentation_event_;
// Method for selective deoptimization.
- mirror::ArtMethod* method;
+ jmethodID method_;
};
class Dbg {
public:
+ class TypeCache {
+ public:
+ // Returns a weak global for the input type. Deduplicates.
+ jobject Add(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Clears the type cache and deletes all the weak global refs.
+ void Clear();
+
+ private:
+ std::multimap<int32_t, jobject> objects_;
+ };
+
static bool ParseJdwpOptions(const std::string& options);
static void SetJdwpAllowed(bool allowed);
@@ -555,6 +595,10 @@ class Dbg {
static void DdmSendHeapSegments(bool native)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static TypeCache& GetTypeCache() {
+ return type_cache_;
+ }
+
private:
static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
@@ -604,6 +648,9 @@ class Dbg {
static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
+ // Weak global type cache, TODO improve this.
+ static TypeCache type_cache_;
+
// Instrumentation event reference counters.
// TODO we could use an array instead of having all these dedicated counters. Instrumentation
// events are bits of a mask so we could convert them to array index.
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index bb33978761..e5402e1c66 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -724,14 +724,23 @@ Elf32_Dyn& ElfFile::GetDynamic(Elf32_Word i) const {
return *(GetDynamicSectionStart() + i);
}
-Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) const {
+Elf32_Dyn* ElfFile::FindDynamicByType(Elf32_Sword type) const {
for (Elf32_Word i = 0; i < GetDynamicNum(); i++) {
- Elf32_Dyn& elf_dyn = GetDynamic(i);
- if (elf_dyn.d_tag == type) {
- return elf_dyn.d_un.d_val;
+ Elf32_Dyn* dyn = &GetDynamic(i);
+ if (dyn->d_tag == type) {
+ return dyn;
}
}
- return 0;
+ return NULL;
+}
+
+Elf32_Word ElfFile::FindDynamicValueByType(Elf32_Sword type) const {
+ Elf32_Dyn* dyn = FindDynamicByType(type);
+ if (dyn == NULL) {
+ return 0;
+ } else {
+ return dyn->d_un.d_val;
+ }
}
Elf32_Rel* ElfFile::GetRelSectionStart(Elf32_Shdr& section_header) const {
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index 496690b748..a966bd9632 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -112,6 +112,7 @@ class ElfFile {
Elf32_Word GetDynamicNum() const;
Elf32_Dyn& GetDynamic(Elf32_Word) const;
+ Elf32_Dyn* FindDynamicByType(Elf32_Sword type) const;
Elf32_Word FindDynamicValueByType(Elf32_Sword type) const;
Elf32_Word GetRelNum(Elf32_Shdr&) const;
diff --git a/runtime/elf_utils.h b/runtime/elf_utils.h
index 2c50047825..5966d05996 100644
--- a/runtime/elf_utils.h
+++ b/runtime/elf_utils.h
@@ -22,6 +22,8 @@
// Explicitly include our own elf.h to avoid Linux and other dependencies.
#include "./elf.h"
+#include "base/logging.h"
+
// Architecture dependent flags for the ELF header.
#define EF_ARM_EABI_VER5 0x05000000
#define EF_MIPS_ABI_O32 0x00001000
@@ -62,8 +64,103 @@
#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */
#define DT_MIPS_RLD_MAP 0x70000016 /* Address of debug map pointer */
+// Patching section type
+#define SHT_OAT_PATCH SHT_LOUSER
+
inline void SetBindingAndType(Elf32_Sym* sym, unsigned char b, unsigned char t) {
sym->st_info = (b << 4) + (t & 0x0f);
}
+inline bool IsDynamicSectionPointer(Elf32_Word d_tag, Elf32_Word e_machine) {
+ switch (d_tag) {
+ // case 1: well known d_tag values that imply Elf32_Dyn.d_un contains an address in d_ptr
+ case DT_PLTGOT:
+ case DT_HASH:
+ case DT_STRTAB:
+ case DT_SYMTAB:
+ case DT_RELA:
+ case DT_INIT:
+ case DT_FINI:
+ case DT_REL:
+ case DT_DEBUG:
+ case DT_JMPREL: {
+ return true;
+ }
+ // d_val or ignored values
+ case DT_NULL:
+ case DT_NEEDED:
+ case DT_PLTRELSZ:
+ case DT_RELASZ:
+ case DT_RELAENT:
+ case DT_STRSZ:
+ case DT_SYMENT:
+ case DT_SONAME:
+ case DT_RPATH:
+ case DT_SYMBOLIC:
+ case DT_RELSZ:
+ case DT_RELENT:
+ case DT_PLTREL:
+ case DT_TEXTREL:
+ case DT_BIND_NOW:
+ case DT_INIT_ARRAYSZ:
+ case DT_FINI_ARRAYSZ:
+ case DT_RUNPATH:
+ case DT_FLAGS: {
+ return false;
+ }
+ // boundary values that should not be used
+ case DT_ENCODING:
+ case DT_LOOS:
+ case DT_HIOS:
+ case DT_LOPROC:
+ case DT_HIPROC: {
+ LOG(FATAL) << "Illegal d_tag value 0x" << std::hex << d_tag;
+ return false;
+ }
+ default: {
+ // case 2: "regular" DT_* ranges where even d_tag values imply an address in d_ptr
+ if ((DT_ENCODING < d_tag && d_tag < DT_LOOS)
+ || (DT_LOOS < d_tag && d_tag < DT_HIOS)
+ || (DT_LOPROC < d_tag && d_tag < DT_HIPROC)) {
+ // Special case for MIPS which breaks the regular rules between DT_LOPROC and DT_HIPROC
+ if (e_machine == EM_MIPS) {
+ switch (d_tag) {
+ case DT_MIPS_RLD_VERSION:
+ case DT_MIPS_TIME_STAMP:
+ case DT_MIPS_ICHECKSUM:
+ case DT_MIPS_IVERSION:
+ case DT_MIPS_FLAGS:
+ case DT_MIPS_LOCAL_GOTNO:
+ case DT_MIPS_CONFLICTNO:
+ case DT_MIPS_LIBLISTNO:
+ case DT_MIPS_SYMTABNO:
+ case DT_MIPS_UNREFEXTNO:
+ case DT_MIPS_GOTSYM:
+ case DT_MIPS_HIPAGENO: {
+ return false;
+ }
+ case DT_MIPS_BASE_ADDRESS:
+ case DT_MIPS_CONFLICT:
+ case DT_MIPS_LIBLIST:
+ case DT_MIPS_RLD_MAP: {
+ return true;
+ }
+ default: {
+ LOG(FATAL) << "Unknown MIPS d_tag value 0x" << std::hex << d_tag;
+ return false;
+ }
+ }
+ } else if ((d_tag % 2) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ LOG(FATAL) << "Unknown d_tag value 0x" << std::hex << d_tag;
+ return false;
+ }
+ }
+ }
+}
+
#endif // ART_RUNTIME_ELF_UTILS_H_
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index bea7d960d4..bae4023c1d 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -25,8 +25,13 @@
namespace art {
// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
+#if defined(__arm__) || defined(__aarch64__)
extern "C" void* artFindNativeMethod() {
Thread* self = Thread::Current();
+#else
+extern "C" void* artFindNativeMethod(Thread* self) {
+ DCHECK_EQ(self, Thread::Current());
+#endif
Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
ScopedObjectAccess soa(self);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 6fb962452e..95cb85eefc 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1565,7 +1565,11 @@ void BuildGenericJniFrameVisitor::FinalizeHandleScope(Thread* self) {
self->PushHandleScope(handle_scope_);
}
+#if defined(__arm__) || defined(__aarch64__)
extern "C" void* artFindNativeMethod();
+#else
+extern "C" void* artFindNativeMethod(Thread* self);
+#endif
uint64_t artQuickGenericJniEndJNIRef(Thread* self, uint32_t cookie, jobject l, jobject lock) {
if (lock != nullptr) {
@@ -1638,7 +1642,11 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
// pointer.
DCHECK(nativeCode != nullptr);
if (nativeCode == GetJniDlsymLookupStub()) {
+#if defined(__arm__) || defined(__aarch64__)
nativeCode = artFindNativeMethod();
+#else
+ nativeCode = artFindNativeMethod(self);
+#endif
if (nativeCode == nullptr) {
DCHECK(self->IsExceptionPending()); // There should be an exception pending now.
diff --git a/runtime/image.cc b/runtime/image.cc
index 528bfc631c..93ec27daf1 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const byte ImageHeader::kImageVersion[] = { '0', '0', '7', '\0' };
+const byte ImageHeader::kImageVersion[] = { '0', '0', '8', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -45,6 +45,7 @@ ImageHeader::ImageHeader(uint32_t image_begin,
oat_data_begin_(oat_data_begin),
oat_data_end_(oat_data_end),
oat_file_end_(oat_file_end),
+ patch_delta_(0),
image_roots_(image_roots) {
CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
@@ -58,6 +59,17 @@ ImageHeader::ImageHeader(uint32_t image_begin,
memcpy(version_, kImageVersion, sizeof(kImageVersion));
}
+void ImageHeader::RelocateImage(off_t delta) {
+ CHECK_ALIGNED(delta, kPageSize) << " patch delta must be page aligned";
+ image_begin_ += delta;
+ oat_file_begin_ += delta;
+ oat_data_begin_ += delta;
+ oat_data_end_ += delta;
+ oat_file_end_ += delta;
+ image_roots_ += delta;
+ patch_delta_ += delta;
+}
+
bool ImageHeader::IsValid() const {
if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
return false;
@@ -65,6 +77,25 @@ bool ImageHeader::IsValid() const {
if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
return false;
}
+ // Unsigned so wraparound is well defined.
+ if (image_begin_ >= image_begin_ + image_size_) {
+ return false;
+ }
+ if (oat_file_begin_ > oat_file_end_) {
+ return false;
+ }
+ if (oat_data_begin_ > oat_data_end_) {
+ return false;
+ }
+ if (oat_file_begin_ >= oat_data_begin_) {
+ return false;
+ }
+ if (image_roots_ <= image_begin_ || oat_file_begin_ <= image_roots_) {
+ return false;
+ }
+ if (!IsAligned<kPageSize>(patch_delta_)) {
+ return false;
+ }
return true;
}
diff --git a/runtime/image.h b/runtime/image.h
index abe1ad8711..424a40b7ca 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -84,6 +84,10 @@ class PACKED(4) ImageHeader {
return reinterpret_cast<byte*>(oat_file_end_);
}
+ off_t GetPatchDelta() const {
+ return patch_delta_;
+ }
+
size_t GetBitmapOffset() const {
return RoundUp(image_size_, kPageSize);
}
@@ -112,10 +116,11 @@ class PACKED(4) ImageHeader {
mirror::Object* GetImageRoot(ImageRoot image_root) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
mirror::ObjectArray<mirror::Object>* GetImageRoots() const;
+ void RelocateImage(off_t delta);
+
+ private:
static const byte kImageMagic[4];
static const byte kImageVersion[4];
@@ -150,11 +155,13 @@ class PACKED(4) ImageHeader {
// .so files. Used for positioning a following alloc spaces.
uint32_t oat_file_end_;
+ // The total delta that this image has been patched.
+ int32_t patch_delta_;
+
// Absolute address of an Object[] of objects needed to reinitialize from an image.
uint32_t image_roots_;
friend class ImageWriter;
- friend class ImageDumper; // For GetImageRoots()
};
} // namespace art
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 86c84e8b0f..36fbed4ea2 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -192,17 +192,17 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
}
}
if (NeedsFullDeoptimization(pEvent->eventKind)) {
- CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
- CHECK(req.method == nullptr);
- req.kind = DeoptimizationRequest::kFullDeoptimization;
+ CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
+ CHECK(req.Method() == nullptr);
+ req.SetKind(DeoptimizationRequest::kFullDeoptimization);
}
Dbg::RequestDeoptimization(req);
}
uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
if (instrumentation_event != 0) {
DeoptimizationRequest req;
- req.kind = DeoptimizationRequest::kRegisterForEvent;
- req.instrumentation_event = instrumentation_event;
+ req.SetKind(DeoptimizationRequest::kRegisterForEvent);
+ req.SetInstrumentationEvent(instrumentation_event);
Dbg::RequestDeoptimization(req);
}
@@ -274,17 +274,17 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
// deoptimization and only the last single-step will trigger a full undeoptimization.
Dbg::DelayFullUndeoptimization();
} else if (NeedsFullDeoptimization(pEvent->eventKind)) {
- CHECK_EQ(req.kind, DeoptimizationRequest::kNothing);
- CHECK(req.method == nullptr);
- req.kind = DeoptimizationRequest::kFullUndeoptimization;
+ CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
+ CHECK(req.Method() == nullptr);
+ req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
}
Dbg::RequestDeoptimization(req);
}
uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
if (instrumentation_event != 0) {
DeoptimizationRequest req;
- req.kind = DeoptimizationRequest::kUnregisterForEvent;
- req.instrumentation_event = instrumentation_event;
+ req.SetKind(DeoptimizationRequest::kUnregisterForEvent);
+ req.SetInstrumentationEvent(instrumentation_event);
Dbg::RequestDeoptimization(req);
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index eae0418c17..70253af614 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -60,6 +60,12 @@ class OatFile {
~OatFile();
+ ElfFile* GetElfFile() const {
+ CHECK_NE(reinterpret_cast<uintptr_t>(elf_file_.get()), reinterpret_cast<uintptr_t>(nullptr))
+ << "Cannot get an elf file from " << GetLocation();
+ return elf_file_.get();
+ }
+
const std::string& GetLocation() const {
return location_;
}
@@ -227,6 +233,9 @@ class OatFile {
return End() - Begin();
}
+ const byte* Begin() const;
+ const byte* End() const;
+
private:
static void CheckLocation(const std::string& location);
@@ -248,9 +257,6 @@ class OatFile {
std::string* error_msg);
bool Setup(std::string* error_msg);
- const byte* Begin() const;
- const byte* End() const;
-
// The oat file name.
//
// The image will embed this to link its associated oat file.
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index e1fbf011ca..5128b19d12 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -58,6 +58,7 @@ enum InlineMethodOpcode : uint16_t {
kIntrinsicCas,
kIntrinsicUnsafeGet,
kIntrinsicUnsafePut,
+ kIntrinsicSystemArrayCopyCharArray,
kInlineOpNop,
kInlineOpReturnArg,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 53ddcca469..3b14aaa767 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -930,7 +930,6 @@ void Runtime::VisitConstantRoots(RootCallback* callback, void* arg) {
void Runtime::VisitConcurrentRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
intern_table_->VisitRoots(callback, arg, flags);
class_linker_->VisitRoots(callback, arg, flags);
- Dbg::VisitRoots(callback, arg);
if ((flags & kVisitRootFlagNewRoots) == 0) {
// Guaranteed to have no new roots in the constant roots.
VisitConstantRoots(callback, arg);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index d3487d0488..d60fb493bc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -835,10 +835,9 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
os << " \"" << mutex->GetName() << "\"";
if (mutex->IsReaderWriterMutex()) {
ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
- if (rw_mutex->IsExclusiveHeld(thread)) {
+ if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
os << "(exclusive held)";
} else {
- CHECK(rw_mutex->IsSharedHeld(thread));
os << "(shared held)";
}
}
diff --git a/runtime/utils.h b/runtime/utils.h
index eb79968e21..448c591f2b 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -19,6 +19,7 @@
#include <pthread.h>
+#include <limits>
#include <string>
#include <vector>
@@ -50,6 +51,34 @@ enum TimeUnit {
kTimeUnitSecond,
};
+template <typename T>
+bool ParseUint(const char *in, T* out) {
+ char* end;
+ unsigned long long int result = strtoull(in, &end, 0); // NOLINT(runtime/int)
+ if (in == end || *end != '\0') {
+ return false;
+ }
+ if (std::numeric_limits<T>::max() < result) {
+ return false;
+ }
+ *out = static_cast<T>(result);
+ return true;
+}
+
+template <typename T>
+bool ParseInt(const char* in, T* out) {
+ char* end;
+ long long int result = strtoll(in, &end, 0); // NOLINT(runtime/int)
+ if (in == end || *end != '\0') {
+ return false;
+ }
+ if (result < std::numeric_limits<T>::min() || std::numeric_limits<T>::max() < result) {
+ return false;
+ }
+ *out = static_cast<T>(result);
+ return true;
+}
+
template<typename T>
static constexpr bool IsPowerOfTwo(T x) {
return (x & (x - 1)) == 0;
diff --git a/test/Android.oat.mk b/test/Android.oat.mk
index a560a17608..13d452c7e8 100644
--- a/test/Android.oat.mk
+++ b/test/Android.oat.mk
@@ -40,7 +40,8 @@ TEST_OAT_DIRECTORIES := \
# Create build rules for each dex file recording the dependency.
$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval $(call build-art-test-dex,art-oat-test,$(dir), \
- $(ART_TARGET_TEST_OUT),$(LOCAL_PATH)/Android.oat.mk,ART_OAT_TEST_$(dir)_DEX)))
+ $(ART_TARGET_TEST_OUT),$(LOCAL_PATH)/Android.oat.mk,ART_TEST_TARGET_OAT_$(dir)_DEX, \
+ ART_TEST_HOST_OAT_$(dir)_DEX)))
########################################################################
@@ -77,7 +78,7 @@ endif
define define-test-art-oat-rule-target
# Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
# to ensure files are pushed to the device.
- TEST_ART_TARGET_SYNC_DEPS += $$(ART_OAT_TEST_$(1)_DEX)
+ TEST_ART_TARGET_SYNC_DEPS += $$(ART_TEST_TARGET_OAT_$(1)_DEX)
.PHONY: $(3)
$(3): test-art-target-sync
@@ -189,7 +190,7 @@ define define-test-art-oat-rule-host
$(3): PRIVATE_DEX_FILE := /$$(dex_file)
$(3): PRIVATE_OAT_FILE := $$(oat_file)
.PHONY: $(3)
-$(3): $$(ART_OAT_TEST_$(1)_DEX) $(ART_TEST_HOST_OAT_DEPENDENCIES)
+$(3): $$(ART_TEST_HOST_OAT_$(1)_DEX) $(ART_TEST_HOST_OAT_DEPENDENCIES)
$(hide) mkdir -p $(ART_HOST_TEST_DIR)/android-data-$$@/dalvik-cache/$$($(2)HOST_ARCH)
$(hide) cp $$(realpath $$<) $(ART_HOST_TEST_DIR)/android-data-$$@/oat-test-dex-$(1).jar
$(hide) $(DEX2OATD) $(DEX2OAT_FLAGS) --runtime-arg -Xms16m --runtime-arg -Xmx16m $(4) \
@@ -447,7 +448,8 @@ ART_TEST_HOST_OAT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_OAT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_OAT_RULES :=
ART_TEST_HOST_OAT_DEPENDENCIES :=
-$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval ART_OAT_TEST_$(dir)_DEX :=))
+$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval ART_TEST_TARGET_OAT_$(dir)_DEX :=))
+$(foreach dir,$(TEST_OAT_DIRECTORIES), $(eval ART_TEST_HOST_OAT_$(dir)_DEX :=))
TEST_OAT_DIRECTORIES :=
LOCAL_PID :=
LOCAL_PATH :=