-rw-r--r--  Android.mk                                     |  26
-rw-r--r--  build/Android.common_build.mk                  |   4
-rw-r--r--  build/Android.common_test.mk                   |   3
-rw-r--r--  build/Android.gtest.mk                         |   1
-rw-r--r--  build/Android.oat.mk                           |  14
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc         |  14
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc              |   1
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc     |   4
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc  |   2
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc               |  20
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc        |  14
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc               |   3
-rw-r--r--  compiler/dex/verification_results.cc           |   7
-rw-r--r--  compiler/driver/compiler_driver.cc             |   8
-rw-r--r--  compiler/jit/jit_compiler.cc                   |  17
-rw-r--r--  dex2oat/dex2oat.cc                             |  13
-rw-r--r--  disassembler/disassembler_mips.cc              |  47
-rw-r--r--  oatdump/oatdump.cc                             | 282
-rw-r--r--  runtime/Android.mk                             |   1
-rw-r--r--  runtime/arch/mips/entrypoints_direct_mips.h    |  52
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc     |  92
-rw-r--r--  runtime/class_linker.cc                        |  22
-rw-r--r--  runtime/dex_instruction.cc                     |  17
-rw-r--r--  runtime/dex_instruction.h                      |   4
-rw-r--r--  runtime/gc/accounting/bitmap-inl.h             | 152
-rw-r--r--  runtime/gc/accounting/bitmap.cc                |  92
-rw-r--r--  runtime/gc/accounting/bitmap.h                 | 192
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc       | 174
-rw-r--r--  runtime/gc/accounting/mod_union_table.h        |  42
-rw-r--r--  runtime/gc/accounting/mod_union_table_test.cc  | 242
-rw-r--r--  runtime/gc/accounting/space_bitmap.h           |   7
-rw-r--r--  runtime/gc/heap.cc                             |  51
-rw-r--r--  runtime/gc/heap.h                              |   9
-rw-r--r--  runtime/jdwp/jdwp_event.cc                     |   6
-rw-r--r--  runtime/utils.cc                               |   8
-rw-r--r--  runtime/verifier/reg_type_cache.cc             |   5
-rw-r--r--  test/134-reg-promotion/expected.txt            |   0
-rw-r--r--  test/134-reg-promotion/info.txt                |   4
-rw-r--r--  test/134-reg-promotion/smali/Test.smali        |  38
-rw-r--r--  test/134-reg-promotion/src/Main.java           |  42
-rw-r--r--  test/Android.run-test.mk                       |  22
-rwxr-xr-x  test/run-test                                  |   1
-rw-r--r--  tools/libcore_failures.txt                     |   7
43 files changed, 1541 insertions(+), 221 deletions(-)
diff --git a/Android.mk b/Android.mk
index 76c3aa52e3..c740a0d979 100644
--- a/Android.mk
+++ b/Android.mk
@@ -202,6 +202,11 @@ test-art-host-optimizing: test-art-host-run-test-optimizing
test-art-host-interpreter: test-art-host-run-test-interpreter
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+# All host tests that run solely on the jit.
+.PHONY: test-art-host-jit
+test-art-host-jit: test-art-host-run-test-jit
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Primary host architecture variants:
.PHONY: test-art-host$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(ART_PHONY_TEST_HOST_SUFFIX) \
@@ -220,6 +225,10 @@ test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-op
test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+.PHONY: test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(ART_PHONY_TEST_HOST_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Secondary host architecture variants:
ifneq ($(HOST_PREFER_32_BIT),true)
.PHONY: test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
@@ -238,6 +247,10 @@ test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-tes
.PHONY: test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
+.PHONY: test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
# Valgrind. Currently only 32b gtests.
@@ -268,6 +281,11 @@ test-art-target-optimizing: test-art-target-run-test-optimizing
test-art-target-interpreter: test-art-target-run-test-interpreter
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+# All target tests that run solely on the jit.
+.PHONY: test-art-target-jit
+test-art-target-jit: test-art-target-run-test-jit
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Primary target architecture variants:
.PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \
@@ -286,6 +304,10 @@ test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-t
test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+.PHONY: test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
# Secondary target architecture variants:
ifdef TARGET_2ND_ARCH
.PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
@@ -304,6 +326,10 @@ test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-r
.PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
+
+.PHONY: test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
endif # art_test_bother
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index cb344737aa..6a83e72e8c 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -74,8 +74,8 @@ endif
#
# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
#
-art_default_gc_type ?= CMS
-art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(art_default_gc_type)
+ART_DEFAULT_GC_TYPE ?= CMS
+art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
ART_HOST_CFLAGS :=
ART_TARGET_CFLAGS :=
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index da50d53dbf..547e92e7d3 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -46,6 +46,9 @@ ART_TEST_DEFAULT_COMPILER ?= true
# Do you want interpreter tests run?
ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
+# Do you want JIT tests run?
+ART_TEST_JIT ?= $(ART_TEST_FULL)
+
# Do you want optimizing compiler tests run?
ART_TEST_OPTIMIZING ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index c576d1bd6d..7ab4d64d45 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -116,6 +116,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/entrypoints_order_test.cc \
runtime/exception_test.cc \
runtime/gc/accounting/card_table_test.cc \
+ runtime/gc/accounting/mod_union_table_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
runtime/gc/reference_queue_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 8d49565478..4d2fa41692 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -31,7 +31,7 @@ ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
endif
# Use dex2oat debug version for better error reporting
-# $(1): compiler - default, optimizing or interpreter.
+# $(1): compiler - default, optimizing, jit or interpreter.
# $(2): pic/no-pic
# $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
# $(4): wrapper, e.g., valgrind.
@@ -67,9 +67,9 @@ define create-core-oat-host-rules
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
#Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected default, interpreter or optimizing)
+ $$(error found $(1) expected default, interpreter, jit or optimizing)
endif
ifeq ($(2),pic)
@@ -127,7 +127,7 @@ $$(core_oat_name): $$(core_image_name)
core_pic_infix :=
endef # create-core-oat-host-rules
-# $(1): compiler - default, optimizing or interpreter.
+# $(1): compiler - default, optimizing, jit or interpreter.
# $(2): wrapper.
# $(3): dex2oat suffix.
define create-core-oat-host-rule-combination
@@ -181,9 +181,9 @@ define create-core-oat-target-rules
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected default, interpreter or optimizing)
+ $$(error found $(1) expected default, interpreter, jit or optimizing)
endif
ifeq ($(2),pic)
@@ -245,7 +245,7 @@ $$(core_oat_name): $$(core_image_name)
core_pic_infix :=
endef # create-core-oat-target-rules
-# $(1): compiler - default, optimizing or interpreter.
+# $(1): compiler - default, optimizing, jit or interpreter.
# $(2): wrapper.
# $(3): dex2oat suffix.
define create-core-oat-target-rule-combination
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3d64833942..8833da324f 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -117,11 +117,11 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
"add", "!0C, !1C", 2, kFixupNone),
ENCODING_MAP(kThumbAddPcRel, 0xa000,
kFmtBitBlt, 10, 8, kFmtBitBlt, 7, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_PC | NEEDS_FIXUP,
"add", "!0C, pc, #!1E", 2, kFixupLoad),
ENCODING_MAP(kThumbAddSpRel, 0xa800,
kFmtBitBlt, 10, 8, kFmtSkip, -1, -1, kFmtBitBlt, 7, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF_SP | REG_USE_SP,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0 | REG_USE_SP,
"add", "!0C, sp, #!2E", 2, kFixupNone),
ENCODING_MAP(kThumbAddSpI7, 0xb000,
kFmtBitBlt, 6, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
@@ -182,7 +182,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
"blx", "!0C", 2, kFixupNone),
ENCODING_MAP(kThumbBx, 0x4700,
kFmtBitBlt, 6, 3, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_USE0 | IS_BRANCH,
"bx", "!0C", 2, kFixupNone),
ENCODING_MAP(kThumbCmnRR, 0x42c0,
kFmtBitBlt, 2, 0, kFmtBitBlt, 5, 3, kFmtUnused, -1, -1,
@@ -693,7 +693,7 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
ENCODING_MAP(kThumb2AdcRRR, 0xeb500000, /* setflags encoding */
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
kFmtShift, -1, -1,
- IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES,
+ IS_QUAD_OP | REG_DEF0_USE12 | SETS_CCODES | USES_CCODES,
"adcs", "!0C, !1C, !2C!3H", 4, kFixupNone),
ENCODING_MAP(kThumb2AndRRR, 0xea000000,
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
@@ -835,15 +835,15 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
"it:!1b", "!0c", 2, kFixupNone),
ENCODING_MAP(kThumb2Fmstat, 0xeef1fa10,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES,
+ kFmtUnused, -1, -1, NO_OPERAND | SETS_CCODES | USES_CCODES,
"fmstat", "", 4, kFixupNone),
ENCODING_MAP(kThumb2Vcmpd, 0xeeb40b40,
kFmtDfp, 22, 12, kFmtDfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
"vcmp.f64", "!0S, !1S", 4, kFixupNone),
ENCODING_MAP(kThumb2Vcmps, 0xeeb40a40,
kFmtSfp, 22, 12, kFmtSfp, 5, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE01 | SETS_CCODES,
"vcmp.f32", "!0s, !1s", 4, kFixupNone),
ENCODING_MAP(kThumb2LdrPcRel12, 0xf8df0000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 0, kFmtUnused, -1, -1,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 3159886826..2a4d27ba57 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1079,6 +1079,7 @@ bool ArmMir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
}
LIR* ArmMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
}
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 806617be2a..aa5e5b4719 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -111,7 +111,7 @@ namespace art {
const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
ENCODING_MAP(WIDE(kA64Adc3rrr), SF_VARIANTS(0x1a000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
"adc", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Add4RRdT), SF_VARIANTS(0x11000000),
kFmtRegROrSp, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
@@ -518,7 +518,7 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
"ror", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Sbc3rrr), SF_VARIANTS(0x5a000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12 | USES_CCODES,
"sbc", "!0r, !1r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Sbfm4rrdd), SF_N_VARIANTS(0x13000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index f636e3b880..8e3f4ef726 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -718,7 +718,7 @@ bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, const InlineMetho
if (PrettyMethod(method_idx, *dex_file_) == "int java.lang.String.length()") {
// TODO: String.length is both kIntrinsicIsEmptyOrLength and kInlineOpIGet.
} else {
- LOG(ERROR) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
+ LOG(WARNING) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
}
return false;
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 040b07cedd..01f1d375ed 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -248,14 +248,16 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampo
if (arg0.wide == 0) {
LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
if (arg1.wide == 0) {
+ // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
if (cu_->instruction_set == kMips) {
- LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
+ LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
} else {
LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
}
} else {
+ // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
if (cu_->instruction_set == kMips) {
- LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+ LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
} else {
LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
}
@@ -263,9 +265,19 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampo
} else {
LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
if (arg1.wide == 0) {
- LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
+ // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
+ if (cu_->instruction_set == kMips) {
+ LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
+ } else {
+ LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
+ }
} else {
- LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+ // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
+ if (cu_->instruction_set == kMips) {
+ LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
+ } else {
+ LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
+ }
}
}
}
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index ec6edabdbd..2d26922dca 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -17,6 +17,7 @@
#include "codegen_mips.h"
#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/entrypoints_direct_mips.h"
#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
@@ -708,7 +709,18 @@ LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+ if (IsDirectEntrypoint(trampoline)) {
+ // Reserve argument space on stack (for $a0-$a3) for
+ // entrypoints that directly reference native implementations.
+ // This is not safe in general, as it violates the frame size
+ // of the Quick method, but it is used here only for calling
+ // native functions, outside of the runtime.
+ OpRegImm(kOpSub, rs_rSP, 16);
+ LIR* retVal = OpReg(op, r_tgt);
+ OpRegImm(kOpAdd, rs_rSP, 16);
+ return retVal;
+ }
+
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 966a92d290..83486265c4 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -587,6 +587,9 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::MOVE_FROM16:
case Instruction::MOVE_OBJECT_FROM16:
StoreValue(rl_dest, rl_src[0]);
+ if (rl_src[0].is_const && (mir_graph_->ConstantValue(rl_src[0]) == 0)) {
+ Workaround7250540(rl_dest, RegStorage::InvalidReg());
+ }
break;
case Instruction::MOVE_WIDE:
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 51a3d84382..150bdaca67 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -71,8 +71,11 @@ bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size());
}
DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size());
- delete it->second;
- verified_methods_.erase(it);
+ // Delete the new verified method since there was already an existing one registered. It
+ // is unsafe to replace the existing one since the JIT may be using it to generate a
+ // native GC map.
+ delete verified_method;
+ return true;
}
verified_methods_.Put(ref, verified_method);
DCHECK(verified_methods_.find(ref) != verified_methods_.end());
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 15b3d08a37..90e63e9674 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1316,6 +1316,14 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
}
}
}
+ if (runtime->UseJit()) {
+ // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
+ // never be updated even after we compile the method.
+ if (runtime->GetClassLinker()->IsQuickToInterpreterBridge(
+ reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)))) {
+ use_dex_cache = true;
+ }
+ }
if (method_code_in_boot) {
*stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b1d972e44e..0283791e28 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,7 +76,7 @@ JitCompiler::JitCompiler() : total_time_(0) {
false,
false,
false,
- true, // pic
+ false, // pic
nullptr,
pass_manager_options,
nullptr));
@@ -132,7 +132,20 @@ bool JitCompiler::CompileMethod(Thread* self, mirror::ArtMethod* method) {
return false;
}
total_time_ += NanoTime() - start_time;
- const bool result = MakeExecutable(compiled_method, h_method.Get());
+ // Don't add the method if we are supposed to be deoptimized.
+ bool result = false;
+ if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+ const void* code = Runtime::Current()->GetClassLinker()->GetOatMethodQuickCodeFor(
+ h_method.Get());
+ if (code != nullptr) {
+ // Already have some compiled code, just use this instead of linking.
+ // TODO: Fix recompilation.
+ h_method->SetEntryPointFromQuickCompiledCode(code);
+ result = true;
+ } else {
+ result = MakeExecutable(compiled_method, h_method.Get());
+ }
+ }
// Remove the compiled method to save memory.
compiler_driver_->RemoveCompiledMethod(method_ref);
return result;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 0b1f14dd94..22665ea6ee 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1052,6 +1052,13 @@ class Dex2Oat FINAL {
runtime_options.push_back(
std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
+ // Only allow no boot image for the runtime if we're compiling one. When we compile an app,
+ // we don't want fallback mode; it will abort as we do not push a boot classpath (it might
+ // have been stripped in preopting anyway).
+ if (!image_) {
+ runtime_options.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
+ }
+
if (!CreateRuntime(runtime_options)) {
return false;
}
@@ -1637,9 +1644,13 @@ class Dex2Oat FINAL {
}
void LogCompletionTime() {
+ // Note: when creation of a runtime fails, e.g., when trying to compile an app but there
+ // is no image, there won't be a Runtime::Current().
LOG(INFO) << "dex2oat took " << PrettyDuration(NanoTime() - start_ns_)
<< " (threads: " << thread_count_ << ") "
- << driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler));
+ << ((Runtime::Current() != nullptr) ?
+ driver_->GetMemoryUsageString(kIsDebugBuild || VLOG_IS_ON(compiler)) :
+ "");
}
std::unique_ptr<CompilerOptions> compiler_options_;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 7442c705fa..3d8a56730a 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -143,27 +143,31 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
// Floating point.
- { kFpMask, kCop1 | 0, "add", "fdst" },
- { kFpMask, kCop1 | 1, "sub", "fdst" },
- { kFpMask, kCop1 | 2, "mul", "fdst" },
- { kFpMask, kCop1 | 3, "div", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 4, "sqrt", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 5, "abs", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 6, "mov", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 7, "neg", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 8, "round.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 9, "trunc.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 10, "ceil.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 11, "floor.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 12, "round.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 13, "trunc.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 14, "ceil.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 15, "floor.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 32, "cvt.s", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 33, "cvt.d", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 36, "cvt.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 37, "cvt.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 38, "cvt.ps", "fds" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21) | 0, "mfc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x03 << 21) | 0, "mfhc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21) | 0, "mtc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x07 << 21) | 0, "mthc1", "Td" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -206,6 +210,7 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
break;
case 'D': args << 'r' << rd; break;
case 'd': args << 'f' << rd; break;
+ case 'a': args << 'f' << sa; break;
case 'f': // Floating point "fmt".
{
size_t fmt = (instruction >> 21) & 0x7; // TODO: other fmts?
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 11ccafbe7e..3ce86d872f 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -311,13 +311,23 @@ class OatDumperOptions {
bool dump_vmap,
bool disassemble_code,
bool absolute_addresses,
- const char* method_filter)
+ const char* class_filter,
+ const char* method_filter,
+ bool list_classes,
+ bool list_methods,
+ const char* export_dex_location,
+ uint32_t addr2instr)
: dump_raw_mapping_table_(dump_raw_mapping_table),
dump_raw_gc_map_(dump_raw_gc_map),
dump_vmap_(dump_vmap),
disassemble_code_(disassemble_code),
absolute_addresses_(absolute_addresses),
+ class_filter_(class_filter),
method_filter_(method_filter),
+ list_classes_(list_classes),
+ list_methods_(list_methods),
+ export_dex_location_(export_dex_location),
+ addr2instr_(addr2instr),
class_loader_(nullptr) {}
const bool dump_raw_mapping_table_;
@@ -325,27 +335,34 @@ class OatDumperOptions {
const bool dump_vmap_;
const bool disassemble_code_;
const bool absolute_addresses_;
+ const char* const class_filter_;
const char* const method_filter_;
+ const bool list_classes_;
+ const bool list_methods_;
+ const char* const export_dex_location_;
+ uint32_t addr2instr_;
Handle<mirror::ClassLoader>* class_loader_;
};
class OatDumper {
public:
- explicit OatDumper(const OatFile& oat_file, OatDumperOptions* options)
+ explicit OatDumper(const OatFile& oat_file, const OatDumperOptions& options)
: oat_file_(oat_file),
oat_dex_files_(oat_file.GetOatDexFiles()),
options_(options),
+ resolved_addr2instr_(0),
instruction_set_(oat_file_.GetOatHeader().GetInstructionSet()),
disassembler_(Disassembler::Create(instruction_set_,
- new DisassemblerOptions(options_->absolute_addresses_,
+ new DisassemblerOptions(options_.absolute_addresses_,
oat_file.Begin(),
true /* can_read_literals_ */))) {
- CHECK(options_->class_loader_ != nullptr);
+ CHECK(options_.class_loader_ != nullptr);
+ CHECK(options_.class_filter_ != nullptr);
+ CHECK(options_.method_filter_ != nullptr);
AddAllOffsets();
}
~OatDumper() {
- delete options_;
delete disassembler_;
}
@@ -380,7 +397,7 @@ class OatDumper {
#define DUMP_OAT_HEADER_OFFSET(label, offset) \
os << label " OFFSET:\n"; \
os << StringPrintf("0x%08x", oat_header.offset()); \
- if (oat_header.offset() != 0 && options_->absolute_addresses_) { \
+ if (oat_header.offset() != 0 && options_.absolute_addresses_) { \
os << StringPrintf(" (%p)", oat_file_.Begin() + oat_header.offset()); \
} \
os << StringPrintf("\n\n");
@@ -426,7 +443,7 @@ class OatDumper {
os << "\n";
}
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
os << "BEGIN:\n";
os << reinterpret_cast<const void*>(oat_file_.Begin()) << "\n\n";
@@ -439,11 +456,26 @@ class OatDumper {
os << std::flush;
+ // If set, adjust relative address to be searched
+ if (options_.addr2instr_ != 0) {
+ resolved_addr2instr_ = options_.addr2instr_ + oat_header.GetExecutableOffset();
+ os << "SEARCH ADDRESS (executable offset + input):\n";
+ os << StringPrintf("0x%08x\n\n", resolved_addr2instr_);
+ }
+
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
- if (!DumpOatDexFile(os, *oat_dex_file)) {
- success = false;
+
+ // If file export is selected, skip file analysis
+ if (options_.export_dex_location_) {
+ if (!ExportDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
+ } else {
+ if (!DumpOatDexFile(os, *oat_dex_file)) {
+ success = false;
+ }
}
}
os << std::flush;
@@ -553,6 +585,7 @@ class OatDumper {
bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
bool success = true;
+ bool stop_analysis = false;
os << "OatDexFile:\n";
os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str());
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
@@ -571,6 +604,12 @@ class OatDumper {
class_def_index++) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
+
+ // TODO: Support regex
+ if (DescriptorToDot(descriptor).find(options_.class_filter_) == std::string::npos) {
+ continue;
+ }
+
uint32_t oat_class_offset = oat_dex_file.GetOatClassOffset(class_def_index);
const OatFile::OatClass oat_class = oat_dex_file.GetOatClass(class_def_index);
os << StringPrintf("%zd: %s (offset=0x%08x) (type_idx=%d)",
@@ -580,15 +619,98 @@ class OatDumper {
// TODO: include bitmap here if type is kOatClassSomeCompiled?
Indenter indent_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indented_os(&indent_filter);
- if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def)) {
+ if (options_.list_classes_) continue;
+ if (!DumpOatClass(indented_os, oat_class, *(dex_file.get()), class_def, &stop_analysis)) {
success = false;
}
+ if (stop_analysis) {
+ os << std::flush;
+ return success;
+ }
}
os << std::flush;
return success;
}
+ bool ExportDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
+ std::string error_msg;
+ std::string dex_file_location = oat_dex_file.GetDexFileLocation();
+
+ std::unique_ptr<const DexFile> dex_file(oat_dex_file.OpenDexFile(&error_msg));
+ if (dex_file == nullptr) {
+ os << "Failed to open dex file '" << dex_file_location << "': " << error_msg;
+ return false;
+ }
+ size_t fsize = oat_dex_file.FileSize();
+
+ // Some quick checks just in case
+ if (fsize == 0 || fsize < sizeof(DexFile::Header)) {
+ os << "Invalid dex file\n";
+ return false;
+ }
+
+ // Verify output directory exists
+ if (!OS::DirectoryExists(options_.export_dex_location_)) {
+ // TODO: Extend OS::DirectoryExists if symlink support is required
+ os << options_.export_dex_location_ << " output directory not found or symlink\n";
+ return false;
+ }
+
+ // Beautify path names
+ if (dex_file_location.size() > PATH_MAX || dex_file_location.size() <= 0) {
+ return false;
+ }
+
+ std::string dex_orig_name;
+ size_t dex_orig_pos = dex_file_location.rfind('/');
+ if (dex_orig_pos == std::string::npos)
+ dex_orig_name = dex_file_location;
+ else
+ dex_orig_name = dex_file_location.substr(dex_orig_pos + 1);
+
+ // A more elegant approach to efficiently name user installed apps is welcome
+ if (dex_orig_name.size() == 8 && !dex_orig_name.compare("base.apk")) {
+ dex_file_location.erase(dex_orig_pos, strlen("base.apk") + 1);
+ size_t apk_orig_pos = dex_file_location.rfind('/');
+ if (apk_orig_pos != std::string::npos) {
+ dex_orig_name = dex_file_location.substr(++apk_orig_pos);
+ }
+ }
+
+ std::string out_dex_path(options_.export_dex_location_);
+ if (out_dex_path.back() != '/') {
+ out_dex_path.append("/");
+ }
+ out_dex_path.append(dex_orig_name);
+ out_dex_path.append("_export.dex");
+ if (out_dex_path.length() > PATH_MAX) {
+ return false;
+ }
+
+ std::unique_ptr<File> file(OS::CreateEmptyFile(out_dex_path.c_str()));
+ if (file.get() == nullptr) {
+ os << "Failed to open output dex file " << out_dex_path;
+ return false;
+ }
+
+ if (!file->WriteFully(dex_file->Begin(), fsize)) {
+ os << "Failed to write dex file";
+ file->Erase();
+ return false;
+ }
+
+ if (file->FlushCloseOrErase() != 0) {
+ os << "Flush and close failed";
+ return false;
+ }
+
+ os << StringPrintf("Dex file exported at %s (%zd bytes)\n", out_dex_path.c_str(), fsize);
+ os << std::flush;
+
+ return true;
+ }
+
static void SkipAllFields(ClassDataItemIterator& it) {
while (it.HasNextStaticField()) {
it.Next();
@@ -599,8 +721,9 @@ class OatDumper {
}
bool DumpOatClass(std::ostream& os, const OatFile::OatClass& oat_class, const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
+ const DexFile::ClassDef& class_def, bool* stop_analysis) {
bool success = true;
+ bool addr_found = false;
const uint8_t* class_data = dex_file.GetClassData(class_def);
if (class_data == nullptr) { // empty class such as a marker interface?
os << std::flush;
@@ -612,18 +735,26 @@ class OatDumper {
while (it.HasNextDirectMethod()) {
if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
it.GetMemberIndex(), it.GetMethodCodeItem(),
- it.GetRawMemberAccessFlags())) {
+ it.GetRawMemberAccessFlags(), &addr_found)) {
success = false;
}
+ if (addr_found) {
+ *stop_analysis = true;
+ return success;
+ }
class_method_index++;
it.Next();
}
while (it.HasNextVirtualMethod()) {
if (!DumpOatMethod(os, class_def, class_method_index, oat_class, dex_file,
it.GetMemberIndex(), it.GetMethodCodeItem(),
- it.GetRawMemberAccessFlags())) {
+ it.GetRawMemberAccessFlags(), &addr_found)) {
success = false;
}
+ if (addr_found) {
+ *stop_analysis = true;
+ return success;
+ }
class_method_index++;
it.Next();
}
@@ -641,20 +772,39 @@ class OatDumper {
uint32_t class_method_index,
const OatFile::OatClass& oat_class, const DexFile& dex_file,
uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ uint32_t method_access_flags, bool* addr_found) {
bool success = true;
- std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
- if (pretty_method.find(options_->method_filter_) == std::string::npos) {
+
+ // TODO: Support regex
+ std::string method_name = dex_file.GetMethodName(dex_file.GetMethodId(dex_method_idx));
+ if (method_name.find(options_.method_filter_) == std::string::npos) {
return success;
}
+ std::string pretty_method = PrettyMethod(dex_method_idx, dex_file, true);
os << StringPrintf("%d: %s (dex_method_idx=%d)\n",
class_method_index, pretty_method.c_str(),
dex_method_idx);
+ if (options_.list_methods_) return success;
+
Indenter indent1_filter(os.rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent1_os(new std::ostream(&indent1_filter));
Indenter indent2_filter(indent1_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::unique_ptr<std::ostream> indent2_os(new std::ostream(&indent2_filter));
+
+ uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
+ const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
+ const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
+ uint32_t code_offset = oat_method.GetCodeOffset();
+ uint32_t code_size = oat_method.GetQuickCodeSize();
+ if (resolved_addr2instr_ != 0) {
+ if (resolved_addr2instr_ > code_offset + code_size) {
+ return success;
+ } else {
+ *addr_found = true; // stop analyzing file at next iteration
+ }
+ }
+
{
*indent1_os << "DEX CODE:\n";
DumpDexCode(*indent2_os, dex_file, code_item);
@@ -666,13 +816,9 @@ class OatDumper {
verifier.reset(DumpVerifier(*indent2_os, dex_method_idx, &dex_file, class_def, code_item,
method_access_flags));
}
-
- uint32_t oat_method_offsets_offset = oat_class.GetOatMethodOffsetsOffset(class_method_index);
- const OatMethodOffsets* oat_method_offsets = oat_class.GetOatMethodOffsets(class_method_index);
- const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_index);
{
*indent1_os << "OatMethodOffsets ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent1_os << StringPrintf("%p ", oat_method_offsets);
}
*indent1_os << StringPrintf("(offset=0x%08x)\n", oat_method_offsets_offset);
@@ -685,7 +831,6 @@ class OatDumper {
return false;
}
- uint32_t code_offset = oat_method.GetCodeOffset();
*indent2_os << StringPrintf("code_offset: 0x%08x ", code_offset);
uint32_t aligned_code_begin = AlignCodeOffset(oat_method.GetCodeOffset());
if (aligned_code_begin > oat_file_.Size()) {
@@ -697,7 +842,7 @@ class OatDumper {
*indent2_os << "\n";
*indent2_os << "gc_map: ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent2_os << StringPrintf("%p ", oat_method.GetGcMap());
}
uint32_t gc_map_offset = oat_method.GetGcMapOffset();
@@ -707,7 +852,7 @@ class OatDumper {
"gc map table offset 0x%08x is past end of file 0x%08zx.\n",
gc_map_offset, oat_file_.Size());
success = false;
- } else if (options_->dump_raw_gc_map_) {
+ } else if (options_.dump_raw_gc_map_) {
Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent3_os(&indent3_filter);
DumpGcMap(indent3_os, oat_method, code_item);
@@ -718,7 +863,7 @@ class OatDumper {
uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent1_os << StringPrintf("%p ", method_header);
}
*indent1_os << StringPrintf("(offset=0x%08x)\n", method_header_offset);
@@ -732,7 +877,7 @@ class OatDumper {
}
*indent2_os << "mapping_table: ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent2_os << StringPrintf("%p ", oat_method.GetMappingTable());
}
uint32_t mapping_table_offset = oat_method.GetMappingTableOffset();
@@ -744,14 +889,14 @@ class OatDumper {
mapping_table_offset, oat_file_.Size(),
oat_method.GetMappingTableOffsetOffset());
success = false;
- } else if (options_->dump_raw_mapping_table_) {
+ } else if (options_.dump_raw_mapping_table_) {
Indenter indent3_filter(indent2_os->rdbuf(), kIndentChar, kIndentBy1Count);
std::ostream indent3_os(&indent3_filter);
DumpMappingTable(indent3_os, oat_method);
}
*indent2_os << "vmap_table: ";
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent2_os << StringPrintf("%p ", oat_method.GetVmapTable());
}
uint32_t vmap_table_offset = oat_method.GetVmapTableOffset();
@@ -763,7 +908,7 @@ class OatDumper {
vmap_table_offset, oat_file_.Size(),
oat_method.GetVmapTableOffsetOffset());
success = false;
- } else if (options_->dump_vmap_) {
+ } else if (options_.dump_vmap_) {
DumpVmapData(*indent2_os, oat_method, code_item);
}
}
@@ -794,12 +939,10 @@ class OatDumper {
success = false;
} else {
const void* code = oat_method.GetQuickCode();
- uint32_t code_size = oat_method.GetQuickCodeSize();
- uint32_t code_offset = oat_method.GetCodeOffset();
uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
uint64_t aligned_code_end = aligned_code_begin + code_size;
- if (options_->absolute_addresses_) {
+ if (options_.absolute_addresses_) {
*indent1_os << StringPrintf("%p ", code);
}
*indent1_os << StringPrintf("(code_offset=0x%08x size_offset=0x%08x size=%u)%s\n",
@@ -820,7 +963,7 @@ class OatDumper {
aligned_code_end, oat_file_.Size(),
code_size, code_size_offset);
success = false;
- if (options_->disassemble_code_) {
+ if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
}
@@ -832,12 +975,12 @@ class OatDumper {
code_size, kMaxCodeSize,
code_size, code_size_offset);
success = false;
- if (options_->disassemble_code_) {
+ if (options_.disassemble_code_) {
if (code_size_offset + kPrologueBytes <= oat_file_.Size()) {
DumpCode(*indent2_os, verifier.get(), oat_method, code_item, true, kPrologueBytes);
}
}
- } else if (options_->disassemble_code_) {
+ } else if (options_.disassemble_code_) {
DumpCode(*indent2_os, verifier.get(), oat_method, code_item, !success, 0);
}
}
@@ -1175,7 +1318,8 @@ class OatDumper {
size_t i = 0;
while (i < code_item->insns_size_in_code_units_) {
const Instruction* instruction = Instruction::At(&code_item->insns_[i]);
- os << StringPrintf("0x%04zx: %s\n", i, instruction->DumpString(&dex_file).c_str());
+ os << StringPrintf("0x%04zx: ", i) << instruction->DumpHexLE(5)
+ << StringPrintf("\t| %s\n", instruction->DumpString(&dex_file).c_str());
i += instruction->SizeInCodeUnits();
}
}
@@ -1191,10 +1335,10 @@ class OatDumper {
StackHandleScope<1> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(
hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
- DCHECK(options_->class_loader_ != nullptr);
+ DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(soa.Self(), os, dex_method_idx, dex_file,
dex_cache,
- *options_->class_loader_,
+ *options_.class_loader_,
&class_def, code_item,
NullHandle<mirror::ArtMethod>(),
method_access_flags);
@@ -1237,7 +1381,8 @@ class OatDumper {
const OatFile& oat_file_;
const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
- const OatDumperOptions* options_;
+ const OatDumperOptions& options_;
+ uint32_t resolved_addr2instr_;
InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
@@ -1335,7 +1480,7 @@ class ImageDumper {
stats_.oat_file_bytes = oat_file->Size();
- oat_dumper_.reset(new OatDumper(*oat_file, oat_dumper_options_.release()));
+ oat_dumper_.reset(new OatDumper(*oat_file, *oat_dumper_options_));
for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
CHECK(oat_dex_file != nullptr);
@@ -2045,17 +2190,18 @@ static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOpti
soa.Decode<mirror::ClassLoader*>(class_loader));
options->class_loader_ = &loader_handle;
- OatDumper oat_dumper(*oat_file, options);
+ OatDumper oat_dumper(*oat_file, *options);
bool success = oat_dumper.Dump(*os);
return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
static int DumpOatWithoutRuntime(OatFile* oat_file, OatDumperOptions* options, std::ostream* os) {
+ CHECK(oat_file != nullptr && options != nullptr);
// No image = no class loader.
NullHandle<mirror::ClassLoader> null_class_loader;
options->class_loader_ = &null_class_loader;
- OatDumper oat_dumper(*oat_file, options);
+ OatDumper oat_dumper(*oat_file, *options);
bool success = oat_dumper.Dump(*os);
return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
@@ -2127,8 +2273,21 @@ struct OatdumpArgs : public CmdlineArgs {
} else if (option.starts_with("--symbolize=")) {
oat_filename_ = option.substr(strlen("--symbolize=")).data();
symbolize_ = true;
+ } else if (option.starts_with("--class-filter=")) {
+ class_filter_ = option.substr(strlen("--class-filter=")).data();
} else if (option.starts_with("--method-filter=")) {
method_filter_ = option.substr(strlen("--method-filter=")).data();
+ } else if (option.starts_with("--list-classes")) {
+ list_classes_ = true;
+ } else if (option.starts_with("--list-methods")) {
+ list_methods_ = true;
+ } else if (option.starts_with("--export-dex-to=")) {
+ export_dex_location_ = option.substr(strlen("--export-dex-to=")).data();
+ } else if (option.starts_with("--addr2instr=")) {
+ if (!ParseUint(option.substr(strlen("--addr2instr=")).data(), &addr2instr_)) {
+ *error_msg = "Address conversion failed";
+ return kParseError;
+ }
} else {
return kParseUnknownArgument;
}
@@ -2191,8 +2350,29 @@ struct OatdumpArgs : public CmdlineArgs {
" --no-disassemble may be used to disable disassembly.\n"
" Example: --no-disassemble\n"
"\n"
+ " --list-classes may be used to list target file classes (can be used with filters).\n"
+ " Example: --list-classes\n"
+ " Example: --list-classes --class-filter=com.example.foo\n"
+ "\n"
+ " --list-methods may be used to list target file methods (can be used with filters).\n"
+ " Example: --list-methods\n"
+ " Example: --list-methods --class-filter=com.example --method-filter=foo\n"
+ "\n"
+ " --symbolize=<file.oat>: output a copy of file.oat with elf symbols included.\n"
+ " Example: --symbolize=/system/framework/boot.oat\n"
+ "\n"
+ " --class-filter=<class name>: only dumps classes that contain the filter.\n"
+ " Example: --class-filter=com.example.foo\n"
+ "\n"
" --method-filter=<method name>: only dumps methods that contain the filter.\n"
" Example: --method-filter=foo\n"
+ "\n"
+ " --export-dex-to=<directory>: may be used to export oat embedded dex files.\n"
+ " Example: --export-dex-to=/data/local/tmp\n"
+ "\n"
+ " --addr2instr=<address>: output matching method disassembled code from relative\n"
+ " address (e.g. PC from crash dump)\n"
+ " Example: --addr2instr=0x00001a3b\n"
"\n";
return usage;
@@ -2200,6 +2380,7 @@ struct OatdumpArgs : public CmdlineArgs {
public:
const char* oat_filename_ = nullptr;
+ const char* class_filter_ = "";
const char* method_filter_ = "";
const char* image_location_ = nullptr;
std::string elf_filename_prefix_;
@@ -2208,6 +2389,10 @@ struct OatdumpArgs : public CmdlineArgs {
bool dump_vmap_ = true;
bool disassemble_code_ = true;
bool symbolize_ = false;
+ bool list_classes_ = false;
+ bool list_methods_ = false;
+ uint32_t addr2instr_ = 0;
+ const char* export_dex_location_ = nullptr;
};
struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
@@ -2223,7 +2408,12 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
args_->dump_vmap_,
args_->disassemble_code_,
absolute_addresses,
- args_->method_filter_));
+ args_->class_filter_,
+ args_->method_filter_,
+ args_->list_classes_,
+ args_->list_methods_,
+ args_->export_dex_location_,
+ args_->addr2instr_));
return (args_->boot_image_location_ != nullptr || args_->image_location_ != nullptr) &&
!args_->symbolize_;
@@ -2240,7 +2430,7 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
} else {
return DumpOat(nullptr,
args_->oat_filename_,
- oat_dumper_options_.release(),
+ oat_dumper_options_.get(),
args_->os_) == EXIT_SUCCESS;
}
}
@@ -2251,11 +2441,11 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
if (args_->oat_filename_ != nullptr) {
return DumpOat(runtime,
args_->oat_filename_,
- oat_dumper_options_.release(),
+ oat_dumper_options_.get(),
args_->os_) == EXIT_SUCCESS;
}
- return DumpImage(runtime, args_->image_location_, oat_dumper_options_.release(), args_->os_)
+ return DumpImage(runtime, args_->image_location_, oat_dumper_options_.get(), args_->os_)
== EXIT_SUCCESS;
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index ab346e3337..c5cf89014e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -44,6 +44,7 @@ LIBART_COMMON_SRC_FILES := \
elf_file.cc \
gc/allocator/dlmalloc.cc \
gc/allocator/rosalloc.cc \
+ gc/accounting/bitmap.cc \
gc/accounting/card_table.cc \
gc/accounting/heap_bitmap.cc \
gc/accounting/mod_union_table.cc \
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
new file mode 100644
index 0000000000..b1aa3ee63f
--- /dev/null
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
+
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+
+namespace art {
+
+/* Returns true if the entrypoint contains a direct reference to the
+   native implementation. The list is required because direct
+   entrypoints need additional handling during invocation. */
+static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
+ return
+ entrypoint == kQuickInstanceofNonTrivial ||
+ entrypoint == kQuickA64Load ||
+ entrypoint == kQuickA64Store ||
+ entrypoint == kQuickFmod ||
+ entrypoint == kQuickFmodf ||
+ entrypoint == kQuickMemcpy ||
+ entrypoint == kQuickL2d ||
+ entrypoint == kQuickL2f ||
+ entrypoint == kQuickD2iz ||
+ entrypoint == kQuickF2iz ||
+ entrypoint == kQuickD2l ||
+ entrypoint == kQuickF2l ||
+ entrypoint == kQuickLdiv ||
+ entrypoint == kQuickLmod ||
+ entrypoint == kQuickLmul ||
+ entrypoint == kQuickCmpgDouble ||
+ entrypoint == kQuickCmpgFloat ||
+ entrypoint == kQuickCmplDouble ||
+ entrypoint == kQuickCmplFloat;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 1a661c479f..e3ec27c100 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -23,6 +23,7 @@
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "entrypoints_direct_mips.h"
#include "interpreter/interpreter.h"
namespace art {
@@ -72,83 +73,155 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// Cast
qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+ static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
qpoints->pCheckCast = art_quick_check_cast;
+ static_assert(!IsDirectEntrypoint(kQuickCheckCast), "Non-direct C stub marked direct.");
// DexCache
qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+ static_assert(!IsDirectEntrypoint(kQuickInitializeStaticStorage),
+ "Non-direct C stub marked direct.");
qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+ static_assert(!IsDirectEntrypoint(kQuickInitializeTypeAndVerifyAccess),
+ "Non-direct C stub marked direct.");
qpoints->pInitializeType = art_quick_initialize_type;
+ static_assert(!IsDirectEntrypoint(kQuickInitializeType), "Non-direct C stub marked direct.");
qpoints->pResolveString = art_quick_resolve_string;
+ static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
// Field
qpoints->pSet8Instance = art_quick_set8_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet8Instance), "Non-direct C stub marked direct.");
qpoints->pSet8Static = art_quick_set8_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet8Static), "Non-direct C stub marked direct.");
qpoints->pSet16Instance = art_quick_set16_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet16Instance), "Non-direct C stub marked direct.");
qpoints->pSet16Static = art_quick_set16_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet16Static), "Non-direct C stub marked direct.");
qpoints->pSet32Instance = art_quick_set32_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet32Instance), "Non-direct C stub marked direct.");
qpoints->pSet32Static = art_quick_set32_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet32Static), "Non-direct C stub marked direct.");
qpoints->pSet64Instance = art_quick_set64_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSet64Instance), "Non-direct C stub marked direct.");
qpoints->pSet64Static = art_quick_set64_static;
+ static_assert(!IsDirectEntrypoint(kQuickSet64Static), "Non-direct C stub marked direct.");
qpoints->pSetObjInstance = art_quick_set_obj_instance;
+ static_assert(!IsDirectEntrypoint(kQuickSetObjInstance), "Non-direct C stub marked direct.");
qpoints->pSetObjStatic = art_quick_set_obj_static;
+ static_assert(!IsDirectEntrypoint(kQuickSetObjStatic), "Non-direct C stub marked direct.");
qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetBooleanInstance), "Non-direct C stub marked direct.");
qpoints->pGetByteInstance = art_quick_get_byte_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetByteInstance), "Non-direct C stub marked direct.");
qpoints->pGetCharInstance = art_quick_get_char_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetCharInstance), "Non-direct C stub marked direct.");
qpoints->pGetShortInstance = art_quick_get_short_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetShortInstance), "Non-direct C stub marked direct.");
qpoints->pGet32Instance = art_quick_get32_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGet32Instance), "Non-direct C stub marked direct.");
qpoints->pGet64Instance = art_quick_get64_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGet64Instance), "Non-direct C stub marked direct.");
qpoints->pGetObjInstance = art_quick_get_obj_instance;
+ static_assert(!IsDirectEntrypoint(kQuickGetObjInstance), "Non-direct C stub marked direct.");
qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetBooleanStatic), "Non-direct C stub marked direct.");
qpoints->pGetByteStatic = art_quick_get_byte_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetByteStatic), "Non-direct C stub marked direct.");
qpoints->pGetCharStatic = art_quick_get_char_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetCharStatic), "Non-direct C stub marked direct.");
qpoints->pGetShortStatic = art_quick_get_short_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetShortStatic), "Non-direct C stub marked direct.");
qpoints->pGet32Static = art_quick_get32_static;
+ static_assert(!IsDirectEntrypoint(kQuickGet32Static), "Non-direct C stub marked direct.");
qpoints->pGet64Static = art_quick_get64_static;
+ static_assert(!IsDirectEntrypoint(kQuickGet64Static), "Non-direct C stub marked direct.");
qpoints->pGetObjStatic = art_quick_get_obj_static;
+ static_assert(!IsDirectEntrypoint(kQuickGetObjStatic), "Non-direct C stub marked direct.");
// Array
qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check;
+ static_assert(!IsDirectEntrypoint(kQuickAputObjectWithNullAndBoundCheck),
+ "Non-direct C stub marked direct.");
qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check;
+ static_assert(!IsDirectEntrypoint(kQuickAputObjectWithBoundCheck),
+ "Non-direct C stub marked direct.");
qpoints->pAputObject = art_quick_aput_obj;
+ static_assert(!IsDirectEntrypoint(kQuickAputObject), "Non-direct C stub marked direct.");
qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
+ static_assert(!IsDirectEntrypoint(kQuickHandleFillArrayData), "Non-direct C stub marked direct.");
// JNI
qpoints->pJniMethodStart = JniMethodStart;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodStart), "Non-direct C stub marked direct.");
qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodStartSynchronized),
+ "Non-direct C stub marked direct.");
qpoints->pJniMethodEnd = JniMethodEnd;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEnd), "Non-direct C stub marked direct.");
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEndSynchronized),
+ "Non-direct C stub marked direct.");
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReference),
+ "Non-direct C stub marked direct.");
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+ static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReferenceSynchronized),
+ "Non-direct C stub marked direct.");
qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
+ static_assert(!IsDirectEntrypoint(kQuickQuickGenericJniTrampoline),
+ "Non-direct C stub marked direct.");
// Locks
qpoints->pLockObject = art_quick_lock_object;
+ static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct.");
qpoints->pUnlockObject = art_quick_unlock_object;
+ static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct.");
// Math
qpoints->pCmpgDouble = CmpgDouble;
+ static_assert(IsDirectEntrypoint(kQuickCmpgDouble), "Direct C stub not marked direct.");
qpoints->pCmpgFloat = CmpgFloat;
+ static_assert(IsDirectEntrypoint(kQuickCmpgFloat), "Direct C stub not marked direct.");
qpoints->pCmplDouble = CmplDouble;
+ static_assert(IsDirectEntrypoint(kQuickCmplDouble), "Direct C stub not marked direct.");
qpoints->pCmplFloat = CmplFloat;
+ static_assert(IsDirectEntrypoint(kQuickCmplFloat), "Direct C stub not marked direct.");
qpoints->pFmod = fmod;
+ static_assert(IsDirectEntrypoint(kQuickFmod), "Direct C stub not marked direct.");
qpoints->pL2d = art_l2d;
+ static_assert(IsDirectEntrypoint(kQuickL2d), "Direct C stub not marked direct.");
qpoints->pFmodf = fmodf;
+ static_assert(IsDirectEntrypoint(kQuickFmodf), "Direct C stub not marked direct.");
qpoints->pL2f = art_l2f;
+ static_assert(IsDirectEntrypoint(kQuickL2f), "Direct C stub not marked direct.");
qpoints->pD2iz = art_d2i;
+ static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
qpoints->pF2iz = art_f2i;
+ static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
qpoints->pIdivmod = NULL;
qpoints->pD2l = art_d2l;
+ static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
qpoints->pF2l = art_f2l;
+ static_assert(IsDirectEntrypoint(kQuickF2l), "Direct C stub not marked direct.");
qpoints->pLdiv = artLdiv;
+ static_assert(IsDirectEntrypoint(kQuickLdiv), "Direct C stub not marked direct.");
qpoints->pLmod = artLmod;
+ static_assert(IsDirectEntrypoint(kQuickLmod), "Direct C stub not marked direct.");
qpoints->pLmul = artLmul;
+ static_assert(IsDirectEntrypoint(kQuickLmul), "Direct C stub not marked direct.");
qpoints->pShlLong = art_quick_shl_long;
+ static_assert(!IsDirectEntrypoint(kQuickShlLong), "Non-direct C stub marked direct.");
qpoints->pShrLong = art_quick_shr_long;
+ static_assert(!IsDirectEntrypoint(kQuickShrLong), "Non-direct C stub marked direct.");
qpoints->pUshrLong = art_quick_ushr_long;
+ static_assert(!IsDirectEntrypoint(kQuickUshrLong), "Non-direct C stub marked direct.");
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
+ static_assert(!IsDirectEntrypoint(kQuickIndexOf), "Non-direct C stub marked direct.");
qpoints->pStringCompareTo = art_quick_string_compareto;
+ static_assert(!IsDirectEntrypoint(kQuickStringCompareTo), "Non-direct C stub marked direct.");
qpoints->pMemcpy = memcpy;
// Invocation
@@ -156,25 +229,44 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
+ "Non-direct C stub marked direct.");
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
+ static_assert(!IsDirectEntrypoint(kQuickTestSuspend), "Non-direct C stub marked direct.");
// Throws
qpoints->pDeliverException = art_quick_deliver_exception;
+ static_assert(!IsDirectEntrypoint(kQuickDeliverException), "Non-direct C stub marked direct.");
qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+ static_assert(!IsDirectEntrypoint(kQuickThrowArrayBounds), "Non-direct C stub marked direct.");
qpoints->pThrowDivZero = art_quick_throw_div_zero;
+ static_assert(!IsDirectEntrypoint(kQuickThrowDivZero), "Non-direct C stub marked direct.");
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+ static_assert(!IsDirectEntrypoint(kQuickThrowNoSuchMethod), "Non-direct C stub marked direct.");
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+ static_assert(!IsDirectEntrypoint(kQuickThrowNullPointer), "Non-direct C stub marked direct.");
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+ static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
// Atomic 64-bit load/store
qpoints->pA64Load = QuasiAtomic::Read64;
+ static_assert(IsDirectEntrypoint(kQuickA64Load), "Direct C stub not marked direct.");
qpoints->pA64Store = QuasiAtomic::Write64;
+ static_assert(IsDirectEntrypoint(kQuickA64Store), "Direct C stub not marked direct.");
};
} // namespace art
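
The MIPS entrypoint table above pairs every assignment with a compile-time check of the entrypoint's direct/non-direct classification, so a mismatch between the table and the stubs fails the build rather than misbehaving at runtime. A minimal, self-contained sketch of that pattern follows; the enum values and the predicate are invented for illustration and are not the ART definitions.

    // Hedged sketch of the static_assert classification pattern (hypothetical names).
    enum HypotheticalEntrypoint {
      kCmpgDouble,   // assumed here: a plain C helper, callable directly
      kLockObject,   // assumed here: an assembly stub that needs trampoline setup
    };

    constexpr bool IsDirectEntrypointSketch(HypotheticalEntrypoint e) {
      return e == kCmpgDouble;
    }

    static_assert(IsDirectEntrypointSketch(kCmpgDouble), "Direct C stub not marked direct.");
    static_assert(!IsDirectEntrypointSketch(kLockObject), "Non-direct C stub marked direct.");
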
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f28253acf6..ee5eefbc45 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2520,16 +2520,16 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
return GetQuickProxyInvokeHandler();
}
bool found;
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ if (found) {
+ auto* code = oat_method.GetQuickCode();
if (code != nullptr) {
return code;
}
}
- OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- if (found) {
- auto* code = oat_method.GetQuickCode();
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ auto* code = jit->GetCodeCache()->GetCodeFor(method);
if (code != nullptr) {
return code;
}
@@ -2545,6 +2545,11 @@ const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
return nullptr;
}
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ if (found) {
+ return oat_method.GetQuickCode();
+ }
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
auto* code = jit->GetCodeCache()->GetCodeFor(method);
@@ -2552,11 +2557,6 @@ const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
return code;
}
}
- bool found;
- OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- if (found) {
- return oat_method.GetQuickCode();
- }
return nullptr;
}
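
After this reordering, both GetQuickOatCodeFor and GetOatMethodQuickCodeFor consult the oat file first and fall back to the JIT code cache only when no AOT code is found. The standalone fragment below models just that precedence; the struct and helper are placeholders, not the ClassLinker API.

    // Sketch of the lookup order after the change: AOT (oat) code wins, JIT is the fallback.
    struct OatLookup { const void* code; bool found; };  // stand-in for an OatMethod lookup result

    inline const void* ResolveQuickCodeSketch(const OatLookup& oat, const void* jit_code) {
      if (oat.found && oat.code != nullptr) {
        return oat.code;   // AOT-compiled code takes precedence.
      }
      return jit_code;     // May be nullptr if the JIT has not compiled the method.
    }
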
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index a802759474..92e0f070bc 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -134,6 +134,23 @@ std::string Instruction::DumpHex(size_t code_units) const {
return os.str();
}
+std::string Instruction::DumpHexLE(size_t instr_code_units) const {
+ size_t inst_length = SizeInCodeUnits();
+ if (inst_length > instr_code_units) {
+ inst_length = instr_code_units;
+ }
+ std::ostringstream os;
+ const uint16_t* insn = reinterpret_cast<const uint16_t*>(this);
+ for (size_t i = 0; i < inst_length; i++) {
+ os << StringPrintf("%02x%02x", (uint8_t)(insn[i] & 0x00FF),
+ (uint8_t)((insn[i] & 0xFF00)>>8)) << " ";
+ }
+ for (size_t i = inst_length; i < instr_code_units; i++) {
+ os << " ";
+ }
+ return os.str();
+}
+
std::string Instruction::DumpString(const DexFile* file) const {
std::ostringstream os;
const char* opcode = kInstructionNames[Opcode()];
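
DumpHexLE prints each 16-bit code unit low byte first and pads short instructions to a fixed column width. A standalone approximation of that formatting, using snprintf instead of StringPrintf and invented names, looks like this:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    std::string DumpHexLESketch(const uint16_t* insns, size_t length, size_t width) {
      std::string out;
      char buf[8];
      for (size_t i = 0; i < length && i < width; ++i) {
        // Low byte first, then high byte, then a separating space.
        std::snprintf(buf, sizeof(buf), "%02x%02x ",
                      static_cast<unsigned>(insns[i] & 0x00FF),
                      static_cast<unsigned>((insns[i] & 0xFF00) >> 8));
        out += buf;
      }
      for (size_t i = length; i < width; ++i) {
        out += "     ";  // pad shorter instructions to the same column width
      }
      return out;
    }
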
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index af5d9d00d1..d3b9eb47df 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -525,6 +525,10 @@ class Instruction {
// Dump code_units worth of this instruction, padding to code_units for shorter instructions
std::string DumpHex(size_t code_units) const;
+ // Dump instr_code_units worth of this instruction in little-endian byte order, padding to
+ // instr_code_units for shorter instructions.
+ std::string DumpHexLE(size_t instr_code_units) const;
+
uint16_t Fetch16(size_t offset) const {
const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
return insns[offset];
diff --git a/runtime/gc/accounting/bitmap-inl.h b/runtime/gc/accounting/bitmap-inl.h
new file mode 100644
index 0000000000..e87a0c039b
--- /dev/null
+++ b/runtime/gc/accounting/bitmap-inl.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ACCOUNTING_BITMAP_INL_H_
+#define ART_RUNTIME_GC_ACCOUNTING_BITMAP_INL_H_
+
+#include "bitmap.h"
+
+#include <memory>
+
+#include "atomic.h"
+#include "base/logging.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+inline bool Bitmap::AtomicTestAndSetBit(uintptr_t bit_index) {
+ CheckValidBitIndex(bit_index);
+ const size_t word_index = BitIndexToWordIndex(bit_index);
+ const uintptr_t word_mask = BitIndexToMask(bit_index);
+ auto* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[word_index]);
+ uintptr_t old_word;
+ do {
+ old_word = atomic_entry->LoadRelaxed();
+ // Fast path: The bit is already set.
+ if ((old_word & word_mask) != 0) {
+ DCHECK(TestBit(bit_index));
+ return true;
+ }
+ } while (!atomic_entry->CompareExchangeWeakSequentiallyConsistent(old_word,
+ old_word | word_mask));
+ DCHECK(TestBit(bit_index));
+ return false;
+}
+
+inline bool Bitmap::TestBit(uintptr_t bit_index) const {
+ CheckValidBitIndex(bit_index);
+ return (bitmap_begin_[BitIndexToWordIndex(bit_index)] & BitIndexToMask(bit_index)) != 0;
+}
+
+template<typename Visitor>
+inline void Bitmap::VisitSetBits(uintptr_t bit_start, uintptr_t bit_end, const Visitor& visitor)
+ const {
+ DCHECK_LE(bit_start, bit_end);
+ CheckValidBitIndex(bit_start);
+ const uintptr_t index_start = BitIndexToWordIndex(bit_start);
+ const uintptr_t index_end = BitIndexToWordIndex(bit_end);
+ if (bit_start != bit_end) {
+ CheckValidBitIndex(bit_end - 1);
+ }
+
+ // Index(begin) ... Index(end)
+ // [xxxxx???][........][????yyyy]
+ // ^ ^
+ // | #---- Bit of visit_end
+ // #---- Bit of visit_begin
+ //
+
+ // Left edge.
+ uintptr_t left_edge = bitmap_begin_[index_start];
+ // Clear the lower bits that are not in range.
+ left_edge &= ~((static_cast<uintptr_t>(1) << (bit_start % kBitsPerBitmapWord)) - 1);
+
+ // Right edge. Either unique, or left_edge.
+ uintptr_t right_edge;
+
+ if (index_start < index_end) {
+ // Left edge != right edge.
+
+ // Traverse left edge.
+ if (left_edge != 0) {
+ const uintptr_t ptr_base = WordIndexToBitIndex(index_start);
+ do {
+ const size_t shift = CTZ(left_edge);
+ visitor(ptr_base + shift);
+ left_edge ^= static_cast<uintptr_t>(1) << shift;
+ } while (left_edge != 0);
+ }
+
+ // Traverse the middle, full part.
+ for (size_t i = index_start + 1; i < index_end; ++i) {
+ uintptr_t w = bitmap_begin_[i];
+ if (w != 0) {
+ const uintptr_t ptr_base = WordIndexToBitIndex(i);
+ do {
+ const size_t shift = CTZ(w);
+ visitor(ptr_base + shift);
+ w ^= static_cast<uintptr_t>(1) << shift;
+ } while (w != 0);
+ }
+ }
+
+ // Right edge is unique.
+ // But maybe we don't have anything to do: visit_end starts in a new word...
+ if (bit_end == 0) {
+ // Do not read memory, as it could be after the end of the bitmap.
+ right_edge = 0;
+ } else {
+ right_edge = bitmap_begin_[index_end];
+ }
+ } else {
+ right_edge = left_edge;
+ }
+
+ // Right edge handling.
+ right_edge &= ((static_cast<uintptr_t>(1) << (bit_end % kBitsPerBitmapWord)) - 1);
+ if (right_edge != 0) {
+ const uintptr_t ptr_base = WordIndexToBitIndex(index_end);
+ do {
+ const size_t shift = CTZ(right_edge);
+ visitor(ptr_base + shift);
+ right_edge ^= (static_cast<uintptr_t>(1)) << shift;
+ } while (right_edge != 0);
+ }
+}
+
+template<bool kSetBit>
+inline bool Bitmap::ModifyBit(uintptr_t bit_index) {
+ CheckValidBitIndex(bit_index);
+ const size_t word_index = BitIndexToWordIndex(bit_index);
+ const uintptr_t word_mask = BitIndexToMask(bit_index);
+ uintptr_t* address = &bitmap_begin_[word_index];
+ uintptr_t old_word = *address;
+ if (kSetBit) {
+ *address = old_word | word_mask;
+ } else {
+ *address = old_word & ~word_mask;
+ }
+ DCHECK_EQ(TestBit(bit_index), kSetBit);
+ return (old_word & word_mask) != 0;
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ACCOUNTING_BITMAP_INL_H_
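
VisitSetBits scans one word at a time and repeatedly strips the lowest set bit with a count-trailing-zeros step. The self-contained demo below shows only that inner loop, assuming the GCC/Clang builtin; it is an illustration, not the ART implementation.

    #include <cstdint>
    #include <cstdio>

    template <typename Visitor>
    void VisitWordBits(uintptr_t word, uintptr_t bit_base, const Visitor& visitor) {
      while (word != 0) {
        const size_t shift = static_cast<size_t>(__builtin_ctzll(word));  // lowest set bit
        visitor(bit_base + shift);
        word ^= static_cast<uintptr_t>(1) << shift;  // clear that bit and continue
      }
    }

    int main() {
      // Bits 0, 3 and 5 are set in 0x29.
      VisitWordBits(0x29u, 0, [](uintptr_t bit) {
        std::printf("bit %u set\n", static_cast<unsigned>(bit));
      });
      return 0;
    }
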
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
new file mode 100644
index 0000000000..de47f6094e
--- /dev/null
+++ b/runtime/gc/accounting/bitmap.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bitmap-inl.h"
+
+#include "card_table.h"
+#include "mem_map.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+Bitmap* Bitmap::CreateFromMemMap(MemMap* mem_map, size_t num_bits) {
+ CHECK(mem_map != nullptr);
+ return new Bitmap(mem_map, num_bits);
+}
+
+Bitmap::Bitmap(MemMap* mem_map, size_t bitmap_size)
+ : mem_map_(mem_map), bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map->Begin())),
+ bitmap_size_(bitmap_size) {
+ CHECK(bitmap_begin_ != nullptr);
+ CHECK_NE(bitmap_size, 0U);
+}
+
+MemMap* Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) {
+ const size_t bitmap_size = RoundUp(
+ RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);
+ std::string error_msg;
+ std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
+ PROT_READ | PROT_WRITE, false, &error_msg));
+ if (UNLIKELY(mem_map.get() == nullptr)) {
+ LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
+ return nullptr;
+ }
+ return mem_map.release();
+}
+
+Bitmap* Bitmap::Create(const std::string& name, size_t num_bits) {
+ auto* const mem_map = AllocateMemMap(name, num_bits);
+ if (mem_map == nullptr) {
+ return nullptr;
+ }
+ return CreateFromMemMap(mem_map, num_bits);
+}
+
+void Bitmap::Clear() {
+ if (bitmap_begin_ != nullptr) {
+ mem_map_->MadviseDontNeedAndZero();
+ }
+}
+
+void Bitmap::CopyFrom(Bitmap* source_bitmap) {
+ DCHECK_EQ(BitmapSize(), source_bitmap->BitmapSize());
+ std::copy(source_bitmap->Begin(),
+ source_bitmap->Begin() + BitmapSize() / kBitsPerBitmapWord, Begin());
+}
+
+template<size_t kAlignment>
+MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::Create(
+ const std::string& name, uintptr_t cover_begin, uintptr_t cover_end) {
+ CHECK_ALIGNED(cover_begin, kAlignment);
+ CHECK_ALIGNED(cover_end, kAlignment);
+ const size_t num_bits = (cover_end - cover_begin) / kAlignment;
+ auto* const mem_map = Bitmap::AllocateMemMap(name, num_bits);
+ return CreateFromMemMap(mem_map, cover_begin, num_bits);
+}
+
+template<size_t kAlignment>
+MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
+ MemMap* mem_map, uintptr_t begin, size_t num_bits) {
+ return new MemoryRangeBitmap(mem_map, begin, num_bits);
+}
+
+template class MemoryRangeBitmap<CardTable::kCardSize>;
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
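
AllocateMemMap sizes the mapping by rounding the bit count up to whole words and then to whole pages. The arithmetic in isolation is shown below; kPageSize is assumed to be 4096 for the example.

    #include <cstdint>
    #include <cstdio>

    constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * 8;
    constexpr size_t kPageSize = 4096;  // assumption for the example

    constexpr size_t RoundUp(size_t x, size_t n) { return ((x + n - 1) / n) * n; }

    constexpr size_t BitmapStorageBytes(size_t num_bits) {
      // Bits -> whole words -> bytes -> whole pages.
      return RoundUp(RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t),
                     kPageSize);
    }

    int main() {
      std::printf("100000 bits -> %zu bytes of backing store\n", BitmapStorageBytes(100000));
      return 0;
    }
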
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
new file mode 100644
index 0000000000..cf2c2938c4
--- /dev/null
+++ b/runtime/gc/accounting/bitmap.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ACCOUNTING_BITMAP_H_
+#define ART_RUNTIME_GC_ACCOUNTING_BITMAP_H_
+
+#include <limits.h>
+#include <stdint.h>
+#include <memory>
+#include <set>
+#include <vector>
+
+#include "base/mutex.h"
+#include "globals.h"
+#include "object_callbacks.h"
+
+namespace art {
+
+class MemMap;
+
+namespace gc {
+namespace accounting {
+
+// TODO: Use this code to implement SpaceBitmap.
+class Bitmap {
+ public:
+ // Create and initialize a bitmap with size num_bits. Storage is allocated with a MemMap.
+ static Bitmap* Create(const std::string& name, size_t num_bits);
+
+ // Initialize a bitmap using the provided mem_map as the backing storage. Takes ownership of the
+ // mem map; the resulting bitmap covers num_bits bits.
+ static Bitmap* CreateFromMemMap(MemMap* mem_map, size_t num_bits);
+
+ // offset is the difference from the base to an index.
+ static ALWAYS_INLINE constexpr size_t BitIndexToWordIndex(uintptr_t offset) {
+ return offset / kBitsPerBitmapWord;
+ }
+
+ template<typename T>
+ static ALWAYS_INLINE constexpr T WordIndexToBitIndex(T word_index) {
+ return static_cast<T>(word_index * kBitsPerBitmapWord);
+ }
+
+ static ALWAYS_INLINE constexpr uintptr_t BitIndexToMask(uintptr_t bit_index) {
+ return static_cast<uintptr_t>(1) << (bit_index % kBitsPerBitmapWord);
+ }
+
+ ALWAYS_INLINE bool SetBit(size_t bit_index) {
+ return ModifyBit<true>(bit_index);
+ }
+
+ ALWAYS_INLINE bool ClearBit(size_t bit_index) {
+ return ModifyBit<false>(bit_index);
+ }
+
+ ALWAYS_INLINE bool TestBit(size_t bit_index) const;
+
+ // Returns true if the bit_index was previously set.
+ ALWAYS_INLINE bool AtomicTestAndSetBit(size_t bit_index);
+
+ // Fill the bitmap with zeroes. Returns the bitmap's memory to the system as a side-effect.
+ void Clear();
+
+ // Visit all set bits in the range [visit_begin, visit_end), where visit_begin and visit_end are
+ // bit indices. The visitor is called with the index of each set bit.
+ template <typename Visitor>
+ void VisitSetBits(uintptr_t visit_begin, size_t visit_end, const Visitor& visitor) const;
+
+ void CopyFrom(Bitmap* source_bitmap);
+
+ // Starting address of our internal storage.
+ uintptr_t* Begin() {
+ return bitmap_begin_;
+ }
+
+ // Size of our bitmap in bits.
+ size_t BitmapSize() const {
+ return bitmap_size_;
+ }
+
+ // Check that a bit index is valid with a DCHECK.
+ ALWAYS_INLINE void CheckValidBitIndex(size_t bit_index) const {
+ DCHECK_LT(bit_index, BitmapSize());
+ }
+
+ std::string Dump() const;
+
+ protected:
+ static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
+
+ Bitmap(MemMap* mem_map, size_t bitmap_size);
+
+ // Allocate the mem-map for a bitmap based on how many bits are required.
+ static MemMap* AllocateMemMap(const std::string& name, size_t num_bits);
+
+ template<bool kSetBit>
+ ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
+
+ // Backing storage for bitmap.
+ std::unique_ptr<MemMap> mem_map_;
+
+ // This bitmap itself, word sized for efficiency in scanning.
+ uintptr_t* const bitmap_begin_;
+
+ // Number of bits in the bitmap.
+ const size_t bitmap_size_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Bitmap);
+};
+
+// One bit per kAlignment in the range [start, end).
+template<size_t kAlignment>
+class MemoryRangeBitmap : public Bitmap {
+ public:
+ static MemoryRangeBitmap* Create(const std::string& name, uintptr_t cover_begin,
+ uintptr_t cover_end);
+ static MemoryRangeBitmap* CreateFromMemMap(MemMap* mem_map, uintptr_t cover_begin,
+ size_t num_bits);
+
+ // Beginning of the memory range that the bitmap covers.
+ ALWAYS_INLINE uintptr_t CoverBegin() const {
+ return cover_begin_;
+ }
+
+ // End of the memory range that the bitmap covers.
+ ALWAYS_INLINE uintptr_t CoverEnd() const {
+ return cover_end_;
+ }
+
+ // Return the address associated with a bit index.
+ ALWAYS_INLINE uintptr_t AddrFromBitIndex(size_t bit_index) const {
+ const uintptr_t addr = CoverBegin() + bit_index * kAlignment;
+ DCHECK_EQ(BitIndexFromAddr(addr), bit_index);
+ return addr;
+ }
+
+ // Return the bit index associated with an address.
+ ALWAYS_INLINE uintptr_t BitIndexFromAddr(uintptr_t addr) const {
+ DCHECK(HasAddress(addr)) << CoverBegin() << " <= " << addr << " < " << CoverEnd();
+ return (addr - CoverBegin()) / kAlignment;
+ }
+
+ ALWAYS_INLINE bool HasAddress(const uintptr_t addr) const {
+ return cover_begin_ <= addr && addr < cover_end_;
+ }
+
+ ALWAYS_INLINE bool Set(uintptr_t addr) {
+ return SetBit(BitIndexFromAddr(addr));
+ }
+
+ ALWAYS_INLINE bool Clear(size_t addr) {
+ return ClearBit(BitIndexFromAddr(addr));
+ }
+
+ ALWAYS_INLINE bool Test(size_t addr) const {
+ return TestBit(BitIndexFromAddr(addr));
+ }
+
+ // Returns true if the bit for the address was previously set.
+ ALWAYS_INLINE bool AtomicTestAndSet(size_t addr) {
+ return AtomicTestAndSetBit(BitIndexFromAddr(addr));
+ }
+
+ private:
+ MemoryRangeBitmap(MemMap* mem_map, uintptr_t begin, size_t num_bits)
+ : Bitmap(mem_map, num_bits), cover_begin_(begin), cover_end_(begin + kAlignment * num_bits) {
+ }
+
+ uintptr_t const cover_begin_;
+ uintptr_t const cover_end_;
+};
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ACCOUNTING_BITMAP_H_
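
MemoryRangeBitmap maps an address to a bit index by dividing its offset from cover_begin by kAlignment, and maps back by multiplying. A minimal model of that index math, with constants made up for the example, is:

    #include <cassert>
    #include <cstdint>

    constexpr size_t kAlignment = 128;  // e.g. a card size; illustrative only

    constexpr uintptr_t BitIndexFromAddr(uintptr_t cover_begin, uintptr_t addr) {
      return (addr - cover_begin) / kAlignment;
    }

    constexpr uintptr_t AddrFromBitIndex(uintptr_t cover_begin, uintptr_t bit_index) {
      return cover_begin + bit_index * kAlignment;
    }

    int main() {
      const uintptr_t begin = 0x70000000;
      assert(BitIndexFromAddr(begin, begin + 3 * kAlignment + 5) == 3);  // rounds down within a slot
      assert(AddrFromBitIndex(begin, 3) == begin + 3 * kAlignment);
      return 0;
    }
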
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index b1ccc0bfdf..a3fac58e8a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -19,6 +19,7 @@
#include <memory>
#include "base/stl_util.h"
+#include "bitmap-inl.h"
#include "card_table-inl.h"
#include "heap_bitmap.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -40,14 +41,14 @@ namespace art {
namespace gc {
namespace accounting {
-class ModUnionClearCardSetVisitor {
+class ModUnionAddToCardSetVisitor {
public:
- explicit ModUnionClearCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
- : cleared_cards_(cleared_cards) {
+ explicit ModUnionAddToCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
+ : cleared_cards_(cleared_cards) {
}
- inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
- UNUSED(new_value);
+ inline void operator()(uint8_t* card, uint8_t expected_value,
+ uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -57,18 +58,38 @@ class ModUnionClearCardSetVisitor {
ModUnionTable::CardSet* const cleared_cards_;
};
-class ModUnionClearCardVisitor {
+class ModUnionAddToCardBitmapVisitor {
public:
- explicit ModUnionClearCardVisitor(std::vector<uint8_t*>* cleared_cards)
- : cleared_cards_(cleared_cards) {
+ explicit ModUnionAddToCardBitmapVisitor(ModUnionTable::CardBitmap* bitmap,
+ CardTable* card_table)
+ : bitmap_(bitmap), card_table_(card_table) {
}
- void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
- UNUSED(new_card);
+ inline void operator()(uint8_t* card, uint8_t expected_value,
+ uint8_t new_value ATTRIBUTE_UNUSED) const {
+ if (expected_value == CardTable::kCardDirty) {
+ // We want the address the card represents, not the address of the card.
+ bitmap_->Set(reinterpret_cast<uintptr_t>(card_table_->AddrFromCard(card)));
+ }
+ }
+
+ private:
+ ModUnionTable::CardBitmap* const bitmap_;
+ CardTable* const card_table_;
+};
+
+class ModUnionAddToCardVectorVisitor {
+ public:
+ explicit ModUnionAddToCardVectorVisitor(std::vector<uint8_t*>* cleared_cards)
+ : cleared_cards_(cleared_cards) {
+ }
+
+ void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card ATTRIBUTE_UNUSED) const {
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
}
+
private:
std::vector<uint8_t*>* const cleared_cards_;
};
@@ -77,19 +98,19 @@ class ModUnionUpdateObjectReferencesVisitor {
public:
ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
space::ContinuousSpace* from_space,
- space::ImageSpace* image_space,
+ space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : callback_(callback), arg_(arg), from_space_(from_space), image_space_(image_space),
+ : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
- mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
+ mirror::HeapReference<Object>* const obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = obj_ptr->AsMirrorPtr();
- if (ref != nullptr && !from_space_->HasAddress(ref) && !image_space_->HasAddress(ref)) {
+ if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
*contains_reference_to_other_space_ = true;
callback_(obj_ptr, arg_);
}
@@ -97,27 +118,30 @@ class ModUnionUpdateObjectReferencesVisitor {
private:
MarkHeapReferenceCallback* const callback_;
- void* arg_;
+ void* const arg_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
- space::ImageSpace* const image_space_;
+ space::ContinuousSpace* const immune_space_;
// Set if we have any references to another space.
bool* const contains_reference_to_other_space_;
};
class ModUnionScanImageRootVisitor {
public:
+ // Immune space is any other space which we don't care about references to. Currently this is
+ // the image space in the case of the zygote mod union table.
ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
- space::ContinuousSpace* from_space, space::ImageSpace* image_space,
+ space::ContinuousSpace* from_space,
+ space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : callback_(callback), arg_(arg), from_space_(from_space), image_space_(image_space),
+ : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {}
void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(root != NULL);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, image_space_,
+ DCHECK(root != nullptr);
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, immune_space_,
contains_reference_to_other_space_);
root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
}
@@ -127,14 +151,14 @@ class ModUnionScanImageRootVisitor {
void* const arg_;
// Space which we are scanning
space::ContinuousSpace* const from_space_;
- space::ImageSpace* const image_space_;
+ space::ContinuousSpace* const immune_space_;
// Set if we have any references to another space.
bool* const contains_reference_to_other_space_;
};
void ModUnionTableReferenceCache::ClearCards() {
CardTable* card_table = GetHeap()->GetCardTable();
- ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+ ModUnionAddToCardSetVisitor visitor(&cleared_cards_);
  // Clear dirty cards in this space and update the corresponding mod-union bits.
card_table->ModifyCardsAtomic(space_->Begin(), space_->End(), AgeCardVisitor(), visitor);
}
@@ -324,9 +348,54 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallb
}
}
+ModUnionTableCardCache::ModUnionTableCardCache(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space)
+ : ModUnionTable(name, heap, space) {
+ // Normally here we could use End() instead of Limit(), but for testing we may want to have a
+ // mod-union table for a space which can still grow.
+ if (!space->IsImageSpace()) {
+ CHECK_ALIGNED(reinterpret_cast<uintptr_t>(space->Limit()), CardTable::kCardSize);
+ }
+ card_bitmap_.reset(CardBitmap::Create(
+ "mod union bitmap", reinterpret_cast<uintptr_t>(space->Begin()),
+ RoundUp(reinterpret_cast<uintptr_t>(space->Limit()), CardTable::kCardSize)));
+}
+
+class CardBitVisitor {
+ public:
+ CardBitVisitor(MarkHeapReferenceCallback* callback, void* arg, space::ContinuousSpace* space,
+ space::ContinuousSpace* immune_space, ModUnionTable::CardBitmap* card_bitmap)
+ : callback_(callback), arg_(arg), space_(space), immune_space_(immune_space),
+ bitmap_(space->GetLiveBitmap()), card_bitmap_(card_bitmap) {
+ DCHECK(immune_space_ != nullptr);
+ }
+
+ void operator()(size_t bit_index) const {
+ const uintptr_t start = card_bitmap_->AddrFromBitIndex(bit_index);
+ DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start)))
+ << start << " " << *space_;
+ bool reference_to_other_space = false;
+ ModUnionScanImageRootVisitor scan_visitor(callback_, arg_, space_, immune_space_,
+ &reference_to_other_space);
+ bitmap_->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
+ if (!reference_to_other_space) {
+ // No non null reference to another space, clear the bit.
+ card_bitmap_->ClearBit(bit_index);
+ }
+ }
+
+ private:
+ MarkHeapReferenceCallback* const callback_;
+ void* const arg_;
+ space::ContinuousSpace* const space_;
+ space::ContinuousSpace* const immune_space_;
+ ContinuousSpaceBitmap* const bitmap_;
+ ModUnionTable::CardBitmap* const card_bitmap_;
+};
+
void ModUnionTableCardCache::ClearCards() {
- CardTable* card_table = GetHeap()->GetCardTable();
- ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+ CardTable* const card_table = GetHeap()->GetCardTable();
+ ModUnionAddToCardBitmapVisitor visitor(card_bitmap_.get(), card_table);
  // Clear dirty cards in this space and update the corresponding mod-union bits.
card_table->ModifyCardsAtomic(space_->Begin(), space_->End(), AgeCardVisitor(), visitor);
}
@@ -334,46 +403,51 @@ void ModUnionTableCardCache::ClearCards() {
// Mark all references to the alloc space(s).
void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void* arg) {
- CardTable* card_table = heap_->GetCardTable();
- space::ImageSpace* image_space = heap_->GetImageSpace();
- ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
- bool reference_to_other_space = false;
- ModUnionScanImageRootVisitor scan_visitor(callback, arg, space_, image_space,
- &reference_to_other_space);
- for (auto it = cleared_cards_.begin(), end = cleared_cards_.end(); it != end; ) {
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(*it));
- DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
- reference_to_other_space = false;
- bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
- if (!reference_to_other_space) {
- // No non null reference to another space, remove the card.
- it = cleared_cards_.erase(it);
- } else {
- ++it;
- }
- }
+ auto* image_space = heap_->GetImageSpace();
+ // If we don't have an image space, just pass in space_ as the immune space. Pass in the same
+ // space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor.
+ CardBitVisitor visitor(callback, arg, space_, image_space != nullptr ? image_space : space_,
+ card_bitmap_.get());
+ card_bitmap_->VisitSetBits(
+ 0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, visitor);
}
void ModUnionTableCardCache::Dump(std::ostream& os) {
- CardTable* card_table = heap_->GetCardTable();
os << "ModUnionTable dirty cards: [";
- for (const uint8_t* card_addr : cleared_cards_) {
- auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
- auto end = start + CardTable::kCardSize;
- os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
+ // TODO: Find cleaner way of doing this.
+ for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ addr += CardTable::kCardSize) {
+ if (card_bitmap_->Test(reinterpret_cast<uintptr_t>(addr))) {
+ os << reinterpret_cast<void*>(addr) << "-"
+ << reinterpret_cast<void*>(addr + CardTable::kCardSize) << "\n";
+ }
}
os << "]";
}
void ModUnionTableCardCache::SetCards() {
- CardTable* card_table = heap_->GetCardTable();
+ // Only clean up to the end since there cannot be any objects past the End() of the space.
for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
addr += CardTable::kCardSize) {
- cleared_cards_.insert(card_table->CardFromAddr(addr));
+ card_bitmap_->Set(reinterpret_cast<uintptr_t>(addr));
}
}
+bool ModUnionTableCardCache::ContainsCardFor(uintptr_t addr) {
+ return card_bitmap_->Test(addr);
+}
+
void ModUnionTableReferenceCache::SetCards() {
+ for (uint8_t* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ addr += CardTable::kCardSize) {
+ cleared_cards_.insert(heap_->GetCardTable()->CardFromAddr(reinterpret_cast<void*>(addr)));
+ }
+}
+
+bool ModUnionTableReferenceCache::ContainsCardFor(uintptr_t addr) {
+ auto* card_ptr = heap_->GetCardTable()->CardFromAddr(reinterpret_cast<void*>(addr));
+ return cleared_cards_.find(card_ptr) != cleared_cards_.end() ||
+ references_.find(card_ptr) != references_.end();
}
} // namespace accounting
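
The renamed visitors above all implement the same contract expected by CardTable::ModifyCardsAtomic: they are called per card with the value observed before aging and the new value, and they record the card only if it was dirty. A standalone model of that contract is sketched below; the kCardDirty value is assumed for the example and the class is not the ART visitor.

    #include <cstdint>
    #include <vector>

    constexpr uint8_t kCardDirty = 0x70;  // assumed value for the example

    class AddDirtyCardToVector {
     public:
      explicit AddDirtyCardToVector(std::vector<uint8_t*>* cards) : cards_(cards) {}

      // Called per card with the value observed before aging; the new value is unused here.
      void operator()(uint8_t* card, uint8_t expected_value, uint8_t /*new_value*/) const {
        if (expected_value == kCardDirty) {
          cards_->push_back(card);
        }
      }

     private:
      std::vector<uint8_t*>* const cards_;
    };
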
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index d6342cf057..2e232ca6a8 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -17,7 +17,9 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_MOD_UNION_TABLE_H_
#define ART_RUNTIME_GC_ACCOUNTING_MOD_UNION_TABLE_H_
+#include "bitmap.h"
#include "base/allocator.h"
+#include "card_table.h"
#include "globals.h"
#include "object_callbacks.h"
#include "safe_map.h"
@@ -44,6 +46,7 @@ class Heap;
namespace accounting {
+class Bitmap;
class HeapBitmap;
// The mod-union table is the union of modified cards. It is used to allow the card table to be
@@ -52,6 +55,7 @@ class ModUnionTable {
public:
typedef std::set<uint8_t*, std::less<uint8_t*>,
TrackingAllocator<uint8_t*, kAllocatorTagModUnionCardSet>> CardSet;
+ typedef MemoryRangeBitmap<CardTable::kCardSize> CardBitmap;
explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name),
@@ -80,6 +84,10 @@ class ModUnionTable {
// bitmap or not.
virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+ // Returns true if a card is marked inside the mod union table. Used for testing. The address
+ // doesn't need to be aligned.
+ virtual bool ContainsCardFor(uintptr_t addr) = 0;
+
virtual void Dump(std::ostream& os) = 0;
space::ContinuousSpace* GetSpace() {
return space_;
@@ -106,25 +114,27 @@ class ModUnionTableReferenceCache : public ModUnionTable {
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
- void ClearCards();
+ void ClearCards() OVERRIDE;
// Update table based on cleared cards and mark all references to the other spaces.
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg)
+ void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
- void Verify()
+ void Verify() OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
virtual bool ShouldAddReference(const mirror::Object* ref) const = 0;
- void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+
+ virtual void Dump(std::ostream& os) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetCards() OVERRIDE;
+ virtual void SetCards() OVERRIDE;
protected:
// Cleared card array, used to update the mod-union table.
@@ -138,28 +148,32 @@ class ModUnionTableReferenceCache : public ModUnionTable {
// Card caching implementation. Keeps track of which cards we cleared and only this information.
class ModUnionTableCardCache : public ModUnionTable {
public:
- explicit ModUnionTableCardCache(const std::string& name, Heap* heap, space::ContinuousSpace* space)
- : ModUnionTable(name, heap, space) {}
+ // Note: There is an assumption that the space End() doesn't change.
+ explicit ModUnionTableCardCache(const std::string& name, Heap* heap,
+ space::ContinuousSpace* space);
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
- void ClearCards();
+ virtual void ClearCards() OVERRIDE;
// Mark all references to the alloc space(s).
- void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg)
+ virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Nothing to verify.
- void Verify() {}
+ virtual void Verify() OVERRIDE {}
- void Dump(std::ostream& os);
+ virtual void Dump(std::ostream& os) OVERRIDE;
- void SetCards() OVERRIDE;
+ virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
+
+ // Sets all the cards in the mod union table to be marked.
+ virtual void SetCards() OVERRIDE;
protected:
- // Cleared card array, used to update the mod-union table.
- CardSet cleared_cards_;
+ // Cleared card bitmap, used to update the mod-union table.
+ std::unique_ptr<CardBitmap> card_bitmap_;
};
} // namespace accounting
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
new file mode 100644
index 0000000000..87ce166147
--- /dev/null
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mod_union_table-inl.h"
+
+#include "common_runtime_test.h"
+#include "gc/space/space-inl.h"
+#include "mirror/array-inl.h"
+#include "space_bitmap-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+class ModUnionTableFactory {
+ public:
+ enum TableType {
+ kTableTypeCardCache,
+ kTableTypeReferenceCache,
+ kTableTypeCount, // Number of values in the enum.
+ };
+
+ // Target space is ignored for the card cache implementation.
+ static ModUnionTable* Create(
+ TableType type, space::ContinuousSpace* space, space::ContinuousSpace* target_space);
+};
+
+class ModUnionTableTest : public CommonRuntimeTest {
+ public:
+ ModUnionTableTest() : java_lang_object_array_(nullptr) {
+ }
+ mirror::ObjectArray<mirror::Object>* AllocObjectArray(
+ Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ auto* klass = GetObjectArrayClass(self, space);
+ const size_t size = ComputeArraySize(self, klass, component_count, 2);
+ size_t bytes_allocated = 0;
+ auto* obj = down_cast<mirror::ObjectArray<mirror::Object>*>(
+ space->Alloc(self, size, &bytes_allocated, nullptr));
+ if (obj != nullptr) {
+ obj->SetClass(klass);
+ obj->SetLength(static_cast<int32_t>(component_count));
+ space->GetLiveBitmap()->Set(obj);
+ EXPECT_GE(bytes_allocated, size);
+ }
+ return obj;
+ }
+ void ResetClass() {
+ java_lang_object_array_ = nullptr;
+ }
+ void RunTest(ModUnionTableFactory::TableType type);
+
+ private:
+ mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (java_lang_object_array_ == nullptr) {
+ java_lang_object_array_ =
+ Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass);
+ // Since the test doesn't have an image, the class of the object array keeps cards live
+ // inside the card cache mod-union table and causes the check
+ // ASSERT_FALSE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj3)));
+ // to fail since the class ends up keeping the card dirty. To get around this, we make a fake
+ // copy of the class in the same space that we are allocating in.
+ DCHECK(java_lang_object_array_ != nullptr);
+ const size_t class_size = java_lang_object_array_->GetClassSize();
+ size_t bytes_allocated = 0;
+ auto* klass = down_cast<mirror::Class*>(space->Alloc(self, class_size, &bytes_allocated,
+ nullptr));
+ DCHECK(klass != nullptr);
+ memcpy(klass, java_lang_object_array_, class_size);
+ Runtime::Current()->GetHeap()->GetCardTable()->MarkCard(klass);
+ java_lang_object_array_ = klass;
+ }
+ return java_lang_object_array_;
+ }
+ mirror::Class* java_lang_object_array_;
+};
+
+// Collect visited objects into container.
+static void CollectVisitedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(ref != nullptr);
+ DCHECK(arg != nullptr);
+ reinterpret_cast<std::set<mirror::Object*>*>(arg)->insert(ref->AsMirrorPtr());
+}
+
+// A mod union table that only holds references to a specified target space.
+class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
+ public:
+ explicit ModUnionTableRefCacheToSpace(
+ const std::string& name, Heap* heap, space::ContinuousSpace* space,
+ space::ContinuousSpace* target_space)
+ : ModUnionTableReferenceCache(name, heap, space), target_space_(target_space) {}
+
+ bool ShouldAddReference(const mirror::Object* ref) const OVERRIDE {
+ return target_space_->HasAddress(ref);
+ }
+
+ private:
+ space::ContinuousSpace* const target_space_;
+};
+
+std::ostream& operator<<(std::ostream& oss, ModUnionTableFactory::TableType type) {
+ switch (type) {
+ case ModUnionTableFactory::kTableTypeCardCache: {
+ oss << "CardCache";
+ break;
+ }
+ case ModUnionTableFactory::kTableTypeReferenceCache: {
+ oss << "ReferenceCache";
+ break;
+ }
+ default: {
+ UNIMPLEMENTED(FATAL) << static_cast<size_t>(type);
+ }
+ }
+ return oss;
+}
+
+ModUnionTable* ModUnionTableFactory::Create(
+ TableType type, space::ContinuousSpace* space, space::ContinuousSpace* target_space) {
+ std::ostringstream name;
+ name << "Mod union table: " << type;
+ switch (type) {
+ case kTableTypeCardCache: {
+ return new ModUnionTableCardCache(name.str(), Runtime::Current()->GetHeap(), space);
+ }
+ case kTableTypeReferenceCache: {
+ return new ModUnionTableRefCacheToSpace(name.str(), Runtime::Current()->GetHeap(), space,
+ target_space);
+ }
+ default: {
+ UNIMPLEMENTED(FATAL) << "Invalid type " << type;
+ }
+ }
+ return nullptr;
+}
+
+TEST_F(ModUnionTableTest, TestCardCache) {
+ RunTest(ModUnionTableFactory::kTableTypeCardCache);
+}
+
+TEST_F(ModUnionTableTest, TestReferenceCache) {
+ RunTest(ModUnionTableFactory::kTableTypeReferenceCache);
+}
+
+void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
+ Thread* const self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ Runtime* const runtime = Runtime::Current();
+ gc::Heap* const heap = runtime->GetHeap();
+ // Use the non moving space since moving GCs don't necessarily have a primary free list space.
+ auto* space = heap->GetNonMovingSpace();
+ ResetClass();
+ // Create another space that we can put references in.
+ std::unique_ptr<space::DlMallocSpace> other_space(space::DlMallocSpace::Create(
+ "other space", 128 * KB, 4 * MB, 4 * MB, nullptr, false));
+ ASSERT_TRUE(other_space.get() != nullptr);
+ heap->AddSpace(other_space.get());
+ std::unique_ptr<ModUnionTable> table(ModUnionTableFactory::Create(
+ type, space, other_space.get()));
+ ASSERT_TRUE(table.get() != nullptr);
+ // Create some fake objects in the non moving space and dirty their cards.
+ auto* obj1 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj1 != nullptr);
+ auto* obj2 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj2 != nullptr);
+ auto* obj3 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj3 != nullptr);
+ auto* obj4 = AllocObjectArray(self, space, CardTable::kCardSize);
+ ASSERT_TRUE(obj4 != nullptr);
+ // Dirty some cards.
+ obj1->Set(0, obj2);
+ obj2->Set(0, obj3);
+ obj3->Set(0, obj4);
+ obj4->Set(0, obj1);
+ // Dirty some more cards to objects in another space.
+ auto* other_space_ref1 = AllocObjectArray(self, other_space.get(), CardTable::kCardSize);
+ ASSERT_TRUE(other_space_ref1 != nullptr);
+ auto* other_space_ref2 = AllocObjectArray(self, other_space.get(), CardTable::kCardSize);
+ ASSERT_TRUE(other_space_ref2 != nullptr);
+ obj1->Set(1, other_space_ref1);
+ obj2->Set(3, other_space_ref2);
+ table->ClearCards();
+ std::set<mirror::Object*> visited;
+ table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited);
+ // Check that we visited all the references in other spaces only.
+ ASSERT_GE(visited.size(), 2u);
+ ASSERT_TRUE(visited.find(other_space_ref1) != visited.end());
+ ASSERT_TRUE(visited.find(other_space_ref2) != visited.end());
+ // Verify that all the other references were visited.
+ // obj1, obj2 cards should still be in mod union table since they have references to other
+ // spaces.
+ ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj1)));
+ ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj2)));
+ // obj3, obj4 don't have a reference to any object in the other space, their cards should have
+ // been removed from the mod union table during UpdateAndMarkReferences.
+ ASSERT_FALSE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj3)));
+ ASSERT_FALSE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(obj4)));
+ {
+ // Currently no-op, make sure it still works however.
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ table->Verify();
+ }
+ // Verify that dump doesn't crash.
+ std::ostringstream oss;
+ table->Dump(oss);
+ // Set all the cards, then verify.
+ table->SetCards();
+ // TODO: Check that the cards are actually set.
+ for (auto* ptr = space->Begin(); ptr < AlignUp(space->End(), CardTable::kCardSize);
+ ptr += CardTable::kCardSize) {
+ ASSERT_TRUE(table->ContainsCardFor(reinterpret_cast<uintptr_t>(ptr)));
+ }
+ // Visit again and make sure the cards got cleared back to their sane state.
+ visited.clear();
+ table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited);
+ // Verify that the dump matches what we saw earlier.
+ std::ostringstream oss2;
+ table->Dump(oss2);
+ ASSERT_EQ(oss.str(), oss2.str());
+ // Remove the space we added so it doesn't persist to the next test.
+ heap->RemoveSpace(other_space.get());
+}
+
+} // namespace accounting
+} // namespace gc
+} // namespace art
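
The test's CollectVisitedCallback uses the common callback-plus-void*-argument idiom: UpdateAndMarkReferences hands each visited reference to a plain function, which recovers its container from the opaque argument. A simplified, self-contained model of that idiom (not the ART MarkHeapReferenceCallback signature) is:

    #include <cstdio>
    #include <set>

    struct Object {};  // placeholder for mirror::Object

    static void CollectVisited(Object* ref, void* arg) {
      // The opaque argument is known by convention to be a std::set<Object*>*.
      static_cast<std::set<Object*>*>(arg)->insert(ref);
    }

    int main() {
      std::set<Object*> visited;
      Object a, b;
      CollectVisited(&a, &visited);
      CollectVisited(&b, &visited);
      std::printf("visited %zu objects\n", visited.size());
      return 0;
    }
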
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 7bc83effd3..d6b3ed4f26 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -188,13 +188,6 @@ class SpaceBitmap {
std::string Dump() const;
- const void* GetObjectWordAddress(const mirror::Object* obj) const {
- uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
- const uintptr_t offset = addr - heap_begin_;
- const size_t index = OffsetToIndex(offset);
- return &bitmap_begin_[index];
- }
-
private:
// TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
// however, we document that this is expected on heap_end_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9e159c2533..a4bc941a60 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -398,14 +398,14 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
rb_table_.reset(new accounting::ReadBarrierTable());
DCHECK(rb_table_->IsAllCleared());
}
-
- // Card cache for now since it makes it easier for us to update the references to the copying
- // spaces.
- accounting::ModUnionTable* mod_union_table =
- new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
- GetImageSpace());
- CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
- AddModUnionTable(mod_union_table);
+ if (GetImageSpace() != nullptr) {
+    // Don't add the image mod union table if we are running without an image, as this can crash
+    // if we use the CardCache implementation.
+ accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableToZygoteAllocspace(
+ "Image mod-union table", this, GetImageSpace());
+ CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
+ AddModUnionTable(mod_union_table);
+ }
if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
accounting::RememberedSet* non_moving_space_rem_set =
new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
@@ -1675,7 +1675,8 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
AddSpace(to_space);
// Make sure that we will have enough room to copy.
CHECK_GE(to_space->GetFootprintLimit(), from_space->GetFootprintLimit());
- Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
+ collector::GarbageCollector* collector = Compact(to_space, from_space,
+ kGcCauseHomogeneousSpaceCompact);
const uint64_t space_size_after_compaction = to_space->Size();
main_space_ = to_space;
main_space_backup_.reset(from_space);
@@ -1694,6 +1695,7 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
// Finish GC.
reference_processor_.EnqueueClearedReferences(self);
GrowForUtilization(semi_space_collector_);
+ LogGC(kGcCauseHomogeneousSpaceCompact, collector);
FinishGC(self, collector::kGcTypeFull);
return HomogeneousSpaceCompactResult::kSuccess;
}
@@ -1744,6 +1746,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
FinishGC(self, collector::kGcTypeNone);
return;
}
+ collector::GarbageCollector* collector = nullptr;
tl->SuspendAll();
switch (collector_type) {
case kCollectorTypeSS: {
@@ -1758,7 +1761,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
mem_map.release());
AddSpace(bump_pointer_space_);
- Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+ collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
// Use the now empty main space mem map for the bump pointer temp space.
mem_map.reset(main_space_->ReleaseMemMap());
// Unset the pointers just in case.
@@ -1795,7 +1798,7 @@ void Heap::TransitionCollector(CollectorType collector_type) {
mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
- Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+ collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
mem_map.reset(bump_pointer_space_->ReleaseMemMap());
RemoveSpace(bump_pointer_space_);
bump_pointer_space_ = nullptr;
@@ -1826,6 +1829,8 @@ void Heap::TransitionCollector(CollectorType collector_type) {
reference_processor_.EnqueueClearedReferences(self);
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
+ DCHECK(collector != nullptr);
+ LogGC(kGcCauseCollectorTransition, collector);
FinishGC(self, collector::kGcTypeFull);
int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
int32_t delta_allocated = before_allocated - after_allocated;
@@ -2166,9 +2171,9 @@ void Heap::SwapSemiSpaces() {
std::swap(bump_pointer_space_, temp_space_);
}
-void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
- space::ContinuousMemMapAllocSpace* source_space,
- GcCause gc_cause) {
+collector::GarbageCollector* Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
+ space::ContinuousMemMapAllocSpace* source_space,
+ GcCause gc_cause) {
CHECK(kMovingCollector);
if (target_space != source_space) {
// Don't swap spaces since this isn't a typical semi space collection.
@@ -2176,11 +2181,13 @@ void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
semi_space_collector_->SetFromSpace(source_space);
semi_space_collector_->SetToSpace(target_space);
semi_space_collector_->Run(gc_cause, false);
+ return semi_space_collector_;
} else {
CHECK(target_space->IsBumpPointerSpace())
<< "In-place compaction is only supported for bump pointer spaces";
mark_compact_collector_->SetSpace(target_space->AsBumpPointerSpace());
mark_compact_collector_->Run(kGcCauseCollectorTransition, false);
+ return mark_compact_collector_;
}
}
@@ -2291,6 +2298,14 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
reference_processor_.EnqueueClearedReferences(self);
// Grow the heap so that we know when to perform the next GC.
GrowForUtilization(collector, bytes_allocated_before_gc);
+ LogGC(gc_cause, collector);
+ FinishGC(self, gc_type);
+ // Inform DDMS that a GC completed.
+ Dbg::GcDidFinish();
+ return gc_type;
+}
+
+void Heap::LogGC(GcCause gc_cause, collector::GarbageCollector* collector) {
const size_t duration = GetCurrentGcIteration()->GetDurationNs();
const std::vector<uint64_t>& pause_times = GetCurrentGcIteration()->GetPauseTimes();
// Print the GC if it is an explicit GC (e.g. Runtime.gc()) or a slow GC
@@ -2310,8 +2325,8 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
const size_t total_memory = GetTotalMemory();
std::ostringstream pause_string;
for (size_t i = 0; i < pause_times.size(); ++i) {
- pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
- << ((i != pause_times.size() - 1) ? "," : "");
+ pause_string << PrettyDuration((pause_times[i] / 1000) * 1000)
+ << ((i != pause_times.size() - 1) ? "," : "");
}
LOG(INFO) << gc_cause << " " << collector->GetName()
<< " GC freed " << current_gc_iteration_.GetFreedObjects() << "("
@@ -2323,10 +2338,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
<< " total " << PrettyDuration((duration / 1000) * 1000);
VLOG(heap) << Dumpable<TimingLogger>(*current_gc_iteration_.GetTimings());
}
- FinishGC(self, gc_type);
- // Inform DDMS that a GC completed.
- Dbg::GcDidFinish();
- return gc_type;
}
void Heap::FinishGC(Thread* self, collector::GcType gc_type) {
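
The heap.cc change makes Compact() return the collector that actually ran so callers can pass it to the new LogGC() helper instead of logging inside CollectGarbageInternal. Stripped of the ART types, the shape of that refactor is roughly:

    #include <cstdio>

    struct CollectorSketch { const char* name; };  // placeholder, not collector::GarbageCollector

    CollectorSketch* CompactSketch(bool in_place, CollectorSketch* semi_space,
                                   CollectorSketch* mark_compact) {
      // Return whichever collector ran so the caller can log it afterwards.
      return in_place ? mark_compact : semi_space;
    }

    void LogGCSketch(const char* cause, CollectorSketch* collector) {
      std::printf("%s GC ran with %s\n", cause, collector->name);
    }

    int main() {
      CollectorSketch semi{"semispace"}, mc{"markcompact"};
      LogGCSketch("HomogeneousSpaceCompact", CompactSketch(/*in_place=*/false, &semi, &mc));
      return 0;
    }
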
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 57c14606c4..b2478e6d70 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -666,12 +666,13 @@ class Heap {
class CollectorTransitionTask;
class HeapTrimTask;
- // Compact source space to target space.
- void Compact(space::ContinuousMemMapAllocSpace* target_space,
- space::ContinuousMemMapAllocSpace* source_space,
- GcCause gc_cause)
+ // Compact source space to target space. Returns the collector used.
+ collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
+ space::ContinuousMemMapAllocSpace* source_space,
+ GcCause gc_cause)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
// Create a mem map with a preferred base address.
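
Note: a minimal C++ sketch (simplified, hypothetical names, not the real ART classes) of the refactor in the heap.cc/heap.h hunks above: Compact() now reports which collector ran, so collector transitions can reuse the same LogGC helper as CollectGarbageInternal instead of skipping logging.

// Sketch only: illustrates the "return the collector, log once" pattern.
#include <iostream>
#include <string>
#include <utility>

class GarbageCollector {
 public:
  explicit GarbageCollector(std::string name) : name_(std::move(name)) {}
  const std::string& GetName() const { return name_; }
 private:
  std::string name_;
};

class Heap {
 public:
  // Compact() returns the collector that actually ran so the caller can log it.
  GarbageCollector* Compact(bool in_place) {
    return in_place ? &mark_compact_ : &semi_space_;
  }

  // Shared logging path used by both explicit GCs and collector transitions.
  void LogGC(const char* cause, GarbageCollector* collector) {
    std::cout << cause << " " << collector->GetName() << " GC finished\n";
  }

  void TransitionCollector() {
    GarbageCollector* collector = Compact(/*in_place=*/false);
    LogGC("CollectorTransition", collector);  // Transitions are now logged too.
  }

 private:
  GarbageCollector semi_space_{"semispace"};
  GarbageCollector mark_compact_{"markcompact"};
};

int main() {
  Heap heap;
  heap.TransitionCollector();
  return 0;
}
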
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index b71f6cdfc4..fc08d23274 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -633,7 +633,11 @@ void JdwpState::SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy
AcquireJdwpTokenForEvent(threadId);
}
EventFinish(pReq);
- SuspendByPolicy(suspend_policy, thread_self_id);
+ {
+ // Before suspending, we change our state to kSuspended so the debugger sees us as RUNNING.
+ ScopedThreadStateChange stsc(self, kSuspended);
+ SuspendByPolicy(suspend_policy, thread_self_id);
+ }
self->TransitionFromSuspendedToRunnable();
}
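
Note: a minimal RAII sketch in the spirit of ScopedThreadStateChange used in the jdwp_event.cc hunk above; the real ART class also coordinates with the mutator lock and suspend counts, so this only shows the save/restore shape (all names here are simplified stand-ins).

// Sketch only: change the thread state for the duration of a scope, restore it on exit.
enum ThreadState { kRunnable, kSuspended };

class Thread {
 public:
  ThreadState GetState() const { return state_; }
  void SetState(ThreadState new_state) { state_ = new_state; }
 private:
  ThreadState state_ = kRunnable;
};

class ScopedStateChange {
 public:
  ScopedStateChange(Thread* self, ThreadState new_state)
      : self_(self), old_state_(self->GetState()) {
    self_->SetState(new_state);  // e.g. mark the thread kSuspended before blocking.
  }
  ~ScopedStateChange() { self_->SetState(old_state_); }  // Restored when the scope ends.
 private:
  Thread* const self_;
  const ThreadState old_state_;
};
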
diff --git a/runtime/utils.cc b/runtime/utils.cc
index d09f27a214..851ecebb05 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1264,14 +1264,6 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
return;
}
-#if !defined(HAVE_ANDROID_OS)
- if (GetTid() != tid) {
- // TODO: dumping of other threads is disabled to avoid crashes during stress testing.
- // b/15446488.
- return;
- }
-#endif
-
std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 1dfbe510bc..c2485652da 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -580,8 +580,9 @@ void RegTypeCache::VisitStaticRoots(RootCallback* callback, void* arg) {
}
void RegTypeCache::VisitRoots(RootCallback* callback, void* arg) {
- for (const RegType* entry : entries_) {
- entry->VisitRoots(callback, arg);
+ // Exclude the static roots that are visited by VisitStaticRoots().
+ for (size_t i = primitive_count_; i < entries_.size(); ++i) {
+ entries_[i]->VisitRoots(callback, arg);
}
}
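
Note: a small C++ sketch of the iteration change in the reg_type_cache.cc hunk above, i.e. visiting only the non-primitive tail of the cache; RegType and the function name here are trivial stand-ins, not the real verifier types.

// Sketch only: skip the leading primitive entries already handled elsewhere.
#include <cstddef>
#include <vector>

struct RegType {
  void VisitRoots() const {}
};

void VisitDynamicRoots(const std::vector<const RegType*>& entries,
                       size_t primitive_count) {
  // The first primitive_count entries are static singletons already covered
  // by VisitStaticRoots(), so start past them.
  for (size_t i = primitive_count; i < entries.size(); ++i) {
    entries[i]->VisitRoots();
  }
}
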
diff --git a/test/134-reg-promotion/expected.txt b/test/134-reg-promotion/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/134-reg-promotion/expected.txt
diff --git a/test/134-reg-promotion/info.txt b/test/134-reg-promotion/info.txt
new file mode 100644
index 0000000000..6eff7eb832
--- /dev/null
+++ b/test/134-reg-promotion/info.txt
@@ -0,0 +1,4 @@
+Test that a vreg value that was defined by a const 0 and is used in both ref
+and float operations is flushed to its home location.
+
+See: b/19417710, b/7250540 & b.android.com/147187
diff --git a/test/134-reg-promotion/smali/Test.smali b/test/134-reg-promotion/smali/Test.smali
new file mode 100644
index 0000000000..6a35c45cd7
--- /dev/null
+++ b/test/134-reg-promotion/smali/Test.smali
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+
+.super Ljava/lang/Object;
+
+.method public static run()V
+ .registers 3
+ new-instance v2, Ljava/lang/String;
+ invoke-direct {v2}, Ljava/lang/String;-><init>()V
+ const/4 v0, 0
+ move v1, v0
+ :start
+ invoke-static {}, LMain;->blowup()V
+ if-ne v1, v0, :end
+ const/4 v2, 1
+ invoke-static {v2}, Ljava/lang/Integer;->toString(I)Ljava/lang/String;
+ move v2, v0
+ # The call gives v2 float type.
+ invoke-static {v2}, Ljava/lang/Float;->isNaN(F)Z
+ const/4 v1, 1
+ goto :start
+ :end
+ return-void
+.end method
diff --git a/test/134-reg-promotion/src/Main.java b/test/134-reg-promotion/src/Main.java
new file mode 100644
index 0000000000..d45ec661d3
--- /dev/null
+++ b/test/134-reg-promotion/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static char [][] holder;
+ static boolean sawOome;
+
+ static void blowup() {
+ try {
+ for (int i = 0; i < holder.length; ++i) {
+ holder[i] = new char[1024 * 1024];
+ }
+ } catch (OutOfMemoryError oome) {
+ sawOome = true;
+ }
+ }
+
+ public static void main(String args[]) throws Exception {
+ Class<?> c = Class.forName("Test");
+ Method m = c.getMethod("run", (Class[]) null);
+ for (int i = 0; i < 10; i++) {
+ holder = new char[128 * 1024][];
+ m.invoke(null, (Object[]) null);
+ holder = null;
+ }
+ }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 41c8da6f59..c764414921 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -437,7 +437,7 @@ endif
# Create a rule to build and run a test following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter default optimizing}-{5: relocate no-relocate relocate-no-patchoat}-
+# {4: interpreter default optimizing jit}-{5: relocate no-relocate relocate-no-patchoat}-
# {6: trace or no-trace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
# {9: no-image image picimage}-{10: pictest nopictest}-{11: test name}{12: 32 or 64}
define define-test-art-run-test
@@ -501,7 +501,7 @@ define define-test-art-run-test
run_test_options += --quick
else
ifeq ($(4),jit)
- test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_JIT_RULES
run_test_options += --jit
else
$$(error found $(4) expected $(COMPILER_TYPES))
@@ -569,32 +569,38 @@ define define-test-art-run-test
endif
endif
endif
+ ifeq ($(4),jit)
+ # Use interpreter image for JIT.
+ image_suffix := interpreter
+ else
+ image_suffix := $(4)
+ endif
ifeq ($(9),no-image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
run_test_options += --no-image
# Add the core dependency. This is required for pre-building.
ifeq ($(1),host)
- prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $(HOST_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
else
- prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $(TARGET_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
endif
else
ifeq ($(9),image)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
# Add the core dependency.
ifeq ($(1),host)
- prereq_rule += $(HOST_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $(HOST_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
else
- prereq_rule += $(TARGET_CORE_IMAGE_$(4)_no-pic_$(12))
+ prereq_rule += $(TARGET_CORE_IMAGE_$(image_suffix)_no-pic_$(12))
endif
else
ifeq ($(9),picimage)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PICIMAGE_RULES
run_test_options += --pic-image
ifeq ($(1),host)
- prereq_rule += $(HOST_CORE_IMAGE_$(4)_pic_$(12))
+ prereq_rule += $(HOST_CORE_IMAGE_$(image_suffix)_pic_$(12))
else
- prereq_rule += $(TARGET_CORE_IMAGE_$(4)_pic_$(12))
+ prereq_rule += $(TARGET_CORE_IMAGE_$(image_suffix)_pic_$(12))
endif
else
$$(error found $(9) expected $(IMAGE_TYPES))
diff --git a/test/run-test b/test/run-test
index 8bc4151941..52f5e0c401 100755
--- a/test/run-test
+++ b/test/run-test
@@ -195,6 +195,7 @@ while true; do
shift
elif [ "x$1" = "x--jit" ]; then
run_args="${run_args} --jit"
+ image_suffix="-interpreter"
shift
elif [ "x$1" = "x--optimizing" ]; then
run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing"
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 9c1976f415..41d814a155 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -99,5 +99,12 @@
result: EXEC_FAILED,
name: "org.apache.harmony.security.tests.java.security.Signature2Test#test_verify$BII",
bug: 18869265
+},
+{
+ description: "Test sometimes times out on volantis",
+ result: EXEC_TIMEOUT,
+ modes_variants: [[device,X64]],
+ names: ["libcore.java.lang.SystemTest#testArrayCopyConcurrentModification"],
+ bug: 19165288
}
]