-rw-r--r--  build/Android.common_test.mk | 1
-rw-r--r--  build/Android.gtest.mk | 1
-rw-r--r--  build/Android.oat.mk | 26
-rw-r--r--  cmdline/cmdline_parser_test.cc | 11
-rw-r--r--  compiler/image_writer.cc | 6
-rw-r--r--  compiler/optimizing/builder.cc | 68
-rw-r--r--  compiler/optimizing/builder.h | 11
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 2
-rw-r--r--  compiler/optimizing/gvn.cc | 4
-rw-r--r--  compiler/optimizing/gvn_test.cc | 25
-rw-r--r--  compiler/optimizing/inliner.cc | 3
-rw-r--r--  compiler/optimizing/licm.cc | 2
-rw-r--r--  compiler/optimizing/nodes.h | 183
-rw-r--r--  compiler/optimizing/optimization.h | 2
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 26
-rw-r--r--  compiler/optimizing/side_effects_analysis.cc | 9
-rw-r--r--  compiler/optimizing/side_effects_test.cc | 219
-rw-r--r--  compiler/optimizing/stack_map_stream.cc | 2
-rw-r--r--  compiler/optimizing/stack_map_test.cc | 15
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 69
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h | 21
-rw-r--r--  oatdump/oatdump.cc | 7
-rw-r--r--  runtime/Android.mk | 8
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 25
-rw-r--r--  runtime/arch/x86/asm_support_x86.S | 127
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 433
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 110
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 116
-rw-r--r--  runtime/art_method-inl.h | 5
-rw-r--r--  runtime/art_method.cc | 11
-rw-r--r--  runtime/art_method.h | 2
-rw-r--r--  runtime/base/allocator.h | 1
-rw-r--r--  runtime/base/hash_set.h | 42
-rw-r--r--  runtime/base/hash_set_test.cc | 6
-rw-r--r--  runtime/base/mutex.cc | 6
-rw-r--r--  runtime/base/mutex.h | 5
-rw-r--r--  runtime/check_jni.cc | 8
-rw-r--r--  runtime/class_linker.cc | 16
-rw-r--r--  runtime/class_linker.h | 6
-rw-r--r--  runtime/debugger.cc | 1
-rw-r--r--  runtime/dex_instruction.h | 6
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 2
-rw-r--r--  runtime/gc/weak_root_state.h | 39
-rw-r--r--  runtime/intern_table.cc | 95
-rw-r--r--  runtime/intern_table.h | 34
-rw-r--r--  runtime/interpreter/interpreter_common.h | 42
-rw-r--r--  runtime/lambda/box_table.cc | 220
-rw-r--r--  runtime/lambda/box_table.h | 148
-rw-r--r--  runtime/mirror/abstract_method.h | 3
-rw-r--r--  runtime/native/java_lang_System.cc | 10
-rw-r--r--  runtime/native/java_lang_Thread.cc | 1
-rw-r--r--  runtime/oat.h | 2
-rw-r--r--  runtime/parsed_options.cc | 11
-rw-r--r--  runtime/runtime.cc | 24
-rw-r--r--  runtime/runtime.h | 20
-rw-r--r--  runtime/runtime_options.def | 3
-rw-r--r--  runtime/runtime_options.h | 1
-rw-r--r--  runtime/stack_map.cc | 19
-rw-r--r--  runtime/stack_map.h | 188
-rw-r--r--  runtime/thread.cc | 9
-rw-r--r--  runtime/thread.h | 2
-rw-r--r--  runtime/thread_state.h | 1
-rw-r--r--  runtime/verifier/method_verifier.cc | 6
-rw-r--r--  runtime/verifier/reg_type.cc | 10
-rw-r--r--  runtime/verifier/verify_mode.h | 35
-rw-r--r--  test/011-array-copy/src/Main.java | 10
-rw-r--r--  test/140-field-packing/expected.txt | 2
-rw-r--r--  test/140-field-packing/info.txt | 1
-rw-r--r--  test/140-field-packing/src/GapOrder.java | 78
-rw-r--r--  test/140-field-packing/src/GapOrderBase.java | 24
-rw-r--r--  test/140-field-packing/src/Main.java | 23
-rw-r--r--  test/525-arrays-and-fields/expected.txt | 0
-rw-r--r--  test/525-arrays-and-fields/info.txt | 1
-rw-r--r--  test/525-arrays-and-fields/src/Main.java | 803
-rw-r--r--  test/800-smali/expected.txt | 5
-rw-r--r--  test/800-smali/smali/b_22411633_1.smali | 35
-rw-r--r--  test/800-smali/smali/b_22411633_2.smali | 45
-rw-r--r--  test/800-smali/smali/b_22411633_3.smali | 31
-rw-r--r--  test/800-smali/smali/b_22411633_4.smali | 25
-rw-r--r--  test/800-smali/smali/b_22411633_5.smali | 28
-rw-r--r--  test/800-smali/src/Main.java | 10
-rw-r--r--  test/955-lambda-smali/expected.txt | 1
-rw-r--r--  test/955-lambda-smali/smali/BoxUnbox.smali | 61
-rw-r--r--  test/Android.run-test.mk | 31
-rwxr-xr-x  test/etc/run-test-jar | 16
-rwxr-xr-x  test/run-test | 7
-rw-r--r--  tools/libcore_failures.txt | 6
87 files changed, 3070 insertions, 745 deletions
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 45b649047f..2f43f5f809 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -45,6 +45,7 @@ ART_TEST_DEFAULT_COMPILER ?= true
# Do you want interpreter tests run?
ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
+ART_TEST_INTERPRETER_ACCESS_CHECKS ?= $(ART_TEST_FULL)
# Do you want JIT tests run?
ART_TEST_JIT ?= $(ART_TEST_FULL)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 0958c645d8..377cd4ed34 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -264,6 +264,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
compiler/optimizing/parallel_move_test.cc \
compiler/optimizing/pretty_printer_test.cc \
compiler/optimizing/register_allocator_test.cc \
+ compiler/optimizing/side_effects_test.cc \
compiler/optimizing/ssa_test.cc \
compiler/optimizing/stack_map_test.cc \
compiler/optimizing/suspend_check_test.cc \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 728469c2c4..c70e12deec 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -31,7 +31,7 @@ ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),)
endif
# Use dex2oat debug version for better error reporting
-# $(1): compiler - default, optimizing, jit or interpreter.
+# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
# $(2): pic/no-pic
# $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds.
# $(4): wrapper, e.g., valgrind.
@@ -64,12 +64,16 @@ define create-core-oat-host-rules
core_compile_options += --compiler-filter=interpret-only
core_infix := -interpreter
endif
+ ifeq ($(1),interpreter-access-checks)
+ core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
+ core_infix := -interpreter-access-checks
+ endif
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter interpreter-access-checks jit optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected default, interpreter, jit or optimizing)
+ $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
endif
ifeq ($(2),pic)
@@ -127,7 +131,7 @@ $$(core_oat_name): $$(core_image_name)
core_pic_infix :=
endef # create-core-oat-host-rules
-# $(1): compiler - default, optimizing, jit or interpreter.
+# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
# $(2): wrapper.
# $(3): dex2oat suffix.
define create-core-oat-host-rule-combination
@@ -143,12 +147,14 @@ endef
$(eval $(call create-core-oat-host-rule-combination,default,,))
$(eval $(call create-core-oat-host-rule-combination,optimizing,,))
$(eval $(call create-core-oat-host-rule-combination,interpreter,,))
+$(eval $(call create-core-oat-host-rule-combination,interpreter-access-checks,,))
valgrindHOST_CORE_IMG_OUTS :=
valgrindHOST_CORE_OAT_OUTS :=
$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32))
$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32))
$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32))
+$(eval $(call create-core-oat-host-rule-combination,interpreter-access-checks,valgrind,32))
valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
@@ -178,12 +184,16 @@ define create-core-oat-target-rules
core_compile_options += --compiler-filter=interpret-only
core_infix := -interpreter
endif
+ ifeq ($(1),interpreter-access-checks)
+ core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail
+ core_infix := -interpreter-access-checks
+ endif
ifeq ($(1),default)
# Default has no infix, no compile options.
endif
- ifneq ($(filter-out default interpreter jit optimizing,$(1)),)
+ ifneq ($(filter-out default interpreter interpreter-access-checks jit optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected default, interpreter, jit or optimizing)
+ $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing)
endif
ifeq ($(2),pic)
@@ -246,7 +256,7 @@ $$(core_oat_name): $$(core_image_name)
core_pic_infix :=
endef # create-core-oat-target-rules
-# $(1): compiler - default, optimizing, jit or interpreter.
+# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks.
# $(2): wrapper.
# $(3): dex2oat suffix.
define create-core-oat-target-rule-combination
@@ -262,12 +272,14 @@ endef
$(eval $(call create-core-oat-target-rule-combination,default,,))
$(eval $(call create-core-oat-target-rule-combination,optimizing,,))
$(eval $(call create-core-oat-target-rule-combination,interpreter,,))
+$(eval $(call create-core-oat-target-rule-combination,interpreter-access-checks,,))
valgrindTARGET_CORE_IMG_OUTS :=
valgrindTARGET_CORE_OAT_OUTS :=
$(eval $(call create-core-oat-target-rule-combination,default,valgrind,32))
$(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
$(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,interpreter-access-checks,valgrind,32))
valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 98fd327f02..52df7deb25 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -216,9 +216,6 @@ TEST_F(CmdlineParserTest, TestSimpleSuccesses) {
EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote);
EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath);
- EXPECT_SINGLE_PARSE_VALUE(false, "-Xverify:none", M::Verify);
- EXPECT_SINGLE_PARSE_VALUE(true, "-Xverify:remote", M::Verify);
- EXPECT_SINGLE_PARSE_VALUE(true, "-Xverify:all", M::Verify);
EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize);
EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize);
EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM);
@@ -550,6 +547,14 @@ TEST_F(CmdlineParserTest, TestExperimentalLambdas) {
M::ExperimentalLambdas);
}
+// -Xverify:_
+TEST_F(CmdlineParserTest, TestVerify) {
+ EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kNone, "-Xverify:none", M::Verify);
+ EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:remote", M::Verify);
+ EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:all", M::Verify);
+ EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kSoftFail, "-Xverify:softfail", M::Verify);
+}
+
TEST_F(CmdlineParserTest, TestIgnoreUnrecognized) {
RuntimeParser::Builder parserBuilder;
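For reference: the verifier::VerifyMode values exercised by the new TestVerify case above are declared in runtime/verifier/verify_mode.h, a file added by this change (35 lines in the diffstat) but not reproduced in this excerpt. Based only on the enumerators used in the test, a minimal sketch of what that header presumably declares is shown below; the comments and the underlying type are assumptions.

    // Hypothetical sketch of runtime/verifier/verify_mode.h (assumption: only the
    // enumerators referenced by cmdline_parser_test.cc are shown).
    namespace art {
    namespace verifier {

    enum class VerifyMode : int8_t {
      kNone,      // Parsed from -Xverify:none; skip verification.
      kEnable,    // Parsed from -Xverify:remote and -Xverify:all; normal verification.
      kSoftFail,  // Parsed from -Xverify:softfail; force soft failures, run with access checks.
    };

    }  // namespace verifier
    }  // namespace art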
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index fdfeb485fd..2b65aa9337 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -715,8 +715,10 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) {
DCHECK_EQ(obj, obj->AsString()->Intern());
return;
}
- mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrong(
- obj->AsString()->Intern());
+ // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since
+ // we are guaranteed to not have GC during image writing.
+ mirror::String* const interned = Runtime::Current()->GetInternTable()->InternImageString(
+ obj->AsString());
if (obj != interned) {
if (!IsImageBinSlotAssigned(interned)) {
// interned obj is after us, allocate its location early
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 1319f2c62a..52a3a1534a 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -804,7 +804,9 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
invoke_type = kDirect;
break;
case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
invoke_type = kVirtual;
break;
case Instruction::INVOKE_INTERFACE:
@@ -1051,7 +1053,15 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_22c();
uint32_t obj_reg = instruction.VRegB_22c();
- uint16_t field_index = instruction.VRegC_22c();
+ uint16_t field_index;
+ if (instruction.IsQuickened()) {
+ if (!CanDecodeQuickenedInfo()) {
+ return false;
+ }
+ field_index = LookupQuickenedInfo(dex_pc);
+ } else {
+ field_index = instruction.VRegC_22c();
+ }
ScopedObjectAccess soa(Thread::Current());
ArtField* resolved_field =
@@ -1560,6 +1570,17 @@ void HGraphBuilder::PotentiallyAddSuspendCheck(HBasicBlock* target, uint32_t dex
}
}
+bool HGraphBuilder::CanDecodeQuickenedInfo() const {
+ return interpreter_metadata_ != nullptr;
+}
+
+uint16_t HGraphBuilder::LookupQuickenedInfo(uint32_t dex_pc) {
+ DCHECK(interpreter_metadata_ != nullptr);
+ uint32_t dex_pc_in_map = DecodeUnsignedLeb128(&interpreter_metadata_);
+ DCHECK_EQ(dex_pc, dex_pc_in_map);
+ return DecodeUnsignedLeb128(&interpreter_metadata_);
+}
+
bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
if (current_block_ == nullptr) {
return true; // Dead code
@@ -1657,6 +1678,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::RETURN_VOID_NO_BARRIER:
case Instruction::RETURN_VOID: {
BuildReturn(instruction, Primitive::kPrimVoid);
break;
@@ -1705,8 +1727,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_SUPER:
- case Instruction::INVOKE_VIRTUAL: {
- uint32_t method_idx = instruction.VRegB_35c();
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ uint16_t method_idx;
+ if (instruction.Opcode() == Instruction::INVOKE_VIRTUAL_QUICK) {
+ if (!CanDecodeQuickenedInfo()) {
+ return false;
+ }
+ method_idx = LookupQuickenedInfo(dex_pc);
+ } else {
+ method_idx = instruction.VRegB_35c();
+ }
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
@@ -1721,8 +1752,17 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::INVOKE_INTERFACE_RANGE:
case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_SUPER_RANGE:
- case Instruction::INVOKE_VIRTUAL_RANGE: {
- uint32_t method_idx = instruction.VRegB_3rc();
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
+ uint16_t method_idx;
+ if (instruction.Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK) {
+ if (!CanDecodeQuickenedInfo()) {
+ return false;
+ }
+ method_idx = LookupQuickenedInfo(dex_pc);
+ } else {
+ method_idx = instruction.VRegB_3rc();
+ }
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
if (!BuildInvoke(instruction, dex_pc, method_idx,
@@ -2375,12 +2415,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
case Instruction::IGET:
+ case Instruction::IGET_QUICK:
case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
case Instruction::IGET_OBJECT:
+ case Instruction::IGET_OBJECT_QUICK:
case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BOOLEAN_QUICK:
case Instruction::IGET_BYTE:
+ case Instruction::IGET_BYTE_QUICK:
case Instruction::IGET_CHAR:
- case Instruction::IGET_SHORT: {
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT:
+ case Instruction::IGET_SHORT_QUICK: {
if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
return false;
}
@@ -2388,12 +2435,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::IPUT:
+ case Instruction::IPUT_QUICK:
case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_OBJECT_QUICK:
case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BOOLEAN_QUICK:
case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_BYTE_QUICK:
case Instruction::IPUT_CHAR:
- case Instruction::IPUT_SHORT: {
+ case Instruction::IPUT_CHAR_QUICK:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_SHORT_QUICK: {
if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
return false;
}
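The quickened-info stream consumed by LookupQuickenedInfo() above is a sequence of ULEB128-encoded (dex_pc, original index) pairs stored in instruction order, so the builder pops the next pair for every quickened instruction it meets and DCHECKs that the dex pc matches. A small self-contained sketch of that decoding pattern follows; it is an illustration only, not ART's DecodeUnsignedLeb128 from leb128.h.

    #include <cstdint>
    #include <utility>

    // Minimal ULEB128 decoder; advances *data past the encoded value.
    static uint32_t DecodeUleb128(const uint8_t** data) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*data)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }

    // The quickened-info metadata is consumed strictly in order: one
    // (dex_pc, original field/method index) pair per quickened instruction.
    static std::pair<uint32_t, uint16_t> ReadNextQuickenedEntry(const uint8_t** stream) {
      uint32_t dex_pc = DecodeUleb128(stream);
      uint32_t index = DecodeUleb128(stream);
      return std::make_pair(dex_pc, static_cast<uint16_t>(index));
    }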
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 76610f5be2..ad5d92345b 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -39,7 +39,8 @@ class HGraphBuilder : public ValueObject {
const DexCompilationUnit* const outer_compilation_unit,
const DexFile* dex_file,
CompilerDriver* driver,
- OptimizingCompilerStats* compiler_stats)
+ OptimizingCompilerStats* compiler_stats,
+ const uint8_t* interpreter_metadata)
: arena_(graph->GetArena()),
branch_targets_(graph->GetArena(), 0),
locals_(graph->GetArena(), 0),
@@ -55,7 +56,8 @@ class HGraphBuilder : public ValueObject {
code_start_(nullptr),
latest_result_(nullptr),
can_use_baseline_for_string_init_(true),
- compilation_stats_(compiler_stats) {}
+ compilation_stats_(compiler_stats),
+ interpreter_metadata_(interpreter_metadata) {}
// Only for unit testing.
HGraphBuilder(HGraph* graph, Primitive::Type return_type = Primitive::kPrimInt)
@@ -120,6 +122,9 @@ class HGraphBuilder : public ValueObject {
const DexFile::CodeItem& code_item,
const DexFile::TryItem& try_item);
+ bool CanDecodeQuickenedInfo() const;
+ uint16_t LookupQuickenedInfo(uint32_t dex_pc);
+
void InitializeLocals(uint16_t count);
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
@@ -307,6 +312,8 @@ class HGraphBuilder : public ValueObject {
OptimizingCompilerStats* compilation_stats_;
+ const uint8_t* interpreter_metadata_;
+
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 5de629d605..6269d1628e 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -128,7 +128,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
for (i.Advance(); !i.Done(); i.Advance()) {
HInstruction* inst = i.Current();
DCHECK(!inst->IsControlFlow());
- if (!inst->HasSideEffects()
+ if (!inst->DoesAnyWrite()
&& !inst->CanThrow()
&& !inst->IsSuspendCheck()
// If we added an explicit barrier then we should keep it.
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 708733e28c..39006465d5 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -120,7 +120,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
// Removes all instructions in the set affected by the given side effects.
void Kill(SideEffects side_effects) {
DeleteAllImpureWhich([side_effects](Node* node) {
- return node->GetInstruction()->GetSideEffects().DependsOn(side_effects);
+ return node->GetInstruction()->GetSideEffects().MayDependOn(side_effects);
});
}
@@ -264,7 +264,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
// odd buckets to speed up deletion.
size_t HashCode(HInstruction* instruction) const {
size_t hash_code = instruction->ComputeHashCode();
- if (instruction->GetSideEffects().HasDependencies()) {
+ if (instruction->GetSideEffects().DoesAnyRead()) {
return (hash_code << 1) | 0;
} else {
return (hash_code << 1) | 1;
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index d8a09ffc38..5c6239b3f9 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -206,7 +206,7 @@ TEST(GVNTest, LoopFieldElimination) {
// and the body to be GVN'ed.
loop_body->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
parameter,
- Primitive::kPrimNot,
+ Primitive::kPrimBoolean,
MemberOffset(42),
false,
kUnknownFieldIndex,
@@ -323,9 +323,10 @@ TEST(GVNTest, LoopSideEffects) {
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
- ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects());
- ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects());
- ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects());
+ ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
}
// Check that the side effects of the outer loop does not affect the inner loop.
@@ -343,10 +344,10 @@ TEST(GVNTest, LoopSideEffects) {
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
- ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects());
- ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).HasSideEffects());
- ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects());
- ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects());
+ ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
}
// Check that the side effects of the inner loop affects the outer loop.
@@ -365,10 +366,10 @@ TEST(GVNTest, LoopSideEffects) {
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
- ASSERT_TRUE(side_effects.GetBlockEffects(entry).HasSideEffects());
- ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).HasSideEffects());
- ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).HasSideEffects());
- ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).HasSideEffects());
+ ASSERT_TRUE(side_effects.GetBlockEffects(entry).DoesAnyWrite());
+ ASSERT_FALSE(side_effects.GetBlockEffects(outer_loop_body).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetLoopEffects(outer_loop_header).DoesAnyWrite());
+ ASSERT_TRUE(side_effects.GetLoopEffects(inner_loop_header).DoesAnyWrite());
}
}
} // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3efe7c77fa..cea7dd9b8d 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -326,7 +326,8 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
&outer_compilation_unit_,
resolved_method->GetDexFile(),
compiler_driver_,
- &inline_stats);
+ &inline_stats,
+ resolved_method->GetQuickenedInfo());
if (!builder.BuildGraph(*code_item)) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 2535ea274a..5b89b4ec74 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -115,7 +115,7 @@ void LICM::Run() {
HInstruction* instruction = inst_it.Current();
if (instruction->CanBeMoved()
&& (!instruction->CanThrow() || !found_first_non_hoisted_throwing_instruction_in_loop)
- && !instruction->GetSideEffects().DependsOn(loop_effects)
+ && !instruction->GetSideEffects().MayDependOn(loop_effects)
&& InputsAreDefinedBeforeLoop(instruction)) {
// We need to update the environment if the instruction has a loop header
// phi in it.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fd2a04d892..85aa0040ca 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1182,13 +1182,25 @@ class HUserRecord : public ValueObject {
HUseListNode<T>* use_node_;
};
-// TODO: Add better documentation to this class and maybe refactor with more suggestive names.
-// - Has(All)SideEffects suggests that all the side effects are present but only ChangesSomething
-// flag is consider.
-// - DependsOn suggests that there is a real dependency between side effects but it only
-// checks DependendsOnSomething flag.
-//
-// Represents the side effects an instruction may have.
+/**
+ * Side-effects representation for write/read dependences on fields/arrays.
+ *
+ * The dependence analysis uses type disambiguation (e.g. a float field write
+ * cannot modify the value of an integer field read) and the access type (e.g.
+ * a reference array write cannot modify the value of a reference field read
+ * [although it may modify the reference fetch prior to reading the field,
+ * which is represented by its own write/read dependence]). The analysis
+ * makes conservative points-to assumptions on reference types (e.g. two same
+ * typed arrays are assumed to be the same, and any reference read depends
+ * on any reference read without further regard of its type).
+ *
+ * The internal representation uses the following 36-bit flags assignments:
+ *
+ * |ARRAY-R |FIELD-R |ARRAY-W |FIELD-W |
+ * +---------+---------+---------+---------+
+ * |543210987|654321098|765432109|876543210|
+ * |DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|
+ */
class SideEffects : public ValueObject {
public:
SideEffects() : flags_(0) {}
@@ -1198,57 +1210,125 @@ class SideEffects : public ValueObject {
}
static SideEffects All() {
- return SideEffects(ChangesSomething().flags_ | DependsOnSomething().flags_);
+ return SideEffects(kAllWrites | kAllReads);
+ }
+
+ static SideEffects AllWrites() {
+ return SideEffects(kAllWrites);
+ }
+
+ static SideEffects AllReads() {
+ return SideEffects(kAllReads);
+ }
+
+ static SideEffects FieldWriteOfType(Primitive::Type type, bool is_volatile) {
+ return is_volatile
+ ? All()
+ : SideEffects(TypeFlagWithAlias(type, kFieldWriteOffset));
+ }
+
+ static SideEffects ArrayWriteOfType(Primitive::Type type) {
+ return SideEffects(TypeFlagWithAlias(type, kArrayWriteOffset));
}
- static SideEffects ChangesSomething() {
- return SideEffects((1 << kFlagChangesCount) - 1);
+ static SideEffects FieldReadOfType(Primitive::Type type, bool is_volatile) {
+ return is_volatile
+ ? All()
+ : SideEffects(TypeFlagWithAlias(type, kFieldReadOffset));
}
- static SideEffects DependsOnSomething() {
- int count = kFlagDependsOnCount - kFlagChangesCount;
- return SideEffects(((1 << count) - 1) << kFlagChangesCount);
+ static SideEffects ArrayReadOfType(Primitive::Type type) {
+ return SideEffects(TypeFlagWithAlias(type, kArrayReadOffset));
}
+ // Combines the side-effects of this and the other.
SideEffects Union(SideEffects other) const {
return SideEffects(flags_ | other.flags_);
}
- bool HasSideEffects() const {
- size_t all_bits_set = (1 << kFlagChangesCount) - 1;
- return (flags_ & all_bits_set) != 0;
+ // Returns true if something is written.
+ bool DoesAnyWrite() const {
+ return (flags_ & kAllWrites);
}
- bool HasAllSideEffects() const {
- size_t all_bits_set = (1 << kFlagChangesCount) - 1;
- return all_bits_set == (flags_ & all_bits_set);
+ // Returns true if something is read.
+ bool DoesAnyRead() const {
+ return (flags_ & kAllReads);
}
- bool DependsOn(SideEffects other) const {
- size_t depends_flags = other.ComputeDependsFlags();
- return (flags_ & depends_flags) != 0;
+ // Returns true if nothing is written or read.
+ bool DoesNothing() const {
+ return flags_ == 0;
}
- bool HasDependencies() const {
- int count = kFlagDependsOnCount - kFlagChangesCount;
- size_t all_bits_set = (1 << count) - 1;
- return ((flags_ >> kFlagChangesCount) & all_bits_set) != 0;
+ // Returns true if potentially everything is written and read
+ // (every type and every kind of access).
+ bool DoesAll() const {
+ return flags_ == (kAllWrites | kAllReads);
}
- private:
- static constexpr int kFlagChangesSomething = 0;
- static constexpr int kFlagChangesCount = kFlagChangesSomething + 1;
+ // Returns true if this may read something written by other.
+ bool MayDependOn(SideEffects other) const {
+ const uint64_t reads = (flags_ & kAllReads) >> kFieldReadOffset;
+ return (other.flags_ & reads);
+ }
- static constexpr int kFlagDependsOnSomething = kFlagChangesCount;
- static constexpr int kFlagDependsOnCount = kFlagDependsOnSomething + 1;
+ // Returns string representation of flags (for debugging only).
+ // Format: |DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|
+ std::string ToString() const {
+ static const char *kDebug = "LZBCSIJFD";
+ std::string flags = "|";
+ for (int s = 35; s >= 0; s--) {
+ const int t = s % kBits;
+ if ((flags_ >> s) & 1)
+ flags += kDebug[t];
+ if (t == 0)
+ flags += "|";
+ }
+ return flags;
+ }
- explicit SideEffects(size_t flags) : flags_(flags) {}
+ private:
+ static constexpr int kBits = 9;
+ static constexpr int kFieldWriteOffset = 0 * kBits;
+ static constexpr int kArrayWriteOffset = 1 * kBits;
+ static constexpr int kFieldReadOffset = 2 * kBits;
+ static constexpr int kArrayReadOffset = 3 * kBits;
+
+ static constexpr uint64_t kAllWrites = 0x0003ffff;
+ static constexpr uint64_t kAllReads = kAllWrites << kFieldReadOffset;
+
+ // Work around the fact that HIR aliases I/F and J/D.
+ // TODO: remove this interceptor once HIR types are clean
+ static uint64_t TypeFlagWithAlias(Primitive::Type type, int offset) {
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return TypeFlag(Primitive::kPrimInt, offset) |
+ TypeFlag(Primitive::kPrimFloat, offset);
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return TypeFlag(Primitive::kPrimLong, offset) |
+ TypeFlag(Primitive::kPrimDouble, offset);
+ default:
+ return TypeFlag(type, offset);
+ }
+ }
- size_t ComputeDependsFlags() const {
- return flags_ << kFlagChangesCount;
+ // Translates type to bit flag.
+ static uint64_t TypeFlag(Primitive::Type type, int offset) {
+ CHECK_NE(type, Primitive::kPrimVoid);
+ const uint64_t one = 1;
+ const int shift = type; // 0-based consecutive enum
+ DCHECK_LE(kFieldWriteOffset, shift);
+ DCHECK_LT(shift, kArrayWriteOffset);
+ return one << (type + offset);
}
- size_t flags_;
+ // Private constructor on direct flags value.
+ explicit SideEffects(uint64_t flags) : flags_(flags) {}
+
+ uint64_t flags_;
};
// A HEnvironment object contains the values of virtual registers at a given location.
@@ -1511,7 +1591,8 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
}
virtual bool IsControlFlow() const { return false; }
virtual bool CanThrow() const { return false; }
- bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
+
+ bool DoesAnyWrite() const { return side_effects_.DoesAnyWrite(); }
// Does not apply for all instructions, but having this at top level greatly
// simplifies the null check elimination.
@@ -2732,7 +2813,7 @@ class HInvoke : public HInstruction {
uint32_t dex_pc,
uint32_t dex_method_index,
InvokeType original_invoke_type)
- : HInstruction(SideEffects::All()),
+ : HInstruction(SideEffects::All()), // assume write/read on all fields/arrays
number_of_arguments_(number_of_arguments),
inputs_(arena, number_of_arguments),
return_type_(return_type),
@@ -3524,7 +3605,9 @@ class HInstanceFieldGet : public HExpression<1> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HExpression(field_type, SideEffects::DependsOnSomething()),
+ : HExpression(
+ field_type,
+ SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, value);
}
@@ -3566,7 +3649,8 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HTemplateInstruction(SideEffects::ChangesSomething()),
+ : HTemplateInstruction(
+ SideEffects::FieldWriteOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file),
value_can_be_null_(true) {
SetRawInputAt(0, object);
@@ -3597,7 +3681,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
class HArrayGet : public HExpression<2> {
public:
HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type)
- : HExpression(type, SideEffects::DependsOnSomething()) {
+ : HExpression(type, SideEffects::ArrayReadOfType(type)) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
@@ -3635,7 +3719,7 @@ class HArraySet : public HTemplateInstruction<3> {
HInstruction* value,
Primitive::Type expected_component_type,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::ChangesSomething()),
+ : HTemplateInstruction(SideEffects::ArrayWriteOfType(expected_component_type)),
dex_pc_(dex_pc),
expected_component_type_(expected_component_type),
needs_type_check_(value->GetType() == Primitive::kPrimNot),
@@ -3934,7 +4018,9 @@ class HLoadString : public HExpression<1> {
class HClinitCheck : public HExpression<1> {
public:
explicit HClinitCheck(HLoadClass* constant, uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffects::ChangesSomething()),
+ : HExpression(
+ Primitive::kPrimNot,
+ SideEffects::AllWrites()), // assume write on all fields/arrays
dex_pc_(dex_pc) {
SetRawInputAt(0, constant);
}
@@ -3970,7 +4056,9 @@ class HStaticFieldGet : public HExpression<1> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HExpression(field_type, SideEffects::DependsOnSomething()),
+ : HExpression(
+ field_type,
+ SideEffects::SideEffects::FieldReadOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file) {
SetRawInputAt(0, cls);
}
@@ -4009,7 +4097,8 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
bool is_volatile,
uint32_t field_idx,
const DexFile& dex_file)
- : HTemplateInstruction(SideEffects::ChangesSomething()),
+ : HTemplateInstruction(
+ SideEffects::FieldWriteOfType(field_type, is_volatile)),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file),
value_can_be_null_(true) {
SetRawInputAt(0, cls);
@@ -4184,7 +4273,8 @@ class HCheckCast : public HTemplateInstruction<2> {
class HMemoryBarrier : public HTemplateInstruction<0> {
public:
explicit HMemoryBarrier(MemBarrierKind barrier_kind)
- : HTemplateInstruction(SideEffects::None()),
+ : HTemplateInstruction(
+ SideEffects::All()), // assume write/read on all fields/arrays
barrier_kind_(barrier_kind) {}
MemBarrierKind GetBarrierKind() { return barrier_kind_; }
@@ -4205,7 +4295,8 @@ class HMonitorOperation : public HTemplateInstruction<1> {
};
HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
- : HTemplateInstruction(SideEffects::ChangesSomething()), kind_(kind), dex_pc_(dex_pc) {
+ : HTemplateInstruction(SideEffects::All()), // assume write/read on all fields/arrays
+ kind_(kind), dex_pc_(dex_pc) {
SetRawInputAt(0, object);
}
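To make the new SideEffects encoding concrete: MayDependOn() shifts the 18 read bits down onto the 18 write bits and intersects them with the other instruction's flags. Below is a toy stand-alone model of that check using the bit layout documented in the class comment; the real FieldWriteOfType/FieldReadOfType also set the aliased F/D bits for I/J via TypeFlagWithAlias, which is omitted here for brevity.

    #include <cassert>
    #include <cstdint>

    // Toy model of the 36-bit layout: |ARRAY-R|FIELD-R|ARRAY-W|FIELD-W|, 9 type
    // bits (DFJISCBZL) per group, bit position within a group == Primitive::Type.
    constexpr int kBits = 9;
    constexpr int kFieldWriteOffset = 0 * kBits;
    constexpr int kFieldReadOffset = 2 * kBits;
    constexpr uint64_t kAllReads = uint64_t{0x3ffff} << kFieldReadOffset;

    enum TypeBit { kNot = 0, kBoolean = 1, kInt = 5, kFloat = 7 };  // subset only

    constexpr uint64_t FieldWrite(TypeBit t) { return uint64_t{1} << (t + kFieldWriteOffset); }
    constexpr uint64_t FieldRead(TypeBit t) { return uint64_t{1} << (t + kFieldReadOffset); }

    // MayDependOn(self, other): does 'self' read anything that 'other' writes?
    constexpr bool MayDependOn(uint64_t self, uint64_t other) {
      return (other & ((self & kAllReads) >> kFieldReadOffset)) != 0;
    }

    int main() {
      assert(MayDependOn(FieldRead(kInt), FieldWrite(kInt)));       // same type: dependence
      assert(!MayDependOn(FieldRead(kBoolean), FieldWrite(kInt)));  // different type: none
      assert(!MayDependOn(FieldWrite(kInt), FieldRead(kInt)));      // writes never "depend"
      return 0;
    }

In this model, SideEffects::All() is simply all 36 bits set, which is why DoesAll() can compare flags_ against (kAllWrites | kAllReads) directly.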
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index bc565468b2..f793a65bf3 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -40,7 +40,7 @@ class HOptimization : public ArenaObject<kArenaAllocMisc> {
// Return the name of the pass.
const char* GetPassName() const { return pass_name_; }
- // Peform the analysis itself.
+ // Perform the analysis itself.
virtual void Run() = 0;
protected:
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index aeb1ae20a3..710d3bcef0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -35,6 +35,7 @@
#include "dex/verified_method.h"
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "elf_writer_quick.h"
@@ -555,8 +556,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
// Implementation of the space filter: do not compile a code item whose size in
- // code units is bigger than 256.
- static constexpr size_t kSpaceFilterOptimizingThreshold = 256;
+ // code units is bigger than 128.
+ static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerOptions::kSpace)
&& (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
@@ -565,7 +566,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
DexCompilationUnit dex_compilation_unit(
- nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
+ nullptr, class_loader, Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags,
compiler_driver->GetVerifiedMethod(&dex_file, method_idx));
@@ -602,12 +603,29 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
visualizer_output_.get(),
compiler_driver);
+ const uint8_t* interpreter_metadata = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<4> hs(soa.Self());
+ ClassLinker* class_linker = dex_compilation_unit.GetClassLinker();
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(dex_file)));
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(class_loader)));
+ ArtMethod* art_method = compiler_driver->ResolveMethod(
+ soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
+ // We may not get a method, for example if its class is erroneous.
+ // TODO: Clean this up, the compiler driver should just pass the ArtMethod to compile.
+ if (art_method != nullptr) {
+ interpreter_metadata = art_method->GetQuickenedInfo();
+ }
+ }
HGraphBuilder builder(graph,
&dex_compilation_unit,
&dex_compilation_unit,
&dex_file,
compiler_driver,
- compilation_stats_.get());
+ compilation_stats_.get(),
+ interpreter_metadata);
VLOG(compiler) << "Building " << method_name;
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index ea1ca5a731..9dbf638442 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -24,14 +24,15 @@ void SideEffectsAnalysis::Run() {
block_effects_.SetSize(graph_->GetBlocks().Size());
loop_effects_.SetSize(graph_->GetBlocks().Size());
+ // In DEBUG mode, ensure side effects are properly initialized to empty.
if (kIsDebugBuild) {
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
SideEffects effects = GetBlockEffects(block);
- DCHECK(!effects.HasSideEffects() && !effects.HasDependencies());
+ DCHECK(effects.DoesNothing());
if (block->IsLoopHeader()) {
effects = GetLoopEffects(block);
- DCHECK(!effects.HasSideEffects() && !effects.HasDependencies());
+ DCHECK(effects.DoesNothing());
}
}
}
@@ -46,7 +47,9 @@ void SideEffectsAnalysis::Run() {
inst_it.Advance()) {
HInstruction* instruction = inst_it.Current();
effects = effects.Union(instruction->GetSideEffects());
- if (effects.HasAllSideEffects()) {
+ // If every possible write/read is represented, scanning further
+ // will not add any more information to side-effects of this block.
+ if (effects.DoesAll()) {
break;
}
}
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
new file mode 100644
index 0000000000..8db5a8a350
--- /dev/null
+++ b/compiler/optimizing/side_effects_test.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gtest/gtest.h"
+#include "nodes.h"
+#include "primitive.h"
+
+namespace art {
+
+/**
+ * Tests for the SideEffects class.
+ */
+
+//
+// Helper methods.
+//
+
+void testWriteAndReadSanity(SideEffects write, SideEffects read) {
+ EXPECT_FALSE(write.DoesNothing());
+ EXPECT_FALSE(read.DoesNothing());
+
+ EXPECT_TRUE(write.DoesAnyWrite());
+ EXPECT_FALSE(write.DoesAnyRead());
+ EXPECT_FALSE(read.DoesAnyWrite());
+ EXPECT_TRUE(read.DoesAnyRead());
+
+ // All-dependences.
+ SideEffects all = SideEffects::All();
+ EXPECT_TRUE(all.MayDependOn(write));
+ EXPECT_FALSE(write.MayDependOn(all));
+ EXPECT_FALSE(all.MayDependOn(read));
+ EXPECT_TRUE(read.MayDependOn(all));
+
+ // None-dependences.
+ SideEffects none = SideEffects::None();
+ EXPECT_FALSE(none.MayDependOn(write));
+ EXPECT_FALSE(write.MayDependOn(none));
+ EXPECT_FALSE(none.MayDependOn(read));
+ EXPECT_FALSE(read.MayDependOn(none));
+}
+
+void testWriteAndReadDependence(SideEffects write, SideEffects read) {
+ testWriteAndReadSanity(write, read);
+
+ // Dependence only in one direction.
+ EXPECT_FALSE(write.MayDependOn(read));
+ EXPECT_TRUE(read.MayDependOn(write));
+}
+
+void testNoWriteAndReadDependence(SideEffects write, SideEffects read) {
+ testWriteAndReadSanity(write, read);
+
+ // No dependence in any direction.
+ EXPECT_FALSE(write.MayDependOn(read));
+ EXPECT_FALSE(read.MayDependOn(write));
+}
+
+//
+// Actual tests.
+//
+
+TEST(SideEffectsTest, All) {
+ SideEffects all = SideEffects::All();
+ EXPECT_TRUE(all.DoesAnyWrite());
+ EXPECT_TRUE(all.DoesAnyRead());
+ EXPECT_FALSE(all.DoesNothing());
+ EXPECT_TRUE(all.DoesAll());
+}
+
+TEST(SideEffectsTest, None) {
+ SideEffects none = SideEffects::None();
+ EXPECT_FALSE(none.DoesAnyWrite());
+ EXPECT_FALSE(none.DoesAnyRead());
+ EXPECT_TRUE(none.DoesNothing());
+ EXPECT_FALSE(none.DoesAll());
+}
+
+TEST(SideEffectsTest, DependencesAndNoDependences) {
+ // Apply test to each individual primitive type.
+ for (Primitive::Type type = Primitive::kPrimNot;
+ type < Primitive::kPrimVoid;
+ type = Primitive::Type(type + 1)) {
+ // Same primitive type and access type: proper write/read dep.
+ testWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(type, false),
+ SideEffects::FieldReadOfType(type, false));
+ testWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(type),
+ SideEffects::ArrayReadOfType(type));
+ // Same primitive type but different access type: no write/read dep.
+ testNoWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(type, false),
+ SideEffects::ArrayReadOfType(type));
+ testNoWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(type),
+ SideEffects::FieldReadOfType(type, false));
+ }
+}
+
+TEST(SideEffectsTest, NoDependences) {
+ // Different primitive type, same access type: no write/read dep.
+ testNoWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+ testNoWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+ // Everything different: no write/read dep.
+ testNoWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+ testNoWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+}
+
+TEST(SideEffectsTest, VolatileDependences) {
+ SideEffects volatile_write =
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, true);
+ SideEffects any_write =
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false);
+ SideEffects volatile_read =
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, true);
+ SideEffects any_read =
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, false);
+
+ EXPECT_FALSE(volatile_write.MayDependOn(any_read));
+ EXPECT_TRUE(any_read.MayDependOn(volatile_write));
+ EXPECT_TRUE(volatile_write.MayDependOn(any_write));
+ EXPECT_FALSE(any_write.MayDependOn(volatile_write));
+
+ EXPECT_FALSE(volatile_read.MayDependOn(any_read));
+ EXPECT_TRUE(any_read.MayDependOn(volatile_read));
+ EXPECT_TRUE(volatile_read.MayDependOn(any_write));
+ EXPECT_FALSE(any_write.MayDependOn(volatile_read));
+}
+
+TEST(SideEffectsTest, SameWidthTypes) {
+ // Type I/F.
+ testWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimInt, false),
+ SideEffects::FieldReadOfType(Primitive::kPrimFloat, false));
+ testWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimInt),
+ SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
+ // Type L/D.
+ testWriteAndReadDependence(
+ SideEffects::FieldWriteOfType(Primitive::kPrimLong, false),
+ SideEffects::FieldReadOfType(Primitive::kPrimDouble, false));
+ testWriteAndReadDependence(
+ SideEffects::ArrayWriteOfType(Primitive::kPrimLong),
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+}
+
+TEST(SideEffectsTest, AllWritesAndReads) {
+ SideEffects s = SideEffects::None();
+ // Keep taking the union of different writes and reads.
+ for (Primitive::Type type = Primitive::kPrimNot;
+ type < Primitive::kPrimVoid;
+ type = Primitive::Type(type + 1)) {
+ s = s.Union(SideEffects::FieldWriteOfType(type, false));
+ s = s.Union(SideEffects::ArrayWriteOfType(type));
+ s = s.Union(SideEffects::FieldReadOfType(type, false));
+ s = s.Union(SideEffects::ArrayReadOfType(type));
+ }
+ EXPECT_TRUE(s.DoesAll());
+}
+
+TEST(SideEffectsTest, BitStrings) {
+ EXPECT_STREQ(
+ "|||||",
+ SideEffects::None().ToString().c_str());
+ EXPECT_STREQ(
+ "|DFJISCBZL|DFJISCBZL|DFJISCBZL|DFJISCBZL|",
+ SideEffects::All().ToString().c_str());
+ EXPECT_STREQ(
+ "|||DFJISCBZL|DFJISCBZL|",
+ SideEffects::AllWrites().ToString().c_str());
+ EXPECT_STREQ(
+ "|DFJISCBZL|DFJISCBZL|||",
+ SideEffects::AllReads().ToString().c_str());
+ EXPECT_STREQ(
+ "||||L|",
+ SideEffects::FieldWriteOfType(Primitive::kPrimNot, false).ToString().c_str());
+ EXPECT_STREQ(
+ "|||Z||",
+ SideEffects::ArrayWriteOfType(Primitive::kPrimBoolean).ToString().c_str());
+ EXPECT_STREQ(
+ "||B|||",
+ SideEffects::FieldReadOfType(Primitive::kPrimByte, false).ToString().c_str());
+ EXPECT_STREQ(
+ "|DJ||||", // note: DJ alias
+ SideEffects::ArrayReadOfType(Primitive::kPrimDouble).ToString().c_str());
+ SideEffects s = SideEffects::None();
+ s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimChar, false));
+ s = s.Union(SideEffects::FieldWriteOfType(Primitive::kPrimLong, false));
+ s = s.Union(SideEffects::ArrayWriteOfType(Primitive::kPrimShort));
+ s = s.Union(SideEffects::FieldReadOfType(Primitive::kPrimInt, false));
+ s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimFloat));
+ s = s.Union(SideEffects::ArrayReadOfType(Primitive::kPrimDouble));
+ EXPECT_STREQ(
+ "|DFJI|FI|S|DJC|", // note: DJ/FI alias.
+ s.ToString().c_str());
+}
+
+} // namespace art
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 65610d54a6..1f1530fa1e 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -248,7 +248,7 @@ void StackMapStream::FillIn(MemoryRegion region) {
DCHECK_EQ(code_info.GetStackMapsSize(code_info.ExtractEncoding()), stack_maps_size_);
// Set the Dex register location catalog.
- code_info.SetNumberOfDexRegisterLocationCatalogEntries(location_catalog_entries_.Size());
+ code_info.SetNumberOfLocationCatalogEntries(location_catalog_entries_.Size());
MemoryRegion dex_register_location_catalog_region = region.Subregion(
dex_register_location_catalog_start_, dex_register_location_catalog_size_);
DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index b4ac1b4d1a..33207d92d2 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -55,8 +55,7 @@ TEST(StackMapTest, Test1) {
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(2u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
@@ -154,8 +153,7 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(4u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
@@ -304,8 +302,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(1u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
@@ -398,8 +395,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
// The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
// and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
// has a size of 1 bit.
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(2u, number_of_location_catalog_entries);
ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_location_catalog_entries));
@@ -501,8 +497,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
- uint32_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
ASSERT_EQ(0u, number_of_location_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
ASSERT_EQ(0u, location_catalog.Size());
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 5843886727..413b9eaa8c 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -25,6 +25,58 @@
namespace art {
namespace arm {
+void Thumb2Assembler::Fixup::PrepareDependents(Thumb2Assembler* assembler) {
+ // For each Fixup, it's easy to find the Fixups that it depends on as they are either
+ // the following or the preceding Fixups until we find the target. However, for fixup
+ // adjustment we need the reverse lookup, i.e. what Fixups depend on a given Fixup.
+ // This function creates a compact representation of this relationship, where we have
+ // all the dependents in a single array and Fixups reference their ranges by start
+ // index and count. (Instead of having a per-fixup vector.)
+
+ // Count the number of dependents of each Fixup.
+ const FixupId end_id = assembler->fixups_.size();
+ Fixup* fixups = assembler->fixups_.data();
+ for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
+ uint32_t target = fixups[fixup_id].target_;
+ if (target > fixups[fixup_id].location_) {
+ for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
+ fixups[id].dependents_count_ += 1u;
+ }
+ } else {
+ for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) {
+ fixups[id - 1u].dependents_count_ += 1u;
+ }
+ }
+ }
+ // Assign index ranges in fixup_dependents_ to individual fixups. Record the end of the
+ // range in dependents_start_, we shall later decrement it as we fill in fixup_dependents_.
+ uint32_t number_of_dependents = 0u;
+ for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
+ number_of_dependents += fixups[fixup_id].dependents_count_;
+ fixups[fixup_id].dependents_start_ = number_of_dependents;
+ }
+ if (number_of_dependents == 0u) {
+ return;
+ }
+ // Create and fill in the fixup_dependents_.
+ assembler->fixup_dependents_.reset(new FixupId[number_of_dependents]);
+ FixupId* dependents = assembler->fixup_dependents_.get();
+ for (FixupId fixup_id = 0u; fixup_id != end_id; ++fixup_id) {
+ uint32_t target = fixups[fixup_id].target_;
+ if (target > fixups[fixup_id].location_) {
+ for (FixupId id = fixup_id + 1u; id != end_id && fixups[id].location_ < target; ++id) {
+ fixups[id].dependents_start_ -= 1u;
+ dependents[fixups[id].dependents_start_] = fixup_id;
+ }
+ } else {
+ for (FixupId id = fixup_id; id != 0u && fixups[id - 1u].location_ >= target; --id) {
+ fixups[id - 1u].dependents_start_ -= 1u;
+ dependents[fixups[id - 1u].dependents_start_] = fixup_id;
+ }
+ }
+ }
+}
+
void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) {
CHECK(!label->IsBound());
@@ -32,10 +84,6 @@ void Thumb2Assembler::BindLabel(Label* label, uint32_t bound_pc) {
FixupId fixup_id = label->Position(); // The id for linked Fixup.
Fixup* fixup = GetFixup(fixup_id); // Get the Fixup at this id.
fixup->Resolve(bound_pc); // Fixup can be resolved now.
- // Add this fixup as a dependency of all later fixups.
- for (FixupId id = fixup_id + 1u, end = fixups_.size(); id != end; ++id) {
- GetFixup(id)->AddDependent(fixup_id);
- }
uint32_t fixup_location = fixup->GetLocation();
uint16_t next = buffer_.Load<uint16_t>(fixup_location); // Get next in chain.
buffer_.Store<int16_t>(fixup_location, 0);
@@ -59,7 +107,7 @@ void Thumb2Assembler::AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_s
uint32_t adjustment = fixup->AdjustSizeIfNeeded(*current_code_size);
if (adjustment != 0u) {
*current_code_size += adjustment;
- for (FixupId dependent_id : fixup->Dependents()) {
+ for (FixupId dependent_id : fixup->Dependents(*this)) {
Fixup* dependent = GetFixup(dependent_id);
dependent->IncreaseAdjustment(adjustment);
if (buffer_.Load<int16_t>(dependent->GetLocation()) == 0) {
@@ -71,6 +119,7 @@ void Thumb2Assembler::AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_s
}
uint32_t Thumb2Assembler::AdjustFixups() {
+ Fixup::PrepareDependents(this);
uint32_t current_code_size = buffer_.Size();
std::deque<FixupId> fixups_to_recalculate;
if (kIsDebugBuild) {
@@ -2220,17 +2269,7 @@ void Thumb2Assembler::EmitBranch(Condition cond, Label* label, bool link, bool x
if (label->IsBound()) {
// The branch is to a bound label which means that it's a backwards branch.
- // Record this branch as a dependency of all Fixups between the label and the branch.
GetFixup(branch_id)->Resolve(label->Position());
- for (FixupId fixup_id = branch_id; fixup_id != 0u; ) {
- --fixup_id;
- Fixup* fixup = GetFixup(fixup_id);
- DCHECK_GE(label->Position(), 0);
- if (fixup->GetLocation() < static_cast<uint32_t>(label->Position())) {
- break;
- }
- fixup->AddDependent(branch_id);
- }
Emit16(0);
} else {
// Branch target is an unbound label. Add it to a singly-linked list maintained within
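The new Fixup::PrepareDependents() replaces the per-Fixup std::vector<FixupId> dependent lists with one shared array plus (start, count) pairs, built counting-sort style: count the dependents of each fixup, turn the counts into range end offsets with a prefix sum, then fill the array backwards while decrementing each start until it marks the beginning of its range. A generic sketch of that three-pass layout follows; it is an illustration only, since the real code derives the dependence edges from each Fixup's location/target rather than from an explicit edge list.

    #include <cstdint>
    #include <memory>
    #include <utility>
    #include <vector>

    struct Node {
      uint32_t dependents_count = 0u;
      uint32_t dependents_start = 0u;  // index into the packed array
    };

    // edges are (node, dependent) pairs: 'dependent' must be adjusted when 'node' changes size.
    std::unique_ptr<uint32_t[]> PackDependents(
        std::vector<Node>* nodes,
        const std::vector<std::pair<uint32_t, uint32_t>>& edges) {
      // Pass 1: count the dependents of every node.
      for (const auto& edge : edges) {
        (*nodes)[edge.first].dependents_count += 1u;
      }
      // Pass 2: prefix sum; dependents_start temporarily holds the end of each range.
      uint32_t total = 0u;
      for (Node& node : *nodes) {
        total += node.dependents_count;
        node.dependents_start = total;
      }
      std::unique_ptr<uint32_t[]> packed(new uint32_t[total]);
      // Pass 3: fill backwards, decrementing each start until it marks the range begin.
      for (const auto& edge : edges) {
        Node& node = (*nodes)[edge.first];
        node.dependents_start -= 1u;
        packed[node.dependents_start] = edge.second;
      }
      return packed;
    }

Each node's dependents then live in packed[start, start + count), which is exactly the view the new ArrayRef<FixupId> Dependents() accessor in assembler_thumb2.h returns.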
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 5e6969b4c2..838554ee6d 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -24,6 +24,7 @@
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/arm/assembler_arm.h"
+#include "utils/array_ref.h"
#include "offsets.h"
namespace art {
@@ -37,6 +38,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
it_cond_index_(kNoItCondition),
next_condition_(AL),
fixups_(),
+ fixup_dependents_(),
literals_(),
last_position_adjustment_(0u),
last_old_position_(0u),
@@ -507,12 +509,12 @@ class Thumb2Assembler FINAL : public ArmAssembler {
return adjustment_;
}
- const std::vector<FixupId>& Dependents() const {
- return dependents_;
- }
+ // Prepare the assembler->fixup_dependents_ and each Fixup's dependents_start_/count_.
+ static void PrepareDependents(Thumb2Assembler* assembler);
- void AddDependent(FixupId dependent_id) {
- dependents_.push_back(dependent_id);
+ ArrayRef<FixupId> Dependents(const Thumb2Assembler& assembler) const {
+ return ArrayRef<FixupId>(assembler.fixup_dependents_.get() + dependents_start_,
+ dependents_count_);
}
// Resolve a branch when the target is known.
@@ -557,7 +559,8 @@ class Thumb2Assembler FINAL : public ArmAssembler {
location_(location),
target_(kUnresolved),
adjustment_(0u),
- dependents_() {
+ dependents_count_(0u),
+ dependents_start_(0u) {
}
static size_t SizeInBytes(Size size);
@@ -584,7 +587,10 @@ class Thumb2Assembler FINAL : public ArmAssembler {
uint32_t location_; // Offset into assembler buffer in bytes.
uint32_t target_; // Offset into assembler buffer in bytes.
uint32_t adjustment_; // The number of extra bytes inserted between location_ and target_.
- std::vector<FixupId> dependents_; // Fixups that require adjustment when current size changes.
+ // Fixups that require adjustment when current size changes are stored in a single
+ // array in the assembler and we store only the start index and count here.
+ uint32_t dependents_count_;
+ uint32_t dependents_start_;
};
// Emit a single 32 or 16 bit data processing instruction.
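Note on the fixup_dependents_ refactoring above: each Fixup now records only a (start, count) slice into one shared array owned by the assembler instead of owning its own std::vector. Below is a minimal, self-contained sketch of how such a flat dependents array can be built with a counting pass; Fixup, depends_on and PrepareDependents here are simplified stand-ins (one dependency edge per fixup), not the actual Thumb2Assembler implementation.

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct Fixup {
      uint32_t depends_on;             // Id of the one fixup whose size change affects this one
                                       // (the real assembler derives many such edges from code offsets).
      uint32_t dependents_count = 0u;  // Filled in by PrepareDependents().
      uint32_t dependents_start = 0u;  // Index into the shared dependents array.
    };

    // Build a single flat array holding, for every fixup, the ids of the fixups that
    // depend on it, so each Fixup only needs (start, count) instead of a vector.
    std::unique_ptr<uint32_t[]> PrepareDependents(std::vector<Fixup>& fixups) {
      // Pass 1: count the dependents of each fixup.
      for (const Fixup& f : fixups) {
        ++fixups[f.depends_on].dependents_count;
      }
      // Pass 2: assign each fixup a contiguous slice of the shared array.
      uint32_t total = 0u;
      for (Fixup& f : fixups) {
        f.dependents_start = total;
        total += f.dependents_count;
        f.dependents_count = 0u;  // Reused as a fill cursor in pass 3.
      }
      // Pass 3: record the dependent ids into their slices.
      std::unique_ptr<uint32_t[]> deps(new uint32_t[total]);
      for (uint32_t id = 0u, end = static_cast<uint32_t>(fixups.size()); id != end; ++id) {
        Fixup& target = fixups[fixups[id].depends_on];
        deps[target.dependents_start + target.dependents_count] = id;
        ++target.dependents_count;
      }
      return deps;
    }

After this pass, a Dependents()-style accessor only needs to return a lightweight view (pointer plus length) over deps[start .. start + count), which is exactly what the ArrayRef<FixupId> accessor above provides.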
@@ -760,6 +766,7 @@ class Thumb2Assembler FINAL : public ArmAssembler {
static int32_t LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset);
std::vector<Fixup> fixups_;
+ std::unique_ptr<FixupId[]> fixup_dependents_;
// Use std::deque<> for literal labels to allow insertions at the end
// without invalidating pointers and references to existing elements.
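The std::deque comment above encodes a real guarantee worth spelling out: unlike std::vector, push_back on a deque never invalidates pointers or references to elements already stored (only iterators), which is why literal labels can keep being referenced while more literals are appended. A minimal illustration:

    #include <cassert>
    #include <deque>

    int main() {
      std::deque<int> literals;
      literals.push_back(1);
      const int* first = &literals.front();  // Pointer to an element already stored.
      for (int i = 0; i != 1000; ++i) {
        literals.push_back(i);               // Growing the container at the end...
      }
      assert(first == &literals.front());    // ...does not move the old element,
      assert(*first == 1);                   // so the pointer stays valid.
      return 0;
    }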
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e0d77086a7..93254547d7 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1950,9 +1950,12 @@ class ImageDumper {
}
state->stats_.managed_code_bytes_ignoring_deduplication += quick_oat_code_size;
+ uint32_t method_access_flags = method->GetAccessFlags();
+
indent_os << StringPrintf("OAT CODE: %p-%p\n", quick_oat_code_begin, quick_oat_code_end);
- indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd\n",
- dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes);
+ indent_os << StringPrintf("SIZE: Dex Instructions=%zd GC=%zd Mapping=%zd AccessFlags=0x%x\n",
+ dex_instruction_bytes, gc_map_bytes, pc_mapping_table_bytes,
+ method_access_flags);
size_t total_size = dex_instruction_bytes + gc_map_bytes + pc_mapping_table_bytes +
vmap_table_bytes + quick_oat_code_size + ArtMethod::ObjectSize(image_pointer_size);
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7f103a4a7d..fe79e72031 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -98,6 +98,7 @@ LIBART_COMMON_SRC_FILES := \
jit/jit.cc \
jit/jit_code_cache.cc \
jit/jit_instrumentation.cc \
+ lambda/box_table.cc \
jni_internal.cc \
jobject_comparator.cc \
linear_alloc.cc \
@@ -311,13 +312,14 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
dex_instruction.h \
dex_instruction_utils.h \
gc_root.h \
- gc/allocator/rosalloc.h \
- gc/collector/gc_type.h \
gc/allocator_type.h \
+ gc/allocator/rosalloc.h \
gc/collector_type.h \
+ gc/collector/gc_type.h \
+ gc/heap.h \
gc/space/region_space.h \
gc/space/space.h \
- gc/heap.h \
+ gc/weak_root_state.h \
image.h \
instrumentation.h \
indirect_reference_table.h \
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 068f458738..20001109a6 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -24,10 +24,9 @@
.extern artDeliverPendingException
/*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ * Macro to spill the GPRs.
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SPILL_ALL_CALLEE_SAVE_GPRS
push {r4-r11, lr} @ 9 words (36 bytes) of callee saves.
.cfi_adjust_cfa_offset 36
.cfi_rel_offset r4, 0
@@ -39,6 +38,14 @@
.cfi_rel_offset r10, 24
.cfi_rel_offset r11, 28
.cfi_rel_offset lr, 32
+.endm
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveAll)
+ */
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2
+ SPILL_ALL_CALLEE_SAVE_GPRS @ 9 words (36 bytes) of callee saves.
vpush {s16-s31} @ 16 words (64 bytes) of floats.
.cfi_adjust_cfa_offset 64
sub sp, #12 @ 3 words of space, bottom word will hold Method*
@@ -380,17 +387,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
* +-------------------------+
*/
ENTRY art_quick_invoke_stub_internal
- push {r4, r5, r6, r7, r8, r9, r10, r11, lr} @ spill regs
- .cfi_adjust_cfa_offset 16
- .cfi_rel_offset r4, 0
- .cfi_rel_offset r5, 4
- .cfi_rel_offset r6, 8
- .cfi_rel_offset r7, 12
- .cfi_rel_offset r8, 16
- .cfi_rel_offset r9, 20
- .cfi_rel_offset r10, 24
- .cfi_rel_offset r11, 28
- .cfi_rel_offset lr, 32
+ SPILL_ALL_CALLEE_SAVE_GPRS @ spill regs (9)
mov r11, sp @ save the stack pointer
.cfi_def_cfa_register r11
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 2159f0e717..77b8e87c99 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -19,61 +19,53 @@
#include "asm_support_x86.h"
-#if defined(__APPLE__) || (defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5))
- // Clang's as(1) doesn't let you name macro parameters prior to 3.5.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name
- #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
- #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
- #define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name
- #define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name
- #define END_MACRO .endmacro
-
- // Clang's as(1) uses $0, $1, and so on for macro arguments.
- #define RAW_VAR(name,index) $index
- #define VAR(name,index) SYMBOL($index)
- #define PLT_VAR(name, index) SYMBOL($index)
- #define REG_VAR(name,index) %$index
- #define CALL_MACRO(name,index) $index
-
- // The use of $x for arguments mean that literals need to be represented with $$x in macros.
- #define LITERAL(value) $value
- #define MACRO_LITERAL(value) $$value
+// Regular gas(1) & current clang/llvm assembler support named macro parameters.
+#define MACRO0(macro_name) .macro macro_name
+#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
+#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
+#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+#define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4
+#define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5
+#define END_MACRO .endm
+
+#if defined(__clang__)
+ // Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't
+ // separate the backslash and parameter by a space. Everything just works.
+ #define RAW_VAR(name) \name
+ #define VAR(name) \name
+ #define CALLVAR(name) SYMBOL(\name)
+ #define PLT_VAR(name) \name@PLT
+ #define REG_VAR(name) %\name
+ #define CALL_MACRO(name) \name
#else
- // Regular gas(1) lets you name macro parameters.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
- #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
- #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
- #define MACRO4(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4
- #define MACRO5(macro_name, macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5) .macro macro_name macro_arg1, macro_arg2, macro_arg3, macro_arg4, macro_arg5
- #define END_MACRO .endm
-
// Regular gas(1) uses \argument_name for macro arguments.
// We need to turn on alternate macro syntax so we can use & instead or the preprocessor
// will screw us by inserting a space between the \ and the name. Even in this mode there's
// no special meaning to $, so literals are still just $x. The use of altmacro means % is a
- // special character meaning care needs to be taken when passing registers as macro arguments.
+ // special character meaning care needs to be taken when passing registers as macro
+ // arguments.
.altmacro
- #define RAW_VAR(name,index) name&
- #define VAR(name,index) name&
- #define PLT_VAR(name, index) name&@PLT
- #define REG_VAR(name,index) %name
- #define CALL_MACRO(name,index) name&
+ #define RAW_VAR(name) name&
+ #define VAR(name) name&
+ #define CALLVAR(name) SYMBOL(name&)
+ #define PLT_VAR(name) name&@PLT
+ #define REG_VAR(name) %name
+ #define CALL_MACRO(name) name&
+#endif
- #define LITERAL(value) $value
+#define LITERAL(value) $value
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $(value)
+#else
#define MACRO_LITERAL(value) $value
#endif
#if defined(__APPLE__)
- #define FUNCTION_TYPE(name,index)
- #define SIZE(name,index)
-#elif defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
- #define FUNCTION_TYPE(name,index) .type $index, @function
- #define SIZE(name,index) .size $index, .-$index
+ #define FUNCTION_TYPE(name)
+ #define SIZE(name)
#else
- #define FUNCTION_TYPE(name,index) .type name&, @function
- #define SIZE(name,index) .size name, .-name
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define SIZE(name) .size name, .-name
#endif
// CFI support.
@@ -100,16 +92,10 @@
#define CFI_REMEMBER_STATE
#endif
- // Symbols.
+ // Symbols. On a Mac, we need a leading underscore.
#if !defined(__APPLE__)
#define SYMBOL(name) name
- #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
- // TODO: Disabled for old clang 3.3, this leads to text relocations and there should be a
- // better fix.
- #define PLT_SYMBOL(name) name // ## @PLT
- #else
- #define PLT_SYMBOL(name) name ## @PLT
- #endif
+ #define PLT_SYMBOL(name) name ## @PLT
#else
// Mac OS' symbols have an _ prefix.
#define SYMBOL(name) _ ## name
@@ -129,11 +115,11 @@ MACRO0(ALIGN_FUNCTION_ENTRY)
END_MACRO
MACRO1(DEFINE_FUNCTION, c_name)
- FUNCTION_TYPE(\c_name, 0)
- ASM_HIDDEN VAR(c_name, 0)
- .globl VAR(c_name, 0)
+ FUNCTION_TYPE(SYMBOL(\c_name))
+ ASM_HIDDEN CALLVAR(c_name)
+ .globl CALLVAR(c_name)
ALIGN_FUNCTION_ENTRY
-VAR(c_name, 0):
+CALLVAR(c_name):
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(esp, 4)
@@ -141,36 +127,38 @@ END_MACRO
MACRO1(END_FUNCTION, c_name)
CFI_ENDPROC
- SIZE(\c_name, 0)
+ SIZE(SYMBOL(\c_name))
END_MACRO
MACRO1(PUSH, reg)
- pushl REG_VAR(reg, 0)
+ pushl REG_VAR(reg)
CFI_ADJUST_CFA_OFFSET(4)
- CFI_REL_OFFSET(REG_VAR(reg, 0), 0)
+ CFI_REL_OFFSET(REG_VAR(reg), 0)
END_MACRO
MACRO1(POP, reg)
- popl REG_VAR(reg,0)
+ popl REG_VAR(reg)
CFI_ADJUST_CFA_OFFSET(-4)
- CFI_RESTORE(REG_VAR(reg,0))
+ CFI_RESTORE(REG_VAR(reg))
END_MACRO
+#define UNREACHABLE int3
+
MACRO1(UNIMPLEMENTED,name)
- FUNCTION_TYPE(\name, 0)
- .globl VAR(name, 0)
+ FUNCTION_TYPE(\name)
+ .globl VAR(name)
ALIGN_FUNCTION_ENTRY
-VAR(name, 0):
+VAR(name):
CFI_STARTPROC
- int3
- int3
+ UNREACHABLE
+ UNREACHABLE
CFI_ENDPROC
- SIZE(\name, 0)
+ SIZE(\name)
END_MACRO
MACRO1(SETUP_GOT_NOSAVE, got_reg)
#ifndef __APPLE__
- .ifc RAW_VAR(got_reg, 0), ebx
+ .ifc VAR(got_reg), ebx
call __x86.get_pc_thunk.bx
addl $_GLOBAL_OFFSET_TABLE_, %ebx
.else
@@ -182,15 +170,16 @@ END_MACRO
// Macros to poison (negate) the reference for heap poisoning.
MACRO1(POISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
- neg REG_VAR(rRef, 0)
+ neg REG_VAR(rRef)
#endif // USE_HEAP_POISONING
END_MACRO
// Macros to unpoison (negate) the reference for heap poisoning.
MACRO1(UNPOISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
- neg REG_VAR(rRef, 0)
+ neg REG_VAR(rRef)
#endif // USE_HEAP_POISONING
END_MACRO
+
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 7086b5bc1d..ebfb3faf4b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -26,15 +26,15 @@ MACRO2(SETUP_SAVE_ALL_CALLEE_SAVE_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
- subl MACRO_LITERAL(12), %esp // Grow stack by 3 words.
+ subl MACRO_LITERAL(12), %esp // Grow stack by 3 words.
CFI_ADJUST_CFA_OFFSET(12)
- SETUP_GOT_NOSAVE RAW_VAR(got_reg, 0)
+ SETUP_GOT_NOSAVE RAW_VAR(got_reg)
// Load Runtime::instance_ from GOT.
- movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
- movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
+ movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push save all callee-save method.
THIS_LOAD_REQUIRES_READ_BARRIER
- pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
+ pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
@@ -53,15 +53,15 @@ MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
- subl MACRO_LITERAL(12), %esp // Grow stack by 3 words.
+ subl MACRO_LITERAL(12), %esp // Grow stack by 3 words.
CFI_ADJUST_CFA_OFFSET(12)
- SETUP_GOT_NOSAVE VAR(got_reg, 0)
+ SETUP_GOT_NOSAVE RAW_VAR(got_reg)
// Load Runtime::instance_ from GOT.
- movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
- movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
+ movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push refs-only callee-save method.
THIS_LOAD_REQUIRES_READ_BARRIER
- pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
+ pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
@@ -101,13 +101,13 @@ MACRO2(SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME, got_reg, temp_reg)
movsd %xmm2, 16(%esp)
movsd %xmm3, 24(%esp)
- SETUP_GOT_NOSAVE VAR(got_reg, 0)
+ SETUP_GOT_NOSAVE RAW_VAR(got_reg)
// Load Runtime::instance_ from GOT.
- movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
- movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
+ movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg)), REG_VAR(temp_reg)
+ movl (REG_VAR(temp_reg)), REG_VAR(temp_reg)
// Push refs-and-args callee-save method.
THIS_LOAD_REQUIRES_READ_BARRIER
- pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
+ pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
@@ -157,10 +157,10 @@ MACRO0(RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME)
addl MACRO_LITERAL(36), %esp // Remove FPRs and EAX.
CFI_ADJUST_CFA_OFFSET(-36)
- POP ecx // Restore args except eax
+ POP ecx // Restore args except eax
POP edx
POP ebx
- POP ebp // Restore callee saves
+ POP ebp // Restore callee saves
POP esi
POP edi
END_MACRO
@@ -196,54 +196,54 @@ END_MACRO
MACRO0(DELIVER_PENDING_EXCEPTION)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save callee saves for throw
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // Alignment padding
+ subl MACRO_LITERAL(12), %esp // Alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
- int3 // unreached
+ UNREACHABLE
END_MACRO
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // alignment padding
+ subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call VAR(cxx_name, 1) // cxx_name(Thread*)
- int3 // unreached
- END_FUNCTION RAW_VAR(c_name, 0)
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
+ UNREACHABLE
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
mov %esp, %ecx
// Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // alignment padding
+ subl MACRO_LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, Thread*)
- int3 // unreached
- END_FUNCTION RAW_VAR(c_name, 0)
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
+ UNREACHABLE
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- PUSH eax // alignment padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ PUSH eax // alignment padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*)
- int3 // unreached
- END_FUNCTION RAW_VAR(c_name, 0)
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
+ UNREACHABLE
+ END_FUNCTION VAR(c_name)
END_MACRO
/*
@@ -303,7 +303,7 @@ MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call VAR(cxx_name, 0) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
movl %edx, %edi // save code pointer in EDI
addl MACRO_LITERAL(20), %esp // Pop arguments skip eax
CFI_ADJUST_CFA_OFFSET(-20)
@@ -335,9 +335,9 @@ MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
DELIVER_PENDING_EXCEPTION
END_MACRO
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
- INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name, 1)
- END_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
+ INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name)
+ END_FUNCTION VAR(c_name)
END_MACRO
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
@@ -354,27 +354,27 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
*/
MACRO5(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, shorty, arg_array, temp_char, finished)
1: // LOOP
- movb (REG_VAR(shorty, 1)), REG_VAR(temp_char, 3) // temp_char := *shorty
- addl MACRO_LITERAL(1), REG_VAR(shorty, 1) // shorty++
- cmpb MACRO_LITERAL(0), REG_VAR(temp_char, 3) // if (temp_char == '\0')
- je RAW_VAR(finished, 4) // goto finished
- cmpb MACRO_LITERAL(68), REG_VAR(temp_char, 3) // if (temp_char == 'D')
- je 2f // goto FOUND_DOUBLE
- cmpb MACRO_LITERAL(70), REG_VAR(temp_char, 3) // if (temp_char == 'F')
- je 3f // goto FOUND_FLOAT
- addl MACRO_LITERAL(4), REG_VAR(arg_array, 2) // arg_array++
+ movb (REG_VAR(shorty)), REG_VAR(temp_char) // temp_char := *shorty
+ addl MACRO_LITERAL(1), REG_VAR(shorty) // shorty++
+ cmpb MACRO_LITERAL(0), REG_VAR(temp_char) // if (temp_char == '\0')
+ je VAR(finished) // goto finished
+ cmpb MACRO_LITERAL(68), REG_VAR(temp_char) // if (temp_char == 'D')
+ je 2f // goto FOUND_DOUBLE
+ cmpb MACRO_LITERAL(70), REG_VAR(temp_char) // if (temp_char == 'F')
+ je 3f // goto FOUND_FLOAT
+ addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++
// Handle extra space in arg array taken by a long.
- cmpb MACRO_LITERAL(74), REG_VAR(temp_char, 3) // if (temp_char != 'J')
- jne 1b // goto LOOP
- addl MACRO_LITERAL(4), REG_VAR(arg_array, 2) // arg_array++
- jmp 1b // goto LOOP
+ cmpb MACRO_LITERAL(74), REG_VAR(temp_char) // if (temp_char != 'J')
+ jne 1b // goto LOOP
+ addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++
+ jmp 1b // goto LOOP
2: // FOUND_DOUBLE
- movsd (REG_VAR(arg_array, 2)), REG_VAR(xmm_reg, 0)
- addl MACRO_LITERAL(8), REG_VAR(arg_array, 2) // arg_array+=2
+ movsd (REG_VAR(arg_array)), REG_VAR(xmm_reg)
+ addl MACRO_LITERAL(8), REG_VAR(arg_array) // arg_array+=2
jmp 4f
3: // FOUND_FLOAT
- movss (REG_VAR(arg_array, 2)), REG_VAR(xmm_reg, 0)
- addl MACRO_LITERAL(4), REG_VAR(arg_array, 2) // arg_array++
+ movss (REG_VAR(arg_array)), REG_VAR(xmm_reg)
+ addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++
4:
END_MACRO
@@ -385,21 +385,21 @@ END_MACRO
*/
MACRO4(SKIP_OVER_FLOATS, shorty, arg_array, temp_char, finished)
1: // LOOP:
- movb (REG_VAR(shorty, 0)), REG_VAR(temp_char, 2) // temp_char := *shorty
- addl MACRO_LITERAL(1), REG_VAR(shorty, 0) // shorty++
- cmpb MACRO_LITERAL(0), REG_VAR(temp_char, 2) // if (temp_char == '\0')
- je RAW_VAR(finished, 3) // goto finished
- cmpb MACRO_LITERAL(70), REG_VAR(temp_char, 2) // if (temp_char == 'F')
- je 3f // goto SKIP_FLOAT
- cmpb MACRO_LITERAL(68), REG_VAR(temp_char, 2) // if (temp_char == 'D')
- je 4f // goto SKIP_DOUBLE
- jmp 5f // goto end
+ movb (REG_VAR(shorty)), REG_VAR(temp_char) // temp_char := *shorty
+ addl MACRO_LITERAL(1), REG_VAR(shorty) // shorty++
+ cmpb MACRO_LITERAL(0), REG_VAR(temp_char) // if (temp_char == '\0')
+ je VAR(finished) // goto finished
+ cmpb MACRO_LITERAL(70), REG_VAR(temp_char) // if (temp_char == 'F')
+ je 3f // goto SKIP_FLOAT
+ cmpb MACRO_LITERAL(68), REG_VAR(temp_char) // if (temp_char == 'D')
+ je 4f // goto SKIP_DOUBLE
+ jmp 5f // goto end
3: // SKIP_FLOAT
- addl MACRO_LITERAL(4), REG_VAR(arg_array, 1) // arg_array++
- jmp 1b // goto LOOP
+ addl MACRO_LITERAL(4), REG_VAR(arg_array) // arg_array++
+ jmp 1b // goto LOOP
4: // SKIP_DOUBLE
- addl MACRO_LITERAL(8), REG_VAR(arg_array, 1) // arg_array+=2
- jmp 1b // goto LOOP
+ addl MACRO_LITERAL(8), REG_VAR(arg_array) // arg_array+=2
+ jmp 1b // goto LOOP
5:
END_MACRO
@@ -619,147 +619,148 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
END_FUNCTION art_quick_invoke_static_stub
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // push padding
+ subl MACRO_LITERAL(12), %esp // push padding
CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call VAR(cxx_name, 1) // cxx_name(Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ call CALLVAR(cxx_name) // cxx_name(Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // push padding
+ subl MACRO_LITERAL(8), %esp // push padding
CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // push padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ PUSH eax // push padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass arg3
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl MACRO_LITERAL(12), %esp // alignment padding
+ subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ebx // pass arg4
- PUSH edx // pass arg3
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
- addl MACRO_LITERAL(32), %esp // pop arguments
+ PUSH ebx // pass arg4
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
+ addl MACRO_LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
- PUSH eax // push padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ PUSH eax // push padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass referrer
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, referrer, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ PUSH ecx // pass referrer
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, referrer, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %edx // get referrer
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass referrer
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, referrer, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ PUSH edx // pass referrer
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, referrer, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
- subl MACRO_LITERAL(12), %esp // alignment padding
+ subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ebx // pass referrer
- PUSH edx // pass arg3
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, referrer, Thread*)
- addl LITERAL(32), %esp // pop arguments
+ PUSH ebx // pass referrer
+ PUSH edx // pass arg3
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, arg2, arg3, referrer,
+ // Thread*)
+ addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION RAW_VAR(c_name, 0)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
@@ -780,9 +781,9 @@ END_MACRO
MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
cmpl MACRO_LITERAL(0),%fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ?
- jne 1f // if exception field != 0 goto 1
- ret // return
-1: // deliver exception on current thread
+ jne 1f // if exception field != 0 goto 1
+ ret // return
+1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -1020,15 +1021,15 @@ DEFINE_FUNCTION art_quick_lock_object
.Lslow_lock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl LITERAL(8), %esp // alignment padding
+ subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass object
- call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
- addl LITERAL(16), %esp // pop arguments
+ PUSH eax // pass object
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*)
+ addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
@@ -1075,54 +1076,54 @@ DEFINE_FUNCTION art_quick_unlock_object
.Lslow_unlock:
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- subl LITERAL(8), %esp // alignment padding
+ subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass object
+ PUSH eax // pass object
call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*)
- addl LITERAL(16), %esp // pop arguments
+ addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
DEFINE_FUNCTION art_quick_is_assignable
- PUSH eax // alignment padding
- PUSH ecx // pass arg2 - obj->klass
- PUSH eax // pass arg1 - checked class
+ PUSH eax // alignment padding
+ PUSH ecx // pass arg2 - obj->klass
+ PUSH eax // pass arg1 - checked class
call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
- addl LITERAL(12), %esp // pop arguments
+ addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
END_FUNCTION art_quick_is_assignable
DEFINE_FUNCTION art_quick_check_cast
- PUSH eax // alignment padding
- PUSH ecx // pass arg2 - obj->klass
- PUSH eax // pass arg1 - checked class
+ PUSH eax // alignment padding
+ PUSH ecx // pass arg2 - obj->klass
+ PUSH eax // pass arg1 - checked class
call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testl %eax, %eax
- jz 1f // jump forward if not assignable
- addl LITERAL(12), %esp // pop arguments
+ jz 1f // jump forward if not assignable
+ addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
- CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds.
+ CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds.
1:
- POP eax // pop arguments
+ POP eax // pop arguments
POP ecx
addl LITERAL(4), %esp
CFI_ADJUST_CFA_OFFSET(-4)
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
- PUSH eax // alignment padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ PUSH eax // alignment padding
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass arg2
- PUSH eax // pass arg1
+ PUSH ecx // pass arg2
+ PUSH eax // pass arg1
call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
- int3 // unreached
+ UNREACHABLE
END_FUNCTION art_quick_check_cast
/*
@@ -1174,10 +1175,10 @@ DEFINE_FUNCTION art_quick_aput_obj
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
ret
.Lcheck_assignability:
- PUSH eax // save arguments
+ PUSH eax // save arguments
PUSH ecx
PUSH edx
- subl LITERAL(8), %esp // alignment padding
+ subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
#ifdef USE_HEAP_POISONING
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored
@@ -1215,7 +1216,7 @@ DEFINE_FUNCTION art_quick_aput_obj
PUSH edx // pass arg2 - value
PUSH eax // pass arg1 - array
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
- int3 // unreached
+ UNREACHABLE
END_FUNCTION art_quick_aput_obj
DEFINE_FUNCTION art_quick_memcpy
@@ -1252,37 +1253,37 @@ DEFINE_FUNCTION art_quick_f2l
END_FUNCTION art_quick_f2l
DEFINE_FUNCTION art_quick_ldiv
- subl LITERAL(12), %esp // alignment padding
+ subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass arg4 b.hi
- PUSH edx // pass arg3 b.lo
- PUSH ecx // pass arg2 a.hi
- PUSH eax // pass arg1 a.lo
- call SYMBOL(artLdiv) // (jlong a, jlong b)
- addl LITERAL(28), %esp // pop arguments
+ PUSH ebx // pass arg4 b.hi
+ PUSH edx // pass arg3 b.lo
+ PUSH ecx // pass arg2 a.hi
+ PUSH eax // pass arg1 a.lo
+ call SYMBOL(artLdiv) // (jlong a, jlong b)
+ addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_ldiv
DEFINE_FUNCTION art_quick_lmod
- subl LITERAL(12), %esp // alignment padding
+ subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
- PUSH ebx // pass arg4 b.hi
- PUSH edx // pass arg3 b.lo
- PUSH ecx // pass arg2 a.hi
- PUSH eax // pass arg1 a.lo
- call SYMBOL(artLmod) // (jlong a, jlong b)
- addl LITERAL(28), %esp // pop arguments
+ PUSH ebx // pass arg4 b.hi
+ PUSH edx // pass arg3 b.lo
+ PUSH ecx // pass arg2 a.hi
+ PUSH eax // pass arg1 a.lo
+ call SYMBOL(artLmod) // (jlong a, jlong b)
+ addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
END_FUNCTION art_quick_lmod
DEFINE_FUNCTION art_quick_lmul
- imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx)
- imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx)
- mul %edx // edx:eax = a.lo(eax) * b.lo(edx)
+ imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx)
+ imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx)
+ mul %edx // edx:eax = a.lo(eax) * b.lo(edx)
add %ebx, %ecx
- add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi)
+ add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi)
ret
END_FUNCTION art_quick_lmul
@@ -1442,7 +1443,7 @@ END_FUNCTION art_quick_resolution_trampoline
DEFINE_FUNCTION art_quick_generic_jni_trampoline
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
- movl %esp, %ebp // save SP at callee-save frame
+ movl %esp, %ebp // save SP at callee-save frame
CFI_DEF_CFA_REGISTER(ebp)
subl LITERAL(5120), %esp
// prepare for artQuickGenericJniTrampoline call
@@ -1477,7 +1478,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
// (esp) 4(esp) 12(esp) <= C calling convention
// fs:... eax:edx fp0 <= where they are
- subl LITERAL(20), %esp // Padding & pass float result.
+ subl LITERAL(20), %esp // Padding & pass float result.
fstpl (%esp)
pushl %edx // Pass int result.
pushl %eax
@@ -1500,7 +1501,7 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
CFI_ADJUST_CFA_OFFSET(-(4 + 4 * 8))
POP ecx
- addl LITERAL(4), %esp // Avoid edx, as it may be part of the result.
+ addl LITERAL(4), %esp // Avoid edx, as it may be part of the result.
CFI_ADJUST_CFA_OFFSET(-4)
POP ebx
POP ebp // Restore callee saves
@@ -1539,7 +1540,7 @@ DEFINE_FUNCTION art_quick_to_interpreter_bridge
addl LITERAL(48), %esp // Remove FPRs and EAX, ECX, EDX, EBX.
CFI_ADJUST_CFA_OFFSET(-48)
- POP ebp // Restore callee saves
+ POP ebp // Restore callee saves
POP esi
POP edi
@@ -1636,7 +1637,7 @@ SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
- int3 // Unreachable.
+ UNREACHABLE
END_FUNCTION art_quick_deoptimize
/*
@@ -1647,8 +1648,8 @@ END_FUNCTION art_quick_deoptimize
* ecx: comp string object (known non-null)
*/
DEFINE_FUNCTION art_quick_string_compareto
- PUSH esi // push callee save reg
- PUSH edi // push callee save reg
+ PUSH esi // push callee save reg
+ PUSH edi // push callee save reg
mov MIRROR_STRING_COUNT_OFFSET(%eax), %edx
mov MIRROR_STRING_COUNT_OFFSET(%ecx), %ebx
lea MIRROR_STRING_VALUE_OFFSET(%eax), %esi
@@ -1692,7 +1693,7 @@ DEFINE_FUNCTION art_nested_signal_return
PUSH ecx // second arg to longjmp (1)
PUSH eax // first arg to longjmp (jmp_buf)
call PLT_SYMBOL(longjmp)
- int3 // won't get here.
+ UNREACHABLE
END_FUNCTION art_nested_signal_return
// TODO: implement these!
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index b2b6c2dbfb..706ae58d91 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -19,57 +19,49 @@
#include "asm_support_x86_64.h"
-#if defined(__APPLE__) || (defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5))
- // Clang's as(1) doesn't let you name macro parameters prior to 3.5.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name
- #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
- #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
- #define END_MACRO .endmacro
-
- // Clang's as(1) uses $0, $1, and so on for macro arguments.
- #define RAW_VAR(name,index) $index
- #define VAR(name,index) SYMBOL($index)
- #define PLT_VAR(name, index) PLT_SYMBOL($index)
- #define REG_VAR(name,index) %$index
- #define CALL_MACRO(name,index) $index
-
- // The use of $x for arguments mean that literals need to be represented with $$x in macros.
- #define LITERAL(value) $value
- #define MACRO_LITERAL(value) $$value
+// Regular gas(1) & current clang/llvm assembler support named macro parameters.
+#define MACRO0(macro_name) .macro macro_name
+#define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
+#define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
+#define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+#define END_MACRO .endm
+
+#if defined(__clang__)
+ // Clang/llvm does not support .altmacro. However, the clang/llvm preprocessor doesn't
+ // separate the backslash and parameter by a space. Everything just works.
+ #define RAW_VAR(name) \name
+ #define VAR(name) SYMBOL(\name)
+ #define PLT_VAR(name) \name@PLT
+ #define REG_VAR(name) %\name
+ #define CALL_MACRO(name) \name
#else
- // Regular gas(1) lets you name macro parameters.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
- #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
- #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
- #define END_MACRO .endm
-
// Regular gas(1) uses \argument_name for macro arguments.
// We need to turn on alternate macro syntax so we can use & instead or the preprocessor
// will screw us by inserting a space between the \ and the name. Even in this mode there's
// no special meaning to $, so literals are still just $x. The use of altmacro means % is a
- // special character meaning care needs to be taken when passing registers as macro arguments.
+ // special character meaning care needs to be taken when passing registers as macro
+ // arguments.
.altmacro
- #define RAW_VAR(name,index) name&
- #define VAR(name,index) name&
- #define PLT_VAR(name, index) name&@PLT
- #define REG_VAR(name,index) %name
- #define CALL_MACRO(name,index) name&
+ #define RAW_VAR(name) name&
+ #define VAR(name) name&
+ #define PLT_VAR(name) name&@PLT
+ #define REG_VAR(name) %name
+ #define CALL_MACRO(name) name&
+#endif
- #define LITERAL(value) $value
+#define LITERAL(value) $value
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $$(value)
+#else
#define MACRO_LITERAL(value) $value
#endif
#if defined(__APPLE__)
- #define FUNCTION_TYPE(name,index)
- #define SIZE(name,index)
-#elif defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
- #define FUNCTION_TYPE(name,index) .type $index, @function
- #define SIZE(name,index) .size $index, .-$index
+ #define FUNCTION_TYPE(name)
+ #define SIZE(name)
#else
- #define FUNCTION_TYPE(name,index) .type name&, @function
- #define SIZE(name,index) .size name, .-name
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define SIZE(name) .size name, .-name
#endif
// CFI support.
@@ -95,13 +87,7 @@
// Symbols.
#if !defined(__APPLE__)
#define SYMBOL(name) name
- #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
- // TODO: Disabled for old clang 3.3, this leads to text relocations and there should be a
- // better fix.
- #define PLT_SYMBOL(name) name // ## @PLT
- #else
- #define PLT_SYMBOL(name) name ## @PLT
- #endif
+ #define PLT_SYMBOL(name) name ## @PLT
#else
#define SYMBOL(name) _ ## name
#define PLT_SYMBOL(name) _ ## name
@@ -122,11 +108,11 @@ END_MACRO
// TODO: we might need to use SYMBOL() here to add the underscore prefix
// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
- FUNCTION_TYPE(\c_name, 0)
- ASM_HIDDEN VAR(c_name, 0)
- .globl VAR(c_name, 0)
+ FUNCTION_TYPE(SYMBOL(\c_name))
+ ASM_HIDDEN SYMBOL(\c_name)
+ .globl VAR(c_name)
ALIGN_FUNCTION_ENTRY
-VAR(c_name, 0):
+VAR(c_name):
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(rsp, 8)
@@ -134,32 +120,32 @@ END_MACRO
MACRO1(END_FUNCTION, c_name)
CFI_ENDPROC
- SIZE(\c_name, 0)
+ SIZE(SYMBOL(\c_name))
END_MACRO
MACRO1(PUSH, reg)
- pushq REG_VAR(reg, 0)
+ pushq REG_VAR(reg)
CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(REG_VAR(reg, 0), 0)
+ CFI_REL_OFFSET(REG_VAR(reg), 0)
END_MACRO
MACRO1(POP, reg)
- popq REG_VAR(reg,0)
+ popq REG_VAR(reg)
CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(REG_VAR(reg,0))
+ CFI_RESTORE(REG_VAR(reg))
END_MACRO
MACRO1(UNIMPLEMENTED,name)
- FUNCTION_TYPE(\name, 0)
- ASM_HIDDEN VAR(c_name, 0)
- .globl VAR(name, 0)
+ FUNCTION_TYPE(SYMBOL(\name))
+ ASM_HIDDEN VAR(name)
+ .globl VAR(name)
ALIGN_FUNCTION_ENTRY
-VAR(name, 0):
+VAR(name):
CFI_STARTPROC
int3
int3
CFI_ENDPROC
- SIZE(\name, 0)
+ SIZE(SYMBOL(\name))
END_MACRO
MACRO0(UNREACHABLE)
@@ -173,14 +159,14 @@ END_MACRO
// Macros to poison (negate) the reference for heap poisoning.
MACRO1(POISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
- negl REG_VAR(rRef, 0)
+ negl REG_VAR(rRef)
#endif // USE_HEAP_POISONING
END_MACRO
// Macros to unpoison (negate) the reference for heap poisoning.
MACRO1(UNPOISON_HEAP_REF, rRef)
#ifdef USE_HEAP_POISONING
- negl REG_VAR(rRef, 0)
+ negl REG_VAR(rRef)
#endif // USE_HEAP_POISONING
END_MACRO
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ad89bcaa78..7e7d789c8d 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -275,33 +275,33 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
END_MACRO
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(Thread*)
+ call VAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg1, Thread*)
+ call VAR(cxx_name) // cxx_name(arg1, Thread*)
UNREACHABLE
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
- DEFINE_FUNCTION VAR(c_name, 0)
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(Thread*)
+ call VAR(cxx_name) // cxx_name(Thread*)
UNREACHABLE
- END_FUNCTION VAR(c_name, 0)
+ END_FUNCTION VAR(c_name)
END_MACRO
/*
@@ -361,7 +361,7 @@ MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
movq %rsp, %rcx // pass SP
- call VAR(cxx_name, 0) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name) // cxx_name(arg1, arg2, Thread*, SP)
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
@@ -376,9 +376,9 @@ MACRO1(INVOKE_TRAMPOLINE_BODY, cxx_name)
DELIVER_PENDING_EXCEPTION
END_MACRO
MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
- DEFINE_FUNCTION RAW_VAR(c_name, 0)
- INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name, 1)
- END_FUNCTION RAW_VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
+ INVOKE_TRAMPOLINE_BODY RAW_VAR(cxx_name)
+ END_FUNCTION VAR(c_name)
END_MACRO
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
@@ -399,7 +399,7 @@ MACRO2(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, finished)
movb (%r10), %al // al := *shorty
addq MACRO_LITERAL(1), %r10 // shorty++
cmpb MACRO_LITERAL(0), %al // if (al == '\0') goto xmm_setup_finished
- je VAR(finished, 1)
+ je VAR(finished)
cmpb MACRO_LITERAL(68), %al // if (al == 'D') goto FOUND_DOUBLE
je 2f
cmpb MACRO_LITERAL(70), %al // if (al == 'F') goto FOUND_FLOAT
@@ -411,11 +411,11 @@ MACRO2(LOOP_OVER_SHORTY_LOADING_XMMS, xmm_reg, finished)
addq MACRO_LITERAL(4), %r11 // arg_array++
jmp 1b // goto LOOP
2: // FOUND_DOUBLE
- movsd (%r11), REG_VAR(xmm_reg, 0)
+ movsd (%r11), REG_VAR(xmm_reg)
addq MACRO_LITERAL(8), %r11 // arg_array+=2
jmp 4f
3: // FOUND_FLOAT
- movss (%r11), REG_VAR(xmm_reg, 0)
+ movss (%r11), REG_VAR(xmm_reg)
addq MACRO_LITERAL(4), %r11 // arg_array++
4:
END_MACRO
@@ -430,18 +430,18 @@ MACRO3(LOOP_OVER_SHORTY_LOADING_GPRS, gpr_reg64, gpr_reg32, finished)
movb (%r10), %al // al := *shorty
addq MACRO_LITERAL(1), %r10 // shorty++
cmpb MACRO_LITERAL(0), %al // if (al == '\0') goto gpr_setup_finished
- je VAR(finished, 2)
+ je VAR(finished)
cmpb MACRO_LITERAL(74), %al // if (al == 'J') goto FOUND_LONG
je 2f
cmpb MACRO_LITERAL(70), %al // if (al == 'F') goto SKIP_FLOAT
je 3f
cmpb MACRO_LITERAL(68), %al // if (al == 'D') goto SKIP_DOUBLE
je 4f
- movl (%r11), REG_VAR(gpr_reg32, 1)
+ movl (%r11), REG_VAR(gpr_reg32)
addq MACRO_LITERAL(4), %r11 // arg_array++
jmp 5f
2: // FOUND_LONG
- movq (%r11), REG_VAR(gpr_reg64, 0)
+ movq (%r11), REG_VAR(gpr_reg64)
addq MACRO_LITERAL(8), %r11 // arg_array+=2
jmp 5f
3: // SKIP_FLOAT
@@ -693,94 +693,94 @@ DEFINE_FUNCTION art_quick_do_long_jump
END_FUNCTION art_quick_do_long_jump
MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(Thread*)
+ call VAR(cxx_name) // cxx_name(Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, Thread*)
+ call VAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*)
+ call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*)
+ call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
+ call VAR(cxx_name) // cxx_name(arg1, arg2, arg3, arg4, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*)
+ call VAR(cxx_name) // cxx_name(arg0, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2)
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro)
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rdx // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*)
+ call VAR(cxx_name) // (arg0, arg1, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2)
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro)
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
- DEFINE_FUNCTION VAR(c_name, 0)
+ DEFINE_FUNCTION VAR(c_name)
movq 8(%rsp), %rcx // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
- call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
+ call VAR(cxx_name) // cxx_name(arg0, arg1, arg2, referrer, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- CALL_MACRO(return_macro, 2) // return or deliver exception
- END_FUNCTION VAR(c_name, 0)
+ CALL_MACRO(return_macro) // return or deliver exception
+ END_FUNCTION VAR(c_name)
END_MACRO
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
@@ -1145,7 +1145,7 @@ DEFINE_FUNCTION art_quick_check_cast
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*)
- int3 // unreached
+ UNREACHABLE
END_FUNCTION art_quick_check_cast
@@ -1275,7 +1275,7 @@ DEFINE_FUNCTION art_quick_aput_obj
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
- int3 // unreached
+ UNREACHABLE
END_FUNCTION art_quick_aput_obj
// TODO: This is quite silly on X86_64 now.
@@ -1672,7 +1672,7 @@ SYMBOL(art_quick_deoptimize_from_compiled_slow_path):
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
call SYMBOL(artDeoptimize) // artDeoptimize(Thread*)
- int3 // Unreachable.
+ UNREACHABLE
END_FUNCTION art_quick_deoptimize
/*
@@ -1731,5 +1731,5 @@ DEFINE_FUNCTION art_nested_signal_return
// first arg to longjmp is already in correct register
movq LITERAL(1), %rsi // second arg to longjmp (1)
call PLT_SYMBOL(longjmp)
- int3 // won't get here
+ UNREACHABLE
END_FUNCTION art_nested_signal_return
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 8712bdbbf5..bb3c72c433 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -20,6 +20,7 @@
#include "art_method.h"
#include "art_field.h"
+#include "base/logging.h"
#include "dex_file.h"
#include "dex_file-inl.h"
#include "gc_root-inl.h"
@@ -317,7 +318,9 @@ inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
+ if (kIsDebugBuild && !IsProxyMethod()) {
+ CHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
+ }
return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index c78a851b0e..7673418fd1 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -35,6 +35,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
+#include "oat_file-inl.h"
#include "scoped_thread_state_change.h"
#include "well_known_classes.h"
@@ -561,4 +562,14 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
return true;
}
+const uint8_t* ArtMethod::GetQuickenedInfo() {
+ bool found = false;
+ OatFile::OatMethod oat_method =
+ Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
+ if (!found || (oat_method.GetQuickCode() != nullptr)) {
+ return nullptr;
+ }
+ return oat_method.GetVmapTable();
+}
+
} // namespace art
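A hedged sketch of how the new accessor is meant to be consumed; the caller below is hypothetical, and per the art_method.h annotation it must hold the mutator lock.

    // `method` is an ArtMethod* obtained elsewhere.
    const uint8_t* quicken_info = method->GetQuickenedInfo();
    if (quicken_info != nullptr) {
      // No compiled quick code: the OatMethod's vmap-table slot carries the quickening info.
    } else {
      // Either no OatMethod was found, or the method has quick code; nothing to dequicken.
    }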
diff --git a/runtime/art_method.h b/runtime/art_method.h
index e8c47d94bc..4169c5ebd9 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -336,6 +336,8 @@ class ArtMethod FINAL {
const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint8_t* GetQuickenedInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a GcMap instance for convenient access.
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 07daa7e0fa..3422625282 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -50,6 +50,7 @@ enum AllocatorTag {
kAllocatorTagMonitorList,
kAllocatorTagClassTable,
kAllocatorTagInternTable,
+ kAllocatorTagLambdaBoxTable,
kAllocatorTagMaps,
kAllocatorTagLOS,
kAllocatorTagSafeMap,
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index f2c8355f53..709d9ae771 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -231,19 +231,33 @@ class HashSet {
return ret;
}
+ // Lower case for c++11 for each. const version.
+ ConstIterator begin() const {
+ ConstIterator ret(this, 0);
+ if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
+ ++ret; // Skip all the empty slots.
+ }
+ return ret;
+ }
+
// Lower case for c++11 for each.
Iterator end() {
return Iterator(this, NumBuckets());
}
+ // Lower case for c++11 for each. const version.
+ ConstIterator end() const {
+ return ConstIterator(this, NumBuckets());
+ }
+
bool Empty() {
return Size() == 0;
}
// Erase algorithm:
// Make an empty slot where the iterator is pointing.
- // Scan fowards until we hit another empty slot.
- // If an element inbetween doesn't rehash to the range from the current empty slot to the
+ // Scan forwards until we hit another empty slot.
+ // If an element in between doesn't rehash to the range from the current empty slot to the
// iterator. It must be before the empty slot, in that case we can move it to the empty slot
// and set the empty slot to be the location we just moved from.
// Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
@@ -299,23 +313,23 @@ class HashSet {
// Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy
// object in the heap for performance solution.
template <typename K>
- Iterator Find(const K& element) {
- return FindWithHash(element, hashfn_(element));
+ Iterator Find(const K& key) {
+ return FindWithHash(key, hashfn_(key));
}
template <typename K>
- ConstIterator Find(const K& element) const {
- return FindWithHash(element, hashfn_(element));
+ ConstIterator Find(const K& key) const {
+ return FindWithHash(key, hashfn_(key));
}
template <typename K>
- Iterator FindWithHash(const K& element, size_t hash) {
- return Iterator(this, FindIndex(element, hash));
+ Iterator FindWithHash(const K& key, size_t hash) {
+ return Iterator(this, FindIndex(key, hash));
}
template <typename K>
- ConstIterator FindWithHash(const K& element, size_t hash) const {
- return ConstIterator(this, FindIndex(element, hash));
+ ConstIterator FindWithHash(const K& key, size_t hash) const {
+ return ConstIterator(this, FindIndex(key, hash));
}
// Insert an element, allows duplicates.
@@ -399,6 +413,10 @@ class HashSet {
}
size_t IndexForHash(size_t hash) const {
+ // Protect against undefined behavior (division by zero).
+ if (UNLIKELY(num_buckets_ == 0)) {
+ return 0;
+ }
return hash % num_buckets_;
}
@@ -414,6 +432,10 @@ class HashSet {
// This value for not found is important so that Iterator(this, FindIndex(...)) == end().
template <typename K>
size_t FindIndex(const K& element, size_t hash) const {
+ // Guard against probing an empty table: with zero buckets there is no valid index.
+ if (UNLIKELY(NumBuckets() == 0)) {
+ return 0;
+ }
DCHECK_EQ(hashfn_(element), hash);
size_t index = IndexForHash(hash);
while (true) {
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index fd9eb45e3f..4ef1f9e8c9 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -186,6 +186,12 @@ TEST_F(HashSetTest, TestShrink) {
// Shrink again, the load factor should be good again.
hash_set.ShrinkToMaximumLoad();
EXPECT_DOUBLE_EQ(initial_load, hash_set.CalculateLoadFactor());
+
+ // Make sure all the initial elements we had are still there
+ for (const std::string& initial_string : strings) {
+ EXPECT_NE(hash_set.end(), hash_set.Find(initial_string))
+ << "expected to find " << initial_string;
+ }
}
TEST_F(HashSetTest, TestStress) {
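Taken together, the const begin()/end() overloads and the zero-bucket guards make patterns like the following well defined (sketch only, not a test in this change; StringSet stands for a HashSet instantiation such as the one used in these tests):

    void DumpAndProbe(const StringSet& set) {
      for (const std::string& s : set) {       // uses the new const begin()/end()
        LOG(INFO) << s;
      }
      // Safe even if the set has never allocated buckets: FindIndex()/IndexForHash() now
      // return 0 instead of dividing by zero, and Find() compares equal to end().
      CHECK(set.Find("not-there") == set.end());
    }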
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index e48d170635..c591a51886 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -61,6 +61,7 @@ ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
+Mutex* Locks::lambda_table_lock_ = nullptr;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -946,6 +947,7 @@ void Locks::Init() {
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
+ DCHECK(lambda_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1048,6 +1050,10 @@ void Locks::Init() {
DCHECK(reference_queue_soft_references_lock_ == nullptr);
reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kLambdaTableLock);
+ DCHECK(lambda_table_lock_ == nullptr);
+ lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index f87467a0e1..5b258e5ddb 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,6 +60,7 @@ enum LockLevel {
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kLambdaTableLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
kTransactionLogLock,
@@ -648,6 +649,10 @@ class Locks {
// Have an exclusive logging thread.
static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
+ // Guards the boxed table of lambda objects (readers and writers both take this plain Mutex).
+ // TODO: this should be a ReaderWriterMutex, except that ConditionVariables don't work with it.
+ static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
};
} // namespace art
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 45fb9c4b8d..0ae32f4785 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1206,6 +1206,8 @@ class GuardedCopy {
const_cast<char*>(copy->StartRedZone())[i] = kCanary[j];
if (kCanary[j] == '\0') {
j = 0;
+ } else {
+ j++;
}
}
@@ -1217,6 +1219,8 @@ class GuardedCopy {
const_cast<char*>(copy->EndRedZone())[i] = kCanary[j];
if (kCanary[j] == '\0') {
j = 0;
+ } else {
+ j++;
}
}
@@ -1367,6 +1371,8 @@ class GuardedCopy {
}
if (kCanary[j] == '\0') {
j = 0;
+ } else {
+ j++;
}
}
@@ -1381,6 +1387,8 @@ class GuardedCopy {
}
if (kCanary[j] == '\0') {
j = 0;
+ } else {
+ j++;
}
}
return true;
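The added else-branches fix loops that never advanced `j`: since `j` starts at 0 and the first canary byte is non-NUL, the old fill stamped every red-zone byte with kCanary[0], and because the verification loops had the same stuck index the mismatch went unnoticed. A standalone model of the corrected fill (the canary literal and buffer size are assumptions, not taken from check_jni.cc):

    const char kCanary[] = "JNI BUFFER RED ZONE";  // assumed literal
    char red_zone[32];
    for (size_t i = 0, j = 0; i < sizeof(red_zone); ++i) {
      red_zone[i] = kCanary[j];
      if (kCanary[j] == '\0') {
        j = 0;   // wrap, keeping the terminating NUL as part of the pattern
      } else {
        j++;     // the missing advance: without it every byte was kCanary[0]
      }
    }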
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 122c35fdc3..8f7862a3b9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -195,7 +195,9 @@ struct FieldGapsComparator {
bool operator() (const FieldGap& lhs, const FieldGap& rhs)
NO_THREAD_SAFETY_ANALYSIS {
// Sort by gap size, largest first. Secondary sort by starting offset.
- return lhs.size > rhs.size || (lhs.size == rhs.size && lhs.start_offset < rhs.start_offset);
+ // Note that the priority queue returns the largest element, so operator()
+ // should return true if lhs is less than rhs.
+ return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset);
}
};
typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
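std::priority_queue keeps the element that the comparator ranks "largest" on top, so a comparator that returns true when lhs is the smaller gap yields the largest gap first, which is what the field-packing code wants. A minimal standalone check of the fixed ordering (the types here are simplified stand-ins, not ART's FieldGap):

    #include <cstddef>
    #include <queue>
    #include <vector>

    struct Gap { size_t size; size_t start; };
    struct GapLess {
      bool operator()(const Gap& lhs, const Gap& rhs) const {
        // true == "lhs has lower priority": smaller gaps, and among equal sizes larger
        // start offsets, sink toward the bottom.
        return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start > rhs.start);
      }
    };

    std::priority_queue<Gap, std::vector<Gap>, GapLess> gaps;
    // gaps.push({8, 16}); gaps.push({4, 0}); gaps.push({8, 8});
    // gaps.top() is now {8, 8}: largest size first, ties broken by smallest start offset.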
@@ -3038,6 +3040,18 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifyingAtRuntime, self);
}
+ // Skip verification if we are forcing a soft fail.
+ // This has to be before the normal verification enabled check,
+ // since technically verification is disabled in this mode.
+ if (UNLIKELY(Runtime::Current()->IsVerificationSoftFail())) {
+ // Force verification to be a 'soft failure'.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
+ // As this is a fake verified status, make sure the methods are _not_ marked preverified
+ // later.
+ klass->SetPreverified();
+ return;
+ }
+
// Skip verification if disabled.
if (!Runtime::Current()->IsVerificationEnabled()) {
mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index e4f7b7a3e7..b60cba43d0 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -406,6 +406,9 @@ class ClassLinker {
const void* GetOatMethodQuickCodeFor(ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
@@ -484,9 +487,6 @@ class ClassLinker {
void DropFindArrayClassCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 97d170e39c..eccebf1fc4 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2100,6 +2100,7 @@ JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
case kWaitingInMainDebuggerLoop:
case kWaitingInMainSignalCatcherLoop:
case kWaitingPerformingGc:
+ case kWaitingWeakRootRead:
case kWaiting:
return JDWP::TS_WAIT;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 0ddbf7c7a8..df2d3799ab 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -488,6 +488,12 @@ class Instruction {
// Returns true if the instruction allows control flow to go to the following instruction.
bool CanFlowThrough() const;
+ // Returns true if the instruction is a quickened instruction.
+ bool IsQuickened() const {
+ return (kInstructionIndexTypes[Opcode()] == kIndexFieldOffset) ||
+ (kInstructionIndexTypes[Opcode()] == kIndexVtableOffset);
+ }
+
// Returns true if this instruction is a switch.
bool IsSwitch() const {
return (kInstructionFlags[Opcode()] & kSwitch) != 0;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 4035143d5b..abb1d3dcf7 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1006,7 +1006,7 @@ void MarkSweep::ReMarkRoots() {
void MarkSweep::SweepSystemWeaks(Thread* self) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Runtime::Current()->SweepSystemWeaks(this);
}
diff --git a/runtime/gc/weak_root_state.h b/runtime/gc/weak_root_state.h
new file mode 100644
index 0000000000..b66f19d4d8
--- /dev/null
+++ b/runtime/gc/weak_root_state.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
+#define ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
+
+#include <iosfwd>
+
+namespace art {
+namespace gc {
+
+enum WeakRootState {
+ // Can read or add weak roots.
+ kWeakRootStateNormal,
+ // Need to wait until we can read weak roots.
+ kWeakRootStateNoReadsOrWrites,
+ // Need to mark new weak roots to make sure they don't get swept.
+ kWeakRootStateMarkNewRoots,
+};
+
+std::ostream& operator<<(std::ostream& os, const WeakRootState&);
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_WEAK_ROOT_STATE_H_
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 6ea047fbe4..ae521b164e 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -21,6 +21,7 @@
#include "gc_root-inl.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
+#include "gc/weak_root_state.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
@@ -32,8 +33,8 @@ namespace art {
InternTable::InternTable()
: image_added_to_intern_table_(false), log_new_roots_(false),
- allow_new_interns_(true),
- new_intern_condition_("New intern condition", *Locks::intern_table_lock_) {
+ weak_intern_condition_("New intern condition", *Locks::intern_table_lock_),
+ weak_root_state_(gc::kWeakRootStateNormal) {
}
size_t InternTable::Size() const {
@@ -89,6 +90,7 @@ mirror::String* InternTable::LookupStrong(mirror::String* s) {
}
mirror::String* InternTable::LookupWeak(mirror::String* s) {
+ // TODO: Return only if marked.
return weak_interns_.Find(s);
}
@@ -183,8 +185,7 @@ void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) {
}
}
-mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+mirror::String* InternTable::LookupStringFromImage(mirror::String* s) {
if (image_added_to_intern_table_) {
return nullptr;
}
@@ -212,48 +213,61 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s)
return nullptr;
}
-void InternTable::AllowNewInterns() {
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::intern_table_lock_);
- allow_new_interns_ = true;
- new_intern_condition_.Broadcast(self);
-}
-
-void InternTable::DisallowNewInterns() {
- Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::intern_table_lock_);
- allow_new_interns_ = false;
-}
-
-void InternTable::EnsureNewInternsDisallowed() {
+void InternTable::EnsureNewWeakInternsDisallowed() {
// Lock and unlock once to ensure that no threads are still in the
// middle of adding new interns.
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- CHECK(!allow_new_interns_);
+ CHECK_EQ(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
}
void InternTable::BroadcastForNewInterns() {
CHECK(kUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
- new_intern_condition_.Broadcast(self);
+ weak_intern_condition_.Broadcast(self);
+}
+
+void InternTable::WaitUntilAccessible(Thread* self) {
+ Locks::intern_table_lock_->ExclusiveUnlock(self);
+ self->TransitionFromRunnableToSuspended(kWaitingWeakRootRead);
+ Locks::intern_table_lock_->ExclusiveLock(self);
+ while (weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) {
+ weak_intern_condition_.Wait(self);
+ }
+ Locks::intern_table_lock_->ExclusiveUnlock(self);
+ self->TransitionFromSuspendedToRunnable();
+ Locks::intern_table_lock_->ExclusiveLock(self);
}
-mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
+mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool holding_locks) {
if (s == nullptr) {
return nullptr;
}
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
- while (UNLIKELY((!kUseReadBarrier && !allow_new_interns_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
- new_intern_condition_.WaitHoldingLocks(self);
+ if (kDebugLocking && !holding_locks) {
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ CHECK_EQ(2u, self->NumberOfHeldMutexes()) << "may only safely hold the mutator lock";
}
- // Check the strong table for a match.
- mirror::String* strong = LookupStrong(s);
- if (strong != nullptr) {
- return strong;
+ while (true) {
+ // Check the strong table for a match.
+ mirror::String* strong = LookupStrong(s);
+ if (strong != nullptr) {
+ return strong;
+ }
+ // weak_root_state_ is set to gc::kWeakRootStateNoReadsOrWrites in the GC pause but is only
+ // cleared after SweepSystemWeaks has completed. This is why we need to wait until it is
+ // cleared.
+ if (weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
+ break;
+ }
+ CHECK(!holding_locks);
+ StackHandleScope<1> hs(self);
+ auto h = hs.NewHandleWrapper(&s);
+ WaitUntilAccessible(self);
}
+ CHECK_NE(weak_root_state_, gc::kWeakRootStateNoReadsOrWrites);
+ DCHECK_NE(weak_root_state_, gc::kWeakRootStateMarkNewRoots) << "Unsupported";
// There is no match in the strong table, check the weak table.
mirror::String* weak = LookupWeak(s);
if (weak != nullptr) {
@@ -284,12 +298,17 @@ mirror::String* InternTable::InternStrong(const char* utf8_data) {
return InternStrong(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
}
+mirror::String* InternTable::InternImageString(mirror::String* s) {
+ // May be holding the heap bitmap lock.
+ return Insert(s, true, true);
+}
+
mirror::String* InternTable::InternStrong(mirror::String* s) {
- return Insert(s, true);
+ return Insert(s, true, false);
}
mirror::String* InternTable::InternWeak(mirror::String* s) {
- return Insert(s, false);
+ return Insert(s, false, false);
}
bool InternTable::ContainsWeak(mirror::String* s) {
@@ -300,6 +319,8 @@ bool InternTable::ContainsWeak(mirror::String* s) {
void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
weak_interns_.SweepWeaks(visitor);
+ // Done sweeping, back to a normal state.
+ ChangeWeakRootStateLocked(gc::kWeakRootStateNormal);
}
void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) {
@@ -425,4 +446,16 @@ size_t InternTable::Table::Size() const {
return pre_zygote_table_.Size() + post_zygote_table_.Size();
}
+void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ ChangeWeakRootStateLocked(new_state);
+}
+
+void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
+ weak_root_state_ = new_state;
+ if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
+ weak_intern_condition_.Broadcast(Thread::Current());
+ }
+}
+
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 67a8b34965..ef08d74c7f 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -19,10 +19,12 @@
#include <unordered_set>
+#include "atomic.h"
#include "base/allocator.h"
#include "base/hash_set.h"
#include "base/mutex.h"
#include "gc_root.h"
+#include "gc/weak_root_state.h"
#include "object_callbacks.h"
namespace art {
@@ -54,18 +56,22 @@ class InternTable {
public:
InternTable();
- // Interns a potentially new string in the 'strong' table. (See above.)
+ // Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Interns a potentially new string in the 'strong' table. (See above.)
+ // Only used by image writer.
+ mirror::String* InternImageString(mirror::String* s)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(const char* utf8_data)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Interns a potentially new string in the 'strong' table. (See above.)
+ // Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Interns a potentially new string in the 'weak' table. (See above.)
+ // Interns a potentially new string in the 'weak' table. May cause thread suspension.
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SweepInternTableWeaks(IsMarkedVisitor* visitor)
@@ -89,6 +95,7 @@ class InternTable {
void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void EnsureNewInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void BroadcastForNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void EnsureNewWeakInternsDisallowed() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image space into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls.
@@ -112,6 +119,10 @@ class InternTable {
size_t WriteToMemory(uint8_t* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::intern_table_lock_);
+ // Change the weak root state. May broadcast to waiters.
+ void ChangeWeakRootState(gc::WeakRootState new_state)
+ LOCKS_EXCLUDED(Locks::intern_table_lock_);
+
private:
class StringHashEquals {
public:
@@ -176,7 +187,7 @@ class InternTable {
};
// Insert if non null, otherwise return null.
- mirror::String* Insert(mirror::String* s, bool is_strong)
+ mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -221,10 +232,17 @@ class InternTable {
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Change the weak root state. May broadcast to waiters.
+ void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
+
+ // Wait until we can read weak roots.
+ void WaitUntilAccessible(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
- bool allow_new_interns_ GUARDED_BY(Locks::intern_table_lock_);
- ConditionVariable new_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
+ ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (strong) roots, they need a read barrier to
// enable concurrent intern table (strong) root scan. Do not
// directly access the strings in it. Use functions that contain
@@ -236,6 +254,8 @@ class InternTable {
// not directly access the strings in it. Use functions that contain
// read barriers.
Table weak_interns_ GUARDED_BY(Locks::intern_table_lock_);
+ // Weak root state, used for concurrent system weak processing and more.
+ gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
};
} // namespace art
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 776b6a3521..9babb18325 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -34,6 +34,7 @@
#include "dex_instruction-inl.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "handle_scope-inl.h"
+#include "lambda/box_table.h"
#include "mirror/class-inl.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
@@ -506,8 +507,8 @@ static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const In
uint32_t vreg_target_object = inst->VRegA_22x(inst_data);
uint32_t vreg_source_closure = inst->VRegB_22x();
- ArtMethod* const closure_method = ReadLambdaClosureFromVRegsOrThrow(shadow_frame,
- vreg_source_closure);
+ ArtMethod* closure_method = ReadLambdaClosureFromVRegsOrThrow(shadow_frame,
+ vreg_source_closure);
// Failed lambda target runtime check, an exception was raised.
if (UNLIKELY(closure_method == nullptr)) {
@@ -515,28 +516,21 @@ static inline bool DoBoxLambda(Thread* self, ShadowFrame& shadow_frame, const In
return false;
}
- // Convert the ArtMethod into a java.lang.reflect.Method which will serve
- // as the temporary 'boxed' version of the lambda. This is good enough
- // to check all the basic object identities that a boxed lambda must retain.
+ mirror::Object* closure_as_object =
+ Runtime::Current()->GetLambdaBoxTable()->BoxLambda(closure_method);
- // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
- // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
- // TODO: Repeated boxing should return the same object reference
- mirror::Method* method_as_object =
- mirror::Method::CreateFromArtMethod(self, closure_method);
-
- if (UNLIKELY(method_as_object == nullptr)) {
- // Most likely an OOM has occurred.
+ // Failed to box the lambda, an exception was raised.
+ if (UNLIKELY(closure_as_object == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
}
- shadow_frame.SetVRegReference(vreg_target_object, method_as_object);
+ shadow_frame.SetVRegReference(vreg_target_object, closure_as_object);
return true;
}
template <bool _do_check> SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-static inline bool DoUnboxLambda(Thread* self ATTRIBUTE_UNUSED,
+static inline bool DoUnboxLambda(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
uint16_t inst_data) {
@@ -556,23 +550,15 @@ static inline bool DoUnboxLambda(Thread* self ATTRIBUTE_UNUSED,
return false;
}
- // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
- if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
- ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
+ ArtMethod* unboxed_closure = nullptr;
+ // Raise an exception if unboxing fails.
+ if (!Runtime::Current()->GetLambdaBoxTable()->UnboxLambda(boxed_closure_object,
+ &unboxed_closure)) {
+ CHECK(self->IsExceptionPending());
return false;
}
- // TODO(iam): We must check that the closure object extends/implements the type
- // specified in [type id]. This is not currently implemented since it's always a Method.
-
- // If we got this far, the inputs are valid.
- // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
- mirror::AbstractMethod* boxed_closure_as_method =
- down_cast<mirror::AbstractMethod*>(boxed_closure_object);
-
- ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
DCHECK(unboxed_closure != nullptr);
-
WriteLambdaClosureIntoVRegs(shadow_frame, *unboxed_closure, vreg_target_closure);
return true;
}
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
new file mode 100644
index 0000000000..64a6076aea
--- /dev/null
+++ b/runtime/lambda/box_table.cc
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "lambda/box_table.h"
+
+#include "base/mutex.h"
+#include "common_throws.h"
+#include "gc_root-inl.h"
+#include "mirror/method.h"
+#include "mirror/object-inl.h"
+#include "thread.h"
+
+#include <vector>
+
+namespace art {
+namespace lambda {
+
+BoxTable::BoxTable()
+ : allow_new_weaks_(true),
+ new_weaks_condition_("lambda box table allowed weaks", *Locks::lambda_table_lock_) {}
+
+mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
+ Thread* self = Thread::Current();
+
+ {
+ // TODO: Switch to ReaderMutexLock if ConditionVariable ever supports RW Mutexes
+ /*Reader*/MutexLock mu(self, *Locks::lambda_table_lock_);
+ BlockUntilWeaksAllowed();
+
+ // Attempt to look up this object, it's possible it was already boxed previously.
+ // If this is the case we *must* return the same object as before to maintain
+ // referential equality.
+ //
+ // In managed code:
+ // Functional f = () -> 5; // vF = create-lambda
+ // Object a = f; // vA = box-lambda vF
+ // Object b = f; // vB = box-lambda vF
+ // assert(a == f)
+ ValueType value = FindBoxedLambda(closure);
+ if (!value.IsNull()) {
+ return value.Read();
+ }
+
+ // Otherwise we need to box ourselves and insert it into the hash map
+ }
+
+ // Release the lambda table lock here, so that thread suspension is allowed.
+
+ // Convert the ArtMethod into a java.lang.reflect.Method which will serve
+ // as the temporary 'boxed' version of the lambda. This is good enough
+ // to check all the basic object identities that a boxed lambda must retain.
+
+ // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
+ // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
+ mirror::Method* method_as_object =
+ mirror::Method::CreateFromArtMethod(self, closure);
+ // There are no thread suspension points after this, so we don't need to put it into a handle.
+
+ if (UNLIKELY(method_as_object == nullptr)) {
+ // Most likely an OOM has occurred.
+ CHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ // The method has been successfully boxed into an object, now insert it into the hash map.
+ {
+ MutexLock mu(self, *Locks::lambda_table_lock_);
+ BlockUntilWeaksAllowed();
+
+ // Look up the object again; another thread may have boxed the same closure while
+ // we were allocating the object.
+ ValueType value = FindBoxedLambda(closure);
+ if (UNLIKELY(!value.IsNull())) {
+ // Let the GC clean up method_as_object at a later time.
+ return value.Read();
+ }
+
+ // Otherwise we should insert it into the hash map in this thread.
+ map_.Insert(std::make_pair(closure, ValueType(method_as_object)));
+ }
+
+ return method_as_object;
+}
+
+bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
+ DCHECK(object != nullptr);
+ *out_closure = nullptr;
+
+ // Note that we do not need to access lambda_table_lock_ here
+ // since we don't need to look at the map.
+
+ mirror::Object* boxed_closure_object = object;
+
+ // Raise ClassCastException if object is not instanceof java.lang.reflect.Method
+ if (UNLIKELY(!boxed_closure_object->InstanceOf(mirror::Method::StaticClass()))) {
+ ThrowClassCastException(mirror::Method::StaticClass(), boxed_closure_object->GetClass());
+ return false;
+ }
+
+ // TODO(iam): We must check that the closure object extends/implements the type
+ // specified in [type id]. This is not currently implemented since it's always a Method.
+
+ // If we got this far, the inputs are valid.
+ // Write out the java.lang.reflect.Method's embedded ArtMethod* into the vreg target.
+ mirror::AbstractMethod* boxed_closure_as_method =
+ down_cast<mirror::AbstractMethod*>(boxed_closure_object);
+
+ ArtMethod* unboxed_closure = boxed_closure_as_method->GetArtMethod();
+ DCHECK(unboxed_closure != nullptr);
+
+ *out_closure = unboxed_closure;
+ return true;
+}
+
+BoxTable::ValueType BoxTable::FindBoxedLambda(const ClosureType& closure) const {
+ auto map_iterator = map_.Find(closure);
+ if (map_iterator != map_.end()) {
+ const std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+ const ValueType& value = key_value_pair.second;
+
+ DCHECK(!value.IsNull()); // Never store null boxes.
+ return value;
+ }
+
+ return ValueType(nullptr);
+}
+
+void BoxTable::BlockUntilWeaksAllowed() {
+ Thread* self = Thread::Current();
+ while (UNLIKELY(allow_new_weaks_ == false)) {
+ new_weaks_condition_.WaitHoldingLocks(self); // wait while holding mutator lock
+ }
+}
+
+void BoxTable::SweepWeakBoxedLambdas(IsMarkedVisitor* visitor) {
+ DCHECK(visitor != nullptr);
+
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::lambda_table_lock_);
+
+ /*
+ * Visit every weak root in our lambda box table.
+ * Remove unmarked objects, update marked objects to new address.
+ */
+ std::vector<ClosureType> remove_list;
+ for (auto map_iterator = map_.begin(); map_iterator != map_.end(); ) {
+ std::pair<ClosureType, ValueType>& key_value_pair = *map_iterator;
+
+ const ValueType& old_value = key_value_pair.second;
+
+ // This does not need a read barrier because this is called by GC.
+ mirror::Object* old_value_raw = old_value.Read<kWithoutReadBarrier>();
+ mirror::Object* new_value = visitor->IsMarked(old_value_raw);
+
+ if (new_value == nullptr) {
+ const ClosureType& closure = key_value_pair.first;
+ // The object has been swept away.
+ // Delete the entry from the map.
+ map_iterator = map_.Erase(map_.Find(closure));
+ } else {
+ // The object has been moved.
+ // Update the map.
+ key_value_pair.second = ValueType(new_value);
+ ++map_iterator;
+ }
+ }
+
+ // Occasionally shrink the map to avoid growing very large.
+ if (map_.CalculateLoadFactor() < kMinimumLoadFactor) {
+ map_.ShrinkToMaximumLoad();
+ }
+}
+
+void BoxTable::DisallowNewWeakBoxedLambdas() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::lambda_table_lock_);
+
+ allow_new_weaks_ = false;
+}
+
+void BoxTable::AllowNewWeakBoxedLambdas() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::lambda_table_lock_);
+
+ allow_new_weaks_ = true;
+ new_weaks_condition_.Broadcast(self);
+}
+
+void BoxTable::EnsureNewWeakBoxedLambdasDisallowed() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::lambda_table_lock_);
+ CHECK_EQ(allow_new_weaks_, false);
+}
+
+bool BoxTable::EqualsFn::operator()(const ClosureType& lhs, const ClosureType& rhs) const {
+ // Nothing needs this right now, but leave this assertion for later when
+ // we need to look at the references inside of the closure.
+ if (kIsDebugBuild) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ }
+
+ // TODO: Need rework to use read barriers once closures have references inside of them that can
+ // move. Until then, it's safe to just compare the data inside of it directly.
+ return lhs == rhs;
+}
+
+} // namespace lambda
+} // namespace art
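A hedged usage sketch tying the pieces together; the wrapper function and its name are hypothetical, while the referential-equality guarantee is the one documented in BoxLambda() above.

    void RoundTrip(ArtMethod* closure) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      lambda::BoxTable* table = Runtime::Current()->GetLambdaBoxTable();
      mirror::Object* first = table->BoxLambda(closure);
      mirror::Object* second = table->BoxLambda(closure);
      CHECK_EQ(first, second);              // re-boxing yields the same box object
      ArtMethod* unboxed = nullptr;
      CHECK(table->UnboxLambda(first, &unboxed));
      CHECK_EQ(unboxed, closure);           // unboxing round-trips to the original closure
    }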
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
new file mode 100644
index 0000000000..12d3ff3ac6
--- /dev/null
+++ b/runtime/lambda/box_table.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_RUNTIME_LAMBDA_BOX_TABLE_H_
+#define ART_RUNTIME_LAMBDA_BOX_TABLE_H_
+
+#include "base/allocator.h"
+#include "base/hash_map.h"
+#include "gc_root.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "object_callbacks.h"
+
+#include <stdint.h>
+
+namespace art {
+
+class ArtMethod; // forward declaration
+
+namespace mirror {
+class Object; // forward declaration
+} // namespace mirror
+
+namespace lambda {
+
+/*
+ * Store a table of boxed lambdas. This is required to maintain object referential equality
+ * when a lambda is re-boxed.
+ *
+ * Conceptually, we store a mapping of Closures -> Weak Reference<Boxed Lambda Object>.
+ * When too many objects get GCd, we shrink the underlying table to use less space.
+ */
+class BoxTable FINAL {
+ public:
+ using ClosureType = art::ArtMethod*;
+
+ // Boxes a closure into an object. Returns null and throws an exception on failure.
+ mirror::Object* BoxLambda(const ClosureType& closure)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+ // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
+ bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ // Sweep weak references to lambda boxes. Update the addresses if the objects have been
+ // moved, and delete them from the table if the objects have been cleaned up.
+ void SweepWeakBoxedLambdas(IsMarkedVisitor* visitor)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+ // GC callback: Temporarily block anyone from touching the map.
+ void DisallowNewWeakBoxedLambdas()
+ LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+ // GC callback: Unblock any readers who have been queued waiting to touch the map.
+ void AllowNewWeakBoxedLambdas()
+ LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+ // GC callback: Verify that the state is now blocking anyone from touching the map.
+ void EnsureNewWeakBoxedLambdasDisallowed()
+ LOCKS_EXCLUDED(Locks::lambda_table_lock_);
+
+ BoxTable();
+ ~BoxTable() = default;
+
+ private:
+ // FIXME: This needs to be a GcRoot.
+ // Explanation:
+ // - After all threads are suspended (exclusive mutator lock),
+ // the concurrent-copying GC can move objects from the "from" space to the "to" space.
+ // If an object is moved at that time and *before* SweepSystemWeaks are called then
+ // we don't know if the move has happened yet.
+ // Successive reads will then (incorrectly) look at the objects in the "from" space,
+ // which is a problem since the objects have been already forwarded and mutations
+ // would not be visible in the right space.
+ // Instead, use a GcRoot here which will be automatically updated by the GC.
+ //
+ // Also, any reads should be protected by a read barrier to always give us the "to" space address.
+ using ValueType = GcRoot<mirror::Object>;
+
+ // Attempt to look up the lambda in the map, or return null if it's not there yet.
+ ValueType FindBoxedLambda(const ClosureType& closure) const
+ SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+
+ // If the GC has come in and temporarily disallowed touching weaks, block until it is allowed.
+ void BlockUntilWeaksAllowed()
+ SHARED_LOCKS_REQUIRED(Locks::lambda_table_lock_);
+
+ // EmptyFn implementation for art::HashMap
+ struct EmptyFn {
+ void MakeEmpty(std::pair<ClosureType, ValueType>& item) const {
+ item.first = nullptr;
+ }
+ bool IsEmpty(const std::pair<ClosureType, ValueType>& item) const {
+ return item.first == nullptr;
+ }
+ };
+
+ // HashFn implementation for art::HashMap
+ struct HashFn {
+ size_t operator()(const ClosureType& key) const {
+ // TODO(iam): Rewrite hash function when ClosureType is no longer an ArtMethod*
+ return static_cast<size_t>(reinterpret_cast<uintptr_t>(key));
+ }
+ };
+
+ // EqualsFn implementation for art::HashMap
+ struct EqualsFn {
+ bool operator()(const ClosureType& lhs, const ClosureType& rhs) const;
+ };
+
+ using UnorderedMap = art::HashMap<ClosureType,
+ ValueType,
+ EmptyFn,
+ HashFn,
+ EqualsFn,
+ TrackingAllocator<std::pair<ClosureType, ValueType>,
+ kAllocatorTagLambdaBoxTable>>;
+
+ UnorderedMap map_ GUARDED_BY(Locks::lambda_table_lock_);
+ bool allow_new_weaks_ GUARDED_BY(Locks::lambda_table_lock_);
+ ConditionVariable new_weaks_condition_ GUARDED_BY(Locks::lambda_table_lock_);
+
+ // Shrink the map when we get below this load factor.
+ // (This is an arbitrary value, chosen low enough that routine erases during sweeping do
+ // not shrink the table on every pass.)
+ static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
+
+ DISALLOW_COPY_AND_ASSIGN(BoxTable);
+};
+
+} // namespace lambda
+} // namespace art
+
+#endif // ART_RUNTIME_LAMBDA_BOX_TABLE_H_
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 99d697a6d3..6240b3be7a 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -60,9 +60,8 @@ class MANAGED AbstractMethod : public AccessibleObject {
HeapReference<mirror::Class> declaring_class_;
HeapReference<mirror::Class> declaring_class_of_overridden_method_;
- uint32_t padding_;
- uint64_t art_method_;
uint32_t access_flags_;
+ uint64_t art_method_;
uint32_t dex_method_index_;
friend struct art::AbstractMethodOffsets; // for verifying offset information
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 736b42b739..97aae67178 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -105,15 +105,21 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
dstArray->AsShortSizedArray()->Memmove(dstPos, srcArray->AsShortSizedArray(), srcPos, count);
return;
case Primitive::kPrimInt:
- case Primitive::kPrimFloat:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
dstArray->AsIntArray()->Memmove(dstPos, srcArray->AsIntArray(), srcPos, count);
return;
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
+ dstArray->AsFloatArray()->Memmove(dstPos, srcArray->AsFloatArray(), srcPos, count);
+ return;
case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
dstArray->AsLongArray()->Memmove(dstPos, srcArray->AsLongArray(), srcPos, count);
return;
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
+ dstArray->AsDoubleArray()->Memmove(dstPos, srcArray->AsDoubleArray(), srcPos, count);
+ return;
case Primitive::kPrimNot: {
mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>();
mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>();
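The float and double cases copy the same number of bytes as before, so the visible change is that each primitive now goes through the accessor whose static type matches the array's actual class; presumably the As*Array() down-casts are type-checked in debug builds, which is what this keeps consistent. A trivial compile-time note of the size equivalence:

    static_assert(sizeof(int32_t) == sizeof(float), "int/float copies move the same bytes");
    static_assert(sizeof(int64_t) == sizeof(double), "long/double copies move the same bytes");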
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 6569d833c5..b40d94a1a7 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -90,6 +90,7 @@ static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean ha
case kWaitingInMainSignalCatcherLoop: return kJavaWaiting;
case kWaitingForMethodTracingStart: return kJavaWaiting;
case kWaitingForVisitObjects: return kJavaWaiting;
+ case kWaitingWeakRootRead: return kJavaWaiting;
case kSuspended: return kJavaRunnable;
// Don't add a 'default' here so the compiler can spot incompatible enum changes.
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 3451d0f7b4..ee2f3f60f3 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '6', '6', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '6', '7', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index d08af71e6e..77723545a2 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -244,10 +244,11 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.AppendValues()
.IntoKey(M::ImageCompilerOptions)
.Define("-Xverify:_")
- .WithType<bool>()
- .WithValueMap({{"none", false},
- {"remote", true},
- {"all", true}})
+ .WithType<verifier::VerifyMode>()
+ .WithValueMap({{"none", verifier::VerifyMode::kNone},
+ {"remote", verifier::VerifyMode::kEnable},
+ {"all", verifier::VerifyMode::kEnable},
+ {"softfail", verifier::VerifyMode::kSoftFail}})
.IntoKey(M::Verify)
.Define("-XX:NativeBridge=_")
.WithType<std::string>()
@@ -686,7 +687,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -esa\n");
UsageMessage(stream, " -dsa\n");
UsageMessage(stream, " (-enablesystemassertions, -disablesystemassertions)\n");
- UsageMessage(stream, " -Xverify:{none,remote,all}\n");
+ UsageMessage(stream, " -Xverify:{none,remote,all,softfail}\n");
UsageMessage(stream, " -Xrs\n");
UsageMessage(stream, " -Xint:portable, -Xint:fast, -Xint:jit\n");
UsageMessage(stream, " -Xdexopt:{none,verified,all,full}\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3b0ca9e28a..cc8b215049 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -75,6 +75,7 @@
#include "jit/jit.h"
#include "jni_internal.h"
#include "linear_alloc.h"
+#include "lambda/box_table.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -185,7 +186,7 @@ Runtime::Runtime()
system_class_loader_(nullptr),
dump_gc_performance_on_shutdown_(false),
preinitialization_transaction_(nullptr),
- verify_(false),
+ verify_(verifier::VerifyMode::kNone),
allow_dex_file_fallback_(true),
target_sdk_version_(0),
implicit_null_checks_(false),
@@ -408,6 +409,7 @@ void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
GetMonitorList()->SweepMonitorList(visitor);
GetJavaVM()->SweepJniWeakGlobals(visitor);
GetHeap()->SweepAllocationRecords(visitor);
+ GetLambdaBoxTable()->SweepWeakBoxedLambdas(visitor);
}
bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
@@ -912,6 +914,9 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
jit_options_->SetUseJIT(false);
}
+ // Allocate a global table of boxed lambda objects <-> closures.
+ lambda_box_table_ = MakeUnique<lambda::BoxTable>();
+
// Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
// can't be trimmed as easily.
const bool use_malloc = IsAotCompiler();
@@ -1497,24 +1502,27 @@ ArtMethod* Runtime::CreateCalleeSaveMethod() {
void Runtime::DisallowNewSystemWeaks() {
monitor_list_->DisallowNewMonitors();
- intern_table_->DisallowNewInterns();
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
heap_->DisallowNewAllocationRecords();
+ lambda_box_table_->DisallowNewWeakBoxedLambdas();
}
void Runtime::AllowNewSystemWeaks() {
monitor_list_->AllowNewMonitors();
- intern_table_->AllowNewInterns();
+ intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping?
java_vm_->AllowNewWeakGlobals();
heap_->AllowNewAllocationRecords();
+ lambda_box_table_->AllowNewWeakBoxedLambdas();
}
void Runtime::EnsureNewSystemWeaksDisallowed() {
// Lock and unlock the system weak locks once to ensure that no
// threads are still in the middle of adding new system weaks.
monitor_list_->EnsureNewMonitorsDisallowed();
- intern_table_->EnsureNewInternsDisallowed();
+ intern_table_->EnsureNewWeakInternsDisallowed();
java_vm_->EnsureNewWeakGlobalsDisallowed();
+ lambda_box_table_->EnsureNewWeakBoxedLambdasDisallowed();
}
void Runtime::BroadcastForNewSystemWeaks() {
@@ -1757,4 +1765,12 @@ void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
imt_unimplemented_method_ = method;
}
+bool Runtime::IsVerificationEnabled() const {
+ return verify_ == verifier::VerifyMode::kEnable;
+}
+
+bool Runtime::IsVerificationSoftFail() const {
+ return verify_ == verifier::VerifyMode::kSoftFail;
+}
+
} // namespace art
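Roughly, the collector-side sequence these hooks support looks like the following; the call sites live in the GC outside this diff, the wrapper below is hypothetical, and `visitor` is the collector's IsMarkedVisitor.

    void PauseTimeWeakProcessing(IsMarkedVisitor* visitor) {  // hypothetical wrapper
      Runtime* runtime = Runtime::Current();
      runtime->DisallowNewSystemWeaks();   // intern table -> kWeakRootStateNoReadsOrWrites,
                                           // box table stops handing out weak entries
      runtime->SweepSystemWeaks(visitor);  // monitors, JNI weak globals, allocation records,
                                           // interned strings and boxed lambdas
      runtime->AllowNewSystemWeaks();      // back to kWeakRootStateNormal; mutators blocked
                                           // in Insert()/BoxLambda() are broadcast
    }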
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 9ee96a3d1b..55adaf1276 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -53,6 +53,10 @@ namespace jit {
class JitOptions;
} // namespace jit
+namespace lambda {
+ class BoxTable;
+} // namespace lambda
+
namespace mirror {
class ClassLoader;
class Array;
@@ -64,6 +68,7 @@ namespace mirror {
} // namespace mirror
namespace verifier {
class MethodVerifier;
+ enum class VerifyMode : int8_t;
} // namespace verifier
class ArenaPool;
class ArtMethod;
@@ -500,9 +505,8 @@ class Runtime {
return !implicit_so_checks_;
}
- bool IsVerificationEnabled() const {
- return verify_;
- }
+ bool IsVerificationEnabled() const;
+ bool IsVerificationSoftFail() const;
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
@@ -532,6 +536,10 @@ class Runtime {
return experimental_lambdas_;
}
+ lambda::BoxTable* GetLambdaBoxTable() const {
+ return lambda_box_table_.get();
+ }
+
// Create the JIT and instrumentation and code cache.
void CreateJit();
@@ -646,6 +654,8 @@ class Runtime {
std::unique_ptr<jit::Jit> jit_;
std::unique_ptr<jit::JitOptions> jit_options_;
+ std::unique_ptr<lambda::BoxTable> lambda_box_table_;
+
// Fault message, printed when we get a SIGSEGV.
Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::string fault_message_ GUARDED_BY(fault_message_lock_);
@@ -700,8 +710,8 @@ class Runtime {
// Transaction used for pre-initializing classes at compilation time.
Transaction* preinitialization_transaction_;
- // If false, verification is disabled. True by default.
- bool verify_;
+ // If kNone, verification is disabled. kEnable by default.
+ verifier::VerifyMode verify_;
// If true, the runtime may use dex files directly with the interpreter if an oat file is not
// available/usable.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index dc4c0c7493..9922c5f993 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -106,7 +106,8 @@ RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
CompilerOptions) // -Xcompiler-option ...
RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
ImageCompilerOptions) // -Ximage-compiler-option ...
-RUNTIME_OPTIONS_KEY (bool, Verify, true)
+RUNTIME_OPTIONS_KEY (verifier::VerifyMode, \
+ Verify, verifier::VerifyMode::kEnable)
RUNTIME_OPTIONS_KEY (std::string, NativeBridge)
RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 7e59000e09..88ac00a672 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -32,6 +32,7 @@
#include "gc/space/large_object_space.h"
#include "profiler_options.h"
#include "arch/instruction_set.h"
+#include "verifier/verify_mode.h"
#include <stdio.h>
#include <stdarg.h>
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 962132b29f..5544507c06 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -27,16 +27,17 @@ constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex;
constexpr uint32_t StackMap::kNoDexRegisterMap;
constexpr uint32_t StackMap::kNoInlineInfo;
-DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(uint16_t dex_register_number,
- uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(
+ uint16_t dex_register_number,
+ uint16_t number_of_dex_registers,
+ const CodeInfo& code_info,
+ const StackMapEncoding& enc) const {
DexRegisterLocationCatalog dex_register_location_catalog =
code_info.GetDexRegisterLocationCatalog(enc);
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfDexRegisterLocationCatalogEntries());
+ code_info.GetNumberOfLocationCatalogEntries());
return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
}
@@ -49,7 +50,7 @@ DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfDexRegisterLocationCatalogEntries());
+ code_info.GetNumberOfLocationCatalogEntries());
return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
}
@@ -140,8 +141,7 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info) {
StackMapEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
vios->Stream()
<< "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
@@ -157,8 +157,7 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers) const {
StackMapEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries =
- code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
// TODO: Display the bit mask of live Dex registers.
for (size_t j = 0; j < number_of_dex_registers; ++j) {
if (IsDexRegisterLive(j)) {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 1acc44291c..0d3816b97e 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -23,6 +23,12 @@
namespace art {
+#define ELEMENT_BYTE_OFFSET_AFTER(PreviousElement) \
+ k ## PreviousElement ## Offset + sizeof(PreviousElement ## Type)
+
+#define ELEMENT_BIT_OFFSET_AFTER(PreviousElement) \
+ k ## PreviousElement ## BitOffset + PreviousElement ## BitSize
+
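The two helpers above compute a field's offset from the one preceding it, assuming the surrounding class follows the kFooOffset / FooType naming convention (as InlineInfo below does). For instance, with names chosen per that convention purely to show the expansion:

    typedef uint32_t DexPcType;
    static constexpr size_t kDexPcOffset = 0;
    // ELEMENT_BYTE_OFFSET_AFTER(DexPc) expands to kDexPcOffset + sizeof(DexPcType),
    // i.e. the next element starts 4 bytes further in.
    static constexpr size_t kNativePcOffset = ELEMENT_BYTE_OFFSET_AFTER(DexPc);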
class VariableIndentationOutputStream;
// Size of a frame slot, in bytes. This constant is a signed value,
@@ -195,7 +201,9 @@ class DexRegisterLocation {
/**
* Store information on unique Dex register locations used in a method.
* The information is of the form:
- * [DexRegisterLocation+].
+ *
+ * [DexRegisterLocation+].
+ *
* DexRegisterLocations are either 1- or 5-byte wide (see art::DexRegisterLocation::Kind).
*/
class DexRegisterLocationCatalog {
@@ -432,7 +440,9 @@ class DexRegisterLocationCatalog {
/* Information on Dex register locations for a specific PC, mapping a
* stack map's Dex register to a location entry in a DexRegisterLocationCatalog.
* The information is of the form:
- * [live_bit_mask, entries*]
+ *
+ * [live_bit_mask, entries*]
+ *
* where entries are concatenated unsigned integer values encoded on a number
* of bits (fixed per DexRegisterMap instances of a CodeInfo object) depending
* on the number of entries in the Dex register location catalog
@@ -757,8 +767,9 @@ class StackMapEncoding {
* - Knowing the values of dex registers.
*
* The information is of the form:
- * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask,
- * stack_mask].
+ *
+ * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask,
+ * stack_mask].
*/
class StackMap {
public:
@@ -861,8 +872,6 @@ class StackMap {
static constexpr uint32_t kNoInlineInfo = -1;
private:
- // TODO: Instead of plain types such as "uint32_t", introduce
- // typedefs (and document the memory layout of StackMap).
static constexpr int kFixedSize = 0;
  // Loads `number_of_bytes` at the given `offset` and assembles a uint32_t. If `check_max` is true,
@@ -877,61 +886,74 @@ class StackMap {
/**
* Inline information for a specific PC. The information is of the form:
- * [inlining_depth, [dex_pc, method_index, dex_register_map_offset]+]
+ *
+ * [inlining_depth, entry+]
+ *
+ * where `entry` is of the form:
+ *
+ * [dex_pc, method_index, dex_register_map_offset].
*/
class InlineInfo {
public:
+ // Memory layout: fixed contents.
+ typedef uint8_t DepthType;
+ // Memory layout: single entry contents.
+ typedef uint32_t MethodIndexType;
+ typedef uint32_t DexPcType;
+ typedef uint8_t InvokeTypeType;
+ typedef uint32_t DexRegisterMapType;
+
explicit InlineInfo(MemoryRegion region) : region_(region) {}
- uint8_t GetDepth() const {
- return region_.LoadUnaligned<uint8_t>(kDepthOffset);
+ DepthType GetDepth() const {
+ return region_.LoadUnaligned<DepthType>(kDepthOffset);
}
- void SetDepth(uint8_t depth) {
- region_.StoreUnaligned<uint8_t>(kDepthOffset, depth);
+ void SetDepth(DepthType depth) {
+ region_.StoreUnaligned<DepthType>(kDepthOffset, depth);
}
- uint32_t GetMethodIndexAtDepth(uint8_t depth) const {
- return region_.LoadUnaligned<uint32_t>(
+ MethodIndexType GetMethodIndexAtDepth(DepthType depth) const {
+ return region_.LoadUnaligned<MethodIndexType>(
kFixedSize + depth * SingleEntrySize() + kMethodIndexOffset);
}
- void SetMethodIndexAtDepth(uint8_t depth, uint32_t index) {
- region_.StoreUnaligned<uint32_t>(
+ void SetMethodIndexAtDepth(DepthType depth, MethodIndexType index) {
+ region_.StoreUnaligned<MethodIndexType>(
kFixedSize + depth * SingleEntrySize() + kMethodIndexOffset, index);
}
- uint32_t GetDexPcAtDepth(uint8_t depth) const {
- return region_.LoadUnaligned<uint32_t>(
+ DexPcType GetDexPcAtDepth(DepthType depth) const {
+ return region_.LoadUnaligned<DexPcType>(
kFixedSize + depth * SingleEntrySize() + kDexPcOffset);
}
- void SetDexPcAtDepth(uint8_t depth, uint32_t dex_pc) {
- region_.StoreUnaligned<uint32_t>(
+ void SetDexPcAtDepth(DepthType depth, DexPcType dex_pc) {
+ region_.StoreUnaligned<DexPcType>(
kFixedSize + depth * SingleEntrySize() + kDexPcOffset, dex_pc);
}
- uint8_t GetInvokeTypeAtDepth(uint8_t depth) const {
- return region_.LoadUnaligned<uint8_t>(
+ InvokeTypeType GetInvokeTypeAtDepth(DepthType depth) const {
+ return region_.LoadUnaligned<InvokeTypeType>(
kFixedSize + depth * SingleEntrySize() + kInvokeTypeOffset);
}
- void SetInvokeTypeAtDepth(uint8_t depth, uint8_t invoke_type) {
- region_.StoreUnaligned<uint8_t>(
+ void SetInvokeTypeAtDepth(DepthType depth, InvokeTypeType invoke_type) {
+ region_.StoreUnaligned<InvokeTypeType>(
kFixedSize + depth * SingleEntrySize() + kInvokeTypeOffset, invoke_type);
}
- uint32_t GetDexRegisterMapOffsetAtDepth(uint8_t depth) const {
- return region_.LoadUnaligned<uint32_t>(
+ DexRegisterMapType GetDexRegisterMapOffsetAtDepth(DepthType depth) const {
+ return region_.LoadUnaligned<DexRegisterMapType>(
kFixedSize + depth * SingleEntrySize() + kDexRegisterMapOffset);
}
- void SetDexRegisterMapOffsetAtDepth(uint8_t depth, uint32_t offset) {
- region_.StoreUnaligned<uint32_t>(
+ void SetDexRegisterMapOffsetAtDepth(DepthType depth, DexRegisterMapType offset) {
+ region_.StoreUnaligned<DexRegisterMapType>(
kFixedSize + depth * SingleEntrySize() + kDexRegisterMapOffset, offset);
}
- bool HasDexRegisterMapAtDepth(uint8_t depth) const {
+ bool HasDexRegisterMapAtDepth(DepthType depth) const {
return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoDexRegisterMap;
}
@@ -942,17 +964,16 @@ class InlineInfo {
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& info, uint16_t* number_of_dex_registers) const;
+
private:
- // TODO: Instead of plain types such as "uint8_t", introduce
- // typedefs (and document the memory layout of InlineInfo).
static constexpr int kDepthOffset = 0;
- static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
+ static constexpr int kFixedSize = ELEMENT_BYTE_OFFSET_AFTER(Depth);
static constexpr int kMethodIndexOffset = 0;
- static constexpr int kDexPcOffset = kMethodIndexOffset + sizeof(uint32_t);
- static constexpr int kInvokeTypeOffset = kDexPcOffset + sizeof(uint32_t);
- static constexpr int kDexRegisterMapOffset = kInvokeTypeOffset + sizeof(uint8_t);
- static constexpr int kFixedEntrySize = kDexRegisterMapOffset + sizeof(uint32_t);
+ static constexpr int kDexPcOffset = ELEMENT_BYTE_OFFSET_AFTER(MethodIndex);
+ static constexpr int kInvokeTypeOffset = ELEMENT_BYTE_OFFSET_AFTER(DexPc);
+ static constexpr int kDexRegisterMapOffset = ELEMENT_BYTE_OFFSET_AFTER(InvokeType);
+ static constexpr int kFixedEntrySize = ELEMENT_BYTE_OFFSET_AFTER(DexRegisterMap);
MemoryRegion region_;
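With the typedefs introduced above, the macro chain reproduces the entry layout that the old hand-written constants spelled out. A standalone sketch of the arithmetic, with the values implied by the removed lines in this hunk:

    #include <cstdint>

    typedef uint32_t MethodIndexType;
    typedef uint32_t DexPcType;
    typedef uint8_t  InvokeTypeType;
    typedef uint32_t DexRegisterMapType;

    static constexpr int kMethodIndexOffset    = 0;
    static constexpr int kDexPcOffset          = kMethodIndexOffset + sizeof(MethodIndexType);        // 4
    static constexpr int kInvokeTypeOffset     = kDexPcOffset + sizeof(DexPcType);                    // 8
    static constexpr int kDexRegisterMapOffset = kInvokeTypeOffset + sizeof(InvokeTypeType);          // 9
    static constexpr int kFixedEntrySize       = kDexRegisterMapOffset + sizeof(DexRegisterMapType);  // 13

    static_assert(kFixedEntrySize == 13, "a single inline entry stays 13 bytes after the rewrite");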
@@ -964,11 +985,32 @@ class InlineInfo {
/**
* Wrapper around all compiler information collected for a method.
* The information is of the form:
- * [overall_size, number_of_location_catalog_entries, number_of_stack_maps, stack_mask_size,
- * DexRegisterLocationCatalog+, StackMap+, DexRegisterMap+, InlineInfo*].
+ *
+ * [overall_size, encoding_info, number_of_location_catalog_entries, number_of_stack_maps,
+ * stack_mask_size, DexRegisterLocationCatalog+, StackMap+, DexRegisterMap+, InlineInfo*]
+ *
+ * where `encoding_info` is of the form:
+ *
+ * [has_inline_info, inline_info_size_in_bytes, dex_register_map_size_in_bytes,
+ * dex_pc_size_in_bytes, native_pc_size_in_bytes, register_mask_size_in_bytes].
*/
class CodeInfo {
public:
+ // Memory layout: fixed contents.
+ typedef uint32_t OverallSizeType;
+ typedef uint16_t EncodingInfoType;
+ typedef uint32_t NumberOfLocationCatalogEntriesType;
+ typedef uint32_t NumberOfStackMapsType;
+ typedef uint32_t StackMaskSizeType;
+
+ // Memory (bit) layout: encoding info.
+ static constexpr int HasInlineInfoBitSize = 1;
+ static constexpr int InlineInfoBitSize = kNumberOfBitForNumberOfBytesForEncoding;
+ static constexpr int DexRegisterMapBitSize = kNumberOfBitForNumberOfBytesForEncoding;
+ static constexpr int DexPcBitSize = kNumberOfBitForNumberOfBytesForEncoding;
+ static constexpr int NativePcBitSize = kNumberOfBitForNumberOfBytesForEncoding;
+ static constexpr int RegisterMaskBitSize = kNumberOfBitForNumberOfBytesForEncoding;
+
explicit CodeInfo(MemoryRegion region) : region_(region) {}
explicit CodeInfo(const void* data) {
@@ -1018,33 +1060,35 @@ class CodeInfo {
return StackMap(GetStackMaps(encoding).Subregion(i * stack_map_size, stack_map_size));
}
- uint32_t GetOverallSize() const {
- return region_.LoadUnaligned<uint32_t>(kOverallSizeOffset);
+ OverallSizeType GetOverallSize() const {
+ return region_.LoadUnaligned<OverallSizeType>(kOverallSizeOffset);
}
- void SetOverallSize(uint32_t size) {
- region_.StoreUnaligned<uint32_t>(kOverallSizeOffset, size);
+ void SetOverallSize(OverallSizeType size) {
+ region_.StoreUnaligned<OverallSizeType>(kOverallSizeOffset, size);
}
- uint32_t GetNumberOfDexRegisterLocationCatalogEntries() const {
- return region_.LoadUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset);
+ NumberOfLocationCatalogEntriesType GetNumberOfLocationCatalogEntries() const {
+ return region_.LoadUnaligned<NumberOfLocationCatalogEntriesType>(
+ kNumberOfLocationCatalogEntriesOffset);
}
- void SetNumberOfDexRegisterLocationCatalogEntries(uint32_t num_entries) {
- region_.StoreUnaligned<uint32_t>(kNumberOfDexRegisterLocationCatalogEntriesOffset, num_entries);
+ void SetNumberOfLocationCatalogEntries(NumberOfLocationCatalogEntriesType num_entries) {
+ region_.StoreUnaligned<NumberOfLocationCatalogEntriesType>(
+ kNumberOfLocationCatalogEntriesOffset, num_entries);
}
uint32_t GetDexRegisterLocationCatalogSize(const StackMapEncoding& encoding) const {
return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(encoding),
- GetNumberOfDexRegisterLocationCatalogEntries());
+ GetNumberOfLocationCatalogEntries());
}
- size_t GetNumberOfStackMaps() const {
- return region_.LoadUnaligned<uint32_t>(kNumberOfStackMapsOffset);
+ NumberOfStackMapsType GetNumberOfStackMaps() const {
+ return region_.LoadUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset);
}
- void SetNumberOfStackMaps(uint32_t number_of_stack_maps) {
- region_.StoreUnaligned<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
+ void SetNumberOfStackMaps(NumberOfStackMapsType number_of_stack_maps) {
+ region_.StoreUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset, number_of_stack_maps);
}
  // Get the size of all the stack maps of this CodeInfo object, in bytes.
@@ -1129,27 +1173,28 @@ class CodeInfo {
bool dump_stack_maps) const;
private:
- // TODO: Instead of plain types such as "uint32_t", introduce
- // typedefs (and document the memory layout of CodeInfo).
static constexpr int kOverallSizeOffset = 0;
- static constexpr int kEncodingInfoOffset = kOverallSizeOffset + sizeof(uint32_t);
- static constexpr int kNumberOfDexRegisterLocationCatalogEntriesOffset =
- kEncodingInfoOffset + sizeof(uint16_t);
+ static constexpr int kEncodingInfoOffset = ELEMENT_BYTE_OFFSET_AFTER(OverallSize);
+ static constexpr int kNumberOfLocationCatalogEntriesOffset =
+ ELEMENT_BYTE_OFFSET_AFTER(EncodingInfo);
static constexpr int kNumberOfStackMapsOffset =
- kNumberOfDexRegisterLocationCatalogEntriesOffset + sizeof(uint32_t);
- static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
- static constexpr int kFixedSize = kStackMaskSizeOffset + sizeof(uint32_t);
-
- static constexpr int kHasInlineInfoBitOffset = (kEncodingInfoOffset * kBitsPerByte);
- static constexpr int kInlineInfoBitOffset = kHasInlineInfoBitOffset + 1;
- static constexpr int kDexRegisterMapBitOffset =
- kInlineInfoBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int kDexPcBitOffset =
- kDexRegisterMapBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int kNativePcBitOffset =
- kDexPcBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int kRegisterMaskBitOffset =
- kNativePcBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
+ ELEMENT_BYTE_OFFSET_AFTER(NumberOfLocationCatalogEntries);
+ static constexpr int kStackMaskSizeOffset = ELEMENT_BYTE_OFFSET_AFTER(NumberOfStackMaps);
+ static constexpr int kFixedSize = ELEMENT_BYTE_OFFSET_AFTER(StackMaskSize);
+
+ static constexpr int kHasInlineInfoBitOffset = kEncodingInfoOffset * kBitsPerByte;
+ static constexpr int kInlineInfoBitOffset = ELEMENT_BIT_OFFSET_AFTER(HasInlineInfo);
+ static constexpr int kDexRegisterMapBitOffset = ELEMENT_BIT_OFFSET_AFTER(InlineInfo);
+ static constexpr int kDexPcBitOffset = ELEMENT_BIT_OFFSET_AFTER(DexRegisterMap);
+ static constexpr int kNativePcBitOffset = ELEMENT_BIT_OFFSET_AFTER(DexPc);
+ static constexpr int kRegisterMaskBitOffset = ELEMENT_BIT_OFFSET_AFTER(NativePc);
+
+ static constexpr int kEncodingInfoPastTheEndBitOffset = ELEMENT_BIT_OFFSET_AFTER(RegisterMask);
+ static constexpr int kEncodingInfoOverallBitSize =
+ kEncodingInfoPastTheEndBitOffset - kHasInlineInfoBitOffset;
+
+ static_assert(kEncodingInfoOverallBitSize <= (sizeof(EncodingInfoType) * kBitsPerByte),
+ "art::CodeInfo::EncodingInfoType is too short to hold all encoding info elements.");
MemoryRegion GetStackMaps(const StackMapEncoding& encoding) const {
return region_.size() == 0
@@ -1172,7 +1217,7 @@ class CodeInfo {
size_t number_of_live_dex_registers =
dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
size_t location_mapping_data_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(GetNumberOfDexRegisterLocationCatalogEntries())
+ DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries())
* number_of_live_dex_registers;
size_t location_mapping_data_size_in_bytes =
RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -1214,6 +1259,9 @@ class CodeInfo {
friend class StackMapStream;
};
+#undef ELEMENT_BYTE_OFFSET_AFTER
+#undef ELEMENT_BIT_OFFSET_AFTER
+
} // namespace art
#endif // ART_RUNTIME_STACK_MAP_H_
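The encoding-info bit offsets in CodeInfo follow the same chained pattern, and the new static_assert guards the total width. Assuming kNumberOfBitForNumberOfBytesForEncoding is 3 (an assumption for this sketch; the constant is defined elsewhere in stack_map.h), one flag bit plus five 3-bit byte counts is 16 bits, which exactly fills the uint16_t EncodingInfoType:

    #include <cstdint>

    static constexpr int kBitsPerByte = 8;
    // Assumption for this sketch: each "number of bytes" field is encoded on 3 bits.
    static constexpr int kNumberOfBitForNumberOfBytesForEncoding = 3;

    static constexpr int kEncodingInfoOffset      = 4;  // kOverallSizeOffset + sizeof(uint32_t)
    static constexpr int kHasInlineInfoBitOffset  = kEncodingInfoOffset * kBitsPerByte;  // 32
    static constexpr int kInlineInfoBitOffset     = kHasInlineInfoBitOffset + 1;
    static constexpr int kDexRegisterMapBitOffset = kInlineInfoBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
    static constexpr int kDexPcBitOffset          = kDexRegisterMapBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
    static constexpr int kNativePcBitOffset       = kDexPcBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
    static constexpr int kRegisterMaskBitOffset   = kNativePcBitOffset + kNumberOfBitForNumberOfBytesForEncoding;
    static constexpr int kPastTheEndBitOffset     = kRegisterMaskBitOffset + kNumberOfBitForNumberOfBytesForEncoding;

    // 1 + 5 * 3 = 16 bits of encoding info.
    static_assert(kPastTheEndBitOffset - kHasInlineInfoBitOffset <=
                      static_cast<int>(sizeof(uint16_t)) * kBitsPerByte,
                  "encoding info must fit in EncodingInfoType");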
diff --git a/runtime/thread.cc b/runtime/thread.cc
index cede998d0a..a2edfa3155 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1180,6 +1180,7 @@ struct StackDumpVisitor : public StackVisitor {
if (m->IsRuntimeMethod()) {
return true;
}
+ m = m->GetInterfaceMethodIfProxy(sizeof(void*));
const int kMaxRepetition = 3;
mirror::Class* c = m->GetDeclaringClass();
mirror::DexCache* dex_cache = c->GetDexCache();
@@ -2734,4 +2735,12 @@ void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
tlsPtr_.method_verifier = verifier->link_;
}
+size_t Thread::NumberOfHeldMutexes() const {
+ size_t count = 0;
+ for (BaseMutex* mu : tlsPtr_.held_mutexes) {
+ count += static_cast<size_t>(mu != nullptr);
+ }
+ return count;
+}
+
} // namespace art
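The new helper simply counts the non-null slots of the per-thread held_mutexes table. A self-contained stand-in for illustration (in the real runtime the table lives in tlsPtr_ and is indexed by lock level; the struct below is a simplification, and the assertion shows the kind of "no locks held before blocking" check a caller might make):

    #include <cassert>
    #include <cstddef>

    struct BaseMutex {};

    struct Thread {
      BaseMutex* held_mutexes[4] = {nullptr, nullptr, nullptr, nullptr};

      size_t NumberOfHeldMutexes() const {
        size_t count = 0;
        for (BaseMutex* mu : held_mutexes) {
          count += static_cast<size_t>(mu != nullptr);  // count occupied slots
        }
        return count;
      }
    };

    int main() {
      Thread t;
      assert(t.NumberOfHeldMutexes() == 0);  // e.g. asserted before blocking on a weak-root read
      BaseMutex m;
      t.held_mutexes[1] = &m;
      assert(t.NumberOfHeldMutexes() == 1);
      return 0;
    }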
diff --git a/runtime/thread.h b/runtime/thread.h
index 7826e629cc..cf87f22ad0 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -288,6 +288,8 @@ class Thread {
return tls32_.daemon;
}
+ size_t NumberOfHeldMutexes() const;
+
bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c7ea7f4381..c000e61d20 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -43,6 +43,7 @@ enum ThreadState {
kWaitingForMethodTracingStart, // WAITING TS_WAIT waiting for method tracing to start
kWaitingForVisitObjects, // WAITING TS_WAIT waiting for visiting objects
kWaitingForGetObjectsAllocated, // WAITING TS_WAIT waiting for getting the number of allocated objects
+ kWaitingWeakRootRead, // WAITING TS_WAIT waiting to read a weak root
kStarting, // NEW TS_WAIT native thread started, not yet ready to run managed code
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 11c3e659ee..764b6ba0aa 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2946,6 +2946,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// If the code would've normally hard-failed, then the interpreter will throw the
// appropriate verification errors at runtime.
Fail(VERIFY_ERROR_FORCE_INTERPRETER); // TODO(iam): implement box-lambda verification
+
+      // Partial verification. Set the resulting type to java.lang.Object, which
+      // is good enough for the remaining verification to proceed without hard-failing.
+ const uint32_t vreg_target_object = inst->VRegA_22x(); // box-lambda vA, vB
+ const RegType& reg_type = reg_types_.JavaLangObject(need_precise_constants_);
+ work_line_->SetRegisterType(this, vreg_target_object, reg_type);
break;
}
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 9c528190b2..6e23234182 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -690,6 +690,11 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
} else if (IsReferenceTypes() && incoming_type.IsReferenceTypes()) {
if (IsZero() || incoming_type.IsZero()) {
return SelectNonConstant(*this, incoming_type); // 0 MERGE ref => ref
+ } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes()) {
+    // Something that is uninitialized hasn't had its constructor called. Uninitialized types are
+ // special. They may only ever be merged with themselves (must be taken care of by the
+ // caller of Merge(), see the DCHECK on entry). So mark any other merge as conflicting here.
+ return conflict;
} else if (IsJavaLangObject() || incoming_type.IsJavaLangObject()) {
return reg_types->JavaLangObject(false); // Object MERGE ref => Object
} else if (IsUnresolvedTypes() || incoming_type.IsUnresolvedTypes()) {
@@ -698,11 +703,6 @@ const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_ty
// type that reflects our lack of knowledge and that allows the rest of the unresolved
// mechanics to continue.
return reg_types->FromUnresolvedMerge(*this, incoming_type);
- } else if (IsUninitializedTypes() || incoming_type.IsUninitializedTypes()) {
- // Something that is uninitialized hasn't had its constructor called. Mark any merge
- // of this type with something that is initialized as conflicting. The cases of a merge
- // with itself, 0 or Object are handled above.
- return conflict;
} else { // Two reference types, compute Join
mirror::Class* c1 = GetClass();
mirror::Class* c2 = incoming_type.GetClass();
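The hunks above move the uninitialized-type check ahead of the java.lang.Object shortcut, so merging an uninitialized reference with Object now yields a conflict instead of silently becoming Object. A simplified sketch of the resulting ordering (plain enum stand-ins, not ART's RegType hierarchy):

    enum class Kind { kZero, kUninitialized, kJavaLangObject, kReference, kConflict };

    Kind Merge(Kind a, Kind b) {
      if (a == Kind::kZero || b == Kind::kZero) {
        return (a == Kind::kZero) ? b : a;   // 0 MERGE ref => ref
      }
      if (a == Kind::kUninitialized || b == Kind::kUninitialized) {
        return Kind::kConflict;              // checked before the Object shortcut now
      }
      if (a == Kind::kJavaLangObject || b == Kind::kJavaLangObject) {
        return Kind::kJavaLangObject;        // Object MERGE ref => Object
      }
      return Kind::kReference;               // join of two initialized reference types
    }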
diff --git a/runtime/verifier/verify_mode.h b/runtime/verifier/verify_mode.h
new file mode 100644
index 0000000000..bea43787c1
--- /dev/null
+++ b/runtime/verifier/verify_mode.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VERIFIER_VERIFY_MODE_H_
+#define ART_RUNTIME_VERIFIER_VERIFY_MODE_H_
+
+#include <stdint.h>
+
+namespace art {
+namespace verifier {
+
+// The mode that the verifier should run as.
+enum class VerifyMode : int8_t {
+ kNone, // Everything is assumed verified.
+ kEnable, // Standard verification, try pre-verifying at compile-time.
+ kSoftFail, // Force a soft fail, punting to the interpreter with access checks.
+};
+
+} // namespace verifier
+} // namespace art
+
+#endif // ART_RUNTIME_VERIFIER_VERIFY_MODE_H_
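A small sketch of how the three modes map onto behaviour. The Describe helper is hypothetical and not part of the patch; the enum is repeated only so the snippet stands alone:

    #include <cstdint>

    namespace art {
    namespace verifier {
    enum class VerifyMode : int8_t { kNone, kEnable, kSoftFail };  // as declared above
    }  // namespace verifier
    }  // namespace art

    // Hypothetical helper for illustration only.
    const char* Describe(art::verifier::VerifyMode mode) {
      switch (mode) {
        case art::verifier::VerifyMode::kNone:     return "assume everything is verified";
        case art::verifier::VerifyMode::kEnable:   return "standard verification, pre-verify at compile time";
        case art::verifier::VerifyMode::kSoftFail: return "force a soft fail, interpret with access checks";
      }
      return "unknown";
    }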
diff --git a/test/011-array-copy/src/Main.java b/test/011-array-copy/src/Main.java
index 505d8b09ce..96e1dbf21a 100644
--- a/test/011-array-copy/src/Main.java
+++ b/test/011-array-copy/src/Main.java
@@ -23,6 +23,7 @@ public class Main {
public static void main(String args[]) {
testObjectCopy();
testOverlappingMoves();
+ testFloatAndDouble();
}
public static void testObjectCopy() {
@@ -143,4 +144,13 @@ public class Main {
/* copy forward, mixed alignment, trivial length */
makeCopies(0, 5, 1);
}
+
+ private static void testFloatAndDouble() {
+ // Float & double copies have the same implementation as int & long. However, there are
+ // protective DCHECKs in the code (there is nothing unifying like ByteSizedArray or
+ // ShortSizedArray). Just test that we don't fail those checks.
+ final int len = 10;
+ System.arraycopy(new float[len], 0, new float[len], 0, len);
+ System.arraycopy(new double[len], 0, new double[len], 0, len);
+ }
}
diff --git a/test/140-field-packing/expected.txt b/test/140-field-packing/expected.txt
new file mode 100644
index 0000000000..2b0a2ce905
--- /dev/null
+++ b/test/140-field-packing/expected.txt
@@ -0,0 +1,2 @@
+running test...
+test completed.
diff --git a/test/140-field-packing/info.txt b/test/140-field-packing/info.txt
new file mode 100644
index 0000000000..a28bd0463e
--- /dev/null
+++ b/test/140-field-packing/info.txt
@@ -0,0 +1 @@
+Test field packing for classes with various arrangements of fields.
diff --git a/test/140-field-packing/src/GapOrder.java b/test/140-field-packing/src/GapOrder.java
new file mode 100644
index 0000000000..09d09b8e0b
--- /dev/null
+++ b/test/140-field-packing/src/GapOrder.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Regression test for 22460222, the subclass.
+// The ordering of field gaps was wrong: if there were two gaps of different sizes,
+// and the larger one was needed, it wouldn't be found.
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+
+class GapOrder extends GapOrderBase {
+ // The base class is 9 bytes. The entire class should be packed as:
+ //
+ // 00: oooo oooo
+ // 08: b-ss rrrr
+ // 16: rrrr iiii
+ // 24: dddd dddd
+ //
+ // The problem was, the packer wasn't finding the gap where iiii should go,
+ // because the gap where ss goes was given priority. Instead it packed as:
+ // 00: oooo oooo
+ // 08: b--- rrrr
+ // 16: rrrr ----
+ // 24: dddd dddd
+ // 32: iiii ss
+ public Object r1;
+ public Object r2;
+ public double d;
+ public int i;
+ public short s;
+
+ static private void CheckField(String fieldName, int expected) {
+ Field field = null;
+ try {
+ field = GapOrder.class.getField(fieldName);
+ } catch (ReflectiveOperationException e) {
+ System.out.println(fieldName + " not found in GapOrder.");
+ return;
+ }
+
+ int actual = -1;
+ try {
+ Method getOffset = Field.class.getMethod("getOffset");
+ actual = (Integer)getOffset.invoke(field);
+ } catch (ReflectiveOperationException e) {
+ System.out.println("Unable to get field offset for " + fieldName + ":" + e);
+ return;
+ }
+
+ if (actual != expected) {
+ System.out.println(
+ String.format("GapOrder.%s has offset %d, but expected %d",
+ fieldName, actual, expected));
+ }
+ }
+
+ static public void Check() {
+ CheckField("r1", 12);
+ CheckField("r2", 16);
+ CheckField("d", 24);
+ CheckField("i", 20);
+ CheckField("s", 10);
+ }
+}
+
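For reference, the offsets expected by Check() follow directly from the packed diagram in the class comment, assuming an 8-byte object header and 4-byte references: the base byte b sits at 8, the short s fills the 2-byte gap at 10, the references r1 and r2 go at 12 and 16, the int i fills the remaining 4-byte gap at 20, and the 8-byte-aligned double d lands at 24.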
diff --git a/test/140-field-packing/src/GapOrderBase.java b/test/140-field-packing/src/GapOrderBase.java
new file mode 100644
index 0000000000..4a0b378c57
--- /dev/null
+++ b/test/140-field-packing/src/GapOrderBase.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Regression test for 22460222, the base class.
+// The ordering of field gaps was wrong: if there were two gaps of different sizes,
+// and the larger one was needed, it wouldn't be found.
+
+// This class has a size of 9 bytes: 8 for object plus 1 for the field 'b'.
+class GapOrderBase {
+ public byte b;
+}
diff --git a/test/140-field-packing/src/Main.java b/test/140-field-packing/src/Main.java
new file mode 100644
index 0000000000..2810b32a82
--- /dev/null
+++ b/test/140-field-packing/src/Main.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println("running test...");
+ GapOrder.Check();
+ System.out.println("test completed.");
+ }
+}
diff --git a/test/525-arrays-and-fields/expected.txt b/test/525-arrays-and-fields/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/525-arrays-and-fields/expected.txt
diff --git a/test/525-arrays-and-fields/info.txt b/test/525-arrays-and-fields/info.txt
new file mode 100644
index 0000000000..3e16abf204
--- /dev/null
+++ b/test/525-arrays-and-fields/info.txt
@@ -0,0 +1 @@
+Test on (in)variant static and instance field and array references in loops.
diff --git a/test/525-arrays-and-fields/src/Main.java b/test/525-arrays-and-fields/src/Main.java
new file mode 100644
index 0000000000..cb1e4afeab
--- /dev/null
+++ b/test/525-arrays-and-fields/src/Main.java
@@ -0,0 +1,803 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on (in)variant static and instance field and array references in loops.
+//
+public class Main {
+
+ private static Object anObject = new Object();
+ private static Object anotherObject = new Object();
+
+ //
+ // Static fields.
+ //
+
+ private static boolean sZ;
+ private static byte sB;
+ private static char sC;
+ private static short sS;
+ private static int sI;
+ private static long sJ;
+ private static float sF;
+ private static double sD;
+ private static Object sL;
+
+ //
+ // Static arrays.
+ //
+
+ private static boolean[] sArrZ;
+ private static byte[] sArrB;
+ private static char[] sArrC;
+ private static short[] sArrS;
+ private static int[] sArrI;
+ private static long[] sArrJ;
+ private static float[] sArrF;
+ private static double[] sArrD;
+ private static Object[] sArrL;
+
+ //
+ // Instance fields.
+ //
+
+ private boolean mZ;
+ private byte mB;
+ private char mC;
+ private short mS;
+ private int mI;
+ private long mJ;
+ private float mF;
+ private double mD;
+ private Object mL;
+
+ //
+ // Instance arrays.
+ //
+
+ private boolean[] mArrZ;
+ private byte[] mArrB;
+ private char[] mArrC;
+ private short[] mArrS;
+ private int[] mArrI;
+ private long[] mArrJ;
+ private float[] mArrF;
+ private double[] mArrD;
+ private Object[] mArrL;
+
+ //
+ // Loops on static arrays with invariant static field references.
+ //
+
+ private static void SInvLoopZ() {
+ for (int i = 0; i < sArrZ.length; i++) {
+ sArrZ[i] = sZ;
+ }
+ }
+
+ private static void SInvLoopB() {
+ for (int i = 0; i < sArrB.length; i++) {
+ sArrB[i] = sB;
+ }
+ }
+
+ private static void SInvLoopC() {
+ for (int i = 0; i < sArrC.length; i++) {
+ sArrC[i] = sC;
+ }
+ }
+
+ private static void SInvLoopS() {
+ for (int i = 0; i < sArrS.length; i++) {
+ sArrS[i] = sS;
+ }
+ }
+
+ private static void SInvLoopI() {
+ for (int i = 0; i < sArrI.length; i++) {
+ sArrI[i] = sI;
+ }
+ }
+
+ private static void SInvLoopJ() {
+ for (int i = 0; i < sArrJ.length; i++) {
+ sArrJ[i] = sJ;
+ }
+ }
+
+ private static void SInvLoopF() {
+ for (int i = 0; i < sArrF.length; i++) {
+ sArrF[i] = sF;
+ }
+ }
+
+ private static void SInvLoopD() {
+ for (int i = 0; i < sArrD.length; i++) {
+ sArrD[i] = sD;
+ }
+ }
+
+ private static void SInvLoopL() {
+ for (int i = 0; i < sArrL.length; i++) {
+ sArrL[i] = sL;
+ }
+ }
+
+ //
+ // Loops on static arrays with variant static field references.
+ //
+
+ private static void SVarLoopZ() {
+ for (int i = 0; i < sArrZ.length; i++) {
+ sArrZ[i] = sZ;
+ if (i == 10)
+ sZ = !sZ;
+ }
+ }
+
+ private static void SVarLoopB() {
+ for (int i = 0; i < sArrB.length; i++) {
+ sArrB[i] = sB;
+ if (i == 10)
+ sB++;
+ }
+ }
+
+ private static void SVarLoopC() {
+ for (int i = 0; i < sArrC.length; i++) {
+ sArrC[i] = sC;
+ if (i == 10)
+ sC++;
+ }
+ }
+
+ private static void SVarLoopS() {
+ for (int i = 0; i < sArrS.length; i++) {
+ sArrS[i] = sS;
+ if (i == 10)
+ sS++;
+ }
+ }
+
+ private static void SVarLoopI() {
+ for (int i = 0; i < sArrI.length; i++) {
+ sArrI[i] = sI;
+ if (i == 10)
+ sI++;
+ }
+ }
+
+ private static void SVarLoopJ() {
+ for (int i = 0; i < sArrJ.length; i++) {
+ sArrJ[i] = sJ;
+ if (i == 10)
+ sJ++;
+ }
+ }
+
+ private static void SVarLoopF() {
+ for (int i = 0; i < sArrF.length; i++) {
+ sArrF[i] = sF;
+ if (i == 10)
+ sF++;
+ }
+ }
+
+ private static void SVarLoopD() {
+ for (int i = 0; i < sArrD.length; i++) {
+ sArrD[i] = sD;
+ if (i == 10)
+ sD++;
+ }
+ }
+
+ private static void SVarLoopL() {
+ for (int i = 0; i < sArrL.length; i++) {
+ sArrL[i] = sL;
+ if (i == 10)
+ sL = anotherObject;
+ }
+ }
+
+ //
+ // Loops on static arrays with a cross-over reference.
+ //
+
+ private static void SCrossOverLoopZ() {
+ for (int i = 0; i < sArrZ.length; i++) {
+ sArrZ[i] = !sArrZ[20];
+ }
+ }
+
+ private static void SCrossOverLoopB() {
+ for (int i = 0; i < sArrB.length; i++) {
+ sArrB[i] = (byte)(sArrB[20] + 2);
+ }
+ }
+
+ private static void SCrossOverLoopC() {
+ for (int i = 0; i < sArrC.length; i++) {
+ sArrC[i] = (char)(sArrC[20] + 2);
+ }
+ }
+
+ private static void SCrossOverLoopS() {
+ for (int i = 0; i < sArrS.length; i++) {
+ sArrS[i] = (short)(sArrS[20] + 2);
+ }
+ }
+
+ private static void SCrossOverLoopI() {
+ for (int i = 0; i < sArrI.length; i++) {
+ sArrI[i] = sArrI[20] + 2;
+ }
+ }
+
+ private static void SCrossOverLoopJ() {
+ for (int i = 0; i < sArrJ.length; i++) {
+ sArrJ[i] = sArrJ[20] + 2;
+ }
+ }
+
+ private static void SCrossOverLoopF() {
+ for (int i = 0; i < sArrF.length; i++) {
+ sArrF[i] = sArrF[20] + 2;
+ }
+ }
+
+ private static void SCrossOverLoopD() {
+ for (int i = 0; i < sArrD.length; i++) {
+ sArrD[i] = sArrD[20] + 2;
+ }
+ }
+
+ private static void SCrossOverLoopL() {
+ for (int i = 0; i < sArrL.length; i++) {
+ sArrL[i] = (sArrL[20] == anObject) ? anotherObject : anObject;
+ }
+ }
+
+ //
+ // Loops on instance arrays with invariant instance field references.
+ //
+
+ private void InvLoopZ() {
+ for (int i = 0; i < mArrZ.length; i++) {
+ mArrZ[i] = mZ;
+ }
+ }
+
+ private void InvLoopB() {
+ for (int i = 0; i < mArrB.length; i++) {
+ mArrB[i] = mB;
+ }
+ }
+
+ private void InvLoopC() {
+ for (int i = 0; i < mArrC.length; i++) {
+ mArrC[i] = mC;
+ }
+ }
+
+ private void InvLoopS() {
+ for (int i = 0; i < mArrS.length; i++) {
+ mArrS[i] = mS;
+ }
+ }
+
+ private void InvLoopI() {
+ for (int i = 0; i < mArrI.length; i++) {
+ mArrI[i] = mI;
+ }
+ }
+
+ private void InvLoopJ() {
+ for (int i = 0; i < mArrJ.length; i++) {
+ mArrJ[i] = mJ;
+ }
+ }
+
+ private void InvLoopF() {
+ for (int i = 0; i < mArrF.length; i++) {
+ mArrF[i] = mF;
+ }
+ }
+
+ private void InvLoopD() {
+ for (int i = 0; i < mArrD.length; i++) {
+ mArrD[i] = mD;
+ }
+ }
+
+ private void InvLoopL() {
+ for (int i = 0; i < mArrL.length; i++) {
+ mArrL[i] = mL;
+ }
+ }
+
+ //
+ // Loops on instance arrays with variant instance field references.
+ //
+
+ private void VarLoopZ() {
+ for (int i = 0; i < mArrZ.length; i++) {
+ mArrZ[i] = mZ;
+ if (i == 10)
+ mZ = !mZ;
+ }
+ }
+
+ private void VarLoopB() {
+ for (int i = 0; i < mArrB.length; i++) {
+ mArrB[i] = mB;
+ if (i == 10)
+ mB++;
+ }
+ }
+
+ private void VarLoopC() {
+ for (int i = 0; i < mArrC.length; i++) {
+ mArrC[i] = mC;
+ if (i == 10)
+ mC++;
+ }
+ }
+
+ private void VarLoopS() {
+ for (int i = 0; i < mArrS.length; i++) {
+ mArrS[i] = mS;
+ if (i == 10)
+ mS++;
+ }
+ }
+
+ private void VarLoopI() {
+ for (int i = 0; i < mArrI.length; i++) {
+ mArrI[i] = mI;
+ if (i == 10)
+ mI++;
+ }
+ }
+
+ private void VarLoopJ() {
+ for (int i = 0; i < mArrJ.length; i++) {
+ mArrJ[i] = mJ;
+ if (i == 10)
+ mJ++;
+ }
+ }
+
+ private void VarLoopF() {
+ for (int i = 0; i < mArrF.length; i++) {
+ mArrF[i] = mF;
+ if (i == 10)
+ mF++;
+ }
+ }
+
+ private void VarLoopD() {
+ for (int i = 0; i < mArrD.length; i++) {
+ mArrD[i] = mD;
+ if (i == 10)
+ mD++;
+ }
+ }
+
+ private void VarLoopL() {
+ for (int i = 0; i < mArrL.length; i++) {
+ mArrL[i] = mL;
+ if (i == 10)
+ mL = anotherObject;
+ }
+ }
+
+ //
+ // Loops on instance arrays with a cross-over reference.
+ //
+
+ private void CrossOverLoopZ() {
+ for (int i = 0; i < mArrZ.length; i++) {
+ mArrZ[i] = !mArrZ[20];
+ }
+ }
+
+ private void CrossOverLoopB() {
+ for (int i = 0; i < mArrB.length; i++) {
+ mArrB[i] = (byte)(mArrB[20] + 2);
+ }
+ }
+
+ private void CrossOverLoopC() {
+ for (int i = 0; i < mArrC.length; i++) {
+ mArrC[i] = (char)(mArrC[20] + 2);
+ }
+ }
+
+ private void CrossOverLoopS() {
+ for (int i = 0; i < mArrS.length; i++) {
+ mArrS[i] = (short)(mArrS[20] + 2);
+ }
+ }
+
+ private void CrossOverLoopI() {
+ for (int i = 0; i < mArrI.length; i++) {
+ mArrI[i] = mArrI[20] + 2;
+ }
+ }
+
+ private void CrossOverLoopJ() {
+ for (int i = 0; i < mArrJ.length; i++) {
+ mArrJ[i] = mArrJ[20] + 2;
+ }
+ }
+
+ private void CrossOverLoopF() {
+ for (int i = 0; i < mArrF.length; i++) {
+ mArrF[i] = mArrF[20] + 2;
+ }
+ }
+
+ private void CrossOverLoopD() {
+ for (int i = 0; i < mArrD.length; i++) {
+ mArrD[i] = mArrD[20] + 2;
+ }
+ }
+
+ private void CrossOverLoopL() {
+ for (int i = 0; i < mArrL.length; i++) {
+ mArrL[i] = (mArrL[20] == anObject) ? anotherObject : anObject;
+ }
+ }
+
+ //
+ // Driver and testers.
+ //
+
+ public static void main(String[] args) {
+ DoStaticTests();
+ new Main().DoInstanceTests();
+ }
+
+ private static void DoStaticTests() {
+ // Type Z.
+ sZ = true;
+ sArrZ = new boolean[100];
+ SInvLoopZ();
+ for (int i = 0; i < sArrZ.length; i++) {
+ expectEquals(true, sArrZ[i]);
+ }
+ SVarLoopZ();
+ for (int i = 0; i < sArrZ.length; i++) {
+ expectEquals(i <= 10, sArrZ[i]);
+ }
+ SCrossOverLoopZ();
+ for (int i = 0; i < sArrZ.length; i++) {
+ expectEquals(i <= 20, sArrZ[i]);
+ }
+ // Type B.
+ sB = 1;
+ sArrB = new byte[100];
+ SInvLoopB();
+ for (int i = 0; i < sArrB.length; i++) {
+ expectEquals(1, sArrB[i]);
+ }
+ SVarLoopB();
+ for (int i = 0; i < sArrB.length; i++) {
+ expectEquals(i <= 10 ? 1 : 2, sArrB[i]);
+ }
+ SCrossOverLoopB();
+ for (int i = 0; i < sArrB.length; i++) {
+ expectEquals(i <= 20 ? 4 : 6, sArrB[i]);
+ }
+ // Type C.
+ sC = 2;
+ sArrC = new char[100];
+ SInvLoopC();
+ for (int i = 0; i < sArrC.length; i++) {
+ expectEquals(2, sArrC[i]);
+ }
+ SVarLoopC();
+ for (int i = 0; i < sArrC.length; i++) {
+ expectEquals(i <= 10 ? 2 : 3, sArrC[i]);
+ }
+ SCrossOverLoopC();
+ for (int i = 0; i < sArrC.length; i++) {
+ expectEquals(i <= 20 ? 5 : 7, sArrC[i]);
+ }
+ // Type S.
+ sS = 3;
+ sArrS = new short[100];
+ SInvLoopS();
+ for (int i = 0; i < sArrS.length; i++) {
+ expectEquals(3, sArrS[i]);
+ }
+ SVarLoopS();
+ for (int i = 0; i < sArrS.length; i++) {
+ expectEquals(i <= 10 ? 3 : 4, sArrS[i]);
+ }
+ SCrossOverLoopS();
+ for (int i = 0; i < sArrS.length; i++) {
+ expectEquals(i <= 20 ? 6 : 8, sArrS[i]);
+ }
+ // Type I.
+ sI = 4;
+ sArrI = new int[100];
+ SInvLoopI();
+ for (int i = 0; i < sArrI.length; i++) {
+ expectEquals(4, sArrI[i]);
+ }
+ SVarLoopI();
+ for (int i = 0; i < sArrI.length; i++) {
+ expectEquals(i <= 10 ? 4 : 5, sArrI[i]);
+ }
+ SCrossOverLoopI();
+ for (int i = 0; i < sArrI.length; i++) {
+ expectEquals(i <= 20 ? 7 : 9, sArrI[i]);
+ }
+ // Type J.
+ sJ = 5;
+ sArrJ = new long[100];
+ SInvLoopJ();
+ for (int i = 0; i < sArrJ.length; i++) {
+ expectEquals(5, sArrJ[i]);
+ }
+ SVarLoopJ();
+ for (int i = 0; i < sArrJ.length; i++) {
+ expectEquals(i <= 10 ? 5 : 6, sArrJ[i]);
+ }
+ SCrossOverLoopJ();
+ for (int i = 0; i < sArrJ.length; i++) {
+ expectEquals(i <= 20 ? 8 : 10, sArrJ[i]);
+ }
+ // Type F.
+ sF = 6.0f;
+ sArrF = new float[100];
+ SInvLoopF();
+ for (int i = 0; i < sArrF.length; i++) {
+ expectEquals(6, sArrF[i]);
+ }
+ SVarLoopF();
+ for (int i = 0; i < sArrF.length; i++) {
+ expectEquals(i <= 10 ? 6 : 7, sArrF[i]);
+ }
+ SCrossOverLoopF();
+ for (int i = 0; i < sArrF.length; i++) {
+ expectEquals(i <= 20 ? 9 : 11, sArrF[i]);
+ }
+ // Type D.
+ sD = 7.0;
+ sArrD = new double[100];
+ SInvLoopD();
+ for (int i = 0; i < sArrD.length; i++) {
+ expectEquals(7.0, sArrD[i]);
+ }
+ SVarLoopD();
+ for (int i = 0; i < sArrD.length; i++) {
+ expectEquals(i <= 10 ? 7 : 8, sArrD[i]);
+ }
+ SCrossOverLoopD();
+ for (int i = 0; i < sArrD.length; i++) {
+ expectEquals(i <= 20 ? 10 : 12, sArrD[i]);
+ }
+ // Type L.
+ sL = anObject;
+ sArrL = new Object[100];
+ SInvLoopL();
+ for (int i = 0; i < sArrL.length; i++) {
+ expectEquals(anObject, sArrL[i]);
+ }
+ SVarLoopL();
+ for (int i = 0; i < sArrL.length; i++) {
+ expectEquals(i <= 10 ? anObject : anotherObject, sArrL[i]);
+ }
+ SCrossOverLoopL();
+ for (int i = 0; i < sArrL.length; i++) {
+ expectEquals(i <= 20 ? anObject : anotherObject, sArrL[i]);
+ }
+ }
+
+ private void DoInstanceTests() {
+ // Type Z.
+ mZ = true;
+ mArrZ = new boolean[100];
+ InvLoopZ();
+ for (int i = 0; i < mArrZ.length; i++) {
+ expectEquals(true, mArrZ[i]);
+ }
+ VarLoopZ();
+ for (int i = 0; i < mArrZ.length; i++) {
+ expectEquals(i <= 10, mArrZ[i]);
+ }
+ CrossOverLoopZ();
+ for (int i = 0; i < mArrZ.length; i++) {
+ expectEquals(i <= 20, mArrZ[i]);
+ }
+ // Type B.
+ mB = 1;
+ mArrB = new byte[100];
+ InvLoopB();
+ for (int i = 0; i < mArrB.length; i++) {
+ expectEquals(1, mArrB[i]);
+ }
+ VarLoopB();
+ for (int i = 0; i < mArrB.length; i++) {
+ expectEquals(i <= 10 ? 1 : 2, mArrB[i]);
+ }
+ CrossOverLoopB();
+ for (int i = 0; i < mArrB.length; i++) {
+ expectEquals(i <= 20 ? 4 : 6, mArrB[i]);
+ }
+ // Type C.
+ mC = 2;
+ mArrC = new char[100];
+ InvLoopC();
+ for (int i = 0; i < mArrC.length; i++) {
+ expectEquals(2, mArrC[i]);
+ }
+ VarLoopC();
+ for (int i = 0; i < mArrC.length; i++) {
+ expectEquals(i <= 10 ? 2 : 3, mArrC[i]);
+ }
+ CrossOverLoopC();
+ for (int i = 0; i < mArrC.length; i++) {
+ expectEquals(i <= 20 ? 5 : 7, mArrC[i]);
+ }
+ // Type S.
+ mS = 3;
+ mArrS = new short[100];
+ InvLoopS();
+ for (int i = 0; i < mArrS.length; i++) {
+ expectEquals(3, mArrS[i]);
+ }
+ VarLoopS();
+ for (int i = 0; i < mArrS.length; i++) {
+ expectEquals(i <= 10 ? 3 : 4, mArrS[i]);
+ }
+ CrossOverLoopS();
+ for (int i = 0; i < mArrS.length; i++) {
+ expectEquals(i <= 20 ? 6 : 8, mArrS[i]);
+ }
+ // Type I.
+ mI = 4;
+ mArrI = new int[100];
+ InvLoopI();
+ for (int i = 0; i < mArrI.length; i++) {
+ expectEquals(4, mArrI[i]);
+ }
+ VarLoopI();
+ for (int i = 0; i < mArrI.length; i++) {
+ expectEquals(i <= 10 ? 4 : 5, mArrI[i]);
+ }
+ CrossOverLoopI();
+ for (int i = 0; i < mArrI.length; i++) {
+ expectEquals(i <= 20 ? 7 : 9, mArrI[i]);
+ }
+ // Type J.
+ mJ = 5;
+ mArrJ = new long[100];
+ InvLoopJ();
+ for (int i = 0; i < mArrJ.length; i++) {
+ expectEquals(5, mArrJ[i]);
+ }
+ VarLoopJ();
+ for (int i = 0; i < mArrJ.length; i++) {
+ expectEquals(i <= 10 ? 5 : 6, mArrJ[i]);
+ }
+ CrossOverLoopJ();
+ for (int i = 0; i < mArrJ.length; i++) {
+ expectEquals(i <= 20 ? 8 : 10, mArrJ[i]);
+ }
+ // Type F.
+ mF = 6.0f;
+ mArrF = new float[100];
+ InvLoopF();
+ for (int i = 0; i < mArrF.length; i++) {
+ expectEquals(6, mArrF[i]);
+ }
+ VarLoopF();
+ for (int i = 0; i < mArrF.length; i++) {
+ expectEquals(i <= 10 ? 6 : 7, mArrF[i]);
+ }
+ CrossOverLoopF();
+ for (int i = 0; i < mArrF.length; i++) {
+ expectEquals(i <= 20 ? 9 : 11, mArrF[i]);
+ }
+ // Type D.
+ mD = 7.0;
+ mArrD = new double[100];
+ InvLoopD();
+ for (int i = 0; i < mArrD.length; i++) {
+ expectEquals(7.0, mArrD[i]);
+ }
+ VarLoopD();
+ for (int i = 0; i < mArrD.length; i++) {
+ expectEquals(i <= 10 ? 7 : 8, mArrD[i]);
+ }
+ CrossOverLoopD();
+ for (int i = 0; i < mArrD.length; i++) {
+ expectEquals(i <= 20 ? 10 : 12, mArrD[i]);
+ }
+ // Type L.
+ mL = anObject;
+ mArrL = new Object[100];
+ InvLoopL();
+ for (int i = 0; i < mArrL.length; i++) {
+ expectEquals(anObject, mArrL[i]);
+ }
+ VarLoopL();
+ for (int i = 0; i < mArrL.length; i++) {
+ expectEquals(i <= 10 ? anObject : anotherObject, mArrL[i]);
+ }
+ CrossOverLoopL();
+ for (int i = 0; i < mArrL.length; i++) {
+ expectEquals(i <= 20 ? anObject : anotherObject, mArrL[i]);
+ }
+ }
+
+ private static void expectEquals(boolean expected, boolean result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(byte expected, byte result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(char expected, char result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(short expected, short result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEquals(Object expected, Object result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 4c17240455..fd9fcafbff 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -31,4 +31,9 @@ b/22244733
b/22331663
b/22331663 (pass)
b/22331663 (fail)
+b/22411633 (1)
+b/22411633 (2)
+b/22411633 (3)
+b/22411633 (4)
+b/22411633 (5)
Done!
diff --git a/test/800-smali/smali/b_22411633_1.smali b/test/800-smali/smali/b_22411633_1.smali
new file mode 100644
index 0000000000..ffc82a86ae
--- /dev/null
+++ b/test/800-smali/smali/b_22411633_1.smali
@@ -0,0 +1,35 @@
+.class public LB22411633_1;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ # Make v3 & v4 defined, just use null.
+ const v3, 0
+ const v4, 0
+
+ # Allocate a java.lang.Object (do not initialize).
+ new-instance v4, Ljava/lang/Object;
+
+ # Branch forward.
+ if-eqz v5, :LabelMerge
+
+ # Just some random work.
+ add-int/lit16 v3, v3, 1
+
+ # Another branch forward.
+ if-nez v5, :LabelMerge
+
+ # Some more random work, technically dead, but reachable.
+ add-int/lit16 v3, v3, 1
+
+:LabelMerge
+ # v4 is still an uninitialized reference here. Initialize it.
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+
+ # And test whether it's initialized by calling hashCode.
+ invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I
+
+ return-void
+
+.end method
diff --git a/test/800-smali/smali/b_22411633_2.smali b/test/800-smali/smali/b_22411633_2.smali
new file mode 100644
index 0000000000..9f27c4cb12
--- /dev/null
+++ b/test/800-smali/smali/b_22411633_2.smali
@@ -0,0 +1,45 @@
+.class public LB22411633_2;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ # Make v3 & v4 defined, just use null.
+ const v3, 0
+ const v4, 0
+
+ # Allocate a java.lang.Object (do not initialize).
+ new-instance v4, Ljava/lang/Object;
+
+ # Branch forward.
+ if-eqz v5, :LabelMerge
+
+    # Create a non-precise object reference. We can do this by merging two objects together
+ # that only have Object as a common ancestor.
+
+ # Allocate a java.lang.Object and initialize it.
+ new-instance v4, Ljava/lang/Object;
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+
+ if-nez v5, :LabelMergeObject
+
+ new-instance v4, Ljava/lang/Integer;
+ invoke-direct {v4}, Ljava/lang/Integer;-><init>()V
+
+:LabelMergeObject
+
+ # Dummy work to separate blocks. At this point, v4 is of type Reference<Object>.
+ add-int/lit16 v3, v3, 1
+
+:LabelMerge
+ # Merge the uninitialized Object from line 12 with the reference to Object from 31. Older
+ # rules set any reference merged with Object to Object. This is wrong in the case of the
+ # other reference being an uninitialized reference, as we'd suddenly allow calling on it.
+
+ # Test whether it's some initialized reference by calling hashCode. This should fail, as we
+ # merged initialized and uninitialized.
+ invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I
+
+ return-void
+
+.end method
diff --git a/test/800-smali/smali/b_22411633_3.smali b/test/800-smali/smali/b_22411633_3.smali
new file mode 100644
index 0000000000..d1212f13dd
--- /dev/null
+++ b/test/800-smali/smali/b_22411633_3.smali
@@ -0,0 +1,31 @@
+.class public LB22411633_3;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ # Make v3 & v4 defined, just use null.
+ const v3, 0
+ const v4, 0
+
+ # Allocate a java.lang.Object (do not initialize).
+ new-instance v4, Ljava/lang/Object;
+
+ # Branch forward.
+ if-eqz v5, :LabelMerge
+
+ # Create an initialized Object.
+ new-instance v4, Ljava/lang/Object;
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+
+ # Just some random work.
+ add-int/lit16 v3, v3, 1
+
+:LabelMerge
+ # At this point, an initialized and an uninitialized reference are merged. However, the
+ # merge is only from forward branches. If the conflict isn't used (as here), this should
+ # pass the verifier.
+
+ return-void
+
+.end method
diff --git a/test/800-smali/smali/b_22411633_4.smali b/test/800-smali/smali/b_22411633_4.smali
new file mode 100644
index 0000000000..503ca99569
--- /dev/null
+++ b/test/800-smali/smali/b_22411633_4.smali
@@ -0,0 +1,25 @@
+.class public LB22411633_4;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ # Do not merge into the backward branch target.
+ goto :LabelEntry
+
+:LabelBwd
+ # At this point v4 is an uninitialized reference. This should fail to verify.
+ # Note: we make sure that it is an uninitialized reference and not a conflict in sister
+ # file b_22411633_bwdok.smali.
+ invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I
+
+:LabelEntry
+ # Allocate a java.lang.Object (do not initialize).
+ new-instance v4, Ljava/lang/Object;
+
+ # Branch backward.
+ if-eqz v5, :LabelBwd
+
+ return-void
+
+.end method
diff --git a/test/800-smali/smali/b_22411633_5.smali b/test/800-smali/smali/b_22411633_5.smali
new file mode 100644
index 0000000000..b7964f64a5
--- /dev/null
+++ b/test/800-smali/smali/b_22411633_5.smali
@@ -0,0 +1,28 @@
+.class public LB22411633_5;
+.super Ljava/lang/Object;
+
+
+.method public static run(Z)V
+.registers 6
+ # Do not merge into the backward branch target.
+ goto :LabelEntry
+
+:LabelBwd
+ # At this point v4 is an uninitialized reference. We should be able to initialize here
+ # and call a method afterwards.
+ invoke-direct {v4}, Ljava/lang/Object;-><init>()V
+ invoke-virtual {v4}, Ljava/lang/Object;->hashCode()I
+
+ # Make sure this is not an infinite loop.
+ const v5, 1
+
+:LabelEntry
+ # Allocate a java.lang.Object (do not initialize).
+ new-instance v4, Ljava/lang/Object;
+
+ # Branch backward.
+ if-eqz v5, :LabelBwd
+
+ return-void
+
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 8be6418d77..8da2af4e84 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -109,6 +109,16 @@ public class Main {
new Object[] { false }, null, null));
testCases.add(new TestCase("b/22331663 (fail)", "B22331663Fail", "run",
new Object[] { false }, new VerifyError(), null));
+ testCases.add(new TestCase("b/22411633 (1)", "B22411633_1", "run", new Object[] { false },
+ null, null));
+ testCases.add(new TestCase("b/22411633 (2)", "B22411633_2", "run", new Object[] { false },
+ new VerifyError(), null));
+ testCases.add(new TestCase("b/22411633 (3)", "B22411633_3", "run", new Object[] { false },
+ null, null));
+ testCases.add(new TestCase("b/22411633 (4)", "B22411633_4", "run", new Object[] { false },
+ new VerifyError(), null));
+ testCases.add(new TestCase("b/22411633 (5)", "B22411633_5", "run", new Object[] { false },
+ null, null));
}
public void runTests() {
diff --git a/test/955-lambda-smali/expected.txt b/test/955-lambda-smali/expected.txt
index 0a5b5fd37d..5059f4be12 100644
--- a/test/955-lambda-smali/expected.txt
+++ b/test/955-lambda-smali/expected.txt
@@ -3,6 +3,7 @@ Hello world! (0-args, no closure)
ABCD Hello world! (4-args, no closure)
Caught NPE
(BoxUnbox) Hello boxing world! (0-args, no closure)
+(BoxUnbox) Boxing repeatedly yields referentially-equal objects
(BoxUnbox) Caught NPE for unbox-lambda
(BoxUnbox) Caught NPE for box-lambda
(BoxUnbox) Caught ClassCastException for unbox-lambda
diff --git a/test/955-lambda-smali/smali/BoxUnbox.smali b/test/955-lambda-smali/smali/BoxUnbox.smali
index 5e6673368c..108b5fafbc 100644
--- a/test/955-lambda-smali/smali/BoxUnbox.smali
+++ b/test/955-lambda-smali/smali/BoxUnbox.smali
@@ -23,15 +23,14 @@
.end method
.method public static run()V
-.registers 2
- # Trivial 0-arg hello world
- create-lambda v0, LBoxUnbox;->doHelloWorld(Ljava/lang/reflect/ArtMethod;)V
- # TODO: create-lambda should not write to both v0 and v1
- invoke-lambda v0, {}
+ .registers 0
+ invoke-static {}, LBoxUnbox;->testBox()V
+ invoke-static {}, LBoxUnbox;->testBoxEquality()V
invoke-static {}, LBoxUnbox;->testFailures()V
invoke-static {}, LBoxUnbox;->testFailures2()V
invoke-static {}, LBoxUnbox;->testFailures3()V
+ invoke-static {}, LBoxUnbox;->forceGC()V
return-void
.end method
@@ -48,6 +47,47 @@
return-void
.end method
+# Test boxing and unboxing; the same lambda should be invoked as if there was no box.
+.method private static testBox()V
+ .registers 3
+
+ create-lambda v0, LBoxUnbox;->doHelloWorld(Ljava/lang/reflect/ArtMethod;)V
+ box-lambda v2, v0 # v2 = box(v0)
+ unbox-lambda v0, v2, Ljava/lang/reflect/ArtMethod; # v0 = unbox(v2)
+ invoke-lambda v0, {}
+
+ return-void
+.end method
+
+# Test that boxing the same lambda twice yields the same object.
+.method private static testBoxEquality()V
+ .registers 6 # 0 parameters, 6 locals
+
+ create-lambda v0, LBoxUnbox;->doHelloWorld(Ljava/lang/reflect/ArtMethod;)V
+ box-lambda v2, v0 # v2 = box(v0)
+ box-lambda v3, v0 # v3 = box(v0)
+
+    # The objects should be non-null, and they should be the same reference
+ if-eqz v2, :is_zero
+ if-ne v2, v3, :is_not_equal
+
+ const-string v4, "(BoxUnbox) Boxing repeatedly yields referentially-equal objects"
+ goto :end
+
+:is_zero
+ const-string v4, "(BoxUnbox) Boxing repeatedly FAILED: boxing returned null"
+ goto :end
+
+:is_not_equal
+ const-string v4, "(BoxUnbox) Boxing repeatedly FAILED: objects were not same reference"
+ goto :end
+
+:end
+ sget-object v5, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v5, v4}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
+ return-void
+.end method
+
# Test that exceptions are thrown as expected when opcodes are used incorrectly
.method private static testFailures()V
.registers 4 # 0 parameters, 4 locals
@@ -116,3 +156,14 @@
.catch Ljava/lang/ClassCastException; {:start .. :end} :handler
.end method
+
+
+# Force a GC. Used to ensure our weak reference table of boxed lambdas is getting swept.
+.method private static forceGC()V
+ .registers 1
+ invoke-static {}, Ljava/lang/Runtime;->getRuntime()Ljava/lang/Runtime;
+ move-result-object v0
+ invoke-virtual {v0}, Ljava/lang/Runtime;->gc()V
+
+ return-void
+.end method
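The testBoxEquality and forceGC methods exercise the runtime's table of boxed lambdas: boxing the same closure twice must return the same object, and the table's entries behave like weak references so a GC can sweep boxes that are no longer reachable. A rough sketch of that idea (hypothetical types, not ART's actual lambda::BoxTable API):

    #include <unordered_map>

    struct Closure {};   // stand-in for a lambda closure
    struct Object {};    // stand-in for a boxed, heap-allocated lambda

    class BoxTable {
     public:
      Object* BoxLambda(Closure* closure) {
        auto it = boxed_.find(closure);
        if (it != boxed_.end()) {
          return it->second;            // same closure boxed twice => same object
        }
        Object* boxed = new Object();   // the runtime would allocate a managed object here
        boxed_[closure] = boxed;
        return boxed;
      }

      // Called during GC: drop entries whose boxed object is no longer reachable.
      void SweepDeadBoxes(bool (*is_dead)(Object*)) {
        for (auto it = boxed_.begin(); it != boxed_.end();) {
          if (is_dead(it->second)) {
            it = boxed_.erase(it);
          } else {
            ++it;
          }
        }
      }

     private:
      std::unordered_map<Closure*, Object*> boxed_;  // conceptually weak references
    };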
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 65ddf8da17..3d5c483a3b 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -112,6 +112,9 @@ COMPILER_TYPES :=
ifeq ($(ART_TEST_DEFAULT_COMPILER),true)
COMPILER_TYPES += default
endif
+ifeq ($(ART_TEST_INTERPRETER_ACCESS_CHECKS),true)
+ COMPILER_TYPES += interpreter-access-checks
+endif
ifeq ($(ART_TEST_INTERPRETER),true)
COMPILER_TYPES += interpreter
endif
@@ -260,6 +263,28 @@ endif
TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
+# Temporarily disable some broken tests when forcing access checks in interpreter b/22414682
+TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \
+ 004-JniTest \
+ 005-annotations \
+ 044-proxy \
+ 073-mismatched-field \
+ 088-monitor-verification \
+ 135-MirandaDispatch \
+ 137-cfi \
+ 412-new-array \
+ 471-uninitialized-locals \
+ 506-verify-aput \
+ 800-smali
+
+ifneq (,$(filter interpreter-access-checks,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ interpreter-access-checks,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
+
# Tests that are broken with GC stress.
# 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
# hope the second process got into the expected state. The slowness of gcstress makes this bad.
@@ -604,7 +629,8 @@ endif
# Create a rule to build and run a test following the form:
# test-art-{1: host or target}-run-test-{2: debug ndebug}-{3: prebuild no-prebuild no-dex2oat}-
-# {4: interpreter default optimizing jit}-{5: relocate nrelocate relocate-npatchoat}-
+# {4: interpreter default optimizing jit interpreter-access-checks}-
+# {5: relocate nrelocate relocate-npatchoat}-
# {6: trace or ntrace}-{7: gcstress gcverify cms}-{8: forcecopy checkjni jni}-
# {9: no-image image picimage}-{10: pictest npictest}-
# {11: ndebuggable debuggable}-{12: test name}{13: 32 or 64}
@@ -674,6 +700,9 @@ define define-test-art-run-test
ifeq ($(4),interpreter)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES
run_test_options += --interpreter
+ else ifeq ($(4),interpreter-access-checks)
+ test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_ACCESS_CHECKS_RULES
+ run_test_options += --interpreter --verify-soft-fail
else
ifeq ($(4),default)
test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 842d87e565..db64b77f6c 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -42,7 +42,7 @@ TIME_OUT="y"
TIME_OUT_VALUE=10
USE_GDB="n"
USE_JVM="n"
-VERIFY="y"
+VERIFY="y" # y=yes,n=no,s=softfail
ZYGOTE=""
DEX_VERIFY=""
USE_DEX2OAT_AND_PATCHOAT="y"
@@ -149,6 +149,9 @@ while true; do
elif [ "x$1" = "x--no-verify" ]; then
VERIFY="n"
shift
+ elif [ "x$1" = "x--verify-soft-fail" ]; then
+ VERIFY="s"
+ shift
elif [ "x$1" = "x--no-optimize" ]; then
OPTIMIZE="n"
shift
@@ -201,7 +204,11 @@ if [ "$ZYGOTE" = "" ]; then
if [ "$VERIFY" = "y" ]; then
JVM_VERIFY_ARG="-Xverify:all"
msg "Performing verification"
- else
+ elif [ "$VERIFY" = "s" ]; then
+    JVM_VERIFY_ARG="-Xverify:all"
+ DEX_VERIFY="-Xverify:softfail"
+ msg "Forcing verification to be soft fail"
+ else # VERIFY = "n"
DEX_VERIFY="-Xverify:none"
JVM_VERIFY_ARG="-Xverify:none"
msg "Skipping verification"
@@ -263,7 +270,10 @@ if [ "$INTERPRETER" = "y" ]; then
INT_OPTS="-Xint"
if [ "$VERIFY" = "y" ] ; then
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only"
- else
+ elif [ "$VERIFY" = "s" ]; then
+ COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-at-runtime"
+ DEX_VERIFY="${DEX_VERIFY} -Xverify:softfail"
+ else # VERIFY = "n"
COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none"
DEX_VERIFY="${DEX_VERIFY} -Xverify:none"
fi
diff --git a/test/run-test b/test/run-test
index bdf680b98a..eabbab32a9 100755
--- a/test/run-test
+++ b/test/run-test
@@ -262,6 +262,10 @@ while true; do
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
+ elif [ "x$1" = "x--verify-soft-fail" ]; then
+ run_args="${run_args} --verify-soft-fail"
+ image_suffix="-interpreter-access-checks"
+ shift
elif [ "x$1" = "x--no-optimize" ]; then
run_args="${run_args} --no-optimize"
shift
@@ -520,6 +524,9 @@ if [ "$usage" = "yes" ]; then
echo " --optimizing Enable optimizing compiler (default)."
echo " --quick Use Quick compiler (off by default)."
echo " --no-verify Turn off verification (on by default)."
+ echo " --verify-soft-fail Force soft fail verification (off by default)."
+ echo " Verification is enabled if neither --no-verify"
+ echo " nor --verify-soft-fail is specified."
echo " --no-optimize Turn off optimization (on by default)."
echo " --no-precise Turn off precise GC (on by default)."
echo " --zygote Spawn the process from the Zygote." \
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 65c3fed4f3..992a8a6ea1 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -144,5 +144,11 @@
modes: [device],
names: ["libcore.io.OsTest#test_xattr"],
bug: 22258911
+},
+{
+ description: "fails on L builds: needs investigation",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["org.apache.harmony.tests.java.lang.ClassTest#test_forNameLjava_lang_String"]
}
]