summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--compiler/oat_test.cc2
-rw-r--r--compiler/optimizing/code_generator_arm.cc3
-rw-r--r--compiler/optimizing/code_generator_arm64.cc3
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc3
-rw-r--r--compiler/optimizing/code_generator_mips.cc3
-rw-r--r--compiler/optimizing/code_generator_mips64.cc3
-rw-r--r--compiler/optimizing/code_generator_x86.cc3
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc3
-rw-r--r--compiler/optimizing/inliner.cc9
-rw-r--r--compiler/optimizing/instruction_builder.cc5
-rw-r--r--compiler/optimizing/nodes.h4
-rw-r--r--compiler/optimizing/prepare_for_register_allocation.cc33
-rw-r--r--compiler/optimizing/prepare_for_register_allocation.h1
-rw-r--r--compiler/utils/assembler_thumb_test_expected.cc.inc2
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S114
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S128
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S112
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S104
-rw-r--r--runtime/arch/quick_alloc_entrypoints.S34
-rw-r--r--runtime/arch/stub_test.cc12
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S175
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S127
-rw-r--r--runtime/asm_support.h2
-rw-r--r--runtime/class_linker.cc70
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h65
-rw-r--r--runtime/entrypoints/entrypoint_utils.h21
-rw-r--r--runtime/entrypoints/quick/quick_alloc_entrypoints.cc136
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints_list.h7
-rw-r--r--runtime/entrypoints_order_test.cc12
-rw-r--r--runtime/gc/collector/concurrent_copying.cc21
-rw-r--r--runtime/gc/collector/concurrent_copying.h3
-rw-r--r--runtime/gc/collector/garbage_collector.h5
-rw-r--r--runtime/gc/collector/mark_compact.cc10
-rw-r--r--runtime/gc/collector/mark_compact.h3
-rw-r--r--runtime/gc/collector/mark_sweep.cc9
-rw-r--r--runtime/gc/collector/mark_sweep.h3
-rw-r--r--runtime/gc/collector/semi_space.cc7
-rw-r--r--runtime/gc/collector/semi_space.h3
-rw-r--r--runtime/gc/reference_processor.cc4
-rw-r--r--runtime/gc/reference_queue.cc10
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc5
-rw-r--r--runtime/interpreter/mterp/mterp.cc7
-rw-r--r--runtime/java_vm_ext.cc5
-rw-r--r--runtime/mirror/object_reference-inl.h9
-rw-r--r--runtime/mirror/object_reference.h3
-rw-r--r--runtime/monitor.cc6
-rw-r--r--runtime/native/dalvik_system_VMStack.cc12
-rw-r--r--runtime/oat.h2
-rw-r--r--runtime/openjdkjvmti/Android.bp1
-rw-r--r--runtime/openjdkjvmti/OpenjdkJvmTi.cc9
-rw-r--r--runtime/openjdkjvmti/ti_stack.cc241
-rw-r--r--runtime/openjdkjvmti/ti_stack.h9
-rw-r--r--runtime/openjdkjvmti/ti_thread.cc357
-rw-r--r--runtime/openjdkjvmti/ti_thread.h51
-rw-r--r--runtime/runtime.cc48
-rw-r--r--runtime/thread.cc3
-rw-r--r--runtime/thread.h6
-rw-r--r--test/129-ThreadGetId/expected.txt1
-rw-r--r--test/129-ThreadGetId/src/Main.java14
-rw-r--r--test/529-checker-unresolved/src/Main.java4
-rw-r--r--test/621-checker-new-instance/expected.txt0
-rw-r--r--test/621-checker-new-instance/info.txt1
-rw-r--r--test/621-checker-new-instance/src/Main.java53
-rw-r--r--test/909-attach-agent/expected.txt8
-rwxr-xr-xtest/909-attach-agent/run10
-rw-r--r--test/911-get-stack-trace/expected.txt737
-rw-r--r--test/911-get-stack-trace/src/Main.java109
-rw-r--r--test/911-get-stack-trace/stack_trace.cc72
-rwxr-xr-xtest/924-threads/build17
-rw-r--r--test/924-threads/expected.txt30
-rw-r--r--test/924-threads/info.txt1
-rwxr-xr-xtest/924-threads/run19
-rw-r--r--test/924-threads/src/Main.java216
-rw-r--r--test/924-threads/threads.cc104
-rw-r--r--test/Android.bp1
-rw-r--r--test/Android.run-test.mk11
76 files changed, 2270 insertions, 1186 deletions
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 86d92ff0b5..4180e0e6c9 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -487,7 +487,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(163 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3009103ac7..541a1c5b8f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3936,7 +3936,6 @@ void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(R0));
}
@@ -3954,7 +3953,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4b6a9bed61..9aaeadb44a 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4744,7 +4744,6 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(LocationFrom(kArtMethodRegister));
} else {
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -4762,7 +4761,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index b1f6d599ab..c769decaa0 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3948,7 +3948,6 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConventionARMVIXL calling_convention;
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(LocationFrom(r0));
}
@@ -3970,7 +3969,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 9af03e8153..bc62854e5d 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5903,7 +5903,6 @@ void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -5920,7 +5919,7 @@ void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 046d59cee7..1b9c6da460 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3844,7 +3844,6 @@ void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}
@@ -3862,7 +3861,7 @@ void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f13b60aebf..a9b717db4f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4150,7 +4150,6 @@ void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
} else {
InvokeRuntimeCallingConvention calling_convention;
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
}
@@ -4166,7 +4165,7 @@ void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 89f4ae04d7..261473505f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -4038,7 +4038,6 @@ void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
} else {
locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
locations->SetOut(Location::RegisterLocation(RAX));
}
@@ -4055,7 +4054,7 @@ void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction)
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
} else {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
DCHECK(!codegen_->IsLeafMethod());
}
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e5d05e9e6d..c970e5cbba 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1428,15 +1428,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
return false;
}
- if (current->IsNewInstance() &&
- (current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
- VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
- << " could not be inlined because it is using an entrypoint"
- << " with access checks";
- // Allocation entrypoint does not handle inlined frames.
- return false;
- }
-
if (current->IsNewArray() &&
(current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
VLOG(compiler) << "Method " << callee_dex_file.PrettyMethod(method_index)
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 768b1d80a1..009d549547 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -917,11 +917,11 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
bool finalizable;
bool needs_access_check = NeedsAccessCheck(type_index, dex_cache, &finalizable);
- // Only the non-resolved entrypoint handles the finalizable class case. If we
+ // Only the access check entrypoint handles the finalizable class case. If we
// need access checks, then we haven't resolved the method and the class may
// again be finalizable.
QuickEntrypointEnum entrypoint = (finalizable || needs_access_check)
- ? kQuickAllocObject
+ ? kQuickAllocObjectWithChecks
: kQuickAllocObjectInitialized;
if (outer_dex_cache.Get() != dex_cache.Get()) {
@@ -946,7 +946,6 @@ bool HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t d
AppendInstruction(new (arena_) HNewInstance(
cls,
- graph_->GetCurrentMethod(),
dex_pc,
type_index,
*dex_compilation_unit_->GetDexFile(),
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7d6f6164ec..ea9a94c420 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3774,10 +3774,9 @@ class HCompare FINAL : public HBinaryOperation {
DISALLOW_COPY_AND_ASSIGN(HCompare);
};
-class HNewInstance FINAL : public HExpression<2> {
+class HNewInstance FINAL : public HExpression<1> {
public:
HNewInstance(HInstruction* cls,
- HCurrentMethod* current_method,
uint32_t dex_pc,
dex::TypeIndex type_index,
const DexFile& dex_file,
@@ -3791,7 +3790,6 @@ class HNewInstance FINAL : public HExpression<2> {
SetPackedFlag<kFlagNeedsAccessCheck>(needs_access_check);
SetPackedFlag<kFlagFinalizable>(finalizable);
SetRawInputAt(0, cls);
- SetRawInputAt(1, current_method);
}
dex::TypeIndex GetTypeIndex() const { return type_index_; }
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index f9ac3a0f72..db7c1fbb06 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -134,39 +134,6 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
}
}
-void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
- HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
- const bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
- // Change the entrypoint to kQuickAllocObject if either:
- // - the class is finalizable (only kQuickAllocObject handles finalizable classes),
- // - the class needs access checks (we do not know if it's finalizable),
- // - or the load class has only one use.
- if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObject);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex().index_), 0);
- if (has_only_one_use) {
- // We've just removed the only use of the HLoadClass. Since we don't run DCE after this pass,
- // do it manually if possible.
- if (!load_class->CanThrow()) {
- // If the load class can not throw, it has no side effects and can be removed if there is
- // only one use.
- load_class->GetBlock()->RemoveInstruction(load_class);
- } else if (!instruction->GetEnvironment()->IsFromInlinedInvoke() &&
- CanMoveClinitCheck(load_class, instruction)) {
- // The allocation entry point that deals with access checks does not work with inlined
- // methods, so we need to check whether this allocation comes from an inlined method.
- // We also need to make the same check as for moving clinit check, whether the HLoadClass
- // has the clinit check responsibility or not (HLoadClass can throw anyway).
- // If it needed access checks, we delegate the access check to the allocation.
- if (load_class->NeedsAccessCheck()) {
- instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
- }
- load_class->GetBlock()->RemoveInstruction(load_class);
- }
- }
- }
-}
-
bool PrepareForRegisterAllocation::CanEmitConditionAt(HCondition* condition,
HInstruction* user) const {
if (condition->GetNext() != user) {
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a6791482a7..c128227654 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -44,7 +44,6 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
void VisitCondition(HCondition* condition) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- void VisitNewInstance(HNewInstance* instruction) OVERRIDE;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index ab4f9e944c..a3fce02970 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -5610,7 +5610,7 @@ const char* const VixlJniHelpersResults[] = {
" 214: ecbd 8a10 vpop {s16-s31}\n",
" 218: e8bd 8de0 ldmia.w sp!, {r5, r6, r7, r8, sl, fp, pc}\n",
" 21c: 4660 mov r0, ip\n",
- " 21e: f8d9 c2b0 ldr.w ip, [r9, #688] ; 0x2b0\n",
+ " 21e: f8d9 c2ac ldr.w ip, [r9, #684] ; 0x2ac\n",
" 222: 47e0 blx ip\n",
nullptr
};
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index a71ab4b53c..61d1607112 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1124,28 +1124,23 @@ END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
- // r2, r3, r12: free.
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
-
+ // r0: type/return value, r9: Thread::Current
+ // r1, r2, r3, r12: free.
ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// TODO: consider using ldrd.
ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
+ ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1159,7 +1154,7 @@ ENTRY art_quick_alloc_object_rosalloc
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
+ cbz r3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1172,8 +1167,8 @@ ENTRY art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF r2
- str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
+ POISON_HEAP_REF r0
+ str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the class status load with respect
@@ -1204,20 +1199,20 @@ ENTRY art_quick_alloc_object_rosalloc
mov r0, r3 // Set the return value and return.
bx lr
-.Lart_quick_alloc_object_rosalloc_slow_path:
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 @ pass Thread::Current
+ bl artAllocObjectFromCodeResolvedRosAlloc @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_rosalloc
+END art_quick_alloc_object_resolved_rosalloc
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
//
-// r0: type_idx/return value, r1: ArtMethod*, r2: class, r9: Thread::Current, r3, r12: free.
-// Need to preserve r0 and r1 to the slow path.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
- cbz r2, \slowPathLabel // Check null class
+// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
+// Need to preserve r0 to the slow path.
+.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel
// Load thread_local_pos (r12) and
// thread_local_end (r3) with ldrd.
// Check constraints for ldrd.
@@ -1226,20 +1221,20 @@ END art_quick_alloc_object_rosalloc
#endif
ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
+ ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Reload old thread_local_pos (r0)
// for the return value.
- ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
- add r1, r0, r3
+ ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+ add r1, r2, r3
str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF r2
- str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ POISON_HEAP_REF r0
+ str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
@@ -1247,71 +1242,46 @@ END art_quick_alloc_object_rosalloc
// Alternatively we could use "ishst"
// if we use load-acquire for the
// object size load.)
+ mov r0, r2
dmb ish
bx lr
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_RESOLVED_OBJECT(_tlab, TLAB).
+ENTRY art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
- // r2, r3, r12: free.
+ // r0: type, r9: Thread::Current
+ // r1, r2, r3, r12: free.
#if defined(USE_READ_BARRIER)
mvn r0, #0 // Read barrier not supported here.
bx lr // Return -1.
#endif
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r2, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_tlab
+END art_quick_alloc_object_resolved_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-ENTRY art_quick_alloc_object_region_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+ENTRY art_quick_alloc_object_resolved_region_tlab
// Fast path tlab allocation.
- // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current, r2, r3, r12: free.
+ // r0: type, r9: Thread::Current, r1, r2, r3, r12: free.
#if !defined(USE_READ_BARRIER)
eor r0, r0, r0 // Read barrier must be enabled here.
sub r0, r0, #1 // Return -1.
bx lr
#endif
- ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
- // Load the class (r2)
- ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // Read barrier for class load.
- ldr r3, [r9, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz r3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_marking:
- cbz r2, .Lart_quick_alloc_object_region_tlab_slow_path // Null check for loading lock word.
- // Check lock word for mark bit, if marked do the allocation.
- ldr r3, [r2, MIRROR_OBJECT_LOCK_WORD_OFFSET]
- ands r3, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
- bne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark
- // the class.
- push {r0, r1, r3, lr} // Save registers. r3 is pushed only
- // to align sp by 16 bytes.
- mov r0, r2 // Pass the class as the first param.
- bl artReadBarrierMark
- mov r2, r0 // Get the (marked) class back.
- pop {r0, r1, r3, lr}
- b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
+.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r2, r9 // Pass Thread::Current.
- bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
+ mov r1, r9 // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedRegionTLAB // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_region_tlab
+END art_quick_alloc_object_resolved_region_tlab
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index b88515f21f..8b1e0388c6 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1669,7 +1669,6 @@ END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_REGION_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) implemented in asm
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
@@ -1682,27 +1681,23 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization
// checks.
- bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ bhs .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size. Since the size is
// already aligned we can combine the
@@ -1715,7 +1710,7 @@ ENTRY art_quick_alloc_object_rosalloc
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
- cbz x3, .Lart_quick_alloc_object_rosalloc_slow_path
+ cbz x3, .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
// and update the list head with the
@@ -1728,8 +1723,8 @@ ENTRY art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF w2
- str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
+ POISON_HEAP_REF w0
+ str w0, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
// the object size load with respect
@@ -1759,13 +1754,13 @@ ENTRY art_quick_alloc_object_rosalloc
mov x0, x3 // Set the return value and return.
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
- mov x2, xSELF // pass Thread::Current
- bl artAllocObjectFromCodeRosAlloc // (uint32_t type_idx, Method* method, Thread*)
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ mov x1, xSELF // pass Thread::Current
+ bl artAllocObjectFromCodeResolvedRosAlloc // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_rosalloc
+END art_quick_alloc_object_resolved_rosalloc
// The common fast path code for art_quick_alloc_array_region_tlab.
@@ -1834,16 +1829,6 @@ END art_quick_alloc_object_rosalloc
ret
.endm
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-//
-// x0: type_idx/return value, x1: ArtMethod*, x2: Class*, xSELF(x19): Thread::Current
-// x3-x7: free.
-// Need to preserve x0 and x1 to the slow path.
-.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
- cbz x2, \slowPathLabel // Check null class
- ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
-.endm
-
// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
@@ -1853,20 +1838,18 @@ END art_quick_alloc_object_rosalloc
.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
- ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
+ ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
add x6, x4, x7 // Add object size to tlab pos.
cmp x6, x5 // Check if it fits, overflow works
// since the tlab pos and end are 32
// bit values.
bhi \slowPathLabel
- // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
- mov x0, x4
str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add x5, x5, #1
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
- POISON_HEAP_REF w2
- str w2, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
+ POISON_HEAP_REF w0
+ str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
// that the code after this allocation
// site will see the right values in
@@ -1874,91 +1857,52 @@ END art_quick_alloc_object_rosalloc
// Alternatively we could use "ishst"
// if we use load-acquire for the
// object size load.)
+ mov x0, x4
dmb ish
ret
.endm
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
-ENTRY art_quick_alloc_object_tlab
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB).
+ENTRY art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // x0: type_idx/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // x2-x7: free.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
#if defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier not supported here.
ret // Return -1.
#endif
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x2, xSELF // Pass Thread::Current.
- bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
+ ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
+ mov x1, xSELF // Pass Thread::Current.
+ bl artAllocObjectFromCodeResolvedTLAB // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END art_quick_alloc_object_tlab
+END art_quick_alloc_object_resolved_tlab
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier
+.macro GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB name, entrypoint, fast_path
ENTRY \name
// Fast path region tlab allocation.
- // x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
- // If is_resolved is 1 then x0 is the resolved type, otherwise it is the index.
- // x2-x7: free.
+ // x0: type, xSELF(x19): Thread::Current
+ // x1-x7: free.
#if !defined(USE_READ_BARRIER)
mvn x0, xzr // Read barrier must be enabled here.
ret // Return -1.
#endif
-.if \is_resolved
- mov x2, x0 // class is actually stored in x0 already
-.else
- ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
- // Load the class (x2)
- ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
- // If the class is null, go slow path. The check is required to read the lock word.
- cbz w2, .Lslow_path\name
-.endif
-.if \read_barrier
- // Most common case: GC is not marking.
- ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lmarking\name
-.endif
.Ldo_allocation\name:
\fast_path .Lslow_path\name
-.Lmarking\name:
-.if \read_barrier
- // GC is marking, check the lock word of the class for the mark bit.
- // Class is not null, check mark bit in lock word.
- ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- // If the bit is not zero, do the allocation.
- tbnz w3, #LOCK_WORD_MARK_BIT_SHIFT, .Ldo_allocation\name
- // The read barrier slow path. Mark
- // the class.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 // Save registers (x0, x1, lr).
- SAVE_REG xLR, 24 // Align sp by 16 bytes.
- mov x0, x2 // Pass the class as the first param.
- bl artReadBarrierMark
- mov x2, x0 // Get the (marked) class back.
- RESTORE_REG xLR, 24
- RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32 // Restore registers.
- b .Ldo_allocation\name
-.endif
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
- mov x2, xSELF // Pass Thread::Current.
- bl \entrypoint // (uint32_t type_idx, Method* method, Thread*)
+ mov x1, xSELF // Pass Thread::Current.
+ bl \entrypoint // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm
-// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_TLAB.
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1
-// No read barrier for the resolved or initialized cases since the caller is responsible for the
-// read barrier due to the to-space invariant.
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0
+GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED
+GENERATE_ALLOC_OBJECT_RESOLVED_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED
// TODO: We could use this macro for the normal tlab allocator too.
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 3e8cdc9374..964ea563b0 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1831,116 +1831,10 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
-
- # Fast path rosalloc allocation
- # a0: type_idx
- # a1: ArtMethod*
- # s1: Thread::Current
- # -----------------------------
- # t0: class
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # t4: thread stack bottom offset
- # v0: free list head
- #
- # t5, t6 : temps
-
- lw $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_32($a1) # Load dex cache resolved types
- # array.
-
- sll $t5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
- addu $t5, $t0, $t5 # Compute the index.
- lw $t0, 0($t5) # Load class (t0).
- beqz $t0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- li $t6, MIRROR_CLASS_STATUS_INITIALIZED
- lw $t5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
- bne $t5, $t6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Add a fake dependence from the following access flag and size loads to the status load. This
- # is to prevent those loads from being reordered above the status load and reading wrong values.
- xor $t5, $t5, $t5
- addu $t0, $t0, $t5
-
- lw $t5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has
- li $t6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
- and $t6, $t5, $t6
- bnez $t6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
- lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
- bgeu $t3, $t4, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lw $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
- li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation.
- bgtu $t1, $t5, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket
- # quantum size and divide by the quantum size and subtract by 1.
-
- addiu $t1, $t1, -1 # Decrease obj size and shift right
- srl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # by quantum.
-
- sll $t2, $t1, POINTER_SIZE_SHIFT
- addu $t2, $t2, $s1
- lw $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
-
- lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqz $v0, .Lart_quick_alloc_object_rosalloc_slow_path
- nop
-
- # Load the next pointer of the head and update the list head with the next pointer.
-
- lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $t0
- sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
-
- sw $v0, 0($t3)
- addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
-
- lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $t5, $t5, -1
- sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
- sync # Fence.
-
- jalr $zero, $ra
- nop
-
- .Lart_quick_alloc_object_rosalloc_slow_path:
-
- SETUP_SAVE_REFS_ONLY_FRAME
- la $t9, artAllocObjectFromCodeRosAlloc
- jalr $t9
- move $a2, $s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-END art_quick_alloc_object_rosalloc
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 0861d2d73e..2a18d53aea 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1775,107 +1775,9 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-ENTRY art_quick_alloc_object_rosalloc
-
- # Fast path rosalloc allocation
- # a0: type_idx
- # a1: ArtMethod*
- # s1: Thread::Current
- # -----------------------------
- # t0: class
- # t1: object size
- # t2: rosalloc run
- # t3: thread stack top offset
- # a4: thread stack bottom offset
- # v0: free list head
- #
- # a5, a6 : temps
-
- ld $t0, ART_METHOD_DEX_CACHE_TYPES_OFFSET_64($a1) # Load dex cache resolved types array.
-
- dsll $a5, $a0, COMPRESSED_REFERENCE_SIZE_SHIFT # Shift the value.
- daddu $a5, $t0, $a5 # Compute the index.
- lwu $t0, 0($a5) # Load class (t0).
- beqzc $t0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- li $a6, MIRROR_CLASS_STATUS_INITIALIZED
- lwu $a5, MIRROR_CLASS_STATUS_OFFSET($t0) # Check class status.
- bnec $a5, $a6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Add a fake dependence from the following access flag and size loads to the status load. This
- # is to prevent those loads from being reordered above the status load and reading wrong values.
- xor $a5, $a5, $a5
- daddu $t0, $t0, $a5
-
- lwu $a5, MIRROR_CLASS_ACCESS_FLAGS_OFFSET($t0) # Check if access flags has
- li $a6, ACCESS_FLAGS_CLASS_IS_FINALIZABLE # kAccClassIsFinalizable.
- and $a6, $a5, $a6
- bnezc $a6, .Lart_quick_alloc_object_rosalloc_slow_path
-
- ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
- ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
- bgeuc $t3, $a4, .Lart_quick_alloc_object_rosalloc_slow_path
-
- lwu $t1, MIRROR_CLASS_OBJECT_SIZE_OFFSET($t0) # Load object size (t1).
- li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
- # allocation.
- bltuc $a5, $t1, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Compute the rosalloc bracket index from the size. Allign up the size by the rosalloc bracket
- # quantum size and divide by the quantum size and subtract by 1.
- daddiu $t1, $t1, -1 # Decrease obj size and shift right by
- dsrl $t1, $t1, ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT # quantum.
-
- dsll $t2, $t1, POINTER_SIZE_SHIFT
- daddu $t2, $t2, $s1
- ld $t2, THREAD_ROSALLOC_RUNS_OFFSET($t2) # Load rosalloc run (t2).
-
- # Load the free list head (v0).
- # NOTE: this will be the return val.
- ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
- beqzc $v0, .Lart_quick_alloc_object_rosalloc_slow_path
-
- # Load the next pointer of the head and update the list head with the next pointer.
- ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
- sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
- # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
- # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
- POISON_HEAP_REF $t0
- sw $t0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
- # Push the new object onto the thread local allocation stack and increment the thread local
- # allocation stack top.
- sd $v0, 0($t3)
- daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
- sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
- # Decrement the size of the free list.
- lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
- addiu $a5, $a5, -1
- sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
- sync # Fence.
-
- jalr $zero, $ra
- .cpreturn # Restore gp from t8 in branch delay slot.
-
-.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME
- jal artAllocObjectFromCodeRosAlloc
- move $a2 ,$s1 # Pass self as argument.
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-END art_quick_alloc_object_rosalloc
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
/*
* Entry from managed code to resolve a string, this stub will allocate a String and deliver an
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index db2fdcabea..abd9046174 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -15,15 +15,13 @@
*/
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
-// Called by managed code to allocate an object.
-TWO_ARG_DOWNCALL art_quick_alloc_object\c_suffix, artAllocObjectFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of a resolved class.
-TWO_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object of an initialized class.
-TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
-TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array.
THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Called by managed code to allocate an array of a resolve class.
@@ -61,14 +59,12 @@ GENERATE_ALLOC_ENTRYPOINTS _region_tlab_instrumented, RegionTLABInstrumented
// Generate the allocation entrypoints for each allocator. This is used as an alternative to
// GNERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
// hand-written assembly.
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
@@ -93,8 +89,7 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
@@ -109,8 +104,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
@@ -129,7 +123,6 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_TLAB_ALLOCATOR
.endm
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
@@ -142,7 +135,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
@@ -156,8 +148,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMal
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
@@ -169,7 +160,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
@@ -182,7 +172,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAl
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
@@ -195,7 +184,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
@@ -208,7 +196,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, B
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
@@ -221,7 +208,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstr
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
@@ -234,7 +220,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
@@ -247,7 +232,6 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionI
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 9e385f839f..ee65fa8ab0 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1062,12 +1062,8 @@ TEST_F(StubTest, AllocObject) {
EXPECT_FALSE(self->IsExceptionPending());
{
- // Use an arbitrary method from c to use as referrer
- size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex().index_), // type_idx
- // arbitrary
- reinterpret_cast<size_t>(c->GetVirtualMethod(0, kRuntimePointerSize)),
- 0U,
- StubTest::GetEntrypoint(self, kQuickAllocObject),
+ size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
+ StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1078,8 +1074,6 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use null in the second argument as we do not need a method here (not used in
- // resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
self);
@@ -1092,8 +1086,6 @@ TEST_F(StubTest, AllocObject) {
}
{
- // We can use null in the second argument as we do not need a method here (not used in
- // resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index c6f4c0346f..62c29cf268 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -956,52 +956,42 @@ END_MACRO
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // eax: uint32_t type_idx/return value, ecx: ArtMethod*
- // ebx, edx: free
- PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Load the class (edx)
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_rosalloc_slow_path
-
+ // eax: type/return value
+ // ecx, ebx, edx: free
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Check if the thread local allocation
// stack has room
- movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
- cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi
- jae .Lart_quick_alloc_object_rosalloc_slow_path
+ movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %ecx
+ cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %ecx
+ jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %edi // Load the object size (edi)
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size (ecx)
// Check if the size is for a thread
// local allocation. Also does the
// finalizable and initialization check.
- cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi
- ja .Lart_quick_alloc_object_rosalloc_slow_path
- shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index
+ cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %ecx
+ ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
+ shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %ecx // Calculate the rosalloc bracket index
// from object size.
// Load thread local rosalloc run (ebx)
// Subtract __SIZEOF_POINTER__ to subtract
-                                                            // one from edi as there is no 0 byte run
+                                                            // one from ecx as there is no 0 byte run
// and the size is already aligned.
- movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %edi, __SIZEOF_POINTER__), %ebx
+ movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %ecx, __SIZEOF_POINTER__), %ebx
-                                                            // Load free_list head (edi),
+                                                            // Load free_list head (ecx),
                                                             // this will be the return value.
- movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi
- test %edi, %edi
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %ecx
+ jecxz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Point of no slow path. Won't go to
- // the slow path from here on. Ok to
- // clobber eax and ecx.
- movl %edi, %eax
+ // the slow path from here on.
// Load the next pointer of the head
// and update head of free list with
// next pointer
- movl ROSALLOC_SLOT_NEXT_OFFSET(%eax), %edi
- movl %edi, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
+ movl ROSALLOC_SLOT_NEXT_OFFSET(%ecx), %edx
+ movl %edx, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx)
// Decrement size of free list by 1
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%ebx)
// Store the class pointer in the
@@ -1011,141 +1001,104 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%ecx)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Push the new object onto the thread
// local allocation stack and
// increment the thread local
// allocation stack top.
- movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %edi
- movl %eax, (%edi)
- addl LITERAL(COMPRESSED_REFERENCE_SIZE), %edi
- movl %edi, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
+ movl THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx), %eax
+ movl %ecx, (%eax)
+ addl LITERAL(COMPRESSED_REFERENCE_SIZE), %eax
+ movl %eax, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%ebx)
// No fence needed for x86.
- POP edi
+ movl %ecx, %eax // Move object to return register
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
- POP edi
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
+    subl LITERAL(8), %esp                                       // alignment padding
+    CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx
PUSH eax
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
+END_FUNCTION art_quick_alloc_object_resolved_rosalloc
-// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+// The common fast path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
//
-// EAX: type_idx/return_value, ECX: ArtMethod*, EDX: the class.
-MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
- testl %edx, %edx // Check null class
- jz VAR(slowPathLabel)
+// EAX: type/return_value
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %esi // Load the object size.
- cmpl %edi, %esi // Check if it fits.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%eax), %ecx // Load the object size.
+ cmpl %edi, %ecx // Check if it fits.
ja VAR(slowPathLabel)
- movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
+ movl THREAD_LOCAL_POS_OFFSET(%ebx), %edx // Load thread_local_pos
// as allocated object.
- addl %eax, %esi // Add the object size.
- movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
+ addl %edx, %ecx // Add the object size.
+ movl %ecx, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
incl THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
// Store the class pointer in the header.
// No fence needed for x86.
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%eax)
+ POISON_HEAP_REF eax
+ movl %eax, MIRROR_OBJECT_CLASS_OFFSET(%edx)
+ movl %edx, %eax
POP edi
- POP esi
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
-MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
+// The common slow path code for art_quick_alloc_object_resolved_tlab
+// and art_quick_alloc_object_resolved_region_tlab.
+MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
POP edi
- POP esi
SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx // save ref containing registers for GC
// Outgoing argument set up
- PUSH eax // alignment padding
+ subl LITERAL(8), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx
PUSH eax
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
addl LITERAL(16), %esp
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be called
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be called
// for CC if the GC is not marking.
-DEFINE_FUNCTION art_quick_alloc_object_tlab
+DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
// Fast path tlab allocation.
- // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
- // EBX, EDX: free.
- PUSH esi
+ // EAX: type
+ // EBX, ECX, EDX: free.
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
-END_FUNCTION art_quick_alloc_object_tlab
-
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_region_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
+.Lart_quick_alloc_object_resolved_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
+END_FUNCTION art_quick_alloc_object_resolved_tlab
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
// Fast path region tlab allocation.
- // EAX: uint32_t type_idx/return value, ECX: ArtMethod*.
- // EBX, EDX: free.
+ // EAX: type/return value
+ // EBX, ECX, EDX: free.
#if !defined(USE_READ_BARRIER)
int3
int3
#endif
- PUSH esi
PUSH edi
- movl ART_METHOD_DEX_CACHE_TYPES_OFFSET_32(%ecx), %edx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
- // Read barrier for class load.
- cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
- // Null check so that we can load the lock word.
- testl %edx, %edx
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
- // Check the mark bit, if it is 1 return.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH eax
- PUSH ecx
- // Outgoing argument set up
- subl MACRO_LITERAL(8), %esp // Alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- PUSH edx // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movl %eax, %edx
- addl MACRO_LITERAL(12), %esp
- CFI_ADJUST_CFA_OFFSET(-12)
- POP ecx
- POP eax
- jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
-END_FUNCTION art_quick_alloc_object_region_tlab
+ ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
+.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
+ ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
+END_FUNCTION art_quick_alloc_object_resolved_region_tlab
+
DEFINE_FUNCTION art_quick_resolve_string
SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 4c46b08a9e..facd563428 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -983,7 +983,6 @@ GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have x86_64 specific asm.
// Region TLAB:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
@@ -996,11 +995,9 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
// Normal TLAB:
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
@@ -1009,29 +1006,25 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
-DEFINE_FUNCTION art_quick_alloc_object_rosalloc
+
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc).
+DEFINE_FUNCTION art_quick_alloc_object_resolved_rosalloc
// Fast path rosalloc allocation.
- // RDI: type_idx, RSI: ArtMethod*, RAX: return value
- // RDX, RCX, R8, R9: free.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Load the class (edx)
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- testl %edx, %edx // Check null class
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ // RDI: mirror::Class*, RAX: return value
+ // RSI, RDX, RCX, R8, R9: free.
// Check if the thread local
// allocation stack has room.
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
movq THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET(%r8), %rcx // rcx = alloc stack top.
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
- jae .Lart_quick_alloc_object_rosalloc_slow_path
+ jae .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Load the object size
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %eax
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %eax
// Check if the size is for a thread
// local allocation. Also does the
// initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
- ja .Lart_quick_alloc_object_rosalloc_slow_path
+ ja .Lart_quick_alloc_object_resolved_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
@@ -1045,7 +1038,7 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
testq %rax, %rax
- jz .Lart_quick_alloc_object_rosalloc_slow_path
+ jz .Lart_quick_alloc_object_resolved_rosalloc_slow_path
-    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber rdi and rsi.
+    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber rsi.
// Push the new object onto the thread
// local allocation stack and
@@ -1066,17 +1059,17 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc
#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ POISON_HEAP_REF edi
+ movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax)
// Decrement the size of the free list
decl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)(%r9)
// No fence necessary for x86.
ret
-.Lart_quick_alloc_object_rosalloc_slow_path:
+.Lart_quick_alloc_object_resolved_rosalloc_slow_path:
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeRosAlloc) // cxx_name(arg0, arg1, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call SYMBOL(artAllocObjectFromCodeResolvedRosAlloc) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
-END_FUNCTION art_quick_alloc_object_rosalloc
+END_FUNCTION art_quick_alloc_object_resolved_rosalloc
@@ -1095,19 +1088,19 @@ END_MACRO
// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
+// RDI: the class, RAX: return value.
+// RCX, RSI, RDX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
END_MACRO
// The fast path code for art_quick_alloc_object_initialized_region_tlab.
//
-// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
-// RCX: scratch, r8: Thread::Current().
+// RDI: the class, RAX: return value.
+// RCX, RSI, RDX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %ecx // Load the object size.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdi), %ecx // Load the object size.
movq THREAD_LOCAL_POS_OFFSET(%r8), %rax
addq %rax, %rcx // Add size to pos, note that these
// are both 32 bit ints, overflow
@@ -1120,8 +1113,8 @@ MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
// Store the class pointer in the
// header.
// No fence needed for x86.
- POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
+ POISON_HEAP_REF edi
+ movl %edi, MIRROR_OBJECT_CLASS_OFFSET(%rax)
ret // Fast path succeeded.
END_MACRO
@@ -1164,12 +1157,14 @@ MACRO1(ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED, slowPathLabel)
ret // Fast path succeeded.
END_MACRO
-// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab.
+
+// The common slow path code for art_quick_alloc_object_{resolved, initialized}_tlab
+// and art_quick_alloc_object_{resolved, initialized}_region_tlab.
MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
SETUP_SAVE_REFS_ONLY_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
@@ -1184,26 +1179,11 @@ MACRO1(ALLOC_ARRAY_TLAB_SLOW_PATH, cxx_name)
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_MACRO
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). May be
-// called with CC if the GC is not active.
-DEFINE_FUNCTION art_quick_alloc_object_tlab
- // RDI: uint32_t type_idx, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- // Might need to break down into multiple instructions to get the base address in a register.
- // Load the class
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
- ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
-.Lart_quick_alloc_object_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB
-END_FUNCTION art_quick_alloc_object_tlab
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
// called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_resolved_tlab
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq %rdi, %rdx
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_tlab_slow_path
.Lart_quick_alloc_object_resolved_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedTLAB
@@ -1212,9 +1192,8 @@ END_FUNCTION art_quick_alloc_object_resolved_tlab
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB).
// May be called with CC if the GC is not active.
DEFINE_FUNCTION art_quick_alloc_object_initialized_tlab
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- movq %rdi, %rdx
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_tlab_slow_path
.Lart_quick_alloc_object_initialized_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedTLAB
@@ -1292,49 +1271,12 @@ DEFINE_FUNCTION art_quick_alloc_array_resolved_region_tlab
ALLOC_ARRAY_TLAB_SLOW_PATH artAllocArrayFromCodeResolvedRegionTLAB
END_FUNCTION art_quick_alloc_array_resolved_region_tlab
-// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB).
-DEFINE_FUNCTION art_quick_alloc_object_region_tlab
- // Fast path region tlab allocation.
- // RDI: uint32_t type_idx, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
- ASSERT_USE_READ_BARRIER
- movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array
- movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx // Load the class
- // Null check so that we can load the lock word.
- testl %edx, %edx
- jz .Lart_quick_alloc_object_region_tlab_slow_path
- // Since we have allocation entrypoint switching, we know the GC is marking.
- // Check the mark bit, if it is 0, do the read barrier mark.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jz .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit:
- // Use resolved one since we already did the null check.
- ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path
-.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_region_tlab_slow_path:
- ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB
-END_FUNCTION art_quick_alloc_object_region_tlab
-
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_resolved_region_tlab
// Fast path region tlab allocation.
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ASSERT_USE_READ_BARRIER
- // No read barrier since the caller is responsible for that.
- movq %rdi, %rdx
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
@@ -1343,10 +1285,9 @@ END_FUNCTION art_quick_alloc_object_resolved_region_tlab
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB).
DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
// Fast path region tlab allocation.
- // RDI: mirror::Class* klass, RSI: ArtMethod*
- // RDX, RCX, R8, R9: free. RAX: return val.
+ // RDI: mirror::Class* klass
+ // RDX, RSI, RCX, R8, R9: free. RAX: return val.
ASSERT_USE_READ_BARRIER
- movq %rdi, %rdx
// No read barrier since the caller is responsible for that.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path
.Lart_quick_alloc_object_initialized_region_tlab_slow_path:
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index e4972da13d..bfdddf7b03 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -98,7 +98,7 @@ ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + 2 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c9b2cc8b68..035ceadeb7 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6933,7 +6933,7 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
method_alignment_);
const size_t old_methods_ptr_size = (old_methods != nullptr) ? old_size : 0;
auto* methods = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- class_linker_->GetAllocatorForClassLoader(klass_->GetClassLoader())->Realloc(
+ Runtime::Current()->GetLinearAlloc()->Realloc(
self_, old_methods, old_methods_ptr_size, new_size));
CHECK(methods != nullptr); // Native allocation failure aborts.
@@ -6953,13 +6953,19 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
StrideIterator<ArtMethod> out(methods->begin(method_size_, method_alignment_) + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
- for (ArtMethod* mir_method : miranda_methods_) {
+ for (size_t i = 0; i < miranda_methods_.size(); ++i) {
+ ArtMethod* mir_method = miranda_methods_[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(mir_method, pointer_size);
new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda | kAccCopied);
DCHECK_NE(new_method.GetAccessFlags() & kAccAbstract, 0u)
<< "Miranda method should be abstract!";
move_table_.emplace(mir_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ miranda_methods_[i] = &new_method;
++out;
}
// We need to copy the default methods into our own method table since the runtime requires that
@@ -6968,9 +6974,10 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
// interface but will have different ArtMethod*s for them. This also means we cannot compare a
// default method found on a class with one found on the declaring interface directly and must
// look at the declaring class to determine if they are the same.
- for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_methods_,
- overriding_default_methods_}) {
- for (ArtMethod* def_method : methods_vec) {
+ for (ScopedArenaVector<ArtMethod*>* methods_vec : {&default_methods_,
+ &overriding_default_methods_}) {
+ for (size_t i = 0; i < methods_vec->size(); ++i) {
+ ArtMethod* def_method = (*methods_vec)[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(def_method, pointer_size);
// Clear the kAccSkipAccessChecks flag if it is present. Since this class hasn't been
@@ -6981,12 +6988,18 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
constexpr uint32_t kMaskFlags = ~kAccSkipAccessChecks;
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
move_table_.emplace(def_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ (*methods_vec)[i] = &new_method;
++out;
}
}
- for (const ScopedArenaVector<ArtMethod*>& methods_vec : {default_conflict_methods_,
- overriding_default_conflict_methods_}) {
- for (ArtMethod* conf_method : methods_vec) {
+ for (ScopedArenaVector<ArtMethod*>* methods_vec : {&default_conflict_methods_,
+ &overriding_default_conflict_methods_}) {
+ for (size_t i = 0; i < methods_vec->size(); ++i) {
+ ArtMethod* conf_method = (*methods_vec)[i];
ArtMethod& new_method = *out;
new_method.CopyFrom(conf_method, pointer_size);
// This is a type of default method (there are default method impls, just a conflict) so
@@ -7002,6 +7015,11 @@ void ClassLinker::LinkInterfaceMethodsHelper::ReallocMethods() {
// that the compiler will not invoke the implementation of whatever method we copied from.
EnsureThrowsInvocationError(class_linker_, &new_method);
move_table_.emplace(conf_method, &new_method);
+ // Update the entry in the method array, as the array will be used for future lookups,
+ // where thread suspension is allowed.
+ // As such, the array should not contain locally allocated ArtMethod, otherwise the GC
+ // would not see them.
+ (*methods_vec)[i] = &new_method;
++out;
}
}
@@ -7034,12 +7052,7 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
default_conflict_methods_,
miranda_methods_}) {
// These are the functions that are not already in the vtable!
- for (ArtMethod* new_method : methods_vec) {
- auto translated_method_it = move_table_.find(new_method);
- CHECK(translated_method_it != move_table_.end())
- << "We must have a translation for methods added to the classes methods_ array! We "
- << "could not find the ArtMethod added for " << ArtMethod::PrettyMethod(new_method);
- ArtMethod* new_vtable_method = translated_method_it->second;
+ for (ArtMethod* new_vtable_method : methods_vec) {
// Leave the declaring class alone the method's dex_code_item_offset_ and dex_method_index_
// fields are references into the dex file the method was defined in. Since the ArtMethod
// does not store that information it uses declaring_class_->dex_cache_.
@@ -7056,7 +7069,6 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
// Try and find what we need to change this method to.
auto translation_it = default_translations.find(i);
- bool found_translation = false;
if (translation_it != default_translations.end()) {
if (translation_it->second.IsInConflict()) {
// Find which conflict method we are to use for this method.
@@ -7080,30 +7092,28 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
// Normal default method (changed from an older default or abstract interface method).
DCHECK(translation_it->second.IsTranslation());
translated_method = translation_it->second.GetTranslation();
+ auto it = move_table_.find(translated_method);
+ DCHECK(it != move_table_.end());
+ translated_method = it->second;
}
- found_translation = true;
+ } else {
+ auto it = move_table_.find(translated_method);
+ translated_method = (it != move_table_.end()) ? it->second : nullptr;
}
- DCHECK(translated_method != nullptr);
- auto it = move_table_.find(translated_method);
- if (it != move_table_.end()) {
- auto* new_method = it->second;
- DCHECK(new_method != nullptr);
+
+ if (translated_method != nullptr) {
// Make sure the new_methods index is set.
- if (new_method->GetMethodIndexDuringLinking() != i) {
+ if (translated_method->GetMethodIndexDuringLinking() != i) {
if (kIsDebugBuild) {
auto* methods = klass_->GetMethodsPtr();
CHECK_LE(reinterpret_cast<uintptr_t>(&*methods->begin(method_size_, method_alignment_)),
- reinterpret_cast<uintptr_t>(new_method));
- CHECK_LT(reinterpret_cast<uintptr_t>(new_method),
+ reinterpret_cast<uintptr_t>(translated_method));
+ CHECK_LT(reinterpret_cast<uintptr_t>(translated_method),
reinterpret_cast<uintptr_t>(&*methods->end(method_size_, method_alignment_)));
}
- new_method->SetMethodIndex(0xFFFF & i);
+ translated_method->SetMethodIndex(0xFFFF & i);
}
- vtable->SetElementPtrSize(i, new_method, pointer_size);
- } else {
- // If it was not going to be updated we wouldn't have put it into the default_translations
- // map.
- CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
+ vtable->SetElementPtrSize(i, translated_method, pointer_size);
}
}
klass_->SetVTable(vtable.Ptr());
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 14c9c21356..469c45c10c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -127,43 +127,21 @@ inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveTyp
self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
}
-template <const bool kAccessCheck>
-ALWAYS_INLINE
-inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
- ArtMethod* method,
- Thread* self,
- bool* slow_path) {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- PointerSize pointer_size = class_linker->GetImagePointerSize();
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, pointer_size);
- if (UNLIKELY(klass == nullptr)) {
- klass = class_linker->ResolveType(type_idx, method);
+ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_) {
+ if (UNLIKELY(!klass->IsInstantiable())) {
+ self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str());
*slow_path = true;
- if (klass == nullptr) {
- DCHECK(self->IsExceptionPending());
- return nullptr; // Failure
- } else {
- DCHECK(!self->IsExceptionPending());
- }
+ return nullptr; // Failure
}
- if (kAccessCheck) {
- if (UNLIKELY(!klass->IsInstantiable())) {
- self->ThrowNewException("Ljava/lang/InstantiationError;", klass->PrettyDescriptor().c_str());
- *slow_path = true;
- return nullptr; // Failure
- }
- if (UNLIKELY(klass->IsClassClass())) {
- ThrowIllegalAccessError(nullptr, "Class %s is inaccessible",
- klass->PrettyDescriptor().c_str());
- *slow_path = true;
- return nullptr; // Failure
- }
- mirror::Class* referrer = method->GetDeclaringClass();
- if (UNLIKELY(!referrer->CanAccess(klass))) {
- ThrowIllegalAccessErrorClass(referrer, klass);
- *slow_path = true;
- return nullptr; // Failure
- }
+ if (UNLIKELY(klass->IsClassClass())) {
+ ThrowIllegalAccessError(nullptr, "Class %s is inaccessible",
+ klass->PrettyDescriptor().c_str());
+ *slow_path = true;
+ return nullptr; // Failure
}
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
@@ -191,7 +169,9 @@ inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
ALWAYS_INLINE
inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
- bool* slow_path) {
+ bool* slow_path)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_) {
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -213,18 +193,15 @@ inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
return klass;
}
-// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
-// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-template <bool kAccessCheck, bool kInstrumented>
+// Allocate an instance of klass. Throws InstantiationError if klass is not instantiable,
+// or IllegalAccessError if klass is j.l.Class. Performs a clinit check too.
+template <bool kInstrumented>
ALWAYS_INLINE
-inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx,
- ArtMethod* method,
+inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type) {
bool slow_path = false;
- mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
+ klass = CheckObjectAlloc(klass, self, &slow_path);
if (UNLIKELY(slow_path)) {
if (klass == nullptr) {
return nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 7cc136e227..4794610ca8 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -45,27 +45,10 @@ class OatQuickMethodHeader;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
-template <const bool kAccessCheck>
-ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(dex::TypeIndex type_idx,
- ArtMethod* method,
- Thread* self,
- bool* slow_path)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
-ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self,
- bool* slow_path)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Roles::uninterruptible_);
-
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
-// When verification/compiler hasn't been able to verify access, optionally perform an access
-// check.
-template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(dex::TypeIndex type_idx,
- ArtMethod* method,
+template <bool kInstrumented>
+ALWAYS_INLINE inline mirror::Object* AllocObjectFromCode(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_)
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 82bb8e53c6..2d06508069 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,87 +29,58 @@ namespace art {
static constexpr bool kUseTlabFastPath = true;
+template <bool kInitialized,
+ bool kFinalize,
+ bool kInstrumented,
+ gc::AllocatorType allocator_type>
+static ALWAYS_INLINE inline mirror::Object* artAllocObjectFromCode(
+ mirror::Class* klass,
+ Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK(klass != nullptr);
+ if (kUseTlabFastPath && !kInstrumented && allocator_type == gc::kAllocatorTypeTLAB) {
+ if (kInitialized || klass->IsInitialized()) {
+ if (!kFinalize || !klass->IsFinalizable()) {
+ size_t byte_count = klass->GetObjectSize();
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment);
+ mirror::Object* obj;
+ if (LIKELY(byte_count < self->TlabSize())) {
+ obj = self->AllocTlab(byte_count);
+ DCHECK(obj != nullptr) << "AllocTlab can't fail";
+ obj->SetClass(klass);
+ if (kUseBakerReadBarrier) {
+ obj->AssertReadBarrierState();
+ }
+ QuasiAtomic::ThreadFenceForConstructor();
+ return obj;
+ }
+ }
+ }
+ }
+ if (kInitialized) {
+ return AllocObjectFromCodeInitialized<kInstrumented>(klass, self, allocator_type);
+ } else if (!kFinalize) {
+ return AllocObjectFromCodeResolved<kInstrumented>(klass, self, allocator_type);
+ } else {
+ return AllocObjectFromCode<kInstrumented>(klass, self, allocator_type);
+ }
+}
+
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
-extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, ArtMethod* method, Thread* self) \
+extern "C" mirror::Object* artAllocObjectFromCodeWithChecks##suffix##suffix2( \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- mirror::Class* klass = method->GetDexCacheResolvedType<false>(dex::TypeIndex(type_idx), \
- kRuntimePointerSize); \
- if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- } \
- return AllocObjectFromCode<false, instrumented_bool>(dex::TypeIndex(type_idx), \
- method, \
- self, \
- allocator_type); \
+ return artAllocObjectFromCode<false, true, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- if (LIKELY(klass->IsInitialized())) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- } \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
+ return artAllocObjectFromCode<false, false, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
- size_t byte_count = klass->GetObjectSize(); \
- byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
- mirror::Object* obj; \
- if (LIKELY(byte_count < self->TlabSize())) { \
- obj = self->AllocTlab(byte_count); \
- DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
- obj->SetClass(klass); \
- if (kUseBakerReadBarrier) { \
- obj->AssertReadBarrierState(); \
- } \
- QuasiAtomic::ThreadFenceForConstructor(); \
- return obj; \
- } \
- } \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
-} \
-extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
- uint32_t type_idx, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, Thread* self) \
REQUIRES_SHARED(Locks::mutator_lock_) { \
- ScopedQuickEntrypointChecks sqec(self); \
- return AllocObjectFromCode<true, instrumented_bool>(dex::TypeIndex(type_idx), \
- method, \
- self, \
- allocator_type); \
+ return artAllocObjectFromCode<true, false, instrumented_bool, allocator_type>(klass, self); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
@@ -220,10 +191,9 @@ GENERATE_ENTRYPOINTS_FOR_ALLOCATOR(RegionTLAB, gc::kAllocatorTypeRegionTLAB)
extern "C" void* art_quick_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_resolved##suffix(mirror::Class* klass, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_initialized##suffix(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_with_checks##suffix(mirror::Class* klass); \
extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix(void*, int32_t, int32_t, int32_t); \
@@ -233,9 +203,9 @@ extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, int32_t,
extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(mirror::Class* klass, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass, ArtMethod* ref); \
-extern "C" void* art_quick_alloc_object_with_access_check##suffix##_instrumented(uint32_t type_idx, ArtMethod* ref); \
+extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_initialized##suffix##_instrumented(mirror::Class* klass); \
+extern "C" void* art_quick_alloc_object_with_checks##suffix##_instrumented(mirror::Class* klass); \
extern "C" void* art_quick_check_and_alloc_array##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented(uint32_t, int32_t, ArtMethod* ref); \
extern "C" void* art_quick_alloc_string_from_bytes##suffix##_instrumented(void*, int32_t, int32_t, int32_t); \
@@ -246,10 +216,9 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument
qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix##_instrumented; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix##_instrumented; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix##_instrumented; \
+ qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix##_instrumented; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix##_instrumented; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \
qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix##_instrumented; \
@@ -259,10 +228,9 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument
qpoints->pAllocArray = art_quick_alloc_array##suffix; \
qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \
qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \
- qpoints->pAllocObject = art_quick_alloc_object##suffix; \
qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \
qpoints->pAllocObjectInitialized = art_quick_alloc_object_initialized##suffix; \
- qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check##suffix; \
+ qpoints->pAllocObjectWithChecks = art_quick_alloc_object_with_checks##suffix; \
qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array##suffix; \
qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix; \
qpoints->pAllocStringFromBytes = art_quick_alloc_string_from_bytes##suffix; \
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index a1c5082c93..0911aeb0f4 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -23,10 +23,9 @@
V(AllocArray, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocArrayResolved, void*, mirror::Class*, int32_t, ArtMethod*) \
V(AllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
- V(AllocObject, void*, uint32_t, ArtMethod*) \
- V(AllocObjectResolved, void*, mirror::Class*, ArtMethod*) \
- V(AllocObjectInitialized, void*, mirror::Class*, ArtMethod*) \
- V(AllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*) \
+ V(AllocObjectResolved, void*, mirror::Class*) \
+ V(AllocObjectInitialized, void*, mirror::Class*) \
+ V(AllocObjectWithChecks, void*, mirror::Class*) \
V(CheckAndAllocArray, void*, uint32_t, int32_t, ArtMethod*) \
V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*) \
V(AllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t) \
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 12836602d5..6866abb6ae 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -122,9 +122,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
// Skip across the entrypoints structures.
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_start, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_objects, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(size_t));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
@@ -156,13 +156,13 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArray, pAllocArrayResolved, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayResolved, pAllocArrayWithAccessCheck,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObject, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObject, pAllocObjectResolved, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocArrayWithAccessCheck, pAllocObjectResolved,
+ sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectResolved, pAllocObjectInitialized,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithAccessCheck,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectInitialized, pAllocObjectWithChecks,
sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithAccessCheck, pCheckAndAllocArray,
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAllocObjectWithChecks, pCheckAndAllocArray,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckAndAllocArray, pCheckAndAllocArrayWithAccessCheck,
sizeof(void*));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e1117e6ea3..7b86339663 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -2406,16 +2406,29 @@ void ConcurrentCopying::FinishPhase() {
}
}
-bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
+bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) {
mirror::Object* from_ref = field->AsMirrorPtr();
+ if (from_ref == nullptr) {
+ return true;
+ }
mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
if (from_ref != to_ref) {
- QuasiAtomic::ThreadFenceRelease();
- field->Assign(to_ref);
- QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ if (do_atomic_update) {
+ do {
+ if (field->AsMirrorPtr() != from_ref) {
+ // Concurrently overwritten by a mutator.
+ break;
+ }
+ } while (!field->CasWeakRelaxed(from_ref, to_ref));
+ } else {
+ QuasiAtomic::ThreadFenceRelease();
+ field->Assign(to_ref);
+ QuasiAtomic::ThreadFenceSequentiallyConsistent();
+ }
}
return true;
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b8a557375..844bb450cc 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -183,7 +183,8 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 5b513991d1..0177e2a1ad 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -187,7 +187,10 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+  // Returns true if the given heap reference is null or is already marked. If it's already
+  // marked, updates the reference (using a CAS if do_atomic_update is true). Otherwise,
+  // returns false.
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index ddcb6c0698..85e6783599 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -472,9 +472,15 @@ mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
return mark_bitmap_->Test(object) ? object : nullptr;
}
-bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr,
+ // MarkCompact does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
// Side effect free since we call this before ever moving objects.
- return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
+ mirror::Object* obj = ref_ptr->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 564f85b3f8..6d52d5d515 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -175,7 +175,8 @@ class MarkCompact : public GarbageCollector {
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) OVERRIDE
REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 06ed0290a9..f00da73458 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -390,8 +390,13 @@ inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
}
}
-bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
- return IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ mirror::Object* obj = ref->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
+ return IsMarked(obj);
}
class MarkSweep::MarkObjectSlowPath {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 02cf462bd3..a6e2d61f6d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -188,7 +188,8 @@ class MarkSweep : public GarbageCollector {
void VerifyIsLive(const mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f2aa5a7599..cb9e7e2c15 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -765,8 +765,13 @@ mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
-bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ // SemiSpace does the GC in a pause. No CAS needed.
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
mirror::Object* obj = object->AsMirrorPtr();
+ if (obj == nullptr) {
+ return true;
+ }
mirror::Object* new_obj = IsMarked(obj);
if (new_obj == nullptr) {
return false;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 4cebcc3044..52b5e5fe30 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -166,7 +166,8 @@ class SemiSpace : public GarbageCollector {
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
- virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
+ virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
+ bool do_atomic_update) OVERRIDE
REQUIRES(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 081be968eb..c1548365c7 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -203,7 +203,9 @@ void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
DCHECK(klass != nullptr);
DCHECK(klass->IsTypeOfReferenceClass());
mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
- if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
+ // do_atomic_update needs to be true because this happens outside of the reference processing
+ // phase.
+ if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) {
Thread* self = Thread::Current();
// TODO: Remove these locks, and use atomic stacks for storing references?
// We need to check that the references haven't already been enqueued since we can end up
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index a0eb197bd5..734caea371 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -129,8 +129,9 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
while (!IsEmpty()) {
ObjPtr<mirror::Reference> ref = DequeuePendingReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
// Referent is white, clear it.
if (Runtime::Current()->IsActiveTransaction()) {
ref->ClearReferent<true>();
@@ -147,8 +148,9 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc
while (!IsEmpty()) {
ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
- if (referent_addr->AsMirrorPtr() != nullptr &&
- !collector->IsMarkedHeapReference(referent_addr)) {
+ // do_atomic_update is false because this happens during the reference processing phase where
+ // Reference.clear() would block.
+ if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
// Move the updated referent to the zombie field.
if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index b0d7fb247a..d7dfcd4408 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -508,9 +508,8 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<do_access_check, true>(
- dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame.GetMethod(),
+ obj = AllocObjectFromCode<true>(
+ c.Ptr(),
self,
Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index c8c1563ff6..369c2614a7 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -375,10 +375,9 @@ extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
} else {
- obj = AllocObjectFromCode<false, true>(dex::TypeIndex(inst->VRegB_21c()),
- shadow_frame->GetMethod(),
- self,
- Runtime::Current()->GetHeap()->GetCurrentAllocator());
+ obj = AllocObjectFromCode<true>(c,
+ self,
+ Runtime::Current()->GetHeap()->GetCurrentAllocator());
}
}
if (UNLIKELY(obj == nullptr)) {
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index f80c43d80c..e0f28adc4f 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -566,7 +566,10 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
return nullptr;
}
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
- while (UNLIKELY(!MayAccessWeakGlobals(self))) {
+ // CMS needs this to block for concurrent reference processing because an object allocated during
+ // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
+ // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!kUseReadBarrier && UNLIKELY(!MayAccessWeakGlobals(self))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpoint();
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index e70b93607e..22fb83cb5c 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -34,6 +34,15 @@ HeapReference<MirrorType> HeapReference<MirrorType>::FromObjPtr(ObjPtr<MirrorTyp
return HeapReference<MirrorType>(ptr.Ptr());
}
+template<class MirrorType>
+bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
+ HeapReference<Object> expected_ref(HeapReference<Object>::FromMirrorPtr(expected_ptr));
+ HeapReference<Object> new_ref(HeapReference<Object>::FromMirrorPtr(new_ptr));
+ Atomic<uint32_t>* atomic_reference = reinterpret_cast<Atomic<uint32_t>*>(&this->reference_);
+ return atomic_reference->CompareExchangeWeakRelaxed(expected_ref.reference_,
+ new_ref.reference_);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 71f34c66e2..a96a120d68 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -94,6 +94,9 @@ class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, Mirr
static HeapReference<MirrorType> FromObjPtr(ObjPtr<MirrorType> ptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ bool CasWeakRelaxed(MirrorType* old_ptr, MirrorType* new_ptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
explicit HeapReference(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 893abd5462..9c0927584e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1361,8 +1361,10 @@ void MonitorList::BroadcastForNewMonitors() {
void MonitorList::Add(Monitor* m) {
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
- while (UNLIKELY((!kUseReadBarrier && !allow_new_monitors_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ // CMS needs this to block for concurrent reference processing because an object allocated during
+ // the GC won't be marked and concurrent reference processing would incorrectly clear the monitor's
+ // weak root. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpoint();
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 36825cb870..268d71ac65 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -17,6 +17,7 @@
#include "dalvik_system_VMStack.h"
#include "art_method-inl.h"
+#include "gc/task_processor.h"
#include "jni_internal.h"
#include "nth_caller_visitor.h"
#include "mirror/class-inl.h"
@@ -31,9 +32,18 @@ namespace art {
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
REQUIRES_SHARED(Locks::mutator_lock_) {
jobject trace = nullptr;
- if (soa.Decode<mirror::Object>(peer) == soa.Self()->GetPeer()) {
+ ObjPtr<mirror::Object> decoded_peer = soa.Decode<mirror::Object>(peer);
+ if (decoded_peer == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
} else {
+ // Never allow suspending the heap task thread since it may deadlock if allocations are
+ // required for the stack trace.
+ Thread* heap_task_thread =
+ Runtime::Current()->GetHeap()->GetTaskProcessor()->GetRunningThread();
+ // heap_task_thread could be null if the daemons aren't yet started.
+ if (heap_task_thread != nullptr && decoded_peer == heap_task_thread->GetPeer()) {
+ return nullptr;
+ }
// Suspend thread to build stack trace.
ScopedThreadSuspension sts(soa.Self(), kNative);
ThreadList* thread_list = Runtime::Current()->GetThreadList();
diff --git a/runtime/oat.h b/runtime/oat.h
index 1fd906dc1b..dc103e2b52 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '9', '4', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '9', '5', '\0' }; // alloc entrypoints change
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index b757b2114f..4bd21b4c2f 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -29,6 +29,7 @@ cc_defaults {
"ti_properties.cc",
"ti_stack.cc",
"ti_redefine.cc",
+ "ti_thread.cc",
"transform.cc"],
include_dirs: ["art/runtime"],
shared_libs: [
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index c52dd76b59..2629c9fc07 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -55,6 +55,7 @@
#include "ti_properties.h"
#include "ti_redefine.h"
#include "ti_stack.h"
+#include "ti_thread.h"
#include "transform.h"
// TODO Remove this at some point by annotating all the methods. It was put in to make the skeleton
@@ -117,11 +118,11 @@ class JvmtiFunctions {
}
static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetThreadState(env, thread, thread_state_ptr);
}
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetCurrentThread(env, thread_ptr);
}
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr) {
@@ -159,7 +160,7 @@ class JvmtiFunctions {
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return ThreadUtil::GetThreadInfo(env, thread, info_ptr);
}
static jvmtiError GetOwnedMonitorInfo(jvmtiEnv* env,
@@ -237,7 +238,7 @@ class JvmtiFunctions {
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
jint* thread_count_ptr) {
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::GetAllStackTraces(env, max_frame_count, stack_info_ptr, thread_count_ptr);
}
static jvmtiError GetThreadListStackTraces(jvmtiEnv* env,
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 579fb50ecc..098cedbffa 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -31,9 +31,15 @@
#include "ti_stack.h"
+#include <list>
+#include <unordered_map>
+#include <vector>
+
#include "art_jvmti.h"
#include "art_method-inl.h"
+#include "base/bit_utils.h"
#include "base/enums.h"
+#include "base/mutex.h"
#include "dex_file.h"
#include "dex_file_annotations.h"
#include "jni_env_ext.h"
@@ -41,19 +47,19 @@
#include "mirror/class.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change-inl.h"
+#include "ScopedLocalRef.h"
#include "stack.h"
-#include "thread.h"
+#include "thread-inl.h"
+#include "thread_list.h"
#include "thread_pool.h"
namespace openjdkjvmti {
struct GetStackTraceVisitor : public art::StackVisitor {
GetStackTraceVisitor(art::Thread* thread_in,
- art::ScopedObjectAccessAlreadyRunnable& soa_,
size_t start_,
size_t stop_)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- soa(soa_),
start(start_),
stop(stop_) {}
@@ -85,7 +91,6 @@ struct GetStackTraceVisitor : public art::StackVisitor {
return true;
}
- art::ScopedObjectAccessAlreadyRunnable& soa;
std::vector<jvmtiFrameInfo> frames;
size_t start;
size_t stop;
@@ -99,10 +104,8 @@ struct GetStackTraceClosure : public art::Closure {
start_result(0),
stop_result(0) {}
- void Run(art::Thread* self) OVERRIDE {
- art::ScopedObjectAccess soa(art::Thread::Current());
-
- GetStackTraceVisitor visitor(self, soa, start_input, stop_input);
+ void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetStackTraceVisitor visitor(self, start_input, stop_input);
visitor.WalkStack(false);
frames.swap(visitor.frames);
@@ -118,6 +121,44 @@ struct GetStackTraceClosure : public art::Closure {
size_t stop_result;
};
+static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
+ jint start_depth,
+ size_t start_result,
+ jint max_frame_count,
+ jvmtiFrameInfo* frame_buffer,
+ jint* count_ptr) {
+ size_t collected_frames = frames.size();
+
+ // Assume we're here having collected something.
+ DCHECK_GT(max_frame_count, 0);
+
+ // Frames from the top.
+ if (start_depth >= 0) {
+ if (start_result != 0) {
+ // Not enough frames.
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+ if (frames.size() > 0) {
+ memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
+ }
+ *count_ptr = static_cast<jint>(frames.size());
+ return ERR(NONE);
+ }
+
+ // Frames from the bottom.
+ if (collected_frames < static_cast<size_t>(-start_depth)) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+
+ size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
+ memcpy(frame_buffer,
+ &frames.data()[collected_frames + start_depth],
+ count * sizeof(jvmtiFrameInfo));
+ *count_ptr = static_cast<jint>(count);
+ return ERR(NONE);
+}
+
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
jthread java_thread,
jint start_depth,
@@ -157,35 +198,179 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
}
GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
- start_depth >= 0 ?static_cast<size_t>(max_frame_count) : 0);
+ start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
thread->RequestSynchronousCheckpoint(&closure);
- size_t collected_frames = closure.frames.size();
+ return TranslateFrameVector(closure.frames,
+ start_depth,
+ closure.start_result,
+ max_frame_count,
+ frame_buffer,
+ count_ptr);
+}
- // Frames from the top.
- if (start_depth >= 0) {
- if (closure.start_result != 0) {
- // Not enough frames.
- return ERR(ILLEGAL_ARGUMENT);
+struct GetAllStackTraceClosure : public art::Closure {
+ public:
+ explicit GetAllStackTraceClosure(size_t stop)
+ : start_input(0),
+ stop_input(stop),
+ frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
+ start_result(0),
+ stop_result(0) {}
+
+ void Run(art::Thread* self)
+ OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
+ // self should be live here (so it could be suspended). No need to filter.
+
+ art::Thread* current = art::Thread::Current();
+ std::vector<jvmtiFrameInfo> self_frames;
+
+ GetStackTraceVisitor visitor(self, start_input, stop_input);
+ visitor.WalkStack(false);
+
+ self_frames.swap(visitor.frames);
+
+ art::MutexLock mu(current, frames_lock);
+ frames.emplace(self, self_frames);
+ }
+
+ const size_t start_input;
+ const size_t stop_input;
+
+ art::Mutex frames_lock;
+ std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
+ size_t start_result;
+ size_t stop_result;
+};
+
+
+
+jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr,
+ jint* thread_count_ptr) {
+ if (max_frame_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+
+ art::Thread* current = art::Thread::Current();
+ art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
+ art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
+ art::ScopedSuspendAll ssa("GetAllStackTraces");
+
+ std::vector<art::Thread*> threads;
+ std::vector<std::vector<jvmtiFrameInfo>> frames;
+ {
+ std::list<art::Thread*> thread_list;
+ {
+ art::MutexLock mu(current, *art::Locks::thread_list_lock_);
+ thread_list = art::Runtime::Current()->GetThreadList()->GetList();
}
- DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
- if (closure.frames.size() > 0) {
- memcpy(frame_buffer, closure.frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
+
+ for (art::Thread* thread : thread_list) {
+ // Skip threads that are still starting.
+ if (thread->IsStillStarting()) {
+ continue;
+ }
+
+ GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
+ thread->RequestSynchronousCheckpoint(&closure);
+
+ threads.push_back(thread);
+ frames.emplace_back();
+ frames.back().swap(closure.frames);
}
- *count_ptr = static_cast<jint>(closure.frames.size());
- return ERR(NONE);
}
- // Frames from the bottom.
- if (collected_frames < static_cast<size_t>(-start_depth)) {
- return ERR(ILLEGAL_ARGUMENT);
+ // Convert the data into our output format. Note: we need to keep the threads suspended,
+ // as we need to access them for their peers.
+
+ // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
+ // allocate one big chunk for this and the actual frames, which means we need
+ // to either be conservative or rearrange things later (the latter is implemented).
+ std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
+ std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
+ frame_infos.reserve(frames.size());
+
+ // Now run through and add data for each thread.
+ size_t sum_frames = 0;
+ for (size_t index = 0; index < frames.size(); ++index) {
+ jvmtiStackInfo& stack_info = stack_info_array.get()[index];
+ memset(&stack_info, 0, sizeof(jvmtiStackInfo));
+
+ art::Thread* self = threads[index];
+ const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
+
+ // For the time being, set the thread to null. We don't have good ScopedLocalRef
+ // infrastructure.
+ DCHECK(self->GetPeer() != nullptr);
+ stack_info.thread = nullptr;
+ stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
+
+ size_t collected_frames = thread_frames.size();
+ if (max_frame_count == 0 || collected_frames == 0) {
+ stack_info.frame_count = 0;
+ stack_info.frame_buffer = nullptr;
+ continue;
+ }
+ DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
+
+ jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
+ frame_infos.emplace_back(frame_info);
+
+ jint count;
+ jvmtiError translate_result = TranslateFrameVector(thread_frames,
+ 0,
+ 0,
+ static_cast<jint>(collected_frames),
+ frame_info,
+ &count);
+ DCHECK(translate_result == JVMTI_ERROR_NONE);
+ stack_info.frame_count = static_cast<jint>(collected_frames);
+ stack_info.frame_buffer = frame_info;
+ sum_frames += static_cast<size_t>(count);
}
- size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
- memcpy(frame_buffer,
- &closure.frames.data()[collected_frames + start_depth],
- count * sizeof(jvmtiFrameInfo));
- *count_ptr = static_cast<jint>(count);
+ // No errors, yet. Now put it all into an output buffer.
+ size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * frames.size(),
+ alignof(jvmtiFrameInfo));
+ size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
+ unsigned char* chunk_data;
+ jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
+ if (alloc_result != ERR(NONE)) {
+ return alloc_result;
+ }
+
+ jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
+ // First copy in all the basic data.
+ memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * frames.size());
+
+ // Now copy the frames and fix up the pointers.
+ jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
+ chunk_data + rounded_stack_info_size);
+ for (size_t i = 0; i < frames.size(); ++i) {
+ jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
+ jvmtiStackInfo& new_stack_info = stack_info[i];
+
+ jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+ new_stack_info.thread = thread_peer;
+
+ if (old_stack_info.frame_count > 0) {
+ // Only copy when there's data - leave the nullptr alone.
+ size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
+ memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
+ new_stack_info.frame_buffer = frame_info;
+ frame_info += old_stack_info.frame_count;
+ }
+ }
+
+ *stack_info_ptr = stack_info;
+ *thread_count_ptr = static_cast<jint>(frames.size());
+
return ERR(NONE);
}
diff --git a/runtime/openjdkjvmti/ti_stack.h b/runtime/openjdkjvmti/ti_stack.h
index 1931ed3113..7619f98daf 100644
--- a/runtime/openjdkjvmti/ti_stack.h
+++ b/runtime/openjdkjvmti/ti_stack.h
@@ -32,12 +32,21 @@
#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
#define ART_RUNTIME_OPENJDKJVMTI_TI_STACK_H_
+#include "jni.h"
#include "jvmti.h"
+#include "base/mutex.h"
+
namespace openjdkjvmti {
class StackUtil {
public:
+ static jvmtiError GetAllStackTraces(jvmtiEnv* env,
+ jint max_frame_count,
+ jvmtiStackInfo** stack_info_ptr,
+ jint* thread_count_ptr)
+ REQUIRES(!art::Locks::thread_list_lock_);
+
static jvmtiError GetStackTrace(jvmtiEnv* env,
jthread thread,
jint start_depth,
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
new file mode 100644
index 0000000000..e20f5605d8
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -0,0 +1,357 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "ti_thread.h"
+
+#include "art_field.h"
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "jni_internal.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+#include "mirror/string.h"
+#include "obj_ptr.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "well_known_classes.h"
+
+namespace openjdkjvmti {
+
+jvmtiError ThreadUtil::GetCurrentThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread* thread_ptr) {
+ art::Thread* self = art::Thread::Current();
+
+ art::ScopedObjectAccess soa(self);
+
+ jthread thread_peer;
+ if (self->IsStillStarting()) {
+ thread_peer = nullptr;
+ } else {
+ thread_peer = soa.AddLocalReference<jthread>(self->GetPeer());
+ }
+
+ *thread_ptr = thread_peer;
+ return ERR(NONE);
+}
+
+// Read the context classloader from a Java thread object. This is a lazy implementation
+// that assumes GetThreadInfo isn't called too often. If we instead cache the ArtField,
+// we will have to add synchronization as this can't be cached on startup (which is
+// potentially runtime startup).
+static art::ObjPtr<art::mirror::Object> GetContextClassLoader(art::ObjPtr<art::mirror::Object> peer)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (peer == nullptr) {
+ return nullptr;
+ }
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* cc_field = klass->FindDeclaredInstanceField("contextClassLoader",
+ "Ljava/lang/ClassLoader;");
+ CHECK(cc_field != nullptr);
+ return cc_field->GetObject(peer);
+}
+
+// Get the native thread. The spec says a null object denotes the current thread.
+static art::Thread* GetNativeThread(jthread thread,
+ const art::ScopedObjectAccessAlreadyRunnable& soa)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (thread == nullptr) {
+ return art::Thread::Current();
+ }
+
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ return art::Thread::FromManagedThread(soa, thread);
+}
+
+jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
+ if (info_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+
+ art::Thread* self = GetNativeThread(thread, soa);
+ if (self == nullptr && thread == nullptr) {
+ return ERR(INVALID_THREAD);
+ }
+
+ JvmtiUniquePtr name_uptr;
+ if (self != nullptr) {
+ // Have a native thread object, this thread is alive.
+ std::string name;
+ self->GetThreadName(name);
+ jvmtiError name_result = CopyString(
+ env, name.c_str(), reinterpret_cast<unsigned char**>(&info_ptr->name));
+ if (name_result != ERR(NONE)) {
+ return name_result;
+ }
+ name_uptr = MakeJvmtiUniquePtr(env, info_ptr->name);
+
+ info_ptr->priority = self->GetNativePriority();
+
+ info_ptr->is_daemon = self->IsDaemon();
+
+ art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+
+ // ThreadGroup.
+ if (peer != nullptr) {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_group);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> group = f->GetObject(peer);
+ info_ptr->thread_group = group == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jthreadGroup>(group);
+ } else {
+ info_ptr->thread_group = nullptr;
+ }
+
+ // Context classloader.
+ art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ info_ptr->context_class_loader = ccl == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jobject>(ccl);
+ } else {
+ // Only the peer. This thread has either not been started, or is dead. Read things from
+ // the Java side.
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread);
+
+ // Name.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_name);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> name = f->GetObject(peer);
+ std::string name_cpp;
+ const char* name_cstr;
+ if (name != nullptr) {
+ name_cpp = name->AsString()->ToModifiedUtf8();
+ name_cstr = name_cpp.c_str();
+ } else {
+ name_cstr = "";
+ }
+ jvmtiError name_result = CopyString(
+ env, name_cstr, reinterpret_cast<unsigned char**>(&info_ptr->name));
+ if (name_result != ERR(NONE)) {
+ return name_result;
+ }
+ name_uptr = MakeJvmtiUniquePtr(env, info_ptr->name);
+ }
+
+ // Priority.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_priority);
+ CHECK(f != nullptr);
+ info_ptr->priority = static_cast<jint>(f->GetInt(peer));
+ }
+
+ // Daemon.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_daemon);
+ CHECK(f != nullptr);
+ info_ptr->is_daemon = f->GetBoolean(peer) == 0 ? JNI_FALSE : JNI_TRUE;
+ }
+
+ // ThreadGroup.
+ {
+ art::ArtField* f = art::jni::DecodeArtField(art::WellKnownClasses::java_lang_Thread_group);
+ CHECK(f != nullptr);
+ art::ObjPtr<art::mirror::Object> group = f->GetObject(peer);
+ info_ptr->thread_group = group == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jthreadGroup>(group);
+ }
+
+ // Context classloader.
+ art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ info_ptr->context_class_loader = ccl == nullptr
+ ? nullptr
+ : soa.AddLocalReference<jobject>(ccl);
+ }
+
+ name_uptr.release();
+
+ return ERR(NONE);
+}
+
+// Return the thread's (or current thread, if null) thread state. Return kStarting in case
+// there's no native counterpart (thread hasn't been started yet, or is dead).
+static art::ThreadState GetNativeThreadState(jthread thread,
+ const art::ScopedObjectAccessAlreadyRunnable& soa,
+ art::Thread** native_thread)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Thread* self = nullptr;
+ art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
+ if (thread == nullptr) {
+ self = art::Thread::Current();
+ } else {
+ self = art::Thread::FromManagedThread(soa, thread);
+ }
+ *native_thread = self;
+ if (self == nullptr || self->IsStillStarting()) {
+ return art::ThreadState::kStarting;
+ }
+ return self->GetState();
+}
+
+static jint GetJvmtiThreadStateFromInternal(art::ThreadState internal_thread_state) {
+ jint jvmti_state = JVMTI_THREAD_STATE_ALIVE;
+
+ if (internal_thread_state == art::ThreadState::kSuspended) {
+ jvmti_state |= JVMTI_THREAD_STATE_SUSPENDED;
+ // Note: We do not have data about the previous state. Otherwise we should load the previous
+ // state here.
+ }
+
+ if (internal_thread_state == art::ThreadState::kNative) {
+ jvmti_state |= JVMTI_THREAD_STATE_IN_NATIVE;
+ }
+
+ if (internal_thread_state == art::ThreadState::kRunnable ||
+ internal_thread_state == art::ThreadState::kWaitingWeakGcRootRead ||
+ internal_thread_state == art::ThreadState::kSuspended) {
+ jvmti_state |= JVMTI_THREAD_STATE_RUNNABLE;
+ } else if (internal_thread_state == art::ThreadState::kBlocked) {
+ jvmti_state |= JVMTI_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
+ } else {
+ // Should be in waiting state.
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING;
+
+ if (internal_thread_state == art::ThreadState::kTimedWaiting ||
+ internal_thread_state == art::ThreadState::kSleeping) {
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING_WITH_TIMEOUT;
+ } else {
+ jvmti_state |= JVMTI_THREAD_STATE_WAITING_INDEFINITELY;
+ }
+
+ if (internal_thread_state == art::ThreadState::kSleeping) {
+ jvmti_state |= JVMTI_THREAD_STATE_SLEEPING;
+ }
+
+ if (internal_thread_state == art::ThreadState::kTimedWaiting ||
+ internal_thread_state == art::ThreadState::kWaiting) {
+ jvmti_state |= JVMTI_THREAD_STATE_IN_OBJECT_WAIT;
+ }
+
+ // TODO: PARKED. We'll have to inspect the stack.
+ }
+
+ return jvmti_state;
+}
+
+static jint GetJavaStateFromInternal(art::ThreadState internal_thread_state) {
+ switch (internal_thread_state) {
+ case art::ThreadState::kTerminated:
+ return JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+
+ case art::ThreadState::kRunnable:
+ case art::ThreadState::kNative:
+ case art::ThreadState::kWaitingWeakGcRootRead:
+ case art::ThreadState::kSuspended:
+ return JVMTI_JAVA_LANG_THREAD_STATE_RUNNABLE;
+
+ case art::ThreadState::kTimedWaiting:
+ case art::ThreadState::kSleeping:
+ return JVMTI_JAVA_LANG_THREAD_STATE_TIMED_WAITING;
+
+ case art::ThreadState::kBlocked:
+ return JVMTI_JAVA_LANG_THREAD_STATE_BLOCKED;
+
+ case art::ThreadState::kStarting:
+ return JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+
+ case art::ThreadState::kWaiting:
+ case art::ThreadState::kWaitingForGcToComplete:
+ case art::ThreadState::kWaitingPerformingGc:
+ case art::ThreadState::kWaitingForCheckPointsToRun:
+ case art::ThreadState::kWaitingForDebuggerSend:
+ case art::ThreadState::kWaitingForDebuggerToAttach:
+ case art::ThreadState::kWaitingInMainDebuggerLoop:
+ case art::ThreadState::kWaitingForDebuggerSuspension:
+ case art::ThreadState::kWaitingForDeoptimization:
+ case art::ThreadState::kWaitingForGetObjectsAllocated:
+ case art::ThreadState::kWaitingForJniOnLoad:
+ case art::ThreadState::kWaitingForSignalCatcherOutput:
+ case art::ThreadState::kWaitingInMainSignalCatcherLoop:
+ case art::ThreadState::kWaitingForMethodTracingStart:
+ case art::ThreadState::kWaitingForVisitObjects:
+ case art::ThreadState::kWaitingForGcThreadFlip:
+ return JVMTI_JAVA_LANG_THREAD_STATE_WAITING;
+ }
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+}
+
+jvmtiError ThreadUtil::GetThreadState(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ jthread thread,
+ jint* thread_state_ptr) {
+ if (thread_state_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::Thread* native_thread = nullptr;
+ art::ThreadState internal_thread_state = GetNativeThreadState(thread, soa, &native_thread);
+
+ if (internal_thread_state == art::ThreadState::kStarting) {
+ if (thread == nullptr) {
+ // No native thread, and no Java thread? We must be starting up. Report as wrong phase.
+ return ERR(WRONG_PHASE);
+ }
+
+ // Need to read the Java "started" field to know whether this is starting or terminated.
+ art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread);
+ art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
+ art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
+ CHECK(started_field != nullptr);
+ bool started = started_field->GetBoolean(peer) != 0;
+ constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
+ constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
+ JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
+ *thread_state_ptr = started ? kTerminatedState : kStartedState;
+ return ERR(NONE);
+ }
+ DCHECK(native_thread != nullptr);
+
+ // Translate internal thread state to JVMTI and Java state.
+ jint jvmti_state = GetJvmtiThreadStateFromInternal(internal_thread_state);
+ if (native_thread->IsInterrupted()) {
+ jvmti_state |= JVMTI_THREAD_STATE_INTERRUPTED;
+ }
+
+ // Java state is derived from nativeGetState.
+ // Note: Our implementation assigns "runnable" to suspended. As such, we will have a slightly
+ // different mask. However, this is for consistency with the Java view.
+ jint java_state = GetJavaStateFromInternal(internal_thread_state);
+
+ *thread_state_ptr = jvmti_state | java_state;
+
+ return ERR(NONE);
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
new file mode 100644
index 0000000000..b6ffbb5f65
--- /dev/null
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -0,0 +1,51 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
+#define ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace openjdkjvmti {
+
+class ThreadUtil {
+ public:
+ static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr);
+
+ static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr);
+
+ static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_TI_THREAD_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2086d70791..df5fc5ce8a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1364,6 +1364,39 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
return true;
}
+static bool EnsureJvmtiPlugin(Runtime* runtime,
+ std::vector<Plugin>* plugins,
+ std::string* error_msg) {
+ constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
+
+ // Is the plugin already loaded?
+ for (Plugin p : *plugins) {
+ if (p.GetLibrary() == plugin_name) {
+ return true;
+ }
+ }
+
+ // Is the process debuggable? Otherwise, do not attempt to load the plugin.
+ if (!runtime->IsDebuggable()) {
+ *error_msg = "Process is not debuggable.";
+ return false;
+ }
+
+ Plugin new_plugin = Plugin::Create(plugin_name);
+
+ // Suspend all threads to protect ourself somewhat.
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self); // Now we know we have the shared lock.
+ ScopedThreadSuspension sts(self, art::kWaitingForDebuggerToAttach);
+ ScopedSuspendAll ssa("EnsureJvmtiPlugin");
+ if (!new_plugin.Load(error_msg)) {
+ return false;
+ }
+
+ plugins->push_back(std::move(new_plugin));
+ return true;
+}
+
// Attach a new agent and add it to the list of runtime agents
//
// TODO: once we decide on the threading model for agents,
@@ -1371,18 +1404,25 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// (and we synchronize access to any shared data structures like "agents_")
//
void Runtime::AttachAgent(const std::string& agent_arg) {
+ std::string error_msg;
+ if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
+ LOG(WARNING) << "Could not load plugin: " << error_msg;
+ ScopedObjectAccess soa(Thread::Current());
+ ThrowIOException("%s", error_msg.c_str());
+ return;
+ }
+
ti::Agent agent(agent_arg);
int res = 0;
- std::string err;
- ti::Agent::LoadError result = agent.Attach(&res, &err);
+ ti::Agent::LoadError result = agent.Attach(&res, &error_msg);
if (result == ti::Agent::kNoError) {
agents_.push_back(std::move(agent));
} else {
- LOG(ERROR) << "Agent attach failed (result=" << result << ") : " << err;
+ LOG(WARNING) << "Agent attach failed (result=" << result << ") : " << error_msg;
ScopedObjectAccess soa(Thread::Current());
- ThrowWrappedIOException("%s", err.c_str());
+ ThrowIOException("%s", error_msg.c_str());
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a4f063141b..bdd4ca6721 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2629,10 +2629,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pAllocArray)
QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
- QUICK_ENTRY_POINT_INFO(pAllocObject)
QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
- QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
+ QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
diff --git a/runtime/thread.h b/runtime/thread.h
index c7acfc4c9c..a3ef9bc0a3 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1418,7 +1418,7 @@ class Thread {
stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
- thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_start(nullptr),
+ thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr),
@@ -1542,12 +1542,12 @@ class Thread {
JniEntryPoints jni_entrypoints;
QuickEntryPoints quick_entrypoints;
+ // Thread-local allocation pointer. Moved here to force alignment for thread_local_pos on ARM.
+ uint8_t* thread_local_start;
// thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
- // Thread-local allocation pointer.
- uint8_t* thread_local_start;
size_t thread_local_objects;
diff --git a/test/129-ThreadGetId/expected.txt b/test/129-ThreadGetId/expected.txt
index 134d8d0b47..aadf90d9d7 100644
--- a/test/129-ThreadGetId/expected.txt
+++ b/test/129-ThreadGetId/expected.txt
@@ -1 +1,2 @@
+HeapTaskDaemon depth 0
Finishing
diff --git a/test/129-ThreadGetId/src/Main.java b/test/129-ThreadGetId/src/Main.java
index 9934bba95f..5aefd17f0e 100644
--- a/test/129-ThreadGetId/src/Main.java
+++ b/test/129-ThreadGetId/src/Main.java
@@ -22,6 +22,7 @@ public class Main implements Runnable {
public static void main(String[] args) throws Exception {
final Thread[] threads = new Thread[numberOfThreads];
+ test_getStackTraces();
for (int t = 0; t < threads.length; t++) {
threads[t] = new Thread(new Main());
threads[t].start();
@@ -32,6 +33,19 @@ public class Main implements Runnable {
System.out.println("Finishing");
}
+ static void test_getStackTraces() {
+ // Check all the current threads for positive IDs.
+ Map<Thread, StackTraceElement[]> map = Thread.getAllStackTraces();
+ for (Map.Entry<Thread, StackTraceElement[]> pair : map.entrySet()) {
+ Thread thread = pair.getKey();
+ // Expect empty stack trace since we do not support suspending the GC thread for
+ // obtaining stack traces. See b/28261069.
+ if (thread.getName().equals("HeapTaskDaemon")) {
+ System.out.println(thread.getName() + " depth " + pair.getValue().length);
+ }
+ }
+ }
+
public void test_getId() {
if (Thread.currentThread().getId() <= 0) {
System.out.println("current thread's ID is not positive");
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 5fd51e1dca..89b9cb45c3 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -192,13 +192,13 @@ public class Main extends UnresolvedSuperClass {
/// CHECK-START: void Main.testLicm(int) licm (before)
/// CHECK: <<Class:l\d+>> LoadClass loop:B2
/// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2
- /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2
/// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
/// CHECK-START: void Main.testLicm(int) licm (after)
/// CHECK: <<Class:l\d+>> LoadClass loop:none
/// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none
- /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2
+ /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>] loop:B2
/// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2
static public void testLicm(int count) {
// Test to make sure we keep the initialization check after loading an unresolved class.
diff --git a/test/621-checker-new-instance/expected.txt b/test/621-checker-new-instance/expected.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/test/621-checker-new-instance/expected.txt
+++ /dev/null
diff --git a/test/621-checker-new-instance/info.txt b/test/621-checker-new-instance/info.txt
deleted file mode 100644
index c27c45ca7f..0000000000
--- a/test/621-checker-new-instance/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Tests for removing useless load class.
diff --git a/test/621-checker-new-instance/src/Main.java b/test/621-checker-new-instance/src/Main.java
deleted file mode 100644
index 68a46449f0..0000000000
--- a/test/621-checker-new-instance/src/Main.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (before)
- /// CHECK: LoadClass
- /// CHECK: NewInstance
-
- /// CHECK-START: java.lang.Object Main.newObject() prepare_for_register_allocation (after)
- /// CHECK-NOT: LoadClass
- /// CHECK: NewInstance
- public static Object newObject() {
- return new Object();
- }
-
- /// CHECK-START: java.lang.Object Main.newFinalizableMayThrow() prepare_for_register_allocation (after)
- /// CHECK: LoadClass
- /// CHECK: NewInstance
- public static Object newFinalizableMayThrow() {
- return $inline$newFinalizableMayThrow();
- }
-
- public static Object $inline$newFinalizableMayThrow() {
- return new FinalizableMayThrow();
- }
-
- public static void main(String[] args) {
- newFinalizableMayThrow();
- newObject();
- }
-}
-
-class FinalizableMayThrow {
- // clinit may throw OOME.
- static Object o = new Object();
- static String s;
- public void finalize() {
- s = "Test";
- }
-}
diff --git a/test/909-attach-agent/expected.txt b/test/909-attach-agent/expected.txt
index eacc595aaf..c0bccd6486 100644
--- a/test/909-attach-agent/expected.txt
+++ b/test/909-attach-agent/expected.txt
@@ -1,3 +1,11 @@
Hello, world!
Attached Agent for test 909-attach-agent
Goodbye!
+Hello, world!
+Attached Agent for test 909-attach-agent
+Goodbye!
+Hello, world!
+java.io.IOException: Process is not debuggable.
+ at dalvik.system.VMDebug.attachAgent(Native Method)
+ at Main.main(Main.java:27)
+Goodbye!
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
index aed6e83d67..985341bd4f 100755
--- a/test/909-attach-agent/run
+++ b/test/909-attach-agent/run
@@ -24,4 +24,14 @@ fi
./default-run "$@" --experimental agents \
--experimental runtime-plugins \
--android-runtime-option -Xplugin:${plugin} \
+ --android-runtime-option -Xfully-deoptable \
+ --args agent:${agent}=909-attach-agent
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --android-runtime-option -Xfully-deoptable \
+ --args agent:${agent}=909-attach-agent
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
--args agent:${agent}=909-attach-agent
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index f8c97ce475..e40698acc5 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -4,72 +4,72 @@
From top
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 38 34
- main ([Ljava/lang/String;)V 6 24
----------
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 42 35
- main ([Ljava/lang/String;)V 6 24
+ print (Ljava/lang/Thread;II)V 0 183
+ printOrWait (IILMain$ControlData;)V 6 246
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ doTest ()V 38 41
+ main ([Ljava/lang/String;)V 6 27
+---------
+ print (Ljava/lang/Thread;II)V 0 183
+ printOrWait (IILMain$ControlData;)V 6 246
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ doTest ()V 42 42
+ main ([Ljava/lang/String;)V 6 27
---------
getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
- print (Ljava/lang/Thread;II)V 0 124
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
----------
- printOrWait (IILMain$ControlData;)V 6 151
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
+ print (Ljava/lang/Thread;II)V 0 183
+ printOrWait (IILMain$ControlData;)V 6 246
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+---------
+ printOrWait (IILMain$ControlData;)V 6 246
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
From bottom
---------
- main ([Ljava/lang/String;)V 6 24
+ main ([Ljava/lang/String;)V 6 27
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- doTest ()V 65 41
- main ([Ljava/lang/String;)V 6 24
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ doTest ()V 65 48
+ main ([Ljava/lang/String;)V 6 27
---------
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
################################
### Other thread (suspended) ###
@@ -77,132 +77,519 @@ From bottom
From top
---------
wait ()V -1 -2
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
----------
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
----------
- wait ()V -1 -2
- printOrWait (IILMain$ControlData;)V 24 157
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
----------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 61
+---------
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 61
+---------
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
From bottom
---------
- run ()V 4 54
+ run ()V 4 61
---------
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 54
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 61
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
###########################
### Other thread (live) ###
###########################
From top
---------
- printOrWait (IILMain$ControlData;)V 44 164
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
----------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
----------
- printOrWait (IILMain$ControlData;)V 44 164
- baz (IIILMain$ControlData;)Ljava/lang/Object; 2 142
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
----------
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
+ printOrWait (IILMain$ControlData;)V 44 259
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 95
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 95
+---------
+ printOrWait (IILMain$ControlData;)V 44 259
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+---------
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
From bottom
---------
- run ()V 4 88
+ run ()V 4 95
+---------
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 95
+---------
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+
+################################
+### Other threads (suspended) ###
+################################
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+
+---------
+Thread-11
+
+---------
+Thread-12
+
+---------
+Thread-13
+
+---------
+Thread-4
+
+---------
+Thread-5
+
+---------
+Thread-6
+
+---------
+Thread-7
+
+---------
+Thread-8
+
+---------
+Thread-9
+
+---------
+main
+
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-11
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-12
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-13
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-4
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-5
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-6
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-7
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-8
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+Thread-9
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+
+---------
+main
+ getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
+ printAll (I)V 0 219
+ doTestAllStackTraces ()V 107 156
+ main ([Ljava/lang/String;)V 15 31
+
+---------
+FinalizerDaemon
+<not printed>
+---------
+FinalizerWatchdogDaemon
+<not printed>
+---------
+HeapTaskDaemon
+<not printed>
+---------
+ReferenceQueueDaemon
+<not printed>
+---------
+Signal Catcher
+
+---------
+Thread-10
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-11
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
---------
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- run ()V 4 88
+Thread-12
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
---------
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
- foo (IIILMain$ControlData;)I 0 131
- baz (IIILMain$ControlData;)Ljava/lang/Object; 9 144
- bar (IIILMain$ControlData;)J 0 136
+Thread-13
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-4
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-5
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-6
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-7
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-8
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+Thread-9
+ wait ()V -1 -2
+ printOrWait (IILMain$ControlData;)V 24 252
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 2 237
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ baz (IIILMain$ControlData;)Ljava/lang/Object; 9 239
+ bar (IIILMain$ControlData;)J 0 231
+ foo (IIILMain$ControlData;)I 0 226
+ run ()V 4 144
+
+---------
+main
+ getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
+ printAll (I)V 0 219
+ doTestAllStackTraces ()V 112 158
+ main ([Ljava/lang/String;)V 15 31
+
+Done
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index 722bee8056..3479abbeae 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -14,7 +14,10 @@
* limitations under the License.
*/
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
import java.util.concurrent.CountDownLatch;
public class Main {
@@ -24,6 +27,10 @@ public class Main {
doTest();
doTestOtherThreadWait();
doTestOtherThreadBusyLoop();
+
+ doTestAllStackTraces();
+
+ System.out.println("Done");
}
public static void doTest() throws Exception {
@@ -109,6 +116,58 @@ public class Main {
t.join();
}
+ private final static List<Object> RETAIN = new ArrayList<Object>();
+
+ public static void doTestAllStackTraces() throws Exception {
+ System.out.println();
+ System.out.println("################################");
+ System.out.println("### Other threads (suspended) ###");
+ System.out.println("################################");
+
+ // Also create an unstarted and a dead thread.
+ RETAIN.add(new Thread());
+ Thread deadThread = new Thread();
+ RETAIN.add(deadThread);
+ deadThread.start();
+ deadThread.join();
+
+ final int N = 10;
+
+ final ControlData data = new ControlData(N);
+ data.waitFor = new Object();
+
+ Thread threads[] = new Thread[N];
+
+ for (int i = 0; i < N; i++) {
+ Thread t = new Thread() {
+ public void run() {
+ Recurse.foo(4, 0, 0, data);
+ }
+ };
+ t.start();
+ threads[i] = t;
+ }
+ data.reached.await();
+ Thread.yield();
+ Thread.sleep(500); // A little bit of time...
+
+ printAll(0);
+
+ printAll(5);
+
+ printAll(25);
+
+ // Let the thread make progress and die.
+ synchronized(data.waitFor) {
+ data.waitFor.notifyAll();
+ }
+ for (int i = 0; i < N; i++) {
+ threads[i].join();
+ }
+
+ RETAIN.clear();
+ }
+
public static void print(String[][] stack) {
System.out.println("---------");
for (String[] stackElement : stack) {
@@ -124,6 +183,42 @@ public class Main {
print(getStackTrace(t, start, max));
}
+ public static void printAll(Object[][] stacks) {
+ List<String> stringified = new ArrayList<String>(stacks.length);
+
+ for (Object[] stackInfo : stacks) {
+ Thread t = (Thread)stackInfo[0];
+ String name = (t != null) ? t.getName() : "null";
+ String stackSerialization;
+ if (name.contains("Daemon")) {
+ // Do not print daemon stacks, as they're non-deterministic.
+ stackSerialization = "<not printed>";
+ } else {
+ StringBuilder sb = new StringBuilder();
+ for (String[] stackElement : (String[][])stackInfo[1]) {
+ for (String part : stackElement) {
+ sb.append(' ');
+ sb.append(part);
+ }
+ sb.append('\n');
+ }
+ stackSerialization = sb.toString();
+ }
+ stringified.add(name + "\n" + stackSerialization);
+ }
+
+ Collections.sort(stringified);
+
+ for (String s : stringified) {
+ System.out.println("---------");
+ System.out.println(s);
+ }
+ }
+
+ public static void printAll(int max) {
+ printAll(getAllStackTraces(max));
+ }
+
// Wrap generated stack traces into a class to separate them nicely.
public static class Recurse {
@@ -170,10 +265,22 @@ public class Main {
}
public static class ControlData {
- CountDownLatch reached = new CountDownLatch(1);
+ CountDownLatch reached;
Object waitFor = null;
volatile boolean stop = false;
+
+ public ControlData() {
+ this(1);
+ }
+
+ public ControlData(int latchCount) {
+ reached = new CountDownLatch(latchCount);
+ }
}
public static native String[][] getStackTrace(Thread thread, int start, int max);
+ // Get all stack traces. This will return an array with an element for each thread. The element
+ // is an array itself with the first element being the thread, and the second element a nested
+ // String array as in getStackTrace.
+ public static native Object[][] getAllStackTraces(int max);
}
diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc
index cca163b79b..57d4f6d2ee 100644
--- a/test/911-get-stack-trace/stack_trace.cc
+++ b/test/911-get-stack-trace/stack_trace.cc
@@ -50,22 +50,9 @@ static jint FindLineNumber(jint line_number_count,
return line_number;
}
-extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
- JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
- std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
-
- jint count;
- {
- jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
- if (result != JVMTI_ERROR_NONE) {
- char* err;
- jvmti_env->GetErrorName(result, &err);
- printf("Failure running GetStackTrace: %s\n", err);
- jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
- return nullptr;
- }
- }
-
+static jobjectArray TranslateJvmtiFrameInfoArray(JNIEnv* env,
+ jvmtiFrameInfo* frames,
+ jint count) {
auto callback = [&](jint method_index) -> jobjectArray {
char* name;
char* sig;
@@ -140,5 +127,58 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
return CreateObjectArray(env, count, "[Ljava/lang/String;", callback);
}
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thread, jint start, jint max) {
+ std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
+
+ jint count;
+ {
+ jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count);
+ if (result != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(result, &err);
+ printf("Failure running GetStackTrace: %s\n", err);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ return nullptr;
+ }
+ }
+
+ return TranslateJvmtiFrameInfoArray(env, frames.get(), count);
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getAllStackTraces(
+ JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint max) {
+ std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]);
+
+ jint thread_count;
+ jvmtiStackInfo* stack_infos;
+ {
+ jvmtiError result = jvmti_env->GetAllStackTraces(max, &stack_infos, &thread_count);
+ if (result != JVMTI_ERROR_NONE) {
+ char* err;
+ jvmti_env->GetErrorName(result, &err);
+ printf("Failure running GetAllStackTraces: %s\n", err);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(err));
+ return nullptr;
+ }
+ }
+
+ auto callback = [&](jint thread_index) -> jobject {
+ auto inner_callback = [&](jint index) -> jobject {
+ if (index == 0) {
+ return stack_infos[thread_index].thread;
+ } else {
+ return TranslateJvmtiFrameInfoArray(env,
+ stack_infos[thread_index].frame_buffer,
+ stack_infos[thread_index].frame_count);
+ }
+ };
+ return CreateObjectArray(env, 2, "java/lang/Object", inner_callback);
+ };
+ jobjectArray ret = CreateObjectArray(env, thread_count, "[Ljava/lang/Object;", callback);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(stack_infos));
+ return ret;
+}
+
} // namespace Test911GetStackTrace
} // namespace art
diff --git a/test/924-threads/build b/test/924-threads/build
new file mode 100755
index 0000000000..898e2e54a2
--- /dev/null
+++ b/test/924-threads/build
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-build "$@" --experimental agents
diff --git a/test/924-threads/expected.txt b/test/924-threads/expected.txt
new file mode 100644
index 0000000000..54065223cf
--- /dev/null
+++ b/test/924-threads/expected.txt
@@ -0,0 +1,30 @@
+currentThread OK
+main
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+main
+5
+false
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+Daemon Thread
+5
+true
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+Daemon Thread
+5
+true
+java.lang.ThreadGroup[name=main,maxpri=10]
+class dalvik.system.PathClassLoader
+5
+5
+0 = NEW
+191 = ALIVE|WAITING_INDEFINITELY|WAITING|IN_OBJECT_WAIT
+1a1 = ALIVE|WAITING_WITH_TIMEOUT|WAITING|IN_OBJECT_WAIT
+401 = ALIVE|BLOCKED_ON_MONITOR_ENTER
+e1 = ALIVE|WAITING_WITH_TIMEOUT|SLEEPING|WAITING
+5 = ALIVE|RUNNABLE
+2 = TERMINATED
diff --git a/test/924-threads/info.txt b/test/924-threads/info.txt
new file mode 100644
index 0000000000..875a5f6ec1
--- /dev/null
+++ b/test/924-threads/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/924-threads/run b/test/924-threads/run
new file mode 100755
index 0000000000..4379349cb2
--- /dev/null
+++ b/test/924-threads/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --experimental agents \
+ --experimental runtime-plugins \
+ --jvmti
diff --git a/test/924-threads/src/Main.java b/test/924-threads/src/Main.java
new file mode 100644
index 0000000000..048766604f
--- /dev/null
+++ b/test/924-threads/src/Main.java
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[1]);
+
+ doTest();
+ }
+
+ private static void doTest() throws Exception {
+ Thread t1 = Thread.currentThread();
+ Thread t2 = getCurrentThread();
+
+ if (t1 != t2) {
+ throw new RuntimeException("Expected " + t1 + " but got " + t2);
+ }
+ System.out.println("currentThread OK");
+
+ printThreadInfo(t1);
+ printThreadInfo(null);
+
+ Thread t3 = new Thread("Daemon Thread");
+ t3.setDaemon(true);
+ // Do not start this thread, yet.
+ printThreadInfo(t3);
+ // Start, and wait for it to die.
+ t3.start();
+ t3.join();
+ Thread.sleep(500); // Wait a little bit.
+ // Thread has died, check that we can still get info.
+ printThreadInfo(t3);
+
+ doStateTests();
+ }
+
+ private static class Holder {
+ volatile boolean flag = false;
+ }
+
+ private static void doStateTests() throws Exception {
+ System.out.println(Integer.toHexString(getThreadState(null)));
+ System.out.println(Integer.toHexString(getThreadState(Thread.currentThread())));
+
+ final CountDownLatch cdl1 = new CountDownLatch(1);
+ final CountDownLatch cdl2 = new CountDownLatch(1);
+ final CountDownLatch cdl3_1 = new CountDownLatch(1);
+ final CountDownLatch cdl3_2 = new CountDownLatch(1);
+ final CountDownLatch cdl4 = new CountDownLatch(1);
+ final CountDownLatch cdl5 = new CountDownLatch(1);
+ final Holder h = new Holder();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cdl1.countDown();
+ synchronized(cdl1) {
+ cdl1.wait();
+ }
+
+ cdl2.countDown();
+ synchronized(cdl2) {
+ cdl2.wait(1000); // Wait a second.
+ }
+
+ cdl3_1.await();
+ cdl3_2.countDown();
+ synchronized(cdl3_2) {
+ // Nothing, just wanted to block on cdl3_2.
+ }
+
+ cdl4.countDown();
+ Thread.sleep(1000);
+
+ cdl5.countDown();
+ while (!h.flag) {
+ // Busy-loop.
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+
+ Thread t = new Thread(r);
+ printThreadState(t);
+ t.start();
+
+ // Waiting.
+ cdl1.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl1) {
+ cdl1.notifyAll();
+ }
+
+ // Timed waiting.
+ cdl2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ synchronized(cdl2) {
+ cdl2.notifyAll();
+ }
+
+ // Blocked on monitor.
+ synchronized(cdl3_2) {
+ cdl3_1.countDown();
+ cdl3_2.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ }
+
+ // Sleeping.
+ cdl4.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+
+ // Running.
+ cdl5.await();
+ Thread.yield();
+ Thread.sleep(100);
+ printThreadState(t);
+ h.flag = true;
+
+ // Dying.
+ t.join();
+ Thread.yield();
+ Thread.sleep(100);
+
+ printThreadState(t);
+ }
+
+ private final static Map<Integer, String> STATE_NAMES = new HashMap<Integer, String>();
+ private final static List<Integer> STATE_KEYS = new ArrayList<Integer>();
+ static {
+ STATE_NAMES.put(0x1, "ALIVE");
+ STATE_NAMES.put(0x2, "TERMINATED");
+ STATE_NAMES.put(0x4, "RUNNABLE");
+ STATE_NAMES.put(0x400, "BLOCKED_ON_MONITOR_ENTER");
+ STATE_NAMES.put(0x80, "WAITING");
+ STATE_NAMES.put(0x10, "WAITING_INDEFINITELY");
+ STATE_NAMES.put(0x20, "WAITING_WITH_TIMEOUT");
+ STATE_NAMES.put(0x40, "SLEEPING");
+ STATE_NAMES.put(0x100, "IN_OBJECT_WAIT");
+ STATE_NAMES.put(0x200, "PARKED");
+ STATE_NAMES.put(0x100000, "SUSPENDED");
+ STATE_NAMES.put(0x200000, "INTERRUPTED");
+ STATE_NAMES.put(0x400000, "IN_NATIVE");
+ STATE_KEYS.addAll(STATE_NAMES.keySet());
+ Collections.sort(STATE_KEYS);
+ }
+
+ private static void printThreadState(Thread t) {
+ int state = getThreadState(t);
+
+ StringBuilder sb = new StringBuilder();
+
+ for (Integer i : STATE_KEYS) {
+ if ((state & i) != 0) {
+ if (sb.length()>0) {
+ sb.append('|');
+ }
+ sb.append(STATE_NAMES.get(i));
+ }
+ }
+
+ if (sb.length() == 0) {
+ sb.append("NEW");
+ }
+
+ System.out.println(Integer.toHexString(state) + " = " + sb.toString());
+ }
+
+ private static void printThreadInfo(Thread t) {
+ Object[] threadInfo = getThreadInfo(t);
+ if (threadInfo == null || threadInfo.length != 5) {
+ System.out.println(Arrays.toString(threadInfo));
+ throw new RuntimeException("threadInfo length wrong");
+ }
+
+ System.out.println(threadInfo[0]); // Name
+ System.out.println(threadInfo[1]); // Priority
+ System.out.println(threadInfo[2]); // Daemon
+ System.out.println(threadInfo[3]); // Threadgroup
+ System.out.println(threadInfo[4] == null ? "null" : threadInfo[4].getClass()); // Context CL.
+ }
+
+ private static native Thread getCurrentThread();
+ private static native Object[] getThreadInfo(Thread t);
+ private static native int getThreadState(Thread t);
+}
diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc
new file mode 100644
index 0000000000..4abf8fcf93
--- /dev/null
+++ b/test/924-threads/threads.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include "android-base/stringprintf.h"
+#include "base/macros.h"
+#include "base/logging.h"
+#include "jni.h"
+#include "openjdkjvmti/jvmti.h"
+#include "ScopedLocalRef.h"
+
+#include "ti-agent/common_helper.h"
+#include "ti-agent/common_load.h"
+
+namespace art {
+namespace Test924Threads {
+
+// private static native Thread getCurrentThread();
+// private static native Object[] getThreadInfo(Thread t);
+
+extern "C" JNIEXPORT jthread JNICALL Java_Main_getCurrentThread(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED) {
+ jthread thread = nullptr;
+ jvmtiError result = jvmti_env->GetCurrentThread(&thread);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+ return thread;
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getThreadInfo(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
+ jvmtiThreadInfo info;
+ memset(&info, 0, sizeof(jvmtiThreadInfo));
+
+ jvmtiError result = jvmti_env->GetThreadInfo(thread, &info);
+ if (JvmtiErrorToException(env, result)) {
+ return nullptr;
+ }
+
+ auto callback = [&](jint component_index) -> jobject {
+ switch (component_index) {
+ // The name.
+ case 0:
+ return (info.name == nullptr) ? nullptr : env->NewStringUTF(info.name);
+
+ // The priority. Use a string for simplicity of construction.
+ case 1:
+ return env->NewStringUTF(android::base::StringPrintf("%d", info.priority).c_str());
+
+ // Whether it's a daemon. Use a string for simplicity of construction.
+ case 2:
+ return env->NewStringUTF(info.is_daemon == JNI_TRUE ? "true" : "false");
+
+ // The thread group.
+ case 3:
+ return env->NewLocalRef(info.thread_group);
+
+ // The context classloader.
+ case 4:
+ return env->NewLocalRef(info.context_class_loader);
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ };
+ jobjectArray ret = CreateObjectArray(env, 5, "java/lang/Object", callback);
+
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(info.name));
+ if (info.thread_group != nullptr) {
+ env->DeleteLocalRef(info.thread_group);
+ }
+ if (info.context_class_loader != nullptr) {
+ env->DeleteLocalRef(info.context_class_loader);
+ }
+
+ return ret;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Main_getThreadState(
+ JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jthread thread) {
+ jint state;
+ jvmtiError result = jvmti_env->GetThreadState(thread, &state);
+ if (JvmtiErrorToException(env, result)) {
+ return 0;
+ }
+ return state;
+}
+
+} // namespace Test924Threads
+} // namespace art
diff --git a/test/Android.bp b/test/Android.bp
index a223c3aa29..b0f0e5a98d 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -264,6 +264,7 @@ art_cc_defaults {
"920-objects/objects.cc",
"922-properties/properties.cc",
"923-monitors/monitors.cc",
+ "924-threads/threads.cc",
],
shared_libs: [
"libbase",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index fd3a897dae..dd7876f09d 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -296,6 +296,7 @@ TEST_ART_BROKEN_TARGET_TESTS += \
921-hello-failure \
922-properties \
923-monitors \
+ 924-threads \
ifneq (,$(filter target,$(TARGET_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -721,6 +722,16 @@ endif
TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS :=
+# Tests that check semantics for a non-debuggable app.
+TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS := \
+ 909-attach-agent \
+
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(PICTEST_TYPES),debuggable,$(TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+
+TEST_ART_BROKEN_DEBUGGABLE_RUN_TESTS :=
+
# Tests incompatible with bisection bug search. Sorted by incompatibility reason.
# 000 through 595 do not compile anything. 089 tests a build failure. 018 through 137
# run dalvikvm more than once. 115 and 088 assume they are always compiled.