-rw-r--r--compiler/dex/quick/gen_common.cc135
-rw-r--r--compiler/driver/compiler_driver.cc26
-rw-r--r--compiler/driver/compiler_driver.h7
-rw-r--r--compiler/image_writer.cc1
-rw-r--r--compiler/llvm/gbc_expander.cc61
-rw-r--r--compiler/llvm/intrinsic_func_list.def7
-rw-r--r--runtime/Android.mk1
-rw-r--r--runtime/asm_support.h4
-rw-r--r--runtime/barrier.cc16
-rw-r--r--runtime/barrier.h3
-rw-r--r--runtime/class_linker.cc32
-rw-r--r--runtime/class_linker_test.cc14
-rw-r--r--runtime/entrypoints/entrypoint_utils.h9
-rw-r--r--runtime/interpreter/interpreter_common.cc27
-rw-r--r--runtime/interpreter/interpreter_common.h11
-rw-r--r--runtime/locks.cc4
-rw-r--r--runtime/locks.h6
-rw-r--r--runtime/mirror/art_method-inl.h6
-rw-r--r--runtime/mirror/art_method.cc5
-rw-r--r--runtime/mirror/art_method.h12
-rw-r--r--runtime/mirror/class.h13
-rw-r--r--runtime/mirror/dex_cache.cc22
-rw-r--r--runtime/mirror/dex_cache.h15
-rw-r--r--runtime/mirror/dex_cache_test.cc4
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc32
-rw-r--r--runtime/oat.cc2
-rw-r--r--runtime/profiler.cc448
-rw-r--r--runtime/profiler.h174
-rw-r--r--runtime/runtime.cc71
-rw-r--r--runtime/runtime.h16
-rw-r--r--runtime/stack.h2
-rw-r--r--runtime/thread.cc343
-rw-r--r--runtime/thread.h15
-rw-r--r--runtime/thread_list.cc2
-rw-r--r--runtime/thread_state.h2
-rw-r--r--runtime/verifier/method_verifier.cc49
-rw-r--r--runtime/verifier/method_verifier.h2
-rw-r--r--runtime/verifier/reg_type_cache.cc19
-rw-r--r--runtime/verifier/reg_type_cache.h8
-rw-r--r--test/303-verification-stress/build28
-rw-r--r--test/303-verification-stress/classes-gen.c64
-rw-r--r--test/303-verification-stress/expected.txt42
-rw-r--r--test/303-verification-stress/info.txt7
-rw-r--r--test/303-verification-stress/src/Main.java27
44 files changed, 1353 insertions, 441 deletions
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index a426cc77de..6b4cbd4286 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -330,21 +330,22 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
bool is_object) {
int field_offset;
- int ssb_index;
+ int storage_index;
bool is_volatile;
bool is_referrers_class;
+ bool is_initialized;
bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
field_idx, mir_graph_->GetCurrentDexCompilationUnit(), true,
- &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
+ &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
if (fast_path && !SLOW_FIELD_PATH) {
DCHECK_GE(field_offset, 0);
- int rBase;
+ int r_base;
if (is_referrers_class) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
- rBase = AllocTemp();
+ r_base = AllocTemp();
LoadWordDisp(rl_method.low_reg,
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), rBase);
+ mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
if (IsTemp(rl_method.low_reg)) {
FreeTemp(rl_method.low_reg);
}
@@ -352,33 +353,44 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized.
// TODO: remove initialized check now that we are initializing classes in the compiler driver.
- DCHECK_GE(ssb_index, 0);
+ DCHECK_GE(storage_index, 0);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
int r_method = TargetReg(kArg1);
LockTemp(r_method);
LoadCurrMethodDirect(r_method);
- rBase = TargetReg(kArg0);
- LockTemp(rBase);
+ r_base = TargetReg(kArg0);
+ LockTemp(r_base);
LoadWordDisp(r_method,
- mirror::ArtMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
- rBase);
- LoadWordDisp(rBase,
- mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * ssb_index, rBase);
- // rBase now points at appropriate static storage base (Class*)
- // or NULL if not initialized. Check for NULL and call helper if NULL.
- // TUNING: fast path should fall through
- LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
- LoadConstant(TargetReg(kArg0), ssb_index);
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
- if (cu_->instruction_set == kMips) {
- // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
- OpRegCopy(rBase, TargetReg(kRet0));
+ mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ r_base);
+ LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ sizeof(int32_t*) * storage_index, r_base);
+ // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
+ if (!is_initialized) {
+ // Check if r_base is NULL or a not yet initialized class.
+ // TUNING: fast path should fall through
+ LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
+ int r_tmp = TargetReg(kArg2);
+ LockTemp(r_tmp);
+ // TODO: Fuse the compare of a constant with memory on X86 and avoid the load.
+ LoadWordDisp(r_base, mirror::Class::StatusOffset().Int32Value(), r_tmp);
+ LIR* initialized_branch = OpCmpImmBranch(kCondGe, r_tmp, mirror::Class::kStatusInitialized,
+ NULL);
+
+ LIR* unresolved_target = NewLIR0(kPseudoTargetLabel);
+ unresolved_branch->target = unresolved_target;
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), storage_index,
+ true);
+ // Copy helper's result into r_base, a no-op on all but MIPS.
+ OpRegCopy(r_base, TargetReg(kRet0));
+
+ LIR* initialized_target = NewLIR0(kPseudoTargetLabel);
+ initialized_branch->target = initialized_target;
+
+ FreeTemp(r_tmp);
}
- LIR* skip_target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = skip_target;
FreeTemp(r_method);
}
  // r_base now holds static storage base
@@ -391,18 +403,18 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
+ StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
rl_src.high_reg);
} else {
- StoreWordDisp(rBase, field_offset, rl_src.low_reg);
+ StoreWordDisp(r_base, field_offset, rl_src.low_reg);
}
if (is_volatile) {
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.low_reg, rBase);
+ MarkGCCard(rl_src.low_reg, r_base);
}
- FreeTemp(rBase);
+ FreeTemp(r_base);
} else {
FlushAllRegs(); // Everything to home locations
ThreadOffset setter_offset =
@@ -416,64 +428,77 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do
void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
bool is_long_or_double, bool is_object) {
int field_offset;
- int ssb_index;
+ int storage_index;
bool is_volatile;
bool is_referrers_class;
+ bool is_initialized;
bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
field_idx, mir_graph_->GetCurrentDexCompilationUnit(), false,
- &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
+ &field_offset, &storage_index, &is_referrers_class, &is_volatile, &is_initialized);
if (fast_path && !SLOW_FIELD_PATH) {
DCHECK_GE(field_offset, 0);
- int rBase;
+ int r_base;
if (is_referrers_class) {
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
- rBase = AllocTemp();
+ r_base = AllocTemp();
LoadWordDisp(rl_method.low_reg,
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(), rBase);
+ mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
// class is initialized
- // TODO: remove initialized check now that we are initializing classes in the compiler driver.
- DCHECK_GE(ssb_index, 0);
+ DCHECK_GE(storage_index, 0);
// May do runtime call so everything to home locations.
FlushAllRegs();
// Using fixed register to sync with possible call to runtime support.
int r_method = TargetReg(kArg1);
LockTemp(r_method);
LoadCurrMethodDirect(r_method);
- rBase = TargetReg(kArg0);
- LockTemp(rBase);
+ r_base = TargetReg(kArg0);
+ LockTemp(r_base);
LoadWordDisp(r_method,
- mirror::ArtMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
- rBase);
- LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
- sizeof(int32_t*) * ssb_index, rBase);
- // rBase now points at appropriate static storage base (Class*)
- // or NULL if not initialized. Check for NULL and call helper if NULL.
- // TUNING: fast path should fall through
- LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
- if (cu_->instruction_set == kMips) {
- // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
- OpRegCopy(rBase, TargetReg(kRet0));
+ mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ r_base);
+ LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+ sizeof(int32_t*) * storage_index, r_base);
+ // r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
+ if (!is_initialized) {
+ // Check if r_base is NULL or a not yet initialized class.
+ // TUNING: fast path should fall through
+ LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
+ int r_tmp = TargetReg(kArg2);
+ LockTemp(r_tmp);
+ // TODO: Fuse the compare of a constant with memory on X86 and avoid the load.
+ LoadWordDisp(r_base, mirror::Class::StatusOffset().Int32Value(), r_tmp);
+ LIR* initialized_branch = OpCmpImmBranch(kCondGe, r_tmp, mirror::Class::kStatusInitialized,
+ NULL);
+
+ LIR* unresolved_target = NewLIR0(kPseudoTargetLabel);
+ unresolved_branch->target = unresolved_target;
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), storage_index,
+ true);
+ // Copy helper's result into r_base, a no-op on all but MIPS.
+ OpRegCopy(r_base, TargetReg(kRet0));
+
+ LIR* initialized_target = NewLIR0(kPseudoTargetLabel);
+ initialized_branch->target = initialized_target;
+
+ FreeTemp(r_tmp);
}
- LIR* skip_target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = skip_target;
FreeTemp(r_method);
}
- // rBase now holds static storage base
+ // r_base now holds static storage base
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
if (is_volatile) {
GenMemBarrier(kLoadLoad);
}
if (is_long_or_double) {
- LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
+ LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
rl_result.high_reg, INVALID_SREG);
} else {
- LoadWordDisp(rBase, field_offset, rl_result.low_reg);
+ LoadWordDisp(r_base, field_offset, rl_result.low_reg);
}
- FreeTemp(rBase);
+ FreeTemp(r_base);
if (is_long_or_double) {
StoreValueWide(rl_dest, rl_result);
} else {
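Taken together, the Quick changes drop the initialized-static-storage (SSB) array in favor of the dex cache's resolved-types array plus an explicit class-status check. A hand-written C++ equivalent of the emitted fast path, as a sketch of the LIR above rather than code from the tree (InitializeStaticStorage stands in for the pInitializeStaticStorage runtime helper):

    // Behavior of the generated code when is_initialized is false:
    mirror::Class* r_base = referrer->GetDexCacheResolvedTypes()->Get(storage_index);
    if (r_base == nullptr ||
        r_base->GetStatus() < mirror::Class::kStatusInitialized) {
      // Slow path: resolve the type and run <clinit> if necessary.
      r_base = InitializeStaticStorage(storage_index);
    }
    // r_base is the static storage base (the Class itself); the field is
    // accessed at r_base + field_offset.

When is_initialized is true, the whole conditional is omitted, which is the point of the new flag.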
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9cffb3c451..5edc8b6771 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -992,14 +992,16 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
}
bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit,
- bool is_put, int* field_offset, int* ssb_index,
- bool* is_referrers_class, bool* is_volatile) {
+ bool is_put, int* field_offset, int* storage_index,
+ bool* is_referrers_class, bool* is_volatile,
+ bool* is_initialized) {
ScopedObjectAccess soa(Thread::Current());
// Conservative defaults.
*field_offset = -1;
- *ssb_index = -1;
+ *storage_index = -1;
*is_referrers_class = false;
*is_volatile = true;
+ *is_initialized = false;
// Try to resolve field and ignore if an Incompatible Class Change Error (ie isn't static).
mirror::ArtField* resolved_field = ComputeFieldReferencedFromCompilingMethod(soa, mUnit, field_idx);
if (resolved_field != NULL && resolved_field->IsStatic()) {
@@ -1010,6 +1012,7 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
mirror::Class* fields_class = resolved_field->GetDeclaringClass();
if (fields_class == referrer_class) {
*is_referrers_class = true; // implies no worrying about class initialization
+ *is_initialized = true;
*field_offset = resolved_field->GetOffset().Int32Value();
*is_volatile = resolved_field->IsVolatile();
stats_->ResolvedLocalStaticField();
@@ -1034,17 +1037,19 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
}
bool is_write_to_final_from_wrong_class = is_put && resolved_field->IsFinal();
if (access_ok && !is_write_to_final_from_wrong_class) {
- // We have the resolved field, we must make it into a ssbIndex for the referrer
- // in its static storage base (which may fail if it doesn't have a slot for it)
+      // We have the resolved field, we must make it into an index for the referrer
+ // in its static storage (which may fail if it doesn't have a slot for it)
// TODO: for images we can elide the static storage base null check
// if we know there's a non-null entry in the image
mirror::DexCache* dex_cache = mUnit->GetClassLinker()->FindDexCache(*mUnit->GetDexFile());
if (fields_class->GetDexCache() == dex_cache) {
// common case where the dex cache of both the referrer and the field are the same,
// no need to search the dex file
- *ssb_index = fields_class->GetDexTypeIndex();
+ *storage_index = fields_class->GetDexTypeIndex();
*field_offset = resolved_field->GetOffset().Int32Value();
*is_volatile = resolved_field->IsVolatile();
+ *is_initialized = fields_class->IsInitialized() &&
+ CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
stats_->ResolvedStaticField();
return true;
}
@@ -1057,9 +1062,11 @@ bool CompilerDriver::ComputeStaticFieldInfo(uint32_t field_idx, const DexCompila
mUnit->GetDexFile()->FindTypeId(mUnit->GetDexFile()->GetIndexForStringId(*string_id));
if (type_id != NULL) {
// medium path, needs check of static storage base being initialized
- *ssb_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
+ *storage_index = mUnit->GetDexFile()->GetIndexForTypeId(*type_id);
*field_offset = resolved_field->GetOffset().Int32Value();
*is_volatile = resolved_field->IsVolatile();
+ *is_initialized = fields_class->IsInitialized() &&
+ CanAssumeTypeIsPresentInDexCache(*mUnit->GetDexFile(), *storage_index);
stats_->ResolvedStaticField();
return true;
}
@@ -2184,11 +2191,6 @@ static void InitializeClass(const ParallelCompilationManager* manager, size_t cl
}
soa.Self()->AssertNoPendingException();
}
- // If successfully initialized place in SSB array.
- if (klass->IsInitialized()) {
- int32_t ssb_index = klass->GetDexTypeIndex();
- klass->GetDexCache()->GetInitializedStaticStorage()->Set(ssb_index, klass.get());
- }
}
// Record the final class status if necessary.
ClassReference ref(manager->GetDexFile(), class_def_index);
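ComputeStaticFieldInfo keeps its conservative defaults, so the new out-parameter is meaningful only on the fast path. A minimal, hypothetical caller:

    int field_offset;
    int storage_index;
    bool is_referrers_class, is_volatile, is_initialized;
    bool fast_path = driver->ComputeStaticFieldInfo(
        field_idx, mUnit, is_put, &field_offset, &storage_index,
        &is_referrers_class, &is_volatile, &is_initialized);
    if (fast_path && is_initialized) {
      // The class is known to be initialized and its type is guaranteed
      // present in the dex cache: no initialization check need be emitted.
    }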
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index f4cc84dfe7..9e316242ba 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -172,8 +172,7 @@ class CompilerDriver {
// Callbacks from compiler to see what runtime checks must be generated.
- bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
+ bool CanAssumeTypeIsPresentInDexCache(const DexFile& dex_file, uint32_t type_idx);
bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
LOCKS_EXCLUDED(Locks::mutator_lock_);
@@ -198,8 +197,8 @@ class CompilerDriver {
// Can we fastpath static field access? Computes field's offset, volatility and whether the
// field is within the referrer (which can avoid checking class initialization).
bool ComputeStaticFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
- int* field_offset, int* ssb_index,
- bool* is_referrers_class, bool* is_volatile)
+ int* field_offset, int* storage_index,
+ bool* is_referrers_class, bool* is_volatile, bool* is_initialized)
LOCKS_EXCLUDED(Locks::mutator_lock_);
  // Can we fastpath an interface, super class or virtual method call? Computes method's vtable
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 02654ad55a..556dec25ad 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -316,7 +316,6 @@ void ImageWriter::PruneNonImageClasses() {
Class* klass = dex_cache->GetResolvedType(i);
if (klass != NULL && !IsImageClass(klass)) {
dex_cache->SetResolvedType(i, NULL);
- dex_cache->GetInitializedStaticStorage()->Set(i, NULL);
}
}
for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index b206a25f25..6423cd7dca 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -199,8 +199,6 @@ class GBCExpanderPass : public llvm::FunctionPass {
//----------------------------------------------------------------------------
llvm::Value* EmitLoadDexCacheAddr(art::MemberOffset dex_cache_offset);
- llvm::Value* EmitLoadDexCacheStaticStorageFieldAddr(uint32_t type_idx);
-
llvm::Value* EmitLoadDexCacheResolvedTypeFieldAddr(uint32_t type_idx);
llvm::Value* EmitLoadDexCacheResolvedMethodFieldAddr(uint32_t method_idx);
@@ -287,8 +285,6 @@ class GBCExpanderPass : public llvm::FunctionPass {
llvm::Value* Expand_LoadDeclaringClassSSB(llvm::Value* method_object_addr);
- llvm::Value* Expand_LoadClassSSBFromDexCache(llvm::Value* type_idx_value);
-
llvm::Value*
Expand_GetSDCalleeMethodObjAddrFast(llvm::Value* callee_method_idx_value);
@@ -720,16 +716,6 @@ llvm::Value* GBCExpanderPass::EmitLoadDexCacheAddr(art::MemberOffset offset) {
}
llvm::Value*
-GBCExpanderPass::EmitLoadDexCacheStaticStorageFieldAddr(uint32_t type_idx) {
- llvm::Value* static_storage_dex_cache_addr =
- EmitLoadDexCacheAddr(art::mirror::ArtMethod::DexCacheInitializedStaticStorageOffset());
-
- llvm::Value* type_idx_value = irb_.getPtrEquivInt(type_idx);
-
- return EmitArrayGEP(static_storage_dex_cache_addr, type_idx_value, kObject);
-}
-
-llvm::Value*
GBCExpanderPass::EmitLoadDexCacheResolvedTypeFieldAddr(uint32_t type_idx) {
llvm::Value* resolved_type_dex_cache_addr =
EmitLoadDexCacheAddr(art::mirror::ArtMethod::DexCacheResolvedTypesOffset());
@@ -1213,17 +1199,6 @@ GBCExpanderPass::Expand_LoadDeclaringClassSSB(llvm::Value* method_object_addr) {
}
llvm::Value*
-GBCExpanderPass::Expand_LoadClassSSBFromDexCache(llvm::Value* type_idx_value) {
- uint32_t type_idx =
- llvm::cast<llvm::ConstantInt>(type_idx_value)->getZExtValue();
-
- llvm::Value* storage_field_addr =
- EmitLoadDexCacheStaticStorageFieldAddr(type_idx);
-
- return irb_.CreateLoad(storage_field_addr, kTBAARuntimeInfo);
-}
-
-llvm::Value*
GBCExpanderPass::Expand_GetSDCalleeMethodObjAddrFast(llvm::Value* callee_method_idx_value) {
uint32_t callee_method_idx =
llvm::cast<llvm::ConstantInt>(callee_method_idx_value)->getZExtValue();
@@ -1837,21 +1812,31 @@ llvm::Value* GBCExpanderPass::EmitLoadStaticStorage(uint32_t dex_pc,
llvm::BasicBlock* block_load_static =
CreateBasicBlockWithDexPC(dex_pc, "load_static");
+ llvm::BasicBlock* block_check_init = CreateBasicBlockWithDexPC(dex_pc, "init");
llvm::BasicBlock* block_cont = CreateBasicBlockWithDexPC(dex_pc, "cont");
// Load static storage from dex cache
- llvm::Value* storage_field_addr =
- EmitLoadDexCacheStaticStorageFieldAddr(type_idx);
+ llvm::Value* storage_field_addr = EmitLoadDexCacheResolvedTypeFieldAddr(type_idx);
llvm::Value* storage_object_addr = irb_.CreateLoad(storage_field_addr, kTBAARuntimeInfo);
- llvm::BasicBlock* block_original = irb_.GetInsertBlock();
+ // Test: Is the class resolved?
+ llvm::Value* equal_null = irb_.CreateICmpEQ(storage_object_addr, irb_.getJNull());
- // Test: Is the static storage of this class initialized?
- llvm::Value* equal_null =
- irb_.CreateICmpEQ(storage_object_addr, irb_.getJNull());
+ irb_.CreateCondBr(equal_null, block_load_static, block_check_init, kUnlikely);
- irb_.CreateCondBr(equal_null, block_load_static, block_cont, kUnlikely);
+  // storage_object_addr != null, so check if it's initialized.
+ irb_.SetInsertPoint(block_check_init);
+
+ llvm::Value* class_status =
+ irb_.LoadFromObjectOffset(storage_object_addr,
+ art::mirror::Class::StatusOffset().Int32Value(),
+ irb_.getJIntTy(), kTBAAHeapInstance);
+
+ llvm::Value* is_not_initialized =
+ irb_.CreateICmpULT(class_status, irb_.getInt32(art::mirror::Class::kStatusInitialized));
+
+ irb_.CreateCondBr(is_not_initialized, block_load_static, block_cont, kUnlikely);
  // Fallback routine to load the class object
irb_.SetInsertPoint(block_load_static);
@@ -1880,9 +1865,8 @@ llvm::Value* GBCExpanderPass::EmitLoadStaticStorage(uint32_t dex_pc,
llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
- phi->addIncoming(storage_object_addr, block_original);
+ phi->addIncoming(storage_object_addr, block_check_init);
phi->addIncoming(loaded_storage_object_addr, block_after_load_static);
-
return phi;
}
@@ -1895,10 +1879,11 @@ llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
int ssb_index;
bool is_referrers_class;
bool is_volatile;
+ bool is_initialized;
bool is_fast_path = driver_->ComputeStaticFieldInfo(
field_idx, dex_compilation_unit_, false,
- &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile, &is_initialized);
llvm::Value* static_field_value;
@@ -1979,10 +1964,11 @@ void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
int ssb_index;
bool is_referrers_class;
bool is_volatile;
+ bool is_initialized;
bool is_fast_path = driver_->ComputeStaticFieldInfo(
field_idx, dex_compilation_unit_, true,
- &field_offset, &ssb_index, &is_referrers_class, &is_volatile);
+ &field_offset, &ssb_index, &is_referrers_class, &is_volatile, &is_initialized);
if (!is_fast_path) {
llvm::Function* runtime_func;
@@ -3360,9 +3346,6 @@ GBCExpanderPass::ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
case IntrinsicHelper::LoadDeclaringClassSSB: {
return Expand_LoadDeclaringClassSSB(call_inst.getArgOperand(0));
}
- case IntrinsicHelper::LoadClassSSBFromDexCache: {
- return Expand_LoadClassSSBFromDexCache(call_inst.getArgOperand(0));
- }
case IntrinsicHelper::InitializeAndLoadClassSSB: {
return ExpandToRuntime(InitializeStaticStorage, call_inst);
}
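The reshaped EmitLoadStaticStorage control flow, sketched with the basic-block names used above (the PHI now takes its fall-through value from block_check_init instead of the original block):

    //   entry:       storage = load resolved_types[type_idx]
    //                if (storage == null) goto load_static             ; kUnlikely
    //   check_init:  status = load storage->status_
    //                if (status < kStatusInitialized) goto load_static ; kUnlikely
    //                goto cont
    //   load_static: storage2 = art_portable_init_and_load_class_ssb(type_idx, ...)
    //   cont:        result = phi [storage, check_init],
    //                             [storage2, after_load_static]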
diff --git a/compiler/llvm/intrinsic_func_list.def b/compiler/llvm/intrinsic_func_list.def
index 92537ba419..887a62666f 100644
--- a/compiler/llvm/intrinsic_func_list.def
+++ b/compiler/llvm/intrinsic_func_list.def
@@ -863,13 +863,6 @@ _EVAL_DEF_INTRINSICS_FUNC(LoadDeclaringClassSSB,
kJavaObjectTy,
_EXPAND_ARG1(kJavaMethodTy))
-// JavaObject* art_portable_load_class_ssb_from_dex_cache(uint32_t type_idx)
-_EVAL_DEF_INTRINSICS_FUNC(LoadClassSSBFromDexCache,
- art_portable_load_class_ssb_from_dex_cache,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
// JavaObject* art_portable_init_and_load_class_ssb(uint32_t type_idx,
// Method* referrer,
// Thread* thread)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a602c832e9..576ed1bdac 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -131,6 +131,7 @@ LIBART_COMMON_SRC_FILES := \
thread_pool.cc \
throw_location.cc \
trace.cc \
+ profiler.cc \
utf.cc \
utils.cc \
verifier/dex_gc_map.cc \
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index e9bbf91761..06c7b53a66 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -39,7 +39,7 @@
#define STRING_DATA_OFFSET 12
// Offsets within java.lang.Method.
-#define METHOD_DEX_CACHE_METHODS_OFFSET 16
-#define METHOD_CODE_OFFSET 40
+#define METHOD_DEX_CACHE_METHODS_OFFSET 12
+#define METHOD_CODE_OFFSET 36
#endif // ART_RUNTIME_ASM_SUPPORT_H_
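Both offsets shrink by one word because the 32-bit dex_cache_initialized_static_storage_ pointer is deleted from ArtMethod (see the runtime/mirror/art_method.h hunk below). Assuming the usual 8-byte managed Object header (class pointer plus monitor word), every field laid out after the deleted one moves down by 4 bytes:

    // ArtMethod field offsets (32-bit), before -> after:
    //   declaring_class_                        8    8
    //   dex_cache_initialized_static_storage_  12    (removed)
    //   dex_cache_resolved_methods_            16 -> 12  // METHOD_DEX_CACHE_METHODS_OFFSET
    //   ...
    //   entry point from compiled code         40 -> 36  // METHOD_CODE_OFFSET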
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index a64499848e..5f43bec01a 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -44,11 +44,27 @@ void Barrier::Init(Thread* self, int count) {
void Barrier::Increment(Thread* self, int delta) {
MutexLock mu(self, lock_);
SetCountLocked(self, count_ + delta);
+
+  // The count has just been incremented. If it is now zero, all threads
+  // have already passed the barrier. If it is non-zero, one or more
+  // threads have not yet called Pass(). When the last thread calls Pass(),
+  // the count is decremented to zero and a Broadcast is made on the
+  // condition variable, waking this thread up.
if (count_ != 0) {
condition_.Wait(self);
}
}
+void Barrier::Increment(Thread* self, int delta, uint32_t timeout_ms) {
+ MutexLock mu(self, lock_);
+ SetCountLocked(self, count_ + delta);
+ if (count_ != 0) {
+ condition_.TimedWait(self, timeout_ms, 0);
+ }
+}
+
void Barrier::SetCountLocked(Thread* self, int count) {
count_ = count;
if (count_ == 0) {
diff --git a/runtime/barrier.h b/runtime/barrier.h
index 22f08e1a3e..e335c327be 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -41,6 +41,9 @@ class Barrier {
// Increment the count by delta, wait on condition if count is non zero.
void Increment(Thread* self, int delta);
+  // Increment the count by delta, wait on condition if count is non zero, with a timeout.
+ void Increment(Thread* self, int delta, uint32_t timeout_ms) LOCKS_EXCLUDED(lock_);
+
private:
void SetCountLocked(Thread* self, int count) EXCLUSIVE_LOCKS_REQUIRED(lock_);
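The timed overload exists for the sampling profiler introduced later in this change, which must not block forever if a checkpoint never runs. Its use, as in profiler.cc below:

    profiler_barrier_->Init(self, 0);
    size_t barrier_count = thread_list->RunCheckpoint(&check_point);
    // Each checkpointed thread calls Pass(); block until all have, or the
    // timeout fires (the profiler then CHECKs that no timeout occurred).
    profiler_barrier_->Increment(self, barrier_count, 10000 /* ms */);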
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fbb47bdfae..131ebf8bb2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1242,15 +1242,8 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
if (fields.get() == NULL) {
return NULL;
}
- SirtRef<mirror::ObjectArray<mirror::StaticStorageBase> >
- initialized_static_storage(self,
- AllocObjectArray<mirror::StaticStorageBase>(self, dex_file.NumTypeIds()));
- if (initialized_static_storage.get() == NULL) {
- return NULL;
- }
-
dex_cache->Init(&dex_file, location.get(), strings.get(), types.get(), methods.get(),
- fields.get(), initialized_static_storage.get());
+ fields.get());
return dex_cache.get();
}
@@ -1905,7 +1898,6 @@ mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file
dst->SetDexCacheStrings(klass->GetDexCache()->GetStrings());
dst->SetDexCacheResolvedMethods(klass->GetDexCache()->GetResolvedMethods());
dst->SetDexCacheResolvedTypes(klass->GetDexCache()->GetResolvedTypes());
- dst->SetDexCacheInitializedStaticStorage(klass->GetDexCache()->GetInitializedStaticStorage());
uint32_t access_flags = it.GetMemberAccessFlags();
@@ -2390,22 +2382,7 @@ mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
for (int32_t i = 0; i < dex_caches->GetLength(); ++i) {
mirror::DexCache* dex_cache = dex_caches->Get(i);
const DexFile* dex_file = dex_cache->GetDexFile();
- // First search using the class def map, but don't bother for non-class types.
- if (descriptor[0] == 'L') {
- const DexFile::StringId* descriptor_string_id = dex_file->FindStringId(descriptor);
- if (descriptor_string_id != NULL) {
- const DexFile::TypeId* type_id =
- dex_file->FindTypeId(dex_file->GetIndexForStringId(*descriptor_string_id));
- if (type_id != NULL) {
- mirror::Class* klass = dex_cache->GetResolvedType(dex_file->GetIndexForTypeId(*type_id));
- if (klass != NULL) {
- self->EndAssertNoThreadSuspension(old_no_suspend_cause);
- return klass;
- }
- }
- }
- }
- // Now try binary searching the string/type index.
+ // Try binary searching the string/type index.
const DexFile::StringId* string_id = dex_file->FindStringId(descriptor);
if (string_id != NULL) {
const DexFile::TypeId* type_id =
@@ -2941,8 +2918,6 @@ static void CheckProxyMethod(mirror::ArtMethod* method,
CHECK_EQ(prototype->GetDexCacheStrings(), method->GetDexCacheStrings());
CHECK_EQ(prototype->GetDexCacheResolvedMethods(), method->GetDexCacheResolvedMethods());
CHECK_EQ(prototype->GetDexCacheResolvedTypes(), method->GetDexCacheResolvedTypes());
- CHECK_EQ(prototype->GetDexCacheInitializedStaticStorage(),
- method->GetDexCacheInitializedStaticStorage());
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
MethodHelper mh(method);
@@ -4097,11 +4072,10 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
// Convert a ClassNotFoundException to a NoClassDefFoundError.
SirtRef<mirror::Throwable> cause(self, self->GetException(NULL));
if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) {
- SirtRef<mirror::Class> sirt_resolved(self, resolved);
+ DCHECK(resolved == NULL); // No SirtRef needed to preserve resolved.
Thread::Current()->ClearException();
ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor);
self->GetException(NULL)->SetCause(cause.get());
- resolved = sirt_resolved.get();
}
}
}
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 34134fae0e..1744050a00 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -159,15 +159,12 @@ class ClassLinkerTest : public CommonTest {
EXPECT_TRUE(method->GetDexCacheStrings() != NULL);
EXPECT_TRUE(method->GetDexCacheResolvedMethods() != NULL);
EXPECT_TRUE(method->GetDexCacheResolvedTypes() != NULL);
- EXPECT_TRUE(method->GetDexCacheInitializedStaticStorage() != NULL);
EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetStrings(),
method->GetDexCacheStrings());
EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods(),
method->GetDexCacheResolvedMethods());
EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes(),
method->GetDexCacheResolvedTypes());
- EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetInitializedStaticStorage(),
- method->GetDexCacheInitializedStaticStorage());
}
void AssertField(mirror::Class* klass, mirror::ArtField* field)
@@ -468,7 +465,6 @@ struct ArtMethodOffsets : public CheckOffsets<mirror::ArtMethod> {
ArtMethodOffsets() : CheckOffsets<mirror::ArtMethod>(false, "Ljava/lang/reflect/ArtMethod;") {
// alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, declaring_class_), "declaringClass"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_initialized_static_storage_), "dexCacheInitializedStaticStorage"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_methods_), "dexCacheResolvedMethods"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_resolved_types_), "dexCacheResolvedTypes"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_), "dexCacheStrings"));
@@ -607,7 +603,6 @@ struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
// alphabetical references
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"));
- offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, initialized_static_storage_), "initializedStaticStorage"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_fields_), "resolvedFields"));
offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::DexCache, resolved_methods_), "resolvedMethods"));
@@ -1006,13 +1001,12 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) {
const DexFile::TypeId* type_id = dex_file->FindTypeId(dex_file->GetIndexForStringId(*string_id));
ASSERT_TRUE(type_id != NULL);
uint32_t type_idx = dex_file->GetIndexForTypeId(*type_id);
- EXPECT_TRUE(clinit->GetDexCacheInitializedStaticStorage()->Get(type_idx) == NULL);
- mirror::StaticStorageBase* uninit = ResolveVerifyAndClinit(type_idx, clinit, Thread::Current(), true, false);
+ mirror::Class* uninit = ResolveVerifyAndClinit(type_idx, clinit, Thread::Current(), true, false);
EXPECT_TRUE(uninit != NULL);
- EXPECT_TRUE(clinit->GetDexCacheInitializedStaticStorage()->Get(type_idx) == NULL);
- mirror::StaticStorageBase* init = ResolveVerifyAndClinit(type_idx, getS0, Thread::Current(), true, false);
+ EXPECT_FALSE(uninit->IsInitialized());
+ mirror::Class* init = ResolveVerifyAndClinit(type_idx, getS0, Thread::Current(), true, false);
EXPECT_TRUE(init != NULL);
- EXPECT_EQ(init, clinit->GetDexCacheInitializedStaticStorage()->Get(type_idx));
+ EXPECT_TRUE(init->IsInitialized());
}
TEST_F(ClassLinkerTest, FinalizableBit) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index a60446caba..e7fe0725d4 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -517,15 +517,15 @@ static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->ResolveType(type_idx, referrer);
- if (UNLIKELY(klass == NULL)) {
+ if (UNLIKELY(klass == nullptr)) {
CHECK(self->IsExceptionPending());
- return NULL; // Failure - Indicate to caller to deliver exception
+ return nullptr; // Failure - Indicate to caller to deliver exception
}
// Perform access check if necessary.
mirror::Class* referring_class = referrer->GetDeclaringClass();
if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referring_class, klass);
- return NULL; // Failure - Indicate to caller to deliver exception
+ return nullptr; // Failure - Indicate to caller to deliver exception
}
// If we're just implementing const-class, we shouldn't call <clinit>.
if (!can_run_clinit) {
@@ -541,9 +541,8 @@ static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
SirtRef<mirror::Class> sirt_class(self, klass);
if (!class_linker->EnsureInitialized(sirt_class, true, true)) {
CHECK(self->IsExceptionPending());
- return NULL; // Failure - Indicate to caller to deliver exception
+ return nullptr; // Failure - Indicate to caller to deliver exception
}
- referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, sirt_class.get());
return sirt_class.get();
}
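ResolveVerifyAndClinit no longer publishes the class into an SSB slot; callers read initialization state off the Class itself, which is exactly what the updated ResolveVerifyAndClinit test above asserts. Sketch:

    mirror::Class* klass = ResolveVerifyAndClinit(type_idx, referrer, self,
                                                  true /* can_run_clinit */,
                                                  false /* verify_access */);
    if (klass == nullptr) {
      // Exception pending; the caller delivers it.
    } else {
      bool ready = klass->IsInitialized();  // state lives on the Class now
    }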
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index f1498287b5..be358e3822 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -38,7 +38,7 @@ static inline void AssignRegister(ShadowFrame& new_shadow_frame, const ShadowFra
}
template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shadow_frame,
+bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Compute method information.
MethodHelper mh(method);
@@ -66,17 +66,6 @@ bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shad
const DexFile::TypeList* params = mh.GetParameterTypeList();
const char* shorty = mh.GetShorty();
- // Handle receiver apart since it's not part of the shorty.
- size_t dest_reg = first_dest_reg;
- size_t arg_offset = 0;
- if (receiver != NULL) {
- DCHECK(!method->IsStatic());
- new_shadow_frame->SetVRegReference(dest_reg, receiver);
- ++dest_reg;
- ++arg_offset;
- } else {
- DCHECK(method->IsStatic());
- }
// TODO: find a cleaner way to separate non-range and range information without duplicating code.
uint32_t arg[5]; // only used in invoke-XXX.
uint32_t vregC; // only used in invoke-XXX-range.
@@ -85,6 +74,16 @@ bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shad
} else {
inst->GetArgs(arg, inst_data);
}
+
+ // Handle receiver apart since it's not part of the shorty.
+ size_t dest_reg = first_dest_reg;
+ size_t arg_offset = 0;
+ if (!method->IsStatic()) {
+ size_t receiver_reg = (is_range) ? vregC : arg[0];
+ new_shadow_frame->SetVRegReference(dest_reg, shadow_frame.GetVRegReference(receiver_reg));
+ ++dest_reg;
+ ++arg_offset;
+ }
for (size_t shorty_pos = 0; dest_reg < num_regs; ++shorty_pos, ++dest_reg, ++arg_offset) {
DCHECK_LT(shorty_pos + 1, mh.GetShortyLength());
const size_t src_reg = (is_range) ? vregC + arg_offset : arg[arg_offset];
@@ -336,8 +335,8 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh,
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
- bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Object* receiver, \
- Thread* self, ShadowFrame& shadow_frame, \
+ bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
+ ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
EXPLICIT_DO_CALL_TEMPLATE_DECL(false, false);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index a9b8909b2b..4481210e04 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -87,7 +87,7 @@ static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANA
// DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* method, Object* receiver, Thread* self, ShadowFrame& shadow_frame,
+bool DoCall(ArtMethod* method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
// Handles invoke-XXX/range instructions.
@@ -101,10 +101,6 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
ArtMethod* const method = FindMethodFromCode<type, do_access_check>(method_idx, receiver,
shadow_frame.GetMethod(),
self);
- if (type != kStatic) {
- // Reload the vreg since the GC may have moved the object.
- receiver = shadow_frame.GetVRegReference(vregC);
- }
if (UNLIKELY(method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
@@ -114,8 +110,7 @@ static inline bool DoInvoke(Thread* self, ShadowFrame& shadow_frame, const Instr
result->SetJ(0);
return false;
} else {
- return DoCall<is_range, do_access_check>(method, receiver, self, shadow_frame, inst,
- inst_data, result);
+ return DoCall<is_range, do_access_check>(method, self, shadow_frame, inst, inst_data, result);
}
}
@@ -145,7 +140,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
return false;
} else {
// No need to check since we've been quickened.
- return DoCall<is_range, false>(method, receiver, self, shadow_frame, inst, inst_data, result);
+ return DoCall<is_range, false>(method, self, shadow_frame, inst, inst_data, result);
}
}
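Dropping the receiver parameter removes a moving-GC hazard, not just clutter: FindMethodFromCode may allocate and thus trigger a collection, which is why the deleted DoInvoke code had to reload the vreg afterwards. Old versus new, sketched:

    // Old (removed): read early, then patch up.
    Object* receiver = shadow_frame.GetVRegReference(vregC);
    ArtMethod* method = FindMethodFromCode<type, do_access_check>(
        method_idx, receiver, shadow_frame.GetMethod(), self);
    receiver = shadow_frame.GetVRegReference(vregC);  // GC may have moved it
    // New: DoCall reads the receiver vreg once, after resolution, so both
    // the reload and the extra parameter disappear.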
diff --git a/runtime/locks.cc b/runtime/locks.cc
index 51a40c383a..5b462a138f 100644
--- a/runtime/locks.cc
+++ b/runtime/locks.cc
@@ -30,6 +30,7 @@ Mutex* Locks::runtime_shutdown_lock_ = NULL;
Mutex* Locks::thread_list_lock_ = NULL;
Mutex* Locks::thread_suspend_count_lock_ = NULL;
Mutex* Locks::trace_lock_ = NULL;
+Mutex* Locks::profiler_lock_ = NULL;
Mutex* Locks::unexpected_signal_lock_ = NULL;
void Locks::Init() {
@@ -44,6 +45,7 @@ void Locks::Init() {
DCHECK(thread_list_lock_ != NULL);
DCHECK(thread_suspend_count_lock_ != NULL);
DCHECK(trace_lock_ != NULL);
+ DCHECK(profiler_lock_ != NULL);
DCHECK(unexpected_signal_lock_ != NULL);
} else {
logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
@@ -66,6 +68,8 @@ void Locks::Init() {
thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
DCHECK(trace_lock_ == NULL);
trace_lock_ = new Mutex("trace lock", kTraceLock);
+ DCHECK(profiler_lock_ == NULL);
+ profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
DCHECK(unexpected_signal_lock_ == NULL);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
}
diff --git a/runtime/locks.h b/runtime/locks.h
index 72d4f652ff..341319c136 100644
--- a/runtime/locks.h
+++ b/runtime/locks.h
@@ -54,6 +54,7 @@ enum LockLevel {
kThreadListLock,
kBreakpointInvokeLock,
kTraceLock,
+ kProfilerLock,
kJdwpEventListLock,
kJdwpAttachLock,
kJdwpStartLock,
@@ -148,8 +149,11 @@ class Locks {
// Guards trace requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
+ // Guards profile objects.
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
+
// Guards lists of classes within the class linker.
- static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(trace_lock_);
+ static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
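With the new level slotted between tracing and the class linker, the documented acquisition order (outermost first) becomes:

    // breakpoint_lock_ -> trace_lock_ -> profiler_lock_ -> classlinker_classes_lock_
    // A thread holding profiler_lock_ may still take classlinker_classes_lock_,
    // but must not take trace_lock_; the ACQUIRED_AFTER annotations let
    // annotalysis check this ordering.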
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index c9bf1609a0..088f616d41 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -73,12 +73,6 @@ inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() const {
OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_), false);
}
-inline ObjectArray<StaticStorageBase>* ArtMethod::GetDexCacheInitializedStaticStorage() const {
- return GetFieldObject<ObjectArray<StaticStorageBase>*>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_initialized_static_storage_),
- false);
-}
-
inline uint32_t ArtMethod::GetCodeSize() const {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
uintptr_t code = reinterpret_cast<uintptr_t>(GetEntryPointFromCompiledCode());
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index a4f6b3b460..f4a076cf13 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -86,11 +86,6 @@ void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_class
new_dex_cache_classes, false);
}
-void ArtMethod::SetDexCacheInitializedStaticStorage(ObjectArray<StaticStorageBase>* new_value) {
- SetFieldObject(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_initialized_static_storage_),
- new_value, false);
-}
-
size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) {
CHECK_LE(1, shorty.length());
uint32_t num_registers = 0;
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d5524ec87b..963b4d554b 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -184,11 +184,6 @@ class MANAGED ArtMethod : public Object {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
}
- static MemberOffset DexCacheInitializedStaticStorageOffset() {
- return OFFSET_OF_OBJECT_MEMBER(ArtMethod,
- dex_cache_initialized_static_storage_);
- }
-
ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() const;
void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -197,10 +192,6 @@ class MANAGED ArtMethod : public Object {
void SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<StaticStorageBase>* GetDexCacheInitializedStaticStorage() const;
- void SetDexCacheInitializedStaticStorage(ObjectArray<StaticStorageBase>* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Find the method that this method overrides
ArtMethod* FindOverriddenMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -391,9 +382,6 @@ class MANAGED ArtMethod : public Object {
Class* declaring_class_;
// short cuts to declaring_class_->dex_cache_ member for fast compiled code access
- ObjectArray<StaticStorageBase>* dex_cache_initialized_static_storage_;
-
- // short cuts to declaring_class_->dex_cache_ member for fast compiled code access
ObjectArray<ArtMethod>* dex_cache_resolved_methods_;
// short cuts to declaring_class_->dex_cache_ member for fast compiled code access
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 50ede668dd..9aa23d91d3 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -70,15 +70,8 @@ class ClassLoader;
class DexCache;
class IfTable;
-// Type for the InitializedStaticStorage table. Currently the Class
-// provides the static storage. However, this might change to an Array
-// to improve image sharing, so we use this type to avoid assumptions
-// on the current storage.
-class MANAGED StaticStorageBase : public Object {
-};
-
// C++ mirror of java.lang.Class
-class MANAGED Class : public StaticStorageBase {
+class MANAGED Class : public Object {
public:
// Class Status
//
@@ -133,6 +126,10 @@ class MANAGED Class : public StaticStorageBase {
void SetStatus(Status new_status, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static MemberOffset StatusOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, status_);
+ }
+
// Returns true if the class has failed to link.
bool IsErroneous() const {
return GetStatus() == kStatusError;
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 00531e3076..fa0900c0ec 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -36,15 +36,13 @@ void DexCache::Init(const DexFile* dex_file,
ObjectArray<String>* strings,
ObjectArray<Class>* resolved_types,
ObjectArray<ArtMethod>* resolved_methods,
- ObjectArray<ArtField>* resolved_fields,
- ObjectArray<StaticStorageBase>* initialized_static_storage) {
- CHECK(dex_file != NULL);
- CHECK(location != NULL);
- CHECK(strings != NULL);
- CHECK(resolved_types != NULL);
- CHECK(resolved_methods != NULL);
- CHECK(resolved_fields != NULL);
- CHECK(initialized_static_storage != NULL);
+ ObjectArray<ArtField>* resolved_fields) {
+ CHECK(dex_file != nullptr);
+ CHECK(location != nullptr);
+ CHECK(strings != nullptr);
+ CHECK(resolved_types != nullptr);
+ CHECK(resolved_methods != nullptr);
+ CHECK(resolved_fields != nullptr);
SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false);
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location, false);
@@ -52,8 +50,6 @@ void DexCache::Init(const DexFile* dex_file,
SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types, false);
SetFieldObject(ResolvedMethodsOffset(), resolved_methods, false);
SetFieldObject(ResolvedFieldsOffset(), resolved_fields, false);
- SetFieldObject(OFFSET_OF_OBJECT_MEMBER(DexCache, initialized_static_storage_),
- initialized_static_storage, false);
Runtime* runtime = Runtime::Current();
if (runtime->HasResolutionMethod()) {
@@ -68,11 +64,11 @@ void DexCache::Init(const DexFile* dex_file,
void DexCache::Fixup(ArtMethod* trampoline) {
// Fixup the resolve methods array to contain trampoline for resolution.
- CHECK(trampoline != NULL);
+ CHECK(trampoline != nullptr);
ObjectArray<ArtMethod>* resolved_methods = GetResolvedMethods();
size_t length = resolved_methods->GetLength();
for (size_t i = 0; i < length; i++) {
- if (resolved_methods->GetWithoutChecks(i) == NULL) {
+ if (resolved_methods->GetWithoutChecks(i) == nullptr) {
resolved_methods->SetWithoutChecks(i, trampoline);
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 0522f134af..a5fe598f5c 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -47,8 +47,7 @@ class MANAGED DexCache : public Object {
ObjectArray<String>* strings,
ObjectArray<Class>* types,
ObjectArray<ArtMethod>* methods,
- ObjectArray<ArtField>* fields,
- ObjectArray<StaticStorageBase>* initialized_static_storage)
+ ObjectArray<ArtField>* fields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -85,11 +84,6 @@ class MANAGED DexCache : public Object {
return GetResolvedFields()->GetLength();
}
- size_t NumInitializedStaticStorage() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetInitializedStaticStorage()->GetLength();
- }
-
String* GetResolvedString(uint32_t string_idx) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetStrings()->Get(string_idx);
@@ -149,12 +143,6 @@ class MANAGED DexCache : public Object {
return GetFieldObject< ObjectArray<ArtField>* >(ResolvedFieldsOffset(), false);
}
- ObjectArray<StaticStorageBase>* GetInitializedStaticStorage() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject< ObjectArray<StaticStorageBase>* >(
- OFFSET_OF_OBJECT_MEMBER(DexCache, initialized_static_storage_), false);
- }
-
const DexFile* GetDexFile() const {
return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), false);
}
@@ -165,7 +153,6 @@ class MANAGED DexCache : public Object {
private:
Object* dex_;
- ObjectArray<StaticStorageBase>* initialized_static_storage_;
String* location_;
ObjectArray<ArtField>* resolved_fields_;
ObjectArray<ArtMethod>* resolved_methods_;
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 441c6da8a0..6bed224bd5 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -39,13 +39,11 @@ TEST_F(DexCacheTest, Open) {
EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
- EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumInitializedStaticStorage());
EXPECT_LE(0, dex_cache->GetStrings()->GetLength());
EXPECT_LE(0, dex_cache->GetResolvedTypes()->GetLength());
EXPECT_LE(0, dex_cache->GetResolvedMethods()->GetLength());
EXPECT_LE(0, dex_cache->GetResolvedFields()->GetLength());
- EXPECT_LE(0, dex_cache->GetInitializedStaticStorage()->GetLength());
EXPECT_EQ(java_lang_dex_file_->NumStringIds(),
static_cast<uint32_t>(dex_cache->GetStrings()->GetLength()));
@@ -55,8 +53,6 @@ TEST_F(DexCacheTest, Open) {
static_cast<uint32_t>(dex_cache->GetResolvedMethods()->GetLength()));
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(),
static_cast<uint32_t>(dex_cache->GetResolvedFields()->GetLength()));
- EXPECT_EQ(java_lang_dex_file_->NumTypeIds(),
- static_cast<uint32_t>(dex_cache->GetInitializedStaticStorage()->GetLength()));
}
} // namespace mirror
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 726a8f1769..c9e255c99b 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -234,7 +234,6 @@ static void PreloadDexCachesResolveType(mirror::DexCache* dex_cache, uint32_t ty
return;
}
// LOG(INFO) << "VMRuntime.preloadDexCaches static storage klass=" << class_name;
- dex_cache->GetInitializedStaticStorage()->Set(type_idx, klass);
}
// Based on ClassLinker::ResolveField.
@@ -306,12 +305,10 @@ struct DexCacheStats {
uint32_t num_types;
uint32_t num_fields;
uint32_t num_methods;
- uint32_t num_static_storage;
DexCacheStats() : num_strings(0),
num_types(0),
num_fields(0),
- num_methods(0),
- num_static_storage(0) {}
+ num_methods(0) {}
};
static const bool kPreloadDexCachesEnabled = true;
@@ -339,7 +336,6 @@ static void PreloadDexCachesStatsTotal(DexCacheStats* total) {
total->num_fields += dex_file->NumFieldIds();
total->num_methods += dex_file->NumMethodIds();
total->num_types += dex_file->NumTypeIds();
- total->num_static_storage += dex_file->NumTypeIds();
}
}
@@ -378,12 +374,6 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
filled->num_methods++;
}
}
- for (size_t i = 0; i < dex_cache->NumInitializedStaticStorage(); i++) {
- mirror::StaticStorageBase* klass = dex_cache->GetInitializedStaticStorage()->Get(i);
- if (klass != NULL) {
- filled->num_static_storage++;
- }
- }
}
}
@@ -477,14 +467,25 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
total.num_fields, before.num_fields, after.num_fields);
LOG(INFO) << StringPrintf("VMRuntime.preloadDexCaches methods total=%d before=%d after=%d",
total.num_methods, before.num_methods, after.num_methods);
- LOG(INFO) << StringPrintf("VMRuntime.preloadDexCaches storage total=%d before=%d after=%d",
- total.num_static_storage,
- before.num_static_storage,
- after.num_static_storage);
LOG(INFO) << StringPrintf("VMRuntime.preloadDexCaches finished");
}
}
+
+/*
+ * This is called by the framework when it knows the application directory and
+ * process name. We use this information to start up the sampling profiler for
+ * for ART.
+ */
+static void VMRuntime_registerAppInfo(JNIEnv* env, jclass, jstring appDir, jstring procName) {
+ const char *appDirChars = env->GetStringUTFChars(appDir, NULL);
+ const char *procNameChars = env->GetStringUTFChars(procName, NULL);
+ std::string profileFile = std::string(appDirChars) + "/art-profile-" + std::string(procNameChars);
+ Runtime::Current()->StartProfiler(profileFile.c_str());
+ env->ReleaseStringUTFChars(appDir, appDirChars);
+ env->ReleaseStringUTFChars(procName, procNameChars);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, addressOf, "!(Ljava/lang/Object;)J"),
NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -506,6 +507,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, vmVersion, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, vmLibrary, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
+ NATIVE_METHOD(VMRuntime, registerAppInfo, "(Ljava/lang/String;Ljava/lang/String;)V"),
};
void register_dalvik_system_VMRuntime(JNIEnv* env) {
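registerAppInfo only derives an output path from its two arguments and hands it to the runtime; with hypothetical values:

    // appDir   = "/data/data/com.example.app"
    // procName = "com.example.app"
    // => profileFile = "/data/data/com.example.app/art-profile-com.example.app"
    Runtime::Current()->StartProfiler(profileFile.c_str());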
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 52e74abd75..caf18f1c80 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '1', '2', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '1', '3', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
new file mode 100644
index 0000000000..0e738124f7
--- /dev/null
+++ b/runtime/profiler.cc
@@ -0,0 +1,448 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profiler.h"
+
+#include <sys/uio.h>
+
+#include "base/stl_util.h"
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "common_throws.h"
+#include "debugger.h"
+#include "dex_file-inl.h"
+#include "instrumentation.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "os.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "thread.h"
+#include "thread_list.h"
+#if !defined(ART_USE_PORTABLE_COMPILER)
+#include "entrypoints/quick/quick_entrypoints.h"
+#endif
+
+namespace art {
+
+BackgroundMethodSamplingProfiler* BackgroundMethodSamplingProfiler::profiler_ = nullptr;
+pthread_t BackgroundMethodSamplingProfiler::profiler_pthread_ = 0U;
+volatile bool BackgroundMethodSamplingProfiler::shutting_down_ = false;
+
+
+// TODO: this profiler runs regardless of the state of the machine. Maybe we should use the
+// wakelock or something to modify the run characteristics. This can be done when we
+// have some performance data after it's been used for a while.
+
+
+// This is called from either a thread list traversal or from a checkpoint. Regardless
+// of which caller, the mutator lock must be held.
+static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ BackgroundMethodSamplingProfiler* profiler =
+ reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
+ mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
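+  // Debugging aid, deliberately disabled: flip the 'false' below to dump
+  // threads for which no current method could be determined.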
+ if (false && method == nullptr) {
+ LOG(INFO) << "No current method available";
+ std::ostringstream os;
+ thread->Dump(os);
+ std::string data(os.str());
+ LOG(INFO) << data;
+ }
+ profiler->RecordMethod(method);
+}
+
+
+
+// A closure that is called by the thread checkpoint code.
+class SampleCheckpoint : public Closure {
+ public:
+ explicit SampleCheckpoint(BackgroundMethodSamplingProfiler* const profiler) :
+ profiler_(profiler) {}
+
+ virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
+ Thread* self = Thread::Current();
+ if (thread == nullptr) {
+ LOG(ERROR) << "Checkpoint with nullptr thread";
+ return;
+ }
+
+ // Grab the mutator lock (shared access).
+ ScopedObjectAccess soa(self);
+
+ // Grab a sample.
+ GetSample(thread, this->profiler_);
+
+ // And finally tell the barrier that we're done.
+ this->profiler_->GetBarrier().Pass(self);
+ }
+
+ private:
+ BackgroundMethodSamplingProfiler* const profiler_;
+};
+
+bool BackgroundMethodSamplingProfiler::ShuttingDown(Thread* self) {
+ MutexLock mu(self, *Locks::profiler_lock_);
+ return shutting_down_;
+}
+
+void* BackgroundMethodSamplingProfiler::RunProfilerThread(void* arg) {
+ Runtime* runtime = Runtime::Current();
+ BackgroundMethodSamplingProfiler* profiler =
+ reinterpret_cast<BackgroundMethodSamplingProfiler*>(arg);
+
+ // Add a random delay to the first run so that we don't hammer the CPU
+ // with all profiles starting at the same time.
+ const int kRandomDelayMaxSecs = 30;
+ const double kMaxBackoffSecs = 24*60*60; // Max backoff time.
+
+ srand(MicroTime() * getpid());
+ int startup_delay = rand() % kRandomDelayMaxSecs; // random delay for startup.
+
+
+ CHECK(runtime->AttachCurrentThread("Profiler", true, runtime->GetSystemThreadGroup(),
+ !runtime->IsCompiler()));
+
+ Thread* self = Thread::Current();
+
+ while (true) {
+ if (ShuttingDown(self)) {
+ break;
+ }
+
+ {
+ // wait until we need to run another profile
+ uint64_t delay_secs = profiler->period_s_ * profiler->backoff_factor_;
+
+ // Add a startup delay to prevent all the profiles running at once.
+ delay_secs += startup_delay;
+
+ // Immediate startup for benchmarking?
+ if (profiler->start_immediately_ && startup_delay > 0) {
+ delay_secs = 0;
+ }
+
+ startup_delay = 0;
+
+ LOG(DEBUG) << "Delaying profile start for " << delay_secs << " secs";
+ MutexLock mu(self, profiler->wait_lock_);
+ profiler->period_condition_.TimedWait(self, delay_secs * 1000, 0);
+
+ // Expand the backoff by its coefficient, but don't go beyond the max.
+ double new_backoff = profiler->backoff_factor_ * profiler->backoff_coefficient_;
+ if (new_backoff < kMaxBackoffSecs) {
+ profiler->backoff_factor_ = new_backoff;
+ }
+ }
+
+ if (ShuttingDown(self)) {
+ break;
+ }
+
+
+ uint64_t start_us = MicroTime();
+ uint64_t end_us = start_us + profiler->duration_s_ * 1000000LL;
+ uint64_t now_us = start_us;
+
+ LOG(DEBUG) << "Starting profiling run now for " << PrettyDuration((end_us - start_us) * 1000);
+
+
+ SampleCheckpoint check_point(profiler);
+
+ while (now_us < end_us) {
+ if (ShuttingDown(self)) {
+ break;
+ }
+
+ usleep(profiler->interval_us_); // Non-interruptible sleep.
+
+ ThreadList* thread_list = runtime->GetThreadList();
+
+ profiler->profiler_barrier_->Init(self, 0);
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+
+ ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
+
+ // Wait for the barrier to be crossed by all runnable threads. This wait
+ // is done with a timeout so that we can detect problems with the checkpoint
+ // running code. We should never hit the timeout.
+ const uint32_t kWaitTimeoutMs = 10000;
+ const uint32_t kWaitTimeoutUs = kWaitTimeoutMs * 1000;
+
+ uint64_t waitstart_us = MicroTime();
+ // Wait for all threads to pass the barrier.
+ profiler->profiler_barrier_->Increment(self, barrier_count, kWaitTimeoutMs);
+ uint64_t waitend_us = MicroTime();
+ uint64_t waitdiff_us = waitend_us - waitstart_us;
+
+ // We should never get a timeout. If we do, it suggests a problem with the checkpoint
+ // code. Crash the process in this case.
+ CHECK_LT(waitdiff_us, kWaitTimeoutUs);
+
+ self->SetState(old_state);
+
+ // Update the current time.
+ now_us = MicroTime();
+ }
+
+ if (!ShuttingDown(self)) {
+ // After the profile has been taken, write it out.
+ ScopedObjectAccess soa(self); // Acquire the mutator lock.
+ uint32_t size = profiler->WriteProfile();
+ LOG(DEBUG) << "Profile size: " << size;
+ }
+ }
+
+ LOG(INFO) << "Profiler shutdown";
+ runtime->DetachCurrentThread();
+ return nullptr;
+}
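+// A worked example of the run pacing, assuming the defaults set in runtime.cc
+// (period 10s, backoff coefficient 2.0): successive inter-run sleeps are 10s,
+// 20s, 40s, 80s, and so on. Note that kMaxBackoffSecs is compared against the
+// dimensionless backoff_factor_, not against the computed delay in seconds, so
+// the sleep is effectively capped at period_s_ * 86400 rather than at 24 hours.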
+
+// Write out the profile file if we are generating a profile.
+uint32_t BackgroundMethodSamplingProfiler::WriteProfile() {
+ UniquePtr<File> profile_file;
+ Runtime* runtime = Runtime::Current();
+ std::string classpath = runtime->GetClassPathString();
+ size_t colon = classpath.find(':');
+ if (colon != std::string::npos) {
+ // More than one file in the classpath; use only the first entry.
+ classpath = classpath.substr(0, colon);
+ }
+
+ std::replace(classpath.begin(), classpath.end(), '/', '@');
+ std::string full_name = profile_file_name_;
+ if (classpath != "") {
+ full_name = StringPrintf("%s-%s", profile_file_name_.c_str(), classpath.c_str());
+ }
+ LOG(DEBUG) << "Saving profile to " << full_name;
+
+ profile_file.reset(OS::CreateEmptyFile(full_name.c_str()));
+ if (profile_file.get() == nullptr) {
+ // Failed to open the profile file, ignore.
+ LOG(INFO) << "Failed to op file";
+ return 0;
+ }
+ std::ostringstream os;
+ uint32_t num_methods = DumpProfile(os);
+ std::string data(os.str());
+ profile_file->WriteFully(data.c_str(), data.length());
+ profile_file->Close();
+ return num_methods;
+}
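+// For example (hypothetical paths): with profile_file_name_ "/data/art-profile"
+// and a classpath whose first entry is "/data/app/com.example.apk", the slashes
+// become '@' and the profile is written to
+// "/data/art-profile-@data@app@com.example.apk".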
+
+// Start a profile thread with the user-supplied arguments.
+void BackgroundMethodSamplingProfiler::Start(int period, int duration,
+ std::string profile_file_name, int interval_us,
+ double backoff_coefficient, bool startImmediately) {
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, *Locks::profiler_lock_);
+ // Don't start two profiler threads.
+ if (profiler_ != nullptr) {
+ return;
+ }
+ }
+
+ LOG(INFO) << "Starting profile with period " << period << "s, duration " << duration <<
+ "s, interval " << interval_us << "us. Profile file " << profile_file_name;
+
+ {
+ MutexLock mu(self, *Locks::profiler_lock_);
+ profiler_ = new BackgroundMethodSamplingProfiler(period, duration, profile_file_name,
+ backoff_coefficient,
+ interval_us, startImmediately);
+
+ CHECK_PTHREAD_CALL(pthread_create, (&profiler_pthread_, nullptr, &RunProfilerThread,
+ reinterpret_cast<void*>(profiler_)),
+ "Profiler thread");
+ }
+}
+
+
+
+void BackgroundMethodSamplingProfiler::Stop() {
+ BackgroundMethodSamplingProfiler* profiler = nullptr;
+ pthread_t profiler_pthread = 0U;
+ {
+ MutexLock trace_mu(Thread::Current(), *Locks::profiler_lock_);
+ profiler = profiler_;
+ shutting_down_ = true;
+ profiler_pthread = profiler_pthread_;
+ }
+
+ // Now wake up the sampler thread if it is sleeping.
+ {
+ MutexLock profile_mu(Thread::Current(), profiler->wait_lock_);
+ profiler->period_condition_.Signal(Thread::Current());
+ }
+ // Wait for the sample thread to stop.
+ CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profiler thread shutdown");
+
+ {
+ MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
+ profiler_ = nullptr;
+ }
+ delete profiler;
+}
+
+
+void BackgroundMethodSamplingProfiler::Shutdown() {
+ Stop();
+}
+
+BackgroundMethodSamplingProfiler::BackgroundMethodSamplingProfiler(int period, int duration,
+ std::string profile_file_name,
+ double backoff_coefficient, int interval_us, bool startImmediately)
+ : profile_file_name_(profile_file_name),
+ period_s_(period), start_immediately_(startImmediately),
+ interval_us_(interval_us), backoff_factor_(1.0),
+ backoff_coefficient_(backoff_coefficient), duration_s_(duration),
+ wait_lock_("Profile wait lock"),
+ period_condition_("Profile condition", wait_lock_),
+ profile_table_(wait_lock_),
+ profiler_barrier_(new Barrier(0)) {
+ // Populate the filtered_methods set.
+ // This is empty right now, but to add a method, do this:
+ //
+ // filtered_methods_.insert("void java.lang.Object.wait(long, int)");
+}
+
+// A method has been hit, record its invocation in the method map.
+// The mutator_lock must be held (shared) when this is called.
+void BackgroundMethodSamplingProfiler::RecordMethod(mirror::ArtMethod* method) {
+ if (method == nullptr) {
+ profile_table_.NullMethod();
+ // Don't record a nullptr method.
+ return;
+ }
+
+ mirror::Class* cls = method->GetDeclaringClass();
+ if (cls != nullptr) {
+ if (cls->GetClassLoader() == nullptr) {
+ // Don't include methods in the boot path.
+ profile_table_.BootMethod();
+ return;
+ }
+ }
+
+ bool is_filtered = false;
+
+ MethodHelper mh(method);
+ if (strcmp(mh.GetName(), "<clinit>") == 0) {
+ // Always filter out class initializers.
+ is_filtered = true;
+ }
+
+ // Filter out methods by name if there are any.
+ if (!is_filtered && filtered_methods_.size() > 0) {
+ std::string method_full_name = PrettyMethod(method);
+
+ // Don't include specific filtered methods.
+ is_filtered = filtered_methods_.count(method_full_name) != 0;
+ }
+
+ // Add to the profile table unless it is filtered out.
+ if (!is_filtered) {
+ profile_table_.Put(method);
+ }
+}
+
+// Clean out any recordings for the method traces.
+void BackgroundMethodSamplingProfiler::CleanProfile() {
+ profile_table_.Clear();
+}
+
+uint32_t BackgroundMethodSamplingProfiler::DumpProfile(std::ostream& os) {
+ return profile_table_.Write(os);
+}
+
+// Profile Table.
+// This holds a mapping of mirror::ArtMethod* to a count of how many times a sample
+// hit it at the top of the stack.
+ProfileSampleResults::ProfileSampleResults(Mutex& lock) : lock_(lock), num_samples_(0),
+ num_null_methods_(0),
+ num_boot_methods_(0) {
+ for (int i = 0; i < kHashSize; i++) {
+ table[i] = nullptr;
+ }
+}
+
+ProfileSampleResults::~ProfileSampleResults() {
+ for (int i = 0; i < kHashSize; i++) {
+ delete table[i];
+ }
+}
+
+// Add a method to the profile table. If it is the first time the method
+// has been seen, add it with count=1, otherwise increment the count.
+void ProfileSampleResults::Put(mirror::ArtMethod* method) {
+ lock_.Lock(Thread::Current());
+ uint32_t index = Hash(method);
+ if (table[index] == nullptr) {
+ table[index] = new Map();
+ }
+ Map::iterator i = table[index]->find(method);
+ if (i == table[index]->end()) {
+ (*table[index])[method] = 1;
+ } else {
+ i->second++;
+ }
+ num_samples_++;
+ lock_.Unlock(Thread::Current());
+}
+
+// Write the profile table to the output stream.
+uint32_t ProfileSampleResults::Write(std::ostream &os) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(DEBUG) << "Profile: " << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_;
+ os << num_samples_ << "/" << num_null_methods_ << "/" << num_boot_methods_ << "\n";
+ uint32_t num_methods = 0;
+ for (int i = 0 ; i < kHashSize; i++) {
+ Map *map = table[i];
+ if (map != nullptr) {
+ for (const auto &meth_iter : *map) {
+ mirror::ArtMethod *method = meth_iter.first;
+ std::string method_name = PrettyMethod(method);
+ uint32_t method_size = method->GetCodeSize();
+ os << StringPrintf("%s/%u/%u\n", method_name.c_str(), meth_iter.second, method_size);
+ ++num_methods;
+ }
+ }
+ }
+ return num_methods;
+}
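+// A hypothetical two-method profile written by the code above would look like:
+//   305/27/410
+//   void com.example.Blur.apply(int[], int)/250/1432
+//   int com.example.Mixer.mix(int, int)/55/96
+// i.e. samples/null/boot on the first line, then name/count/code-size per method.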
+
+void ProfileSampleResults::Clear() {
+ num_samples_ = 0;
+ num_null_methods_ = 0;
+ num_boot_methods_ = 0;
+ for (int i = 0; i < kHashSize; i++) {
+ delete table[i];
+ table[i] = nullptr;
+ }
+}
+
+uint32_t ProfileSampleResults::Hash(mirror::ArtMethod* method) {
+ uint32_t value = reinterpret_cast<uint32_t>(method);
+ value >>= 2;
+ return value % kHashSize;
+}
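+// ArtMethod pointers are at least word-aligned, so the two low bits carry no
+// information and are shifted out; a pointer p therefore lands in chain
+// (p >> 2) % 17, each chain being its own std::map.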
+
+} // namespace art
+
diff --git a/runtime/profiler.h b/runtime/profiler.h
new file mode 100644
index 0000000000..e3af47cf50
--- /dev/null
+++ b/runtime/profiler.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_PROFILER_H_
+#define ART_RUNTIME_PROFILER_H_
+
+#include <ostream>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "globals.h"
+#include "instrumentation.h"
+#include "os.h"
+#include "safe_map.h"
+#include "base/mutex.h"
+#include "locks.h"
+#include "UniquePtr.h"
+#include "barrier.h"
+
+namespace art {
+
+namespace mirror {
+ class ArtMethod;
+ class Class;
+} // namespace mirror
+class Thread;
+
+
+//
+// This class holds all the results for all runs of the profiler. It also
+// counts the number of null methods (where we can't determine the method) and
+// the number of methods in the boot path (where we have already compiled the method).
+//
+// This object is an internal profiler object and uses the same locking as the profiler
+// itself.
+class ProfileSampleResults {
+ public:
+ explicit ProfileSampleResults(Mutex& lock);
+ ~ProfileSampleResults();
+
+ void Put(mirror::ArtMethod* method);
+ uint32_t Write(std::ostream &os);
+ void Clear();
+ uint32_t GetNumSamples() { return num_samples_; }
+ void NullMethod() { ++num_null_methods_; }
+ void BootMethod() { ++num_boot_methods_; }
+ private:
+ uint32_t Hash(mirror::ArtMethod* method);
+ static constexpr int kHashSize = 17;
+ Mutex& lock_; // Reference to the main profiler lock - we don't need two of them.
+ uint32_t num_samples_; // Total number of samples taken.
+ uint32_t num_null_methods_; // Number of samples where we don't know the method.
+ uint32_t num_boot_methods_; // Number of samples in the boot path.
+
+ typedef std::map<mirror::ArtMethod*, uint32_t> Map; // Map of method vs its count.
+ Map *table[kHashSize];
+};
+
+//
+// The BackgroundMethodSamplingProfiler runs in a thread. Most of the time it is sleeping but
+// occasionally wakes up and counts the number of times a method is called. Each time
+// it ticks, it looks at the current method and records it in the ProfileSampleResults
+// table.
+//
+// The timing is controlled by a number of variables:
+// 1. Period: the time between sampling runs.
+// 2. Interval: the time between each sample in a run.
+// 3. Duration: the duration of a run.
+//
+// So the profiler thread is sleeping for the 'period' time. It wakes up and runs for the
+// 'duration'. The run consists of a series of samples, each of which is 'interval' microseconds
+// apart. At the end of a run, it writes the results table to a file and goes back to sleep.
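+// With the defaults set in runtime.cc (period 10s, duration 20s, interval 500us),
+// that is up to 20,000,000us / 500us = 40,000 sampling rounds per run.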
+
+class BackgroundMethodSamplingProfiler {
+ public:
+ static void Start(int period, int duration, std::string profile_filename, int interval_us,
+ double backoff_coefficient, bool startImmediately)
+ LOCKS_EXCLUDED(Locks::mutator_lock_,
+ Locks::thread_list_lock_,
+ Locks::thread_suspend_count_lock_,
+ Locks::profiler_lock_);
+
+ static void Stop() LOCKS_EXCLUDED(Locks::profiler_lock_, wait_lock_);
+ static void Shutdown() LOCKS_EXCLUDED(Locks::profiler_lock_);
+
+ void RecordMethod(mirror::ArtMethod *method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Barrier& GetBarrier() {
+ return *profiler_barrier_;
+ }
+
+ private:
+ explicit BackgroundMethodSamplingProfiler(int period, int duration, std::string profile_filename,
+ double backoff_coefficient, int interval_us, bool startImmediately);
+
+ // The sampling interval in microseconds is passed as an argument.
+ static void* RunProfilerThread(void* arg) LOCKS_EXCLUDED(Locks::profiler_lock_);
+
+ uint32_t WriteProfile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void CleanProfile();
+ uint32_t DumpProfile(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static bool ShuttingDown(Thread* self) LOCKS_EXCLUDED(Locks::profiler_lock_);
+
+ static BackgroundMethodSamplingProfiler* profiler_ GUARDED_BY(Locks::profiler_lock_);
+
+ // We need to shut the sample thread down at exit. Setting this to true will do that.
+ static volatile bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
+
+ // Sampling thread, non-zero when sampling.
+ static pthread_t profiler_pthread_;
+
+ // Some measure of the number of samples that are significant
+ static constexpr uint32_t kSignificantSamples = 10;
+
+ // File to write profile data out to. Cannot be empty if we are profiling.
+ std::string profile_file_name_;
+
+ // Number of seconds between profile runs.
+ uint32_t period_s_;
+
+ // Most of the time we want to delay profiler startup so that all processes do
+ // not start profiling at the same time. This is the default; to override it,
+ // set 'start_immediately_' to true, which is done when the -Xprofile option is
+ // given on the command line.
+ bool start_immediately_;
+
+ uint32_t interval_us_;
+
+ // The current backoff factor applied to the profile period.
+ double backoff_factor_;
+
+ // How much to increase the backoff by on each profile iteration.
+ double backoff_coefficient_;
+
+ // Duration of each profile run. The profile file will be written at the end
+ // of each run.
+ uint32_t duration_s_;
+
+ // Profile condition support.
+ Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ConditionVariable period_condition_ GUARDED_BY(wait_lock_);
+
+ ProfileSampleResults profile_table_;
+
+ UniquePtr<Barrier> profiler_barrier_;
+
+ // Set of methods to be filtered out. This will probably be rare because
+ // most of the methods we want to be filtered reside in the boot path and
+ // are automatically filtered.
+ typedef std::set<std::string> FilteredMethods;
+ FilteredMethods filtered_methods_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundMethodSamplingProfiler);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_PROFILER_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 25623a1fe7..5a28b2d2c6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -59,6 +59,7 @@
#include "thread.h"
#include "thread_list.h"
#include "trace.h"
+#include "profiler.h"
#include "UniquePtr.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"
@@ -331,6 +332,24 @@ size_t ParseIntegerOrDie(const std::string& s) {
return result;
}
+double ParseDoubleOrDie(const std::string& option, const char* prefix,
+ double min, double max, bool ignore_unrecognized,
+ double defval) {
+ std::istringstream iss(option.substr(strlen(prefix)));
+ double value;
+ iss >> value;
+ // Ensure that we have a value, that there was no cruft after it, and that it is in a sensible range.
+ const bool sane_val = iss.eof() && (value >= min) && (value <= max);
+ if (!sane_val) {
+ if (ignore_unrecognized) {
+ return defval;
+ }
+ LOG(FATAL) << "Invalid option '" << option << "'";
+ return defval;
+ }
+ return value;
+}
+
void Runtime::SweepSystemWeaks(RootVisitor* visitor, void* arg) {
GetInternTable()->SweepInternTableWeaks(visitor, arg);
GetMonitorList()->SweepMonitorList(visitor, arg);
@@ -408,6 +427,12 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b
parsed->method_trace_file_ = "/data/method-trace-file.bin";
parsed->method_trace_file_size_ = 10 * MB;
+ parsed->profile_ = false;
+ parsed->profile_period_s_ = 10; // Seconds.
+ parsed->profile_duration_s_ = 20; // Seconds.
+ parsed->profile_interval_us_ = 500; // Microseconds.
+ parsed->profile_backoff_coefficient_ = 2.0;
+
for (size_t i = 0; i < options.size(); ++i) {
const std::string option(options[i].first);
if (true && options[0].first == "-Xzygote") {
@@ -495,19 +520,9 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b
}
parsed->heap_max_free_ = size;
} else if (StartsWith(option, "-XX:HeapTargetUtilization=")) {
- std::istringstream iss(option.substr(strlen("-XX:HeapTargetUtilization=")));
- double value;
- iss >> value;
- // Ensure that we have a value, there was no cruft after it and it satisfies a sensible range.
- const bool sane_val = iss.eof() && (value >= 0.1) && (value <= 0.9);
- if (!sane_val) {
- if (ignore_unrecognized) {
- continue;
- }
- LOG(FATAL) << "Invalid option '" << option << "'";
- return NULL;
- }
- parsed->heap_target_utilization_ = value;
+ parsed->heap_target_utilization_ = ParseDoubleOrDie(option, "-XX:HeapTargetUtilization=",
+ 0.1, 0.9, ignore_unrecognized,
+ parsed->heap_target_utilization_);
} else if (StartsWith(option, "-XX:ParallelGCThreads=")) {
parsed->parallel_gc_threads_ =
ParseMemoryOption(option.substr(strlen("-XX:ParallelGCThreads=")).c_str(), 1024);
@@ -631,6 +646,19 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b
Trace::SetDefaultClockSource(kProfilerClockSourceWall);
} else if (option == "-Xprofile:dualclock") {
Trace::SetDefaultClockSource(kProfilerClockSourceDual);
+ } else if (StartsWith(option, "-Xprofile:")) {
+ parsed->profile_output_filename_ = option.substr(strlen("-Xprofile:"));
+ parsed->profile_ = true;
+ } else if (StartsWith(option, "-Xprofile-period:")) {
+ parsed->profile_period_s_ = ParseIntegerOrDie(option);
+ } else if (StartsWith(option, "-Xprofile-duration:")) {
+ parsed->profile_duration_s_ = ParseIntegerOrDie(option);
+ } else if (StartsWith(option, "-Xprofile-interval:")) {
+ parsed->profile_interval_us_ = ParseIntegerOrDie(option);
+ } else if (StartsWith(option, "-Xprofile-backoff:")) {
+ parsed->profile_backoff_coefficient_ = ParseDoubleOrDie(option, "-Xprofile-backoff:",
+ 1.0, 10.0, ignore_unrecognized,
+ parsed->profile_backoff_coefficient_);
} else if (option == "-compiler-filter:interpret-only") {
parsed->compiler_filter_ = kInterpretOnly;
} else if (option == "-compiler-filter:space") {
@@ -779,6 +807,11 @@ bool Runtime::Start() {
finished_starting_ = true;
+ if (profile_) {
+ // User has asked for a profile using -Xprofile
+ StartProfiler(profile_output_filename_.c_str(), true);
+ }
+
return true;
}
@@ -970,6 +1003,14 @@ bool Runtime::Init(const Options& raw_options, bool ignore_unrecognized) {
method_trace_file_ = options->method_trace_file_;
method_trace_file_size_ = options->method_trace_file_size_;
+ // Extract the profile options.
+ profile_period_s_ = options->profile_period_s_;
+ profile_duration_s_ = options->profile_duration_s_;
+ profile_interval_us_ = options->profile_interval_us_;
+ profile_backoff_coefficient_ = options->profile_backoff_coefficient_;
+ profile_ = options->profile_;
+ profile_output_filename_ = options->profile_output_filename_;
+
if (options->method_trace_) {
Trace::Start(options->method_trace_file_.c_str(), -1, options->method_trace_file_size_, 0,
false, false, 0);
@@ -1401,4 +1442,8 @@ void Runtime::RemoveMethodVerifier(verifier::MethodVerifier* verifier) {
method_verifiers_.erase(it);
}
+void Runtime::StartProfiler(const char *appDir, bool startImmediately) {
+ BackgroundMethodSamplingProfiler::Start(profile_period_s_, profile_duration_s_, appDir, profile_interval_us_,
+ profile_backoff_coefficient_, startImmediately);
+}
} // namespace art
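Putting the new flags together, a profiling run could be requested with a command line of roughly this shape (the values here are illustrative, not defaults):

    dalvikvm -Xprofile:/data/local/tmp/app.prof -Xprofile-period:10 \
        -Xprofile-duration:20 -Xprofile-interval:500 -Xprofile-backoff:2.0 ...

Note that the pre-existing clock-source spellings such as -Xprofile:dualclock are matched earlier in the option chain, so an output filename must not collide with them, and -Xprofile-backoff must parse as a double in [1.0, 10.0].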
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 7b57dda780..50da0dcfab 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -139,6 +139,12 @@ class Runtime {
size_t tiny_method_threshold_;
size_t num_dex_methods_threshold_;
bool sea_ir_mode_;
+ bool profile_;
+ std::string profile_output_filename_;
+ int profile_period_s_;
+ int profile_duration_s_;
+ int profile_interval_us_;
+ double profile_backoff_coefficient_;
private:
ParsedOptions() {}
@@ -455,6 +461,8 @@ class Runtime {
const std::vector<const DexFile*>& GetCompileTimeClassPath(jobject class_loader);
void SetCompileTimeClassPath(jobject class_loader, std::vector<const DexFile*>& class_path);
+ void StartProfiler(const char *appDir, bool startImmediately = false);
+
private:
static void InitPlatformSignalHandlers();
@@ -566,6 +574,14 @@ class Runtime {
bool stats_enabled_;
RuntimeStats stats_;
+ // Runtime profile support.
+ bool profile_;
+ std::string profile_output_filename_;
+ uint32_t profile_period_s_; // Generate profile every n seconds.
+ uint32_t profile_duration_s_; // Run profile for n seconds.
+ uint32_t profile_interval_us_; // Microseconds between samples.
+ double profile_backoff_coefficient_; // Coefficient to exponential backoff.
+
bool method_trace_;
std::string method_trace_file_;
size_t method_trace_file_size_;
diff --git a/runtime/stack.h b/runtime/stack.h
index 3d6b06a32d..590f406bb3 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -160,7 +160,7 @@ class ShadowFrame {
<< ") is in protected space, reference array " << true;
}
// If the vreg reference is not equal to the vreg then the vreg reference is stale.
- if (reinterpret_cast<uint32_t>(ref) != vregs_[i]) {
+ if (UNLIKELY(reinterpret_cast<uint32_t>(ref) != vregs_[i])) {
return nullptr;
}
return ref;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9faa60dbf8..e2d51b7a7e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -73,7 +73,7 @@ namespace art {
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
-ConditionVariable* Thread::resume_cond_ = NULL;
+ConditionVariable* Thread::resume_cond_ = nullptr;
static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -124,7 +124,7 @@ void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
ShadowFrame* sf = deoptimization_shadow_frame_;
- deoptimization_shadow_frame_ = NULL;
+ deoptimization_shadow_frame_ = nullptr;
ret_val->SetJ(deoptimization_return_value_.GetJ());
return sf;
}
@@ -142,14 +142,14 @@ void Thread::InitAfterFork() {
void* Thread::CreateCallback(void* arg) {
Thread* self = reinterpret_cast<Thread*>(arg);
Runtime* runtime = Runtime::Current();
- if (runtime == NULL) {
+ if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
- return NULL;
+ return nullptr;
}
{
// TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
// after self->Init().
- MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
+ MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
// Check that if we got here we cannot be shutting down (as shutdown should never have started
// while threads are being born).
CHECK(!runtime->IsShuttingDownLocked());
@@ -160,10 +160,10 @@ void* Thread::CreateCallback(void* arg) {
ScopedObjectAccess soa(self);
// Copy peer into self, deleting global reference when done.
- CHECK(self->jpeer_ != NULL);
+ CHECK(self->jpeer_ != nullptr);
self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
- self->jpeer_ = NULL;
+ self->jpeer_ = nullptr;
{
SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
@@ -177,14 +177,14 @@ void* Thread::CreateCallback(void* arg) {
mirror::ArtMethod* m =
receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
JValue result;
- ArgArray arg_array(NULL, 0);
+ ArgArray arg_array(nullptr, 0);
arg_array.Append(reinterpret_cast<uint32_t>(receiver));
m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
}
// Detach and delete self.
Runtime::Current()->GetThreadList()->Unregister(self);
- return NULL;
+ return nullptr;
}
Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
@@ -195,7 +195,7 @@ Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
// to stop it from going away.
if (kIsDebugBuild) {
MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
- if (result != NULL && !result->IsSuspended()) {
+ if (result != nullptr && !result->IsSuspended()) {
Locks::thread_list_lock_->AssertHeld(soa.Self());
}
}
@@ -233,7 +233,7 @@ static size_t FixStackSize(size_t stack_size) {
}
void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
- CHECK(java_peer != NULL);
+ CHECK(java_peer != nullptr);
Thread* self = static_cast<JNIEnvExt*>(env)->self;
Runtime* runtime = Runtime::Current();
@@ -279,9 +279,9 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
}
// Manually delete the global reference since Thread::Init will not have been run.
env->DeleteGlobalRef(child_thread->jpeer_);
- child_thread->jpeer_ = NULL;
+ child_thread->jpeer_ = nullptr;
delete child_thread;
- child_thread = NULL;
+ child_thread = nullptr;
// TODO: remove from thread group?
env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
{
@@ -298,7 +298,7 @@ void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
// (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
// we can handshake with the corresponding native thread when it's ready.) Check this native
// thread hasn't been through here already...
- CHECK(Thread::Current() == NULL);
+ CHECK(Thread::Current() == nullptr);
SetUpAlternateSignalStack();
InitCpu();
InitTlsEntryPoints();
@@ -322,15 +322,15 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
bool create_peer) {
Thread* self;
Runtime* runtime = Runtime::Current();
- if (runtime == NULL) {
+ if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
- return NULL;
+ return nullptr;
}
{
- MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
+ MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
- return NULL;
+ return nullptr;
} else {
Runtime::Current()->StartThreadBirth();
self = new Thread(as_daemon);
@@ -350,7 +350,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
self->CreatePeer(thread_name, as_daemon, thread_group);
} else {
// These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
- if (thread_name != NULL) {
+ if (thread_name != nullptr) {
self->name_->assign(thread_name);
::art::SetThreadName(thread_name);
}
@@ -364,7 +364,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
CHECK(runtime->IsStarted());
JNIEnv* env = jni_env_;
- if (thread_group == NULL) {
+ if (thread_group == nullptr) {
thread_group = runtime->GetMainThreadGroup();
}
ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
@@ -372,7 +372,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
jboolean thread_is_daemon = as_daemon;
ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
- if (peer.get() == NULL) {
+ if (peer.get() == nullptr) {
CHECK(IsExceptionPending());
return;
}
@@ -393,7 +393,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
ScopedObjectAccess soa(self);
SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
- if (peer_thread_name.get() == NULL) {
+ if (peer_thread_name.get() == nullptr) {
// The Thread constructor should have set the Thread.name to a
// non-null value. However, because we can run without code
// available (in the compiler, in tests), we manually assign the
@@ -409,7 +409,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
peer_thread_name.reset(GetThreadName(soa));
}
// 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
- if (peer_thread_name.get() != NULL) {
+ if (peer_thread_name.get() != nullptr) {
SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
}
}
@@ -495,7 +495,7 @@ void Thread::Dump(std::ostream& os) const {
mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
- return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL;
+ return (opeer_ != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : nullptr;
}
void Thread::GetThreadName(std::string& name) const {
@@ -570,12 +570,33 @@ void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
}
void Thread::RunCheckpointFunction() {
- CHECK(checkpoint_function_ != NULL);
- ATRACE_BEGIN("Checkpoint function");
- checkpoint_function_->Run(this);
- ATRACE_END();
- checkpoint_function_ = NULL;
- AtomicClearFlag(kCheckpointRequest);
+ Closure *checkpoints[kMaxCheckpoints];
+
+ // Grab the suspend_count lock and copy the current set of
+ // checkpoints. Then clear the list and the flag. The RequestCheckpoint
+ // function will also grab this lock so we prevent a race between setting
+ // the kCheckpointRequest flag and clearing it.
+ {
+ MutexLock mu(this, *Locks::thread_suspend_count_lock_);
+ for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
+ checkpoints[i] = checkpoint_functions_[i];
+ checkpoint_functions_[i] = nullptr;
+ }
+ AtomicClearFlag(kCheckpointRequest);
+ }
+
+ // Outside the lock, run all the checkpoint functions that
+ // we collected.
+ bool found_checkpoint = false;
+ for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
+ if (checkpoints[i] != nullptr) {
+ ATRACE_BEGIN("Checkpoint function");
+ checkpoints[i]->Run(this);
+ ATRACE_END();
+ found_checkpoint = true;
+ }
+ }
+ CHECK(found_checkpoint);
}
bool Thread::RequestCheckpoint(Closure* function) {
@@ -584,23 +605,34 @@ bool Thread::RequestCheckpoint(Closure* function) {
if (old_state_and_flags.as_struct.state != kRunnable) {
return false; // Fail, thread is suspended and so can't run a checkpoint.
}
- if ((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0) {
- return false; // Fail, already a checkpoint pending.
+
+ uint32_t available_checkpoint = kMaxCheckpoints;
+ for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
+ if (checkpoint_functions_[i] == nullptr) {
+ available_checkpoint = i;
+ break;
+ }
}
- CHECK(checkpoint_function_ == nullptr);
- checkpoint_function_ = function;
+ if (available_checkpoint == kMaxCheckpoints) {
+ // No free checkpoint slots; we can't install this checkpoint.
+ return false;
+ }
+ checkpoint_functions_[available_checkpoint] = function;
+
// Checkpoint function installed; now install the flag bit.
// We must be runnable to request a checkpoint.
DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
union StateAndFlags new_state_and_flags;
new_state_and_flags.as_int = old_state_and_flags.as_int;
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
- int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
+ int succeeded = android_atomic_acquire_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
&state_and_flags_.as_int);
if (UNLIKELY(succeeded != 0)) {
// The thread changed state before the checkpoint was installed.
- CHECK(checkpoint_function_ == function);
- checkpoint_function_ = NULL;
+ CHECK_EQ(checkpoint_functions_[available_checkpoint], function);
+ checkpoint_functions_[available_checkpoint] = nullptr;
+ } else {
+ CHECK_EQ(ReadFlag(kCheckpointRequest), true);
}
return succeeded == 0;
}
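With checkpoint functions now held in kMaxCheckpoints slots, several requesters can have checkpoints pending on one thread at once (the destructor CHECKs below imply three slots). A minimal sketch of the requester-side pattern, modeled on the profiler's use of Barrier above; the closure name and per-thread work are illustrative:

    class CountClosure : public Closure {
     public:
      explicit CountClosure(Barrier* barrier) : barrier_(barrier) {}
      virtual void Run(Thread* thread) {
        // ... inspect or sample 'thread' here ...
        barrier_->Pass(Thread::Current());  // Tell the requester this thread ran.
      }
     private:
      Barrier* const barrier_;
    };

    Barrier barrier(0);
    CountClosure closure(&barrier);
    size_t count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
    // RunCheckpoint returns how many threads will run the closure; wait for
    // exactly that many barrier passes (with a timeout, as the profiler does).
    barrier.Increment(Thread::Current(), count, 10000);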
@@ -622,7 +654,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
bool is_daemon = false;
Thread* self = Thread::Current();
- if (self != NULL && thread != NULL && thread->opeer_ != NULL) {
+ if (self != nullptr && thread != nullptr && thread->opeer_ != nullptr) {
ScopedObjectAccessUnchecked soa(self);
priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);
@@ -630,12 +662,12 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
mirror::Object* thread_group =
soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);
- if (thread_group != NULL) {
+ if (thread_group != nullptr) {
mirror::ArtField* group_name_field =
soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
mirror::String* group_name_string =
reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
- group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
+ group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
}
} else {
priority = GetNativePriority();
@@ -646,7 +678,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
scheduler_group_name = "default";
}
- if (thread != NULL) {
+ if (thread != nullptr) {
os << '"' << *thread->name_ << '"';
if (is_daemon) {
os << " daemon";
@@ -664,7 +696,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
<< " (not attached)\n";
}
- if (thread != NULL) {
+ if (thread != nullptr) {
MutexLock mu(self, *Locks::thread_suspend_count_lock_);
os << " | group=\"" << group_name << "\""
<< " sCount=" << thread->suspend_count_
@@ -676,7 +708,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
os << " | sysTid=" << tid
<< " nice=" << getpriority(PRIO_PROCESS, tid)
<< " cgrp=" << scheduler_group_name;
- if (thread != NULL) {
+ if (thread != nullptr) {
int policy;
sched_param sp;
CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
@@ -705,7 +737,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
<< " stm=" << stime
<< " core=" << task_cpu
<< " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
- if (thread != NULL) {
+ if (thread != nullptr) {
os << " | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
<< " stackSize=" << PrettySize(thread->stack_size_) << "\n";
}
@@ -719,7 +751,7 @@ struct StackDumpVisitor : public StackVisitor {
StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
- last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
+ last_method(nullptr), last_line_number(0), repetition_count(0), frame_count(0) {
}
virtual ~StackDumpVisitor() {
@@ -737,12 +769,12 @@ struct StackDumpVisitor : public StackVisitor {
mirror::Class* c = m->GetDeclaringClass();
const mirror::DexCache* dex_cache = c->GetDexCache();
int line_number = -1;
- if (dex_cache != NULL) { // be tolerant of bad input
+ if (dex_cache != nullptr) { // be tolerant of bad input
const DexFile& dex_file = *dex_cache->GetDexFile();
line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
}
if (line_number == last_line_number && last_method == m) {
- repetition_count++;
+ ++repetition_count;
} else {
if (repetition_count >= kMaxRepetition) {
os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
@@ -758,7 +790,7 @@ struct StackDumpVisitor : public StackVisitor {
} else {
mh.ChangeMethod(m);
const char* source_file(mh.GetDeclaringClassSourceFile());
- os << "(" << (source_file != NULL ? source_file : "unavailable")
+ os << "(" << (source_file != nullptr ? source_file : "unavailable")
<< ":" << line_number << ")";
}
os << "\n";
@@ -808,8 +840,8 @@ static bool ShouldShowNativeStack(const Thread* thread)
// We don't just check kNative because native methods will be in state kSuspended if they're
// calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
// thread-startup states if it's early enough in their life cycle (http://b/7432159).
- mirror::ArtMethod* current_method = thread->GetCurrentMethod(NULL);
- return current_method != NULL && current_method->IsNative();
+ mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
+ return current_method != nullptr && current_method->IsNative();
}
void Thread::DumpStack(std::ostream& os) const {
@@ -850,11 +882,11 @@ void Thread::Startup() {
{
// MutexLock to keep annotalysis happy.
//
- // Note we use NULL for the thread because Thread::Current can
+ // Note we use nullptr for the thread because Thread::Current can
// return garbage since (is_started_ == true) and
// Thread::pthread_key_self_ is not yet initialized.
// This was seen on glibc.
- MutexLock mu(NULL, *Locks::thread_suspend_count_lock_);
+ MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
resume_cond_ = new ConditionVariable("Thread resumption condition variable",
*Locks::thread_suspend_count_lock_);
}
@@ -863,8 +895,8 @@ void Thread::Startup() {
CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
// Double-check the TLS slot allocation.
- if (pthread_getspecific(pthread_key_self_) != NULL) {
- LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
+ if (pthread_getspecific(pthread_key_self_) != nullptr) {
+ LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
}
}
@@ -884,50 +916,49 @@ void Thread::Shutdown() {
is_started_ = false;
CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
- if (resume_cond_ != NULL) {
+ if (resume_cond_ != nullptr) {
delete resume_cond_;
- resume_cond_ = NULL;
+ resume_cond_ = nullptr;
}
}
Thread::Thread(bool daemon)
: suspend_count_(0),
- card_table_(NULL),
- exception_(NULL),
- stack_end_(NULL),
+ card_table_(nullptr),
+ exception_(nullptr),
+ stack_end_(nullptr),
managed_stack_(),
- jni_env_(NULL),
- self_(NULL),
- opeer_(NULL),
- jpeer_(NULL),
- stack_begin_(NULL),
+ jni_env_(nullptr),
+ self_(nullptr),
+ opeer_(nullptr),
+ jpeer_(nullptr),
+ stack_begin_(nullptr),
stack_size_(0),
thin_lock_thread_id_(0),
- stack_trace_sample_(NULL),
+ stack_trace_sample_(nullptr),
trace_clock_base_(0),
tid_(0),
wait_mutex_(new Mutex("a thread wait mutex")),
wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
- wait_monitor_(NULL),
+ wait_monitor_(nullptr),
interrupted_(false),
- wait_next_(NULL),
- monitor_enter_object_(NULL),
- top_sirt_(NULL),
- runtime_(NULL),
- class_loader_override_(NULL),
- long_jump_context_(NULL),
+ wait_next_(nullptr),
+ monitor_enter_object_(nullptr),
+ top_sirt_(nullptr),
+ runtime_(nullptr),
+ class_loader_override_(nullptr),
+ long_jump_context_(nullptr),
throwing_OutOfMemoryError_(false),
debug_suspend_count_(0),
debug_invoke_req_(new DebugInvokeReq),
single_step_control_(new SingleStepControl),
- deoptimization_shadow_frame_(NULL),
+ deoptimization_shadow_frame_(nullptr),
instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
name_(new std::string(kThreadNameDuringStartup)),
daemon_(daemon),
pthread_self_(0),
no_thread_suspension_(0),
- last_no_thread_suspension_cause_(NULL),
- checkpoint_function_(0),
+ last_no_thread_suspension_cause_(nullptr),
thread_exit_check_count_(0),
thread_local_start_(nullptr),
thread_local_pos_(nullptr),
@@ -938,22 +969,25 @@ Thread::Thread(bool daemon)
state_and_flags_.as_struct.state = kNative;
memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
memset(rosalloc_runs_, 0, sizeof(rosalloc_runs_));
+ for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
+ checkpoint_functions_[i] = nullptr;
+ }
}
bool Thread::IsStillStarting() const {
// You might think you can check whether the state is kStarting, but for much of thread startup,
// the thread is in kNative; it might also be in kVmWait.
- // You might think you can check whether the peer is NULL, but the peer is actually created and
+ // You might think you can check whether the peer is nullptr, but the peer is actually created and
// assigned fairly early on, and needs to be.
// It turns out that the last thing to change is the thread name; that's a good proxy for "has
// this thread _ever_ entered kRunnable".
- return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
+ return (jpeer_ == nullptr && opeer_ == nullptr) || (*name_ == kThreadNameDuringStartup);
}
void Thread::AssertNoPendingException() const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException(NULL);
+ mirror::Throwable* exception = GetException(nullptr);
LOG(FATAL) << "No pending exception expected: " << exception->Dump();
}
}
@@ -976,7 +1010,7 @@ void Thread::Destroy() {
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
- if (opeer_ != NULL) {
+ if (opeer_ != nullptr) {
ScopedObjectAccess soa(self);
// We may need to call user-supplied managed code, do this before final clean-up.
HandleUncaughtExceptions(soa);
@@ -999,30 +1033,35 @@ void Thread::Destroy() {
}
// On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
- if (jni_env_ != NULL) {
+ if (jni_env_ != nullptr) {
jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
}
}
Thread::~Thread() {
- if (jni_env_ != NULL && jpeer_ != NULL) {
+ if (jni_env_ != nullptr && jpeer_ != nullptr) {
// If pthread_create fails we don't have a jni env here.
jni_env_->DeleteGlobalRef(jpeer_);
- jpeer_ = NULL;
+ jpeer_ = nullptr;
}
- opeer_ = NULL;
+ opeer_ = nullptr;
delete jni_env_;
- jni_env_ = NULL;
+ jni_env_ = nullptr;
CHECK_NE(GetState(), kRunnable);
+ CHECK_NE(ReadFlag(kCheckpointRequest), true);
+ CHECK(checkpoint_functions_[0] == nullptr);
+ CHECK(checkpoint_functions_[1] == nullptr);
+ CHECK(checkpoint_functions_[2] == nullptr);
+
// We may be deleting a still born thread.
SetStateUnsafe(kTerminated);
delete wait_cond_;
delete wait_mutex_;
- if (long_jump_context_ != NULL) {
+ if (long_jump_context_ != nullptr) {
delete long_jump_context_;
}
@@ -1052,7 +1091,7 @@ void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
ScopedLocalRef<jobject> handler(jni_env_,
jni_env_->GetObjectField(peer.get(),
WellKnownClasses::java_lang_Thread_uncaughtHandler));
- if (handler.get() == NULL) {
+ if (handler.get() == nullptr) {
// Otherwise use the thread group's default handler.
handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
}
@@ -1070,7 +1109,7 @@ void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
// this.group.removeThread(this);
// group can be null if we're in the compiler or a test.
mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
- if (ogroup != NULL) {
+ if (ogroup != nullptr) {
ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
ScopedThreadStateChange tsc(soa.Self(), kNative);
@@ -1101,7 +1140,7 @@ bool Thread::SirtContains(jobject obj) const {
void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
size_t num_refs = cur->NumberOfReferences();
- for (size_t j = 0; j < num_refs; j++) {
+ for (size_t j = 0; j < num_refs; ++j) {
mirror::Object* object = cur->GetReference(j);
if (object != nullptr) {
const mirror::Object* new_obj = visitor(object, arg);
@@ -1116,8 +1155,8 @@ void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
mirror::Object* Thread::DecodeJObject(jobject obj) const {
Locks::mutator_lock_->AssertSharedHeld(this);
- if (obj == NULL) {
- return NULL;
+ if (obj == nullptr) {
+ return nullptr;
}
IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
IndirectRefKind kind = GetIndirectRefKind(ref);
@@ -1146,13 +1185,13 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
DCHECK_EQ(kind, kWeakGlobal);
result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (result == kClearedJniWeakGlobal) {
- // This is a special case where it's okay to return NULL.
+ // This is a special case where it's okay to return nullptr.
return nullptr;
}
}
- if (UNLIKELY(result == NULL)) {
- JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
+ if (UNLIKELY(result == nullptr)) {
+ JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
} else {
if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
Runtime::Current()->GetHeap()->VerifyObject(result);
@@ -1192,7 +1231,7 @@ void Thread::Notify() {
}
void Thread::NotifyLocked(Thread* self) {
- if (wait_monitor_ != NULL) {
+ if (wait_monitor_ != nullptr) {
wait_cond_->Signal(self);
}
}
@@ -1201,7 +1240,7 @@ class CountStackDepthVisitor : public StackVisitor {
public:
explicit CountStackDepthVisitor(Thread* thread)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, NULL),
+ : StackVisitor(thread, nullptr),
depth_(0), skip_depth_(0), skipping_(true) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1240,8 +1279,8 @@ class CountStackDepthVisitor : public StackVisitor {
class BuildInternalStackTraceVisitor : public StackVisitor {
public:
explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
- : StackVisitor(thread, NULL), self_(self),
- skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}
+ : StackVisitor(thread, nullptr), self_(self),
+ skip_depth_(skip_depth), count_(0), dex_pc_trace_(nullptr), method_trace_(nullptr) {}
bool Init(int depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1250,11 +1289,11 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
method_trace(self_,
Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
depth + 1));
- if (method_trace.get() == NULL) {
+ if (method_trace.get() == nullptr) {
return false;
}
mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
- if (dex_pc_trace == NULL) {
+ if (dex_pc_trace == nullptr) {
return false;
}
// Save PC trace in last element of method trace, also places it into the
@@ -1263,20 +1302,20 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
// Set the Object*s and assert that no thread suspension is now possible.
const char* last_no_suspend_cause =
self_->StartAssertNoThreadSuspension("Building internal stack trace");
- CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
+ CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
method_trace_ = method_trace.get();
dex_pc_trace_ = dex_pc_trace;
return true;
}
virtual ~BuildInternalStackTraceVisitor() {
- if (method_trace_ != NULL) {
- self_->EndAssertNoThreadSuspension(NULL);
+ if (method_trace_ != nullptr) {
+ self_->EndAssertNoThreadSuspension(nullptr);
}
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
+ if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) {
return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
}
if (skip_depth_ > 0) {
@@ -1320,13 +1359,13 @@ jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa)
BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
skip_depth);
if (!build_trace_visitor.Init(depth)) {
- return NULL; // Allocation failed.
+ return nullptr; // Allocation failed.
}
build_trace_visitor.WalkStack();
mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
if (kIsDebugBuild) {
for (int32_t i = 0; i < trace->GetLength(); ++i) {
- CHECK(trace->Get(i) != NULL);
+ CHECK(trace->Get(i) != nullptr);
}
}
return soa.AddLocalReference<jobjectArray>(trace);
@@ -1343,7 +1382,7 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
jobjectArray result;
- if (output_array != NULL) {
+ if (output_array != nullptr) {
// Reuse the array we were given.
result = output_array;
// ...adjusting the number of frames we'll write to not exceed the array length.
@@ -1354,13 +1393,13 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
// Create java_trace array and place in local reference table
mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
class_linker->AllocStackTraceElementArray(soa.Self(), depth);
- if (java_traces == NULL) {
- return NULL;
+ if (java_traces == nullptr) {
+ return nullptr;
}
result = soa.AddLocalReference<jobjectArray>(java_traces);
}
- if (stack_depth != NULL) {
+ if (stack_depth != nullptr) {
*stack_depth = depth;
}
@@ -1397,17 +1436,17 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, job
}
}
const char* method_name = mh.GetName();
- CHECK(method_name != NULL);
+ CHECK(method_name != nullptr);
SirtRef<mirror::String> method_name_object(soa.Self(),
mirror::String::AllocFromModifiedUtf8(soa.Self(),
method_name));
- if (method_name_object.get() == NULL) {
- return NULL;
+ if (method_name_object.get() == nullptr) {
+ return nullptr;
}
mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
- if (obj == NULL) {
- return NULL;
+ if (obj == nullptr) {
+ return nullptr;
}
soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set(i, obj);
}
@@ -1445,7 +1484,7 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
// Ignore the cause throw location. TODO: should we report this as a re-throw?
- SirtRef<mirror::Throwable> cause(this, GetException(NULL));
+ SirtRef<mirror::Throwable> cause(this, GetException(nullptr));
ClearException();
Runtime* runtime = Runtime::Current();
@@ -1457,7 +1496,7 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
SirtRef<mirror::Class>
exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor,
class_loader));
- if (UNLIKELY(exception_class.get() == NULL)) {
+ if (UNLIKELY(exception_class.get() == nullptr)) {
CHECK(IsExceptionPending());
LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
return;
@@ -1481,21 +1520,21 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
// Choose an appropriate constructor and set up the arguments.
const char* signature;
- SirtRef<mirror::String> msg_string(this, NULL);
- if (msg != NULL) {
+ SirtRef<mirror::String> msg_string(this, nullptr);
+ if (msg != nullptr) {
// Ensure we remember this and the method over the String allocation.
msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
- if (UNLIKELY(msg_string.get() == NULL)) {
+ if (UNLIKELY(msg_string.get() == nullptr)) {
CHECK(IsExceptionPending()); // OOME.
return;
}
- if (cause.get() == NULL) {
+ if (cause.get() == nullptr) {
signature = "(Ljava/lang/String;)V";
} else {
signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
}
} else {
- if (cause.get() == NULL) {
+ if (cause.get() == nullptr) {
signature = "()V";
} else {
signature = "(Ljava/lang/Throwable;)V";
@@ -1504,17 +1543,17 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
mirror::ArtMethod* exception_init_method =
exception_class->FindDeclaredDirectMethod("<init>", signature);
- CHECK(exception_init_method != NULL) << "No <init>" << signature << " in "
+ CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
<< PrettyDescriptor(exception_class_descriptor);
if (UNLIKELY(!runtime->IsStarted())) {
// Something is trying to throw an exception without a started runtime, which is the common
// case in the compiler. We won't be able to invoke the constructor of the exception, so set
// the exception fields directly.
- if (msg != NULL) {
+ if (msg != nullptr) {
exception->SetDetailMessage(msg_string.get());
}
- if (cause.get() != NULL) {
+ if (cause.get() != nullptr) {
exception->SetCause(cause.get());
}
ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
@@ -1523,10 +1562,10 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
} else {
ArgArray args("VLL", 3);
args.Append(reinterpret_cast<uint32_t>(exception.get()));
- if (msg != NULL) {
+ if (msg != nullptr) {
args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
}
- if (cause.get() != NULL) {
+ if (cause.get() != nullptr) {
args.Append(reinterpret_cast<uint32_t>(cause.get()));
}
JValue result;
@@ -1709,12 +1748,12 @@ class CatchBlockStackVisitor : public StackVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, self->GetLongJumpContext()),
self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
- to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
- handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
+ to_find_(is_deoptimization ? nullptr : exception->GetClass()), throw_location_(throw_location),
+ handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_dex_pc_(0),
native_method_count_(0), clear_exception_(false),
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
- instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
+ instrumentation_frames_to_pop_(0), top_shadow_frame_(nullptr), prev_shadow_frame_(nullptr) {
// Exception not in root sets, can't allow GC.
last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
}
@@ -1725,7 +1764,7 @@ class CatchBlockStackVisitor : public StackVisitor {
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = GetMethod();
- if (method == NULL) {
+ if (method == nullptr) {
      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
handler_quick_frame_pc_ = GetCurrentQuickFramePc();
handler_quick_frame_ = GetCurrentQuickFrame();
@@ -1734,7 +1773,7 @@ class CatchBlockStackVisitor : public StackVisitor {
if (UNLIKELY(method_tracing_active_ &&
GetQuickInstrumentationExitPc() == GetReturnPc())) {
// Keep count of the number of unwinds during instrumentation.
- instrumentation_frames_to_pop_++;
+ ++instrumentation_frames_to_pop_;
}
if (method->IsRuntimeMethod()) {
// Ignore callee save method.
@@ -1751,7 +1790,7 @@ class CatchBlockStackVisitor : public StackVisitor {
bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t dex_pc = DexFile::kDexNoIndex;
if (method->IsNative()) {
- native_method_count_++;
+ ++native_method_count_;
} else {
dex_pc = GetDexPc();
}
@@ -1771,12 +1810,12 @@ class CatchBlockStackVisitor : public StackVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
MethodHelper mh(m);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
- CHECK(code_item != NULL);
+ CHECK(code_item != nullptr);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
+ ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
SirtRef<mirror::DexCache> dex_cache(self_, mh.GetDexCache());
SirtRef<mirror::ClassLoader> class_loader(self_, mh.GetClassLoader());
verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
@@ -1784,7 +1823,7 @@ class CatchBlockStackVisitor : public StackVisitor {
m->GetAccessFlags(), false, true);
verifier.Verify();
std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
- for (uint16_t reg = 0; reg < num_regs; reg++) {
+ for (uint16_t reg = 0; reg < num_regs; ++reg) {
VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
switch (kind) {
case kUndefined:
@@ -1802,7 +1841,7 @@ class CatchBlockStackVisitor : public StackVisitor {
break;
}
}
- if (prev_shadow_frame_ != NULL) {
+ if (prev_shadow_frame_ != nullptr) {
prev_shadow_frame_->SetLink(new_frame);
} else {
top_shadow_frame_ = new_frame;
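
The reg * 2 stride above suggests DescribeVRegs returns a (kind, payload)
pair per vreg; only the kind half is read in this hunk, so the payload index
below is an assumption:

    // Presumed pair layout behind kinds.at(reg * 2); not shown in this diff.
    VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));  // what the vreg holds
    int32_t value = kinds.at(reg * 2 + 1);                     // e.g. a constant's payload
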
@@ -1813,7 +1852,7 @@ class CatchBlockStackVisitor : public StackVisitor {
void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* catch_method = *handler_quick_frame_;
- if (catch_method == NULL) {
+ if (catch_method == nullptr) {
if (kDebugExceptionDelivery) {
LOG(INFO) << "Handler is upcall";
}
@@ -1893,7 +1932,7 @@ void Thread::QuickDeliverException() {
// Get exception from thread.
ThrowLocation throw_location;
mirror::Throwable* exception = GetException(&throw_location);
- CHECK(exception != NULL);
+ CHECK(exception != nullptr);
// Don't leave exception visible while we try to find the handler, which may cause class
// resolution.
ClearException();
@@ -1901,7 +1940,7 @@ void Thread::QuickDeliverException() {
if (kDebugExceptionDelivery) {
if (!is_deoptimization) {
mirror::String* msg = exception->GetDetailMessage();
- std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
+ std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
<< ": " << str_msg << "\n");
} else {
@@ -1916,10 +1955,10 @@ void Thread::QuickDeliverException() {
Context* Thread::GetLongJumpContext() {
Context* result = long_jump_context_;
- if (result == NULL) {
+ if (result == nullptr) {
result = Context::Create();
} else {
- long_jump_context_ = NULL; // Avoid context being shared.
+ long_jump_context_ = nullptr; // Avoid context being shared.
result->Reset();
}
return result;
@@ -1928,14 +1967,14 @@ Context* Thread::GetLongJumpContext() {
struct CurrentMethodVisitor : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
+ : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0) {}
virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
return true;
}
- if (context_ != NULL) {
+ if (context_ != nullptr) {
this_object_ = GetThisObject();
}
method_ = m;
@@ -1948,9 +1987,9 @@ struct CurrentMethodVisitor : public StackVisitor {
};
mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr);
visitor.WalkStack(false);
- if (dex_pc != NULL) {
+ if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
}
return visitor.method_;
@@ -1965,7 +2004,7 @@ ThrowLocation Thread::GetCurrentLocationForThrow() {
}
bool Thread::HoldsLock(mirror::Object* object) {
- if (object == NULL) {
+ if (object == nullptr) {
return false;
}
return object->GetLockOwnerThreadId() == thin_lock_thread_id_;
@@ -1985,7 +2024,7 @@ class ReferenceMapVisitor : public StackVisitor {
<< StringPrintf("@ PC:%04x", GetDexPc());
}
ShadowFrame* shadow_frame = GetCurrentShadowFrame();
- if (shadow_frame != NULL) {
+ if (shadow_frame != nullptr) {
mirror::ArtMethod* m = shadow_frame->GetMethod();
size_t num_regs = shadow_frame->NumberOfVRegs();
if (m->IsNative() || shadow_frame->HasReferenceArray()) {
@@ -2007,7 +2046,7 @@ class ReferenceMapVisitor : public StackVisitor {
verifier::DexPcToReferenceMap dex_gc_map(gc_map);
uint32_t dex_pc = GetDexPc();
const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
- DCHECK(reg_bitmap != NULL);
+ DCHECK(reg_bitmap != nullptr);
num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
for (size_t reg = 0; reg < num_regs; ++reg) {
if (TestBitmap(reg, reg_bitmap)) {
@@ -2026,23 +2065,23 @@ class ReferenceMapVisitor : public StackVisitor {
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
const uint8_t* native_gc_map = m->GetNativeGcMap();
- CHECK(native_gc_map != NULL) << PrettyMethod(m);
+ CHECK(native_gc_map != nullptr) << PrettyMethod(m);
mh_.ChangeMethod(m);
const DexFile::CodeItem* code_item = mh_.GetCodeItem();
- DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be nullptr or how would we compile its instructions?
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = std::min(map.RegWidth() * 8,
static_cast<size_t>(code_item->registers_size_));
if (num_regs > 0) {
const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
- DCHECK(reg_bitmap != NULL);
+ DCHECK(reg_bitmap != nullptr);
const VmapTable vmap_table(m->GetVmapTable());
uint32_t core_spills = m->GetCoreSpillMask();
uint32_t fp_spills = m->GetFpSpillMask();
size_t frame_size = m->GetFrameSizeInBytes();
// For all dex registers in the bitmap
mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
- DCHECK(cur_quick_frame != NULL);
+ DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
// Does this register hold a reference?
if (TestBitmap(reg, reg_bitmap)) {
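
TestBitmap itself is not shown in this hunk; presumably it is a plain bit
probe over the per-dex-register bitmap, along these lines:

    // Presumed shape of TestBitmap: one bit per dex register, LSB-first within each byte.
    static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
      return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
    }
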
diff --git a/runtime/thread.h b/runtime/thread.h
index b01ec945de..30c7e8ff86 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -133,9 +133,19 @@ class PACKED(4) Thread {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ThreadState GetState() const {
+ DCHECK(state_and_flags_.as_struct.state >= kTerminated && state_and_flags_.as_struct.state <= kSuspended);
return static_cast<ThreadState>(state_and_flags_.as_struct.state);
}
+  // Debug check that the thread's state word holds a valid ThreadState; 'id' identifies the call site in the log.
+ void CheckState(int id) const {
+ if (state_and_flags_.as_struct.state >= kTerminated && state_and_flags_.as_struct.state <= kSuspended) {
+ return;
+ }
+ LOG(INFO) << "Thread " << this << " state is invalid: " << state_and_flags_.as_struct.state << " id=" << id;
+ CHECK(false);
+ }
+
ThreadState SetState(ThreadState new_state);
int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
@@ -780,9 +790,12 @@ class PACKED(4) Thread {
// Cause for last suspension.
const char* last_no_thread_suspension_cause_;
+ // Maximum number of checkpoint functions.
+ static constexpr uint32_t kMaxCheckpoints = 3;
+
  // Pending checkpoint function or NULL if non-pending. Installation guarded by
// Locks::thread_suspend_count_lock_.
- Closure* checkpoint_function_;
+ Closure* checkpoint_functions_[kMaxCheckpoints];
public:
// Entrypoint function pointers
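
With checkpoint_function_ widened to checkpoint_functions_, a requester
presumably claims the first free slot while holding
Locks::thread_suspend_count_lock_. A minimal sketch of that claim; the
function name is hypothetical, and only checkpoint_functions_ and
kMaxCheckpoints come from this change:

    // Sketch only: assumes Locks::thread_suspend_count_lock_ is held, per the
    // comment on checkpoint_functions_ above.
    bool ClaimCheckpointSlot(Closure* checkpoint_functions[], Closure* function) {
      for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
        if (checkpoint_functions[i] == nullptr) {
          checkpoint_functions[i] = function;  // First free slot wins.
          return true;  // Caller would then raise the thread's checkpoint-request flag.
        }
      }
      return false;  // All kMaxCheckpoints slots busy; the requester must retry.
    }
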
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index aed8c7788e..8bf099bb2c 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -211,7 +211,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
if (thread != self) {
while (true) {
if (thread->RequestCheckpoint(checkpoint_function)) {
- // This thread will run it's checkpoint some time in the near future.
+ // This thread will run its checkpoint some time in the near future.
count++;
break;
} else {
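
For reference, whatever is passed to RunCheckpoint is just a Closure. A
hypothetical example; the class name and body are illustrative, not part of
this change:

    // Hypothetical checkpoint closure: RunCheckpoint arranges for Run() to be
    // executed once for each target thread.
    class LogThreadCheckpoint : public Closure {
     public:
      virtual void Run(Thread* thread) {
        LOG(INFO) << "checkpoint ran for thread " << thread;
      }
    };
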
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 4d4bfb751e..7615c41425 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -21,7 +21,7 @@ namespace art {
enum ThreadState {
// Thread.State JDWP state
- kTerminated, // TERMINATED TS_ZOMBIE Thread.run has returned, but Thread* still around
+ kTerminated = 66, // TERMINATED TS_ZOMBIE Thread.run has returned, but Thread* still around
kRunnable, // RUNNABLE TS_RUNNING runnable
kTimedWaiting, // TIMED_WAITING TS_WAIT in Object.wait() with a timeout
kSleeping, // TIMED_WAITING TS_SLEEPING in Thread.sleep()
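
Starting the enum at 66 gives valid states a distinctive range: a
zero-initialized or stomped state word no longer aliases kTerminated, so the
range check newly added to Thread::GetState() can catch it. Illustratively,
assuming kSuspended stays the final enumerator, as that check implies:

    // Validity predicate implied by kTerminated = 66 and the GetState() DCHECK.
    bool IsValidThreadState(int raw_state) {
      return raw_state >= kTerminated && raw_state <= kSuspended;  // 66 .. 66+N
    }
    // raw_state == 0, the old kTerminated value, now fails the check.
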
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f03cdcdcd4..9a2de47296 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1432,6 +1432,9 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
UniquePtr<RegisterLine> branch_line;
UniquePtr<RegisterLine> fallthrough_line;
+  // We need precise constant types only for deoptimization, which happens at runtime.
+ const bool need_precise_constant = !Runtime::Current()->IsCompiler();
+
switch (inst->Opcode()) {
case Instruction::NOP:
/*
@@ -1582,22 +1585,28 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* could be boolean, int, float, or a null reference */
case Instruction::CONST_4: {
int32_t val = static_cast<int32_t>(inst->VRegB_11n() << 28) >> 28;
- work_line_->SetRegisterType(inst->VRegA_11n(), reg_types_.FromCat1Const(val, true));
+ work_line_->SetRegisterType(inst->VRegA_11n(),
+ DetermineCat1Constant(val, need_precise_constant));
break;
}
case Instruction::CONST_16: {
int16_t val = static_cast<int16_t>(inst->VRegB_21s());
- work_line_->SetRegisterType(inst->VRegA_21s(), reg_types_.FromCat1Const(val, true));
+ work_line_->SetRegisterType(inst->VRegA_21s(),
+ DetermineCat1Constant(val, need_precise_constant));
break;
}
- case Instruction::CONST:
+ case Instruction::CONST: {
+ int32_t val = inst->VRegB_31i();
work_line_->SetRegisterType(inst->VRegA_31i(),
- reg_types_.FromCat1Const(inst->VRegB_31i(), true));
+ DetermineCat1Constant(val, need_precise_constant));
break;
- case Instruction::CONST_HIGH16:
+ }
+ case Instruction::CONST_HIGH16: {
+ int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
work_line_->SetRegisterType(inst->VRegA_21h(),
- reg_types_.FromCat1Const(inst->VRegB_21h() << 16, true));
+ DetermineCat1Constant(val, need_precise_constant));
break;
+ }
/* could be long or double; resolved upon use */
case Instruction::CONST_WIDE_16: {
int64_t val = static_cast<int16_t>(inst->VRegB_21s());
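
A worked example of the constant decodings above (values are illustrative):

    // CONST_4: the shift pair sign-extends the 4-bit nibble, e.g. B = 0xF:
    int32_t c4 = static_cast<int32_t>(0xF << 28) >> 28;    // -1
    // CONST_16 / CONST_WIDE_16: the int16_t cast sign-extends, e.g. B = 0xFFFF:
    int32_t c16 = static_cast<int16_t>(0xFFFF);            // -1
    // CONST_HIGH16: the literal supplies the upper half, e.g. B = 0x41F0:
    int32_t hi = static_cast<int32_t>(0x41F0 << 16);       // 0x41F00000, the bits of 30.0f
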
@@ -3928,6 +3937,34 @@ std::vector<int32_t> MethodVerifier::DescribeVRegs(uint32_t dex_pc) {
return result;
}
+const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+ if (precise) {
+ // Precise constant type.
+ return reg_types_.FromCat1Const(value, true);
+ } else {
+ // Imprecise constant type.
+ if (value < -32768) {
+ return reg_types_.IntConstant();
+ } else if (value < -128) {
+ return reg_types_.ShortConstant();
+ } else if (value < 0) {
+ return reg_types_.ByteConstant();
+ } else if (value == 0) {
+ return reg_types_.Zero();
+ } else if (value == 1) {
+ return reg_types_.One();
+ } else if (value < 128) {
+ return reg_types_.PosByteConstant();
+ } else if (value < 32768) {
+ return reg_types_.PosShortConstant();
+ } else if (value < 65536) {
+ return reg_types_.CharConstant();
+ } else {
+ return reg_types_.IntConstant();
+ }
+ }
+}
+
void MethodVerifier::Init() {
art::verifier::RegTypeCache::Init();
}
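
For the imprecise case, DetermineCat1Constant picks the narrowest cached
constant type whose range still covers the value; per the branches above:

    // DetermineCat1Constant(value, /*precise=*/ false) yields:
    //   -40000 -> IntConstant()        -300 -> ShortConstant()    -5 -> ByteConstant()
    //        0 -> Zero()                  1 -> One()              42 -> PosByteConstant()
    //     1000 -> PosShortConstant()  50000 -> CharConstant() 100000 -> IntConstant()
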
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index ac36a7ed21..053cee55ca 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -610,6 +610,8 @@ class MethodVerifier {
InstructionFlags* CurrentInsnFlags();
+ const RegType& DetermineCat1Constant(int32_t value, bool precise)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
RegTypeCache reg_types_;
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 3d24414493..c8a03d6965 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -438,6 +438,13 @@ const ImpreciseConstType& RegTypeCache::ByteConstant() {
return *down_cast<const ImpreciseConstType*>(&result);
}
+const ImpreciseConstType& RegTypeCache::CharConstant() {
+ int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
+ const ConstantType& result = FromCat1Const(jchar_max, false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
+}
+
const ImpreciseConstType& RegTypeCache::ShortConstant() {
const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
DCHECK(result.IsImpreciseConstant());
@@ -450,6 +457,18 @@ const ImpreciseConstType& RegTypeCache::IntConstant() {
return *down_cast<const ImpreciseConstType*>(&result);
}
+const ImpreciseConstType& RegTypeCache::PosByteConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
+}
+
+const ImpreciseConstType& RegTypeCache::PosShortConstant() {
+ const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+ DCHECK(result.IsImpreciseConstant());
+ return *down_cast<const ImpreciseConstType*>(&result);
+}
+
const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index a811696125..41bc8c9c36 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -77,9 +77,12 @@ class RegTypeCache {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Throwable;", precise);
}
- const RegType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
+ const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FromCat1Const(1, true);
+ }
size_t GetCacheSize() {
return entries_.size();
}
@@ -133,8 +136,11 @@ class RegTypeCache {
const RegType& FromUninitialized(const RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/test/303-verification-stress/build b/test/303-verification-stress/build
new file mode 100644
index 0000000000..2ef9beafd1
--- /dev/null
+++ b/test/303-verification-stress/build
@@ -0,0 +1,28 @@
+#!/bin/bash
+#
+# Copyright (C) 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Write out a bunch of source files.
+gcc -o classes-gen classes-gen.c
+./classes-gen
+
+mkdir classes
+${JAVAC} -d classes src/*.java
+
+${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/303-verification-stress/classes-gen.c b/test/303-verification-stress/classes-gen.c
new file mode 100644
index 0000000000..be6cfa733d
--- /dev/null
+++ b/test/303-verification-stress/classes-gen.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Generate a big pile of classes with big <clinit>.
+ */
+#include <stdio.h>
+
+/*
+ * Create N files.
+ */
+static int createFiles(int count, int array_size)
+{
+ FILE* fp;
+ int i;
+ int k;
+
+ for (i = 0; i < count; i++) {
+ char nameBuf[32];
+
+ snprintf(nameBuf, sizeof(nameBuf), "src/Test%03d.java", i);
+ fp = fopen(nameBuf, "w");
+ if (fp == NULL) {
+ fprintf(stderr, "ERROR: unable to open %s\n", nameBuf);
+ return -1;
+ }
+
+ fprintf(fp, "public class Test%03d {\n", i);
+ fprintf(fp, " static String[] array = new String[%d];\n", array_size);
+ fprintf(fp, " static {\n", array_size);
+ for (k = 0; k < array_size; k++) {
+ fprintf(fp, " array[%d] = \"string_%04d\";\n", k, k);
+ }
+ fprintf(fp, " }\n", array_size);
+ fprintf(fp, "}\n");
+ fclose(fp);
+ }
+
+ // Create test class.
+ fp = fopen("src/MainTest.java", "w");
+ if (fp == NULL) {
+ fprintf(stderr, "ERROR: unable to open src/MainTest.java\n");
+ return -1;
+ }
+ fprintf(fp, "public class MainTest {\n");
+ fprintf(fp, " public static void run() {\n");
+ for (i = 0; i < count; i++) {
+ fprintf(fp, " System.out.println(\"Create new Test%03d\");\n", i);
+ fprintf(fp, " new Test%03d();\n", i);
+ }
+ fprintf(fp, " }\n");
+ fprintf(fp, "}\n");
+ fclose(fp);
+
+ return 0;
+}
+
+int main()
+{
+ int result;
+
+ result = createFiles(40, 2000);
+
+ return (result != 0);
+}
diff --git a/test/303-verification-stress/expected.txt b/test/303-verification-stress/expected.txt
new file mode 100644
index 0000000000..4fa1b5733d
--- /dev/null
+++ b/test/303-verification-stress/expected.txt
@@ -0,0 +1,42 @@
+Starting test
+Create new Test000
+Create new Test001
+Create new Test002
+Create new Test003
+Create new Test004
+Create new Test005
+Create new Test006
+Create new Test007
+Create new Test008
+Create new Test009
+Create new Test010
+Create new Test011
+Create new Test012
+Create new Test013
+Create new Test014
+Create new Test015
+Create new Test016
+Create new Test017
+Create new Test018
+Create new Test019
+Create new Test020
+Create new Test021
+Create new Test022
+Create new Test023
+Create new Test024
+Create new Test025
+Create new Test026
+Create new Test027
+Create new Test028
+Create new Test029
+Create new Test030
+Create new Test031
+Create new Test032
+Create new Test033
+Create new Test034
+Create new Test035
+Create new Test036
+Create new Test037
+Create new Test038
+Create new Test039
+Done
diff --git a/test/303-verification-stress/info.txt b/test/303-verification-stress/info.txt
new file mode 100644
index 0000000000..131682cbc8
--- /dev/null
+++ b/test/303-verification-stress/info.txt
@@ -0,0 +1,7 @@
+This is more a benchmark for the verifier than a real test. We create many
+classes, each one initializing a big array of strings in its class initializer.
+This generates big <clinit> methods in these classes. The goal is to stress the
+verifier on such methods.
+
+Note: these classes are generated automatically. The number of classes and the
+size of the string array can be modified in the script.
diff --git a/test/303-verification-stress/src/Main.java b/test/303-verification-stress/src/Main.java
new file mode 100644
index 0000000000..d906baec49
--- /dev/null
+++ b/test/303-verification-stress/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String args[]) {
+ System.out.println("Starting test");
+
+ // MainTest class is generated automatically.
+ MainTest.run();
+
+ System.out.println("Done");
+ }
+}