132 files changed, 694 insertions, 686 deletions
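The change is mechanical throughout: argument comments of the form /* param */ value (and trailing comments such as `true /* read */`) are rewritten as /* param= */ value placed before the argument. The trailing '=' is the spelling that tooling such as clang-tidy's bugprone-argument-comment check matches against the parameter name in the callee's declaration, so stale or mismatched comments can be flagged automatically. A minimal sketch of the before/after shape, reusing UpdateReadBarrierEntrypoints from the hunks below (the declaration shown here is assumed for illustration, not copied from this change):

    // Assumed declaration, for illustration only:
    //   void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

    UpdateReadBarrierEntrypoints(qpoints, /* is_active */ false);   // old style: comment is free text
    UpdateReadBarrierEntrypoints(qpoints, /* is_active= */ false);  // new style: checkable against the parameter name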
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index 2c5465e120..c1a03abd96 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -174,7 +174,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; - UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false); + UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false); qpoints->pReadBarrierMarkReg12 = nullptr; // Cannot use register 12 (IP) to pass arguments. qpoints->pReadBarrierMarkReg13 = nullptr; // Cannot use register 13 (SP) to pass arguments. qpoints->pReadBarrierMarkReg14 = nullptr; // Cannot use register 14 (LR) to pass arguments. diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index 4c43b7ed3d..e681d63919 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -190,7 +190,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMarkReg16 = nullptr; // IP0 is used as a temp by the asm stub. - UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false); + UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false); qpoints->pReadBarrierSlow = artReadBarrierSlow; qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow; } diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index 05172dbe43..cbf5681d64 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -184,7 +184,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; // Alloc - ResetQuickAllocEntryPoints(qpoints, /*is_active*/ false); + ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false); // Cast qpoints->pInstanceofNonTrivial = artInstanceOfFromCode; @@ -445,7 +445,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct."); - UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false); + UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false); // Cannot use the following registers to pass arguments: // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA). // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8). diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc index 2acfe147f8..741d41a263 100644 --- a/runtime/arch/mips64/entrypoints_init_mips64.cc +++ b/runtime/arch/mips64/entrypoints_init_mips64.cc @@ -191,7 +191,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; - UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false); + UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false); // Cannot use the following registers to pass arguments: // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA). // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8). 
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index e8df90eccd..de1931794f 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1899,7 +1899,7 @@ TEST_F(StubTest, DISABLED_IMT) { LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc(); ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc); ImtConflictTable* empty_conflict_table = - Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc); + Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count=*/0u, linear_alloc); void* data = linear_alloc->Alloc( self, ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize)); diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index ffb0c94cc7..3db4edefa1 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -98,7 +98,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; - UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false); + UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false); qpoints->pReadBarrierMarkReg04 = nullptr; // Cannot use register 4 (ESP) to pass arguments. // x86 has only 8 core registers. qpoints->pReadBarrierMarkReg08 = nullptr; diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index 6bae69c495..db011bab62 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -120,7 +120,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; - UpdateReadBarrierEntrypoints(qpoints, /*is_active*/ false); + UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false); qpoints->pReadBarrierMarkReg04 = nullptr; // Cannot use register 4 (RSP) to pass arguments. // x86-64 has only 16 core registers. 
qpoints->pReadBarrierMarkReg16 = nullptr; diff --git a/runtime/art_field.cc b/runtime/art_field.cc index 6cbd9e4cfc..e20e7f3f5e 100644 --- a/runtime/art_field.cc +++ b/runtime/art_field.cc @@ -47,7 +47,7 @@ void ArtField::SetOffset(MemberOffset num_bytes) { ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) { DCHECK(GetDeclaringClass()->IsProxyClass()); ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupClass( - Thread::Current(), descriptor, /* class_loader */ nullptr); + Thread::Current(), descriptor, /* class_loader= */ nullptr); DCHECK(klass != nullptr); return klass; } diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 68ccfee089..4a19b108ab 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -324,12 +324,12 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) { if (IsStatic()) { art::interpreter::EnterInterpreterFromInvoke( - self, this, nullptr, args, result, /*stay_in_interpreter*/ true); + self, this, nullptr, args, result, /*stay_in_interpreter=*/ true); } else { mirror::Object* receiver = reinterpret_cast<StackReference<mirror::Object>*>(&args[0])->AsMirrorPtr(); art::interpreter::EnterInterpreterFromInvoke( - self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true); + self, this, receiver, args + 1, result, /*stay_in_interpreter=*/ true); } } else { DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc index 851c23f1cb..50b42d4f7b 100644 --- a/runtime/base/mem_map_arena_pool.cc +++ b/runtime/base/mem_map_arena_pool.cc @@ -58,7 +58,7 @@ MemMap MemMapArena::Allocate(size_t size, bool low_4gb, const char* name) { size = RoundUp(size, kPageSize); std::string error_msg; MemMap map = MemMap::MapAnonymous(name, - /* addr */ nullptr, + /* addr= */ nullptr, size, PROT_READ | PROT_WRITE, low_4gb, diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index c11e3d1e6e..9952283272 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -1250,9 +1250,9 @@ void Locks::Init() { #undef UPDATE_CURRENT_LOCK_LEVEL // List of mutexes that we may hold when accessing a weak ref. - AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false); - AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false); - AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false); + AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false); + AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false); + AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false); InitConditions(); } diff --git a/runtime/cha.cc b/runtime/cha.cc index b600df6bca..de4aebed36 100644 --- a/runtime/cha.cc +++ b/runtime/cha.cc @@ -363,7 +363,7 @@ void ClassHierarchyAnalysis::CheckVirtualMethodSingleImplementationInfo( // non-single-implementation already. 
VerifyNonSingleImplementation(klass->GetSuperClass()->GetSuperClass(), method_in_super->GetMethodIndex(), - nullptr /* excluded_method */); + /* excluded_method= */ nullptr); return; } diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h index 2536b23416..0dc62d346d 100644 --- a/runtime/class_linker-inl.h +++ b/runtime/class_linker-inl.h @@ -315,7 +315,7 @@ inline ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx, ArtMethod* // Check if the invoke type matches the class type. ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache(); ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader(); - if (CheckInvokeClassMismatch</* kThrow */ false>(dex_cache, type, method_idx, class_loader)) { + if (CheckInvokeClassMismatch</* kThrow= */ false>(dex_cache, type, method_idx, class_loader)) { return nullptr; } // Check access. @@ -366,7 +366,7 @@ inline ArtMethod* ClassLinker::ResolveMethod(Thread* self, // Check if the invoke type matches the class type. ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache(); ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader(); - if (CheckInvokeClassMismatch</* kThrow */ true>(dex_cache, type, method_idx, class_loader)) { + if (CheckInvokeClassMismatch</* kThrow= */ true>(dex_cache, type, method_idx, class_loader)) { DCHECK(Thread::Current()->IsExceptionPending()); return nullptr; } diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 7549c04b6f..c18abab8cb 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -496,7 +496,7 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b // Space (LOS) -- see the comment about the dirty card scanning logic in // art::gc::collector::ConcurrentCopying::MarkingPhase. Handle<mirror::Class> java_lang_String(hs.NewHandle( - AllocClass</* kMovable */ false>( + AllocClass</* kMovable= */ false>( self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_)))); java_lang_String->SetStringClass(); mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self); @@ -1039,8 +1039,8 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) { std::vector<std::unique_ptr<const DexFile>> dex_files; if (!AddImageSpace(image_space, ScopedNullHandle<mirror::ClassLoader>(), - /*dex_elements*/nullptr, - /*dex_location*/nullptr, + /*dex_elements=*/nullptr, + /*dex_location=*/nullptr, /*out*/&dex_files, error_msg)) { return false; @@ -1127,7 +1127,10 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader, } return true; // Continue with the next Element. }; - bool error = VisitClassLoaderDexElements(soa, handle, add_element_names, /* error */ false); + bool error = VisitClassLoaderDexElements(soa, + handle, + add_element_names, + /* defaultReturn= */ false); if (error) { // An error occurred during DexPathList Element visiting. return false; @@ -1259,16 +1262,16 @@ bool VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks:: REQUIRES_SHARED(Locks::mutator_lock_) { if (space.HasAddress(obj)) { if (obj->IsDexCache()) { - obj->VisitReferences</* kVisitNativeRoots */ true, - kVerifyNone, - kWithoutReadBarrier>(visitor, visitor); + obj->VisitReferences</*kVisitNativeRoots=*/ true, + kVerifyNone, + kWithoutReadBarrier>(visitor, visitor); } else { // Don't visit native roots for non-dex-cache as they can't contain // native references to strings. This is verified during compilation // by ImageWriter::VerifyNativeGCRootInvariants. 
- obj->VisitReferences</* kVisitNativeRoots */ false, - kVerifyNone, - kWithoutReadBarrier>(visitor, visitor); + obj->VisitReferences</*kVisitNativeRoots=*/ false, + kVerifyNone, + kWithoutReadBarrier>(visitor, visitor); } } }); @@ -2241,7 +2244,7 @@ ClassLinker::~ClassLinker() { for (const ClassLoaderData& data : class_loaders_) { // CHA unloading analysis is not needed. No negative consequences are expected because // all the classloaders are deleted at the same time. - DeleteClassLoader(self, data, false /*cleanup_cha*/); + DeleteClassLoader(self, data, /*cleanup_cha=*/ false); } class_loaders_.clear(); } @@ -2345,7 +2348,7 @@ ObjPtr<mirror::Class> ClassLinker::AllocPrimitiveArrayClass(Thread* self, // in the `klass_` field of one of its instances allocated in the Large-Object // Space (LOS) -- see the comment about the dirty card scanning logic in // art::gc::collector::ConcurrentCopying::MarkingPhase. - return AllocClass</* kMovable */ false>( + return AllocClass</* kMovable= */ false>( self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_)); } @@ -3441,7 +3444,7 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file, CHECK(dex_cache != nullptr) << dex_file.GetLocation(); boot_class_path_.push_back(&dex_file); WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_); - RegisterDexFileLocked(dex_file, dex_cache, /* class_loader */ nullptr); + RegisterDexFileLocked(dex_file, dex_cache, /* class_loader= */ nullptr); } void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file, @@ -5012,7 +5015,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, ArtField* art_field = ResolveField(field.GetIndex(), dex_cache, class_loader, - /* is_static */ true); + /* is_static= */ true); if (Runtime::Current()->IsActiveTransaction()) { value_it.ReadValueToField<true>(art_field); } else { @@ -6412,8 +6415,8 @@ void ClassLinker::FillIMTAndConflictTables(ObjPtr<mirror::Class> klass) { unimplemented_method, conflict_method, klass, - /*create_conflict_tables*/true, - /*ignore_copied_methods*/false, + /*create_conflict_tables=*/true, + /*ignore_copied_methods=*/false, &new_conflict, &imt_data[0]); } @@ -6901,8 +6904,8 @@ void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass, unimplemented_method, imt_conflict_method, klass.Get(), - /*create_conflict_table*/false, - /*ignore_copied_methods*/true, + /*create_conflict_tables=*/false, + /*ignore_copied_methods=*/true, /*out*/new_conflict, /*out*/imt); } @@ -8120,7 +8123,7 @@ ArtMethod* ClassLinker::ResolveMethod(uint32_t method_idx, // Check if the invoke type matches the class type. if (kResolveMode == ResolveMode::kCheckICCEAndIAE && - CheckInvokeClassMismatch</* kThrow */ true>( + CheckInvokeClassMismatch</* kThrow= */ true>( dex_cache.Get(), type, [klass]() { return klass; })) { DCHECK(Thread::Current()->IsExceptionPending()); return nullptr; @@ -9088,7 +9091,7 @@ void ClassLinker::CleanupClassLoaders() { } for (ClassLoaderData& data : to_delete) { // CHA unloading analysis and SingleImplementaion cleanups are required. - DeleteClassLoader(self, data, true /*cleanup_cha*/); + DeleteClassLoader(self, data, /*cleanup_cha=*/ true); } } @@ -9234,11 +9237,11 @@ template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::ResolveMode::kNoChec InvokeType type); // Instantiate ClassLinker::AllocClass. 
-template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ true>( +template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ true>( Thread* self, ObjPtr<mirror::Class> java_lang_Class, uint32_t class_size); -template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ false>( +template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable= */ false>( Thread* self, ObjPtr<mirror::Class> java_lang_Class, uint32_t class_size); diff --git a/runtime/class_linker.h b/runtime/class_linker.h index b6f1f865d6..a48dfafab5 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -1043,12 +1043,12 @@ class ClassLinker { public: // This slot must become a default conflict method. static MethodTranslation CreateConflictingMethod() { - return MethodTranslation(Type::kConflict, /*translation*/nullptr); + return MethodTranslation(Type::kConflict, /*translation=*/nullptr); } // This slot must become an abstract method. static MethodTranslation CreateAbstractMethod() { - return MethodTranslation(Type::kAbstract, /*translation*/nullptr); + return MethodTranslation(Type::kAbstract, /*translation=*/nullptr); } // Use the given method as the current value for this vtable slot during translation. diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index ab7182a75e..27ac90b5b4 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -1034,8 +1034,8 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) { // Force initialization to turn the class erroneous. bool initialized = class_linker_->EnsureInitialized(soa.Self(), klass, - /* can_init_fields */ true, - /* can_init_parents */ true); + /* can_init_fields= */ true, + /* can_init_parents= */ true); EXPECT_FALSE(initialized); EXPECT_TRUE(soa.Self()->IsExceptionPending()); soa.Self()->ClearException(); @@ -1320,15 +1320,15 @@ TEST_F(ClassLinkerTest, ResolveVerifyAndClinit) { ObjPtr<mirror::Class> uninit = ResolveVerifyAndClinit(type_idx, clinit, soa.Self(), - /* can_run_clinit */ true, - /* verify_access */ false); + /* can_run_clinit= */ true, + /* verify_access= */ false); EXPECT_TRUE(uninit != nullptr); EXPECT_FALSE(uninit->IsInitialized()); ObjPtr<mirror::Class> init = ResolveVerifyAndClinit(type_idx, getS0, soa.Self(), - /* can_run_clinit */ true, - /* verify_access */ false); + /* can_run_clinit= */ true, + /* verify_access= */ false); EXPECT_TRUE(init != nullptr); EXPECT_TRUE(init->IsInitialized()); } @@ -1530,7 +1530,7 @@ TEST_F(ClassLinkerTest, RegisterDexFileName) { { WriterMutexLock mu(soa.Self(), *Locks::dex_lock_); // Check that inserting with a UTF16 name works. 
- class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader */ nullptr); + class_linker->RegisterDexFileLocked(*dex_file, dex_cache.Get(), /* class_loader= */ nullptr); } } @@ -1699,14 +1699,14 @@ TEST_F(ClassLinkerClassLoaderTest, CreatePathClassLoader) { jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr); VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a); VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr); - VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false); + VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false); } TEST_F(ClassLinkerClassLoaderTest, CreateDelegateLastClassLoader) { jobject class_loader_a = LoadDexInDelegateLastClassLoader("ForClassLoaderA", nullptr); VerifyClassResolution("LDefinedInA;", class_loader_a, class_loader_a); VerifyClassResolution("Ljava/lang/String;", class_loader_a, nullptr); - VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find*/ false); + VerifyClassResolution("LDefinedInB;", class_loader_a, nullptr, /*should_find=*/ false); } TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) { @@ -1753,7 +1753,7 @@ TEST_F(ClassLinkerClassLoaderTest, CreateClassLoaderChain) { VerifyClassResolution("LDefinedInAC;", class_loader_d, class_loader_a); // Sanity check that we don't find an undefined class. - VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find*/ false); + VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find=*/ false); } } // namespace art diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc index 5c8d68527b..dd10f3c4dd 100644 --- a/runtime/class_loader_context.cc +++ b/runtime/class_loader_context.cc @@ -223,7 +223,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla if (!dex_file_loader.Open(location.c_str(), location.c_str(), Runtime::Current()->IsVerificationEnabled(), - /*verify_checksum*/ true, + /*verify_checksum=*/ true, &error_msg, &info.opened_dex_files)) { // If we fail to open the dex file because it's been stripped, try to open the dex file @@ -298,12 +298,12 @@ bool ClassLoaderContext::RemoveLocationsFromClassPaths( } std::string ClassLoaderContext::EncodeContextForDex2oat(const std::string& base_dir) const { - return EncodeContext(base_dir, /*for_dex2oat*/ true, /*stored_context*/ nullptr); + return EncodeContext(base_dir, /*for_dex2oat=*/ true, /*stored_context=*/ nullptr); } std::string ClassLoaderContext::EncodeContextForOatFile(const std::string& base_dir, ClassLoaderContext* stored_context) const { - return EncodeContext(base_dir, /*for_dex2oat*/ false, stored_context); + return EncodeContext(base_dir, /*for_dex2oat=*/ false, stored_context); } std::string ClassLoaderContext::EncodeContext(const std::string& base_dir, @@ -663,7 +663,7 @@ std::unique_ptr<ClassLoaderContext> ClassLoaderContext::CreateContextForClassLoa Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements)); - std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files*/ false)); + std::unique_ptr<ClassLoaderContext> result(new ClassLoaderContext(/*owns_the_dex_files=*/ false)); if (result->AddInfoToContextFromClassLoader(soa, h_class_loader, h_dex_elements)) { return result; } else { diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc index 
5e3f48c100..ea624f1e9c 100644 --- a/runtime/class_loader_context_test.cc +++ b/runtime/class_loader_context_test.cc @@ -125,7 +125,7 @@ class ClassLoaderContextTest : public CommonRuntimeTest { std::unique_ptr<ClassLoaderContext> ParseContextWithChecksums(const std::string& context_spec) { std::unique_ptr<ClassLoaderContext> context(new ClassLoaderContext()); - if (!context->Parse(context_spec, /*parse_checksums*/ true)) { + if (!context->Parse(context_spec, /*parse_checksums=*/ true)) { return nullptr; } return context; @@ -263,7 +263,7 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFiles) { "PCL[" + multidex_name + ":" + myclass_dex_name + "];" + "DLC[" + dex_name + "]"); - ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ "")); + ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ "")); VerifyContextSize(context.get(), 2); @@ -314,7 +314,7 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFilesRelative) { "PCL[" + multidex_name + ":" + myclass_dex_name + "];" + "DLC[" + dex_name + "]"); - ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir*/ "")); + ASSERT_TRUE(context->OpenDexFiles(InstructionSet::kArm, /*classpath_dir=*/ "")); std::vector<std::unique_ptr<const DexFile>> all_dex_files0 = OpenTestDexFiles("MultiDex"); std::vector<std::unique_ptr<const DexFile>> myclass_dex_files = OpenTestDexFiles("MyClass"); diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h index 78ad568d25..945d659396 100644 --- a/runtime/class_loader_utils.h +++ b/runtime/class_loader_utils.h @@ -160,7 +160,7 @@ inline void VisitClassLoaderDexFiles(ScopedObjectAccessAlreadyRunnable& soa, VisitClassLoaderDexFiles<decltype(helper), void*>(soa, class_loader, helper, - /* default */ nullptr); + /* default= */ nullptr); } } // namespace art diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h index 14605623e5..774f19e7cd 100644 --- a/runtime/common_runtime_test.h +++ b/runtime/common_runtime_test.h @@ -78,8 +78,8 @@ class CommonRuntimeTestImpl : public CommonArtTestImpl { const ArtDexFileLoader dex_file_loader; CHECK(dex_file_loader.Open(input_jar.c_str(), input_jar.c_str(), - /*verify*/ true, - /*verify_checksum*/ true, + /*verify=*/ true, + /*verify_checksum=*/ true, &error_msg, &dex_files)) << error_msg; EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported"; diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 7199d5e192..7a08cb3df4 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -105,10 +105,10 @@ void ThrowAbstractMethodError(ArtMethod* method) { } void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file) { - ThrowException("Ljava/lang/AbstractMethodError;", /* referrer */ nullptr, + ThrowException("Ljava/lang/AbstractMethodError;", /* referrer= */ nullptr, StringPrintf("abstract method \"%s\"", dex_file.PrettyMethod(method_idx, - /* with_signature */ true).c_str()).c_str()); + /* with_signature= */ true).c_str()).c_str()); } // ArithmeticException @@ -324,7 +324,7 @@ void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const cha void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) { DCHECK(method != nullptr); ThrowException("Ljava/lang/IncompatibleClassChangeError;", - /*referrer*/nullptr, + /*referrer=*/nullptr, StringPrintf("Conflicting default method implementations %s", ArtMethod::PrettyMethod(method).c_str()).c_str()); } @@ -633,7 +633,7 @@ void ThrowNullPointerExceptionFromDexPC(bool 
check_address, uintptr_t addr) { ArtField* field = Runtime::Current()->GetClassLinker()->ResolveField(instr.VRegC_22c(), method, false); Thread::Current()->ClearException(); // Resolution may fail, ignore. - ThrowNullPointerExceptionForFieldAccess(field, true /* read */); + ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true); break; } case Instruction::IGET_QUICK: @@ -647,9 +647,9 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) { ArtField* field = nullptr; CHECK_NE(field_idx, DexFile::kDexNoIndex16); field = Runtime::Current()->GetClassLinker()->ResolveField( - field_idx, method, /* is_static */ false); + field_idx, method, /* is_static= */ false); Thread::Current()->ClearException(); // Resolution may fail, ignore. - ThrowNullPointerExceptionForFieldAccess(field, true /* read */); + ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ true); break; } case Instruction::IPUT: @@ -660,9 +660,9 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) { case Instruction::IPUT_CHAR: case Instruction::IPUT_SHORT: { ArtField* field = Runtime::Current()->GetClassLinker()->ResolveField( - instr.VRegC_22c(), method, /* is_static */ false); + instr.VRegC_22c(), method, /* is_static= */ false); Thread::Current()->ClearException(); // Resolution may fail, ignore. - ThrowNullPointerExceptionForFieldAccess(field, false /* write */); + ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false); break; } case Instruction::IPUT_QUICK: @@ -676,9 +676,9 @@ void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) { ArtField* field = nullptr; CHECK_NE(field_idx, DexFile::kDexNoIndex16); field = Runtime::Current()->GetClassLinker()->ResolveField( - field_idx, method, /* is_static */ false); + field_idx, method, /* is_static= */ false); Thread::Current()->ClearException(); // Resolution may fail, ignore. 
- ThrowNullPointerExceptionForFieldAccess(field, false /* write */); + ThrowNullPointerExceptionForFieldAccess(field, /* is_read= */ false); break; } case Instruction::AGET: diff --git a/runtime/debug_print.cc b/runtime/debug_print.cc index cb334b569f..2939b00e1b 100644 --- a/runtime/debug_print.cc +++ b/runtime/debug_print.cc @@ -37,7 +37,7 @@ std::string DescribeSpace(ObjPtr<mirror::Class> klass) { std::ostringstream oss; gc::Heap* heap = Runtime::Current()->GetHeap(); gc::space::ContinuousSpace* cs = - heap->FindContinuousSpaceFromObject(klass, /* fail_ok */ true); + heap->FindContinuousSpaceFromObject(klass, /* fail_ok= */ true); if (cs != nullptr) { if (cs->IsImageSpace()) { gc::space::ImageSpace* ispace = cs->AsImageSpace(); @@ -50,7 +50,7 @@ std::string DescribeSpace(ObjPtr<mirror::Class> klass) { } } else { gc::space::DiscontinuousSpace* ds = - heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true); + heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok= */ true); if (ds != nullptr) { oss << "discontinuous;" << ds->GetName(); } else { diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 9b5b84a548..099cadc07d 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -944,7 +944,7 @@ JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids, std::vector<uint64_t>* counts) { gc::Heap* heap = Runtime::Current()->GetHeap(); - heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger); + heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger); VariableSizedHandleScope hs(Thread::Current()); std::vector<Handle<mirror::Class>> classes; counts->clear(); @@ -965,7 +965,7 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>* instances) { gc::Heap* heap = Runtime::Current()->GetHeap(); // We only want reachable instances, so do a GC. 
- heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger); + heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger); JDWP::JdwpError error; ObjPtr<mirror::Class> c = DecodeClass(class_id, &error); if (c == nullptr) { @@ -975,7 +975,7 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<Handle<mirror::Object>> raw_instances; Runtime::Current()->GetHeap()->GetInstances(hs, hs.NewHandle(c), - /* use_is_assignable_from */ false, + /* use_is_assignable_from= */ false, max_count, raw_instances); for (size_t i = 0; i < raw_instances.size(); ++i) { @@ -987,7 +987,7 @@ JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count, std::vector<JDWP::ObjectId>* referring_objects) { gc::Heap* heap = Runtime::Current()->GetHeap(); - heap->CollectGarbage(/* clear_soft_references */ false, gc::GcCause::kGcCauseDebugger); + heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger); JDWP::JdwpError error; ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error); if (o == nullptr) { @@ -3075,7 +3075,7 @@ void Dbg::PostException(mirror::Throwable* exception_object) { Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object)); std::unique_ptr<Context> context(Context::Create()); CatchLocationFinder clf(self, h_exception, context.get()); - clf.WalkStack(/* include_transitions */ false); + clf.WalkStack(/* include_transitions= */ false); JDWP::EventLocation exception_throw_location; SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc()); JDWP::EventLocation exception_catch_location; @@ -3734,7 +3734,7 @@ class ScopedDebuggerThreadSuspension { bool timed_out; ThreadList* const thread_list = Runtime::Current()->GetThreadList(); suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, - /* request_suspension */ true, + /* request_suspension= */ true, SuspendReason::kForDebugger, &timed_out); } @@ -4745,7 +4745,7 @@ class HeapChunkContext { REQUIRES_SHARED(Locks::mutator_lock_) { if (ProcessRecord(start, used_bytes)) { uint8_t state = ExamineNativeObject(start); - AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/); + AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true); startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_; } } @@ -4757,7 +4757,7 @@ class HeapChunkContext { // OLD-TODO: if context.merge, see if this chunk is different from the last chunk. // If it's the same, we should combine them. uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start)); - AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/); + AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false); startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_; } } diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc index b50a430843..fb63c82a17 100644 --- a/runtime/dex/dex_file_annotations.cc +++ b/runtime/dex/dex_file_annotations.cc @@ -1251,7 +1251,7 @@ static void DCheckNativeAnnotation(const char* descriptor, jclass cls) { // WellKnownClasses may not be initialized yet, so `klass` may be null. if (klass != nullptr) { // Lookup using the boot class path loader should yield the annotation class. 
- CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader */ nullptr)); + CHECK_EQ(klass, linker->LookupClass(soa.Self(), descriptor, /* class_loader= */ nullptr)); } } } diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h index 2cbf557c1f..fbcee3901f 100644 --- a/runtime/dex2oat_environment_test.h +++ b/runtime/dex2oat_environment_test.h @@ -87,7 +87,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest { std::vector<std::unique_ptr<const DexFile>> multi1; ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(), GetMultiDexSrc1().c_str(), - /* verify */ true, + /* verify= */ true, kVerifyChecksum, &error_msg, &multi1)) << error_msg; @@ -96,7 +96,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest { std::vector<std::unique_ptr<const DexFile>> multi2; ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(), GetMultiDexSrc2().c_str(), - /* verify */ true, + /* verify= */ true, kVerifyChecksum, &error_msg, &multi2)) << error_msg; diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc index 429ecd3c32..13f5fcb20e 100644 --- a/runtime/dexopt_test.cc +++ b/runtime/dexopt_test.cc @@ -206,7 +206,7 @@ void DexoptTest::ReserveImageSpaceChunk(uintptr_t start, uintptr_t end) { reinterpret_cast<uint8_t*>(start), end - start, PROT_NONE, - /* low_4gb*/ false, + /* low_4gb=*/ false, &error_msg)); ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg; LOG(INFO) << "Reserved space for image " << diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index ce742fe47e..4e5fe5ff0b 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -86,7 +86,7 @@ ElfFileImpl<ElfTypes>* ElfFileImpl<ElfTypes>::Open(File* file, bool low_4gb, std::string* error_msg) { std::unique_ptr<ElfFileImpl<ElfTypes>> elf_file( - new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only */ false)); + new ElfFileImpl<ElfTypes>(file, (prot & PROT_WRITE) != 0, /* program_header_only= */ false)); if (!elf_file->Setup(file, prot, flags, low_4gb, error_msg)) { return nullptr; } @@ -1163,7 +1163,7 @@ bool ElfFileImpl<ElfTypes>::Load(File* file, vaddr_size, PROT_NONE, low_4gb, - /* reuse */ false, + /* reuse= */ false, reservation, error_msg); if (!local_reservation.IsValid()) { @@ -1237,10 +1237,10 @@ bool ElfFileImpl<ElfTypes>::Load(File* file, flags, file->Fd(), program_header->p_offset, - /* low4_gb */ false, + /* low_4gb= */ false, file->GetPath().c_str(), - /* reuse */ true, // implies MAP_FIXED - /* reservation */ nullptr, + /* reuse= */ true, // implies MAP_FIXED + /* reservation= */ nullptr, error_msg); if (!segment.IsValid()) { *error_msg = StringPrintf("Failed to map ELF file segment %d from %s: %s", @@ -1262,9 +1262,9 @@ bool ElfFileImpl<ElfTypes>::Load(File* file, p_vaddr + program_header->p_filesz, program_header->p_memsz - program_header->p_filesz, prot, - /* low_4gb */ false, - /* reuse */ true, - /* reservation */ nullptr, + /* low_4gb= */ false, + /* reuse= */ true, + /* reservation= */ nullptr, error_msg); if (!segment.IsValid()) { *error_msg = StringPrintf("Failed to map zero-initialized ELF file segment %d from %s: %s", @@ -1763,7 +1763,7 @@ ElfFile* ElfFile::Open(File* file, int mmap_prot, int mmap_flags, /*out*/std::st PROT_READ, MAP_PRIVATE, file->Fd(), - /* start */ 0, + /* start= */ 0, low_4gb, file->GetPath().c_str(), error_msg); @@ -1886,7 +1886,7 @@ bool ElfFile::GetLoadedSize(size_t* size, std::string* error_msg) const { } bool ElfFile::Strip(File* file, std::string* error_msg) { - 
std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, error_msg)); + std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb=*/false, error_msg)); if (elf_file.get() == nullptr) { return false; } diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 35bfa91aed..120a0e9ea9 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -191,7 +191,7 @@ inline mirror::Object* AllocObjectFromCode(mirror::Class* klass, return nullptr; } // CheckObjectAlloc can cause thread suspension which means we may now be instrumented. - return klass->Alloc</*kInstrumented*/true>( + return klass->Alloc</*kInstrumented=*/true>( self, Runtime::Current()->GetHeap()->GetCurrentAllocator()).Ptr(); } @@ -216,7 +216,7 @@ inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass, // Pass in false since the object cannot be finalizable. // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be // instrumented. - return klass->Alloc</*kInstrumented*/true, false>(self, heap->GetCurrentAllocator()).Ptr(); + return klass->Alloc</*kInstrumented=*/true, false>(self, heap->GetCurrentAllocator()).Ptr(); } // Pass in false since the object cannot be finalizable. return klass->Alloc<kInstrumented, false>(self, allocator_type).Ptr(); @@ -287,11 +287,11 @@ inline ObjPtr<mirror::Array> AllocArrayFromCode(dex::TypeIndex type_idx, } gc::Heap* heap = Runtime::Current()->GetHeap(); // CheckArrayAlloc can cause thread suspension which means we may now be instrumented. - return mirror::Array::Alloc</*kInstrumented*/true>(self, - klass, - component_count, - klass->GetComponentSizeShift(), - heap->GetCurrentAllocator()); + return mirror::Array::Alloc</*kInstrumented=*/true>(self, + klass, + component_count, + klass->GetComponentSizeShift(), + heap->GetCurrentAllocator()); } return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, klass->GetComponentSizeShift(), allocator_type); diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 5421f69fbd..12136bf476 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -244,7 +244,7 @@ CallerAndOuterMethod GetCalleeSaveMethodCallerAndOuterMethod(Thread* self, Calle result.outer_method = outer_caller_and_pc.first; uintptr_t caller_pc = outer_caller_and_pc.second; result.caller = - DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check */ true); + DoGetCalleeSaveMethodCaller(result.outer_method, caller_pc, /* do_caller_check= */ true); return result; } diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h index 8e784c164c..ce12fdee5f 100644 --- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h +++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h @@ -31,7 +31,7 @@ static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qp jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; // Alloc - ResetQuickAllocEntryPoints(qpoints, /* is_marking */ true); + ResetQuickAllocEntryPoints(qpoints, /* is_marking= */ true); // Resolution and initialization qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc index 
c782c9c949..2431bce059 100644 --- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc @@ -74,9 +74,9 @@ extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind, JValue return_value; return_value.SetJ(0); // we never deoptimize from compiled code with an invoke result. self->PushDeoptimizationContext(return_value, - false /* is_reference */, + /* is_reference= */ false, self->GetException(), - true /* from_code */, + /* from_code= */ true, DeoptimizationMethodType::kDefault); artDeoptimizeImpl(self, kind, true); } diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc index c4d85a3ef8..e9399827f5 100644 --- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -140,7 +140,7 @@ extern "C" mirror::Class* artInitializeStaticStorageFromCode(mirror::Class* klas StackHandleScope<1> hs(self); Handle<mirror::Class> h_klass = hs.NewHandle(klass); bool success = class_linker->EnsureInitialized( - self, h_klass, /* can_init_fields */ true, /* can_init_parents */ true); + self, h_klass, /* can_init_fields= */ true, /* can_init_parents= */ true); if (UNLIKELY(!success)) { return nullptr; } @@ -157,8 +157,8 @@ extern "C" mirror::Class* artResolveTypeFromCode(uint32_t type_idx, Thread* self ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, - /* can_run_clinit */ false, - /* verify_access */ false); + /* can_run_clinit= */ false, + /* verify_access= */ false); if (LIKELY(result != nullptr) && CanReferenceBss(caller_and_outer.outer_method, caller)) { StoreTypeInBss(caller_and_outer.outer_method, dex::TypeIndex(type_idx), result); } @@ -175,8 +175,8 @@ extern "C" mirror::Class* artResolveTypeAndVerifyAccessFromCode(uint32_t type_id ObjPtr<mirror::Class> result = ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, - /* can_run_clinit */ false, - /* verify_access */ true); + /* can_run_clinit= */ false, + /* verify_access= */ true); // Do not StoreTypeInBss(); access check entrypoint is never used together with .bss. return result.Ptr(); } diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index d38e3edce9..56232c5cd8 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -392,7 +392,7 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS constexpr ReadBarrierOption kReadBarrierOption = kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier; mirror::Object* result = - ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kReadBarrierOption>( + ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>( obj, MemberOffset(offset), ref_addr); diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index ba7fb6b9db..2e447ec7d7 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -67,7 +67,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self) ScopedQuickEntrypointChecks sqec(self); // We come from an explicit check in the generated code. This path is triggered // only if the object is indeed null. 
- ThrowNullPointerExceptionFromDexPC(/* check_address */ false, 0U); + ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U); self->QuickDeliverException(); } @@ -75,7 +75,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self) extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); - ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr); + ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr); self->QuickDeliverException(); } diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 84631c377e..147249000f 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -763,7 +763,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, uint16_t num_regs = accessor.RegistersSize(); // No last shadow coming from quick. ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = - CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0); + CREATE_SHADOW_FRAME(num_regs, /* link= */ nullptr, method, /* dex_pc= */ 0); ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); size_t first_arg_reg = accessor.RegistersSize() - accessor.InsSize(); BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len, @@ -820,7 +820,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, result, shorty[0] == 'L' || shorty[0] == '[', /* class or array */ force_frame_pop ? nullptr : self->GetException(), - false /* from_code */, + /* from_code= */ false, DeoptimizationMethodType::kDefault); // Set special exception to cause deoptimization. 
@@ -912,7 +912,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler( uint32_t shorty_len = 0; const char* shorty = non_proxy_method->GetShorty(&shorty_len); BuildQuickArgumentVisitor local_ref_visitor( - sp, /* is_static */ false, shorty, shorty_len, &soa, &args); + sp, /* is_static= */ false, shorty, shorty_len, &soa, &args); local_ref_visitor.VisitArguments(); DCHECK_GT(args.size(), 0U) << proxy_method->PrettyMethod(); @@ -975,7 +975,7 @@ class GetQuickReferenceArgumentAtVisitor final : public QuickArgumentVisitor { const char* shorty, uint32_t shorty_len, size_t arg_pos) - : QuickArgumentVisitor(sp, /* is_static */ false, shorty, shorty_len), + : QuickArgumentVisitor(sp, /* is_static= */ false, shorty, shorty_len), cur_pos_(0u), arg_pos_(arg_pos), ref_arg_(nullptr) { @@ -1061,7 +1061,7 @@ std::vector<StackReference<mirror::Object>*> GetProxyReferenceArguments(ArtMetho << proxy_method->PrettyMethod() << " " << non_proxy_method->PrettyMethod(); uint32_t shorty_len = 0; const char* shorty = non_proxy_method->GetShorty(&shorty_len); - GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /* is_static */ false, shorty, shorty_len); + GetQuickReferenceArgumentsVisitor ref_args_visitor(sp, /*is_static=*/ false, shorty, shorty_len); ref_args_visitor.VisitArguments(); std::vector<StackReference<mirror::Object>*> ref_args = ref_args_visitor.GetReferenceArguments(); return ref_args; @@ -2709,7 +2709,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho conflict_method, interface_method, method, - /*force_new_conflict_method*/false); + /*force_new_conflict_method=*/false); if (new_conflict_method != conflict_method) { // Update the IMT if we create a new conflict method. No fence needed here, as the // data is consistent. @@ -2784,7 +2784,7 @@ extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* s const size_t num_vregs = is_range ? 
inst.VRegA_4rcc() : inst.VRegA_45cc(); const size_t first_arg = 0; ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = - CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, resolved_method, dex_pc); + CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, resolved_method, dex_pc); ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); @@ -2877,7 +2877,7 @@ extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMet const size_t first_arg = 0; const size_t num_vregs = ArtMethod::NumArgRegisters(shorty); ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr = - CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, caller_method, dex_pc); + CREATE_SHADOW_FRAME(num_vregs, /* link= */ nullptr, caller_method, dex_pc); ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get(); ScopedStackedShadowFramePusher frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction); diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index 10af10d1a6..313b2b4fe4 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -253,10 +253,10 @@ class AtomicStack { void Init() { std::string error_msg; mem_map_ = MemMap::MapAnonymous(name_.c_str(), - /* addr */ nullptr, + /* addr= */ nullptr, capacity_ * sizeof(begin_[0]), PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg; uint8_t* addr = mem_map_.Begin(); diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc index bb2beaa94c..80c4c76bd3 100644 --- a/runtime/gc/accounting/bitmap.cc +++ b/runtime/gc/accounting/bitmap.cc @@ -49,10 +49,10 @@ MemMap Bitmap::AllocateMemMap(const std::string& name, size_t num_bits) { RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize); std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - /* addr */ nullptr, + /* addr= */ nullptr, bitmap_size, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); if (UNLIKELY(!mem_map.IsValid())) { LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg; diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc index 7cddec6242..9a5bde86b1 100644 --- a/runtime/gc/accounting/card_table.cc +++ b/runtime/gc/accounting/card_table.cc @@ -65,10 +65,10 @@ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) { /* Allocate an extra 256 bytes to allow fixed low-byte of base */ std::string error_msg; MemMap mem_map = MemMap::MapAnonymous("card table", - /* addr */ nullptr, + /* addr= */ nullptr, capacity + 256, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg; // All zeros is the correct initial value; all clean. 
Anonymous mmaps are initialized to zero, we diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc index 40dc6e146a..b4026fc3f3 100644 --- a/runtime/gc/accounting/mod_union_table.cc +++ b/runtime/gc/accounting/mod_union_table.cc @@ -462,7 +462,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* vis for (mirror::HeapReference<mirror::Object>* obj_ptr : references) { if (obj_ptr->AsMirrorPtr() != nullptr) { all_null = false; - visitor->MarkHeapReference(obj_ptr, /*do_atomic_update*/ false); + visitor->MarkHeapReference(obj_ptr, /*do_atomic_update=*/ false); } } count += references.size(); diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h index 8bdf6da6fe..b369a6685e 100644 --- a/runtime/gc/accounting/read_barrier_table.h +++ b/runtime/gc/accounting/read_barrier_table.h @@ -40,10 +40,10 @@ class ReadBarrierTable { static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize))); std::string error_msg; mem_map_ = MemMap::MapAnonymous("read barrier table", - /* addr */ nullptr, + /* addr= */ nullptr, capacity, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr) << "couldn't allocate read barrier table: " << error_msg; diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc index 9dea2f80d1..fba62c3d67 100644 --- a/runtime/gc/accounting/remembered_set.cc +++ b/runtime/gc/accounting/remembered_set.cc @@ -75,7 +75,7 @@ class RememberedSetReferenceVisitor { mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset); if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) { *contains_reference_to_target_space_ = true; - collector_->MarkHeapReference(ref_ptr, /*do_atomic_update*/ false); + collector_->MarkHeapReference(ref_ptr, /*do_atomic_update=*/ false); DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr())); } } diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 2946486dfb..76d5d9de7e 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -85,10 +85,10 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create( const size_t bitmap_size = ComputeBitmapSize(heap_capacity); std::string error_msg; MemMap mem_map = MemMap::MapAnonymous(name.c_str(), - /* addr */ nullptr, + /* addr= */ nullptr, bitmap_size, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); if (UNLIKELY(!mem_map.IsValid())) { LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg; diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc index 0dbafde2a5..8cc0c4ebf8 100644 --- a/runtime/gc/allocator/rosalloc.cc +++ b/runtime/gc/allocator/rosalloc.cc @@ -92,10 +92,10 @@ RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity, size_t max_num_of_pages = max_capacity_ / kPageSize; std::string error_msg; page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map", - /* addr */ nullptr, + /* addr= */ nullptr, RoundUp(max_num_of_pages, kPageSize), PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg; page_map_ = page_map_mem_map_.Begin(); diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h index 
3095f9f679..8fd235f338 100644 --- a/runtime/gc/collector/concurrent_copying-inl.h +++ b/runtime/gc/collector/concurrent_copying-inl.h @@ -76,8 +76,8 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion( // we can avoid an expensive CAS. // For the baker case, an object is marked if either the mark bit marked or the bitmap bit is // set. - success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(), - /* rb_state */ ReadBarrier::GrayState()); + success = ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(), + /* rb_state= */ ReadBarrier::GrayState()); } else { success = !bitmap->AtomicTestAndSet(ref); } @@ -113,8 +113,8 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(Thread* const self, } // This may or may not succeed, which is ok because the object may already be gray. bool success = - ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::NonGrayState(), - /* rb_state */ ReadBarrier::GrayState()); + ref->AtomicSetReadBarrierState(/* expected_rb_state= */ ReadBarrier::NonGrayState(), + /* rb_state= */ ReadBarrier::GrayState()); if (success) { MutexLock mu(self, immune_gray_stack_lock_); immune_gray_stack_.push_back(ref); @@ -186,7 +186,7 @@ inline mirror::Object* ConcurrentCopying::Mark(Thread* const self, region_space_->Unprotect(); LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref); region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT)); - heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true); + heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true); UNREACHABLE(); } } else { @@ -209,8 +209,8 @@ inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* fr if (UNLIKELY(mark_from_read_barrier_measurements_)) { ret = MarkFromReadBarrierWithMeasurements(self, from_ref); } else { - ret = Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, - from_ref); + ret = Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self, + from_ref); } // Only set the mark bit for baker barrier. if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) { diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index 46cc79ce9c..2ae4676cac 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -135,10 +135,10 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, std::string error_msg; sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous( "concurrent copying sweep array free buffer", - /* addr */ nullptr, + /* addr= */ nullptr, RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(sweep_array_free_buffer_mem_map_.IsValid()) << "Couldn't allocate sweep array free buffer: " << error_msg; @@ -488,7 +488,7 @@ class ConcurrentCopying::FlipCallback : public Closure { TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings()); // Only change live bytes for full CC. 
cc->region_space_->SetFromSpace( - cc->rb_table_, evac_mode, /*clear_live_bytes*/ !cc->young_gen_); + cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_); } cc->SwapStacks(); if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) { @@ -601,7 +601,7 @@ void ConcurrentCopying::VerifyGrayImmuneObjects() { REQUIRES_SHARED(Locks::mutator_lock_) { // If an object is not gray, it should only have references to things in the immune spaces. if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) { - obj->VisitReferences</*kVisitNativeRoots*/true, + obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(visitor, visitor); } @@ -669,8 +669,8 @@ void ConcurrentCopying::VerifyNoMissingCardMarks() { // Objects on clean cards should never have references to newly allocated regions. Note // that aged cards are also not clean. if (heap_->GetCardTable()->GetCard(obj) == gc::accounting::CardTable::kCardClean) { - VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder*/ obj); - obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>( + VerifyNoMissingCardMarkVisitor internal_visitor(this, /*holder=*/ obj); + obj->VisitReferences</*kVisitNativeRoots=*/true, kVerifyNone, kWithoutReadBarrier>( internal_visitor, internal_visitor); } }; @@ -742,7 +742,7 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() { TimingLogger::ScopedTiming split("GrayAllDirtyImmuneObjects", GetTimings()); accounting::CardTable* const card_table = heap_->GetCardTable(); Thread* const self = Thread::Current(); - using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ true>; + using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ true>; VisitorType visitor(self); WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) { @@ -769,11 +769,11 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() { : card; }, /* card modified visitor */ VoidFunctor()); - card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(), - space->Begin(), - space->End(), - visitor, - gc::accounting::CardTable::kCardAged); + card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(), + space->Begin(), + space->End(), + visitor, + gc::accounting::CardTable::kCardAged); } } } @@ -781,7 +781,7 @@ void ConcurrentCopying::GrayAllDirtyImmuneObjects() { void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() { TimingLogger::ScopedTiming split("(Paused)GrayAllNewlyDirtyImmuneObjects", GetTimings()); accounting::CardTable* const card_table = heap_->GetCardTable(); - using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent */ false>; + using VisitorType = GrayImmuneObjectVisitor</* kIsConcurrent= */ false>; Thread* const self = Thread::Current(); VisitorType visitor(self); WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); @@ -791,11 +791,11 @@ void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() { // Don't need to scan aged cards since we did these before the pause. Note that scanning cards // also handles the mod-union table cards. - card_table->Scan</* kClearCard */ false>(space->GetMarkBitmap(), - space->Begin(), - space->End(), - visitor, - gc::accounting::CardTable::kCardDirty); + card_table->Scan</*kClearCard=*/ false>(space->GetMarkBitmap(), + space->Begin(), + space->End(), + visitor, + gc::accounting::CardTable::kCardDirty); if (table != nullptr) { // Add the cards to the mod-union table so that we can clear cards to save RAM. 
table->ProcessCards(); @@ -1376,7 +1376,7 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() { space::RegionSpace* region_space = RegionSpace(); CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space"; VerifyNoFromSpaceRefsFieldVisitor visitor(this); - obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( + obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( visitor, visitor); if (kUseBakerReadBarrier) { @@ -1558,8 +1558,8 @@ bool ConcurrentCopying::ProcessMarkStackOnce() { MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed); if (mark_stack_mode == kMarkStackModeThreadLocal) { // Process the thread-local mark stacks and the GC mark stack. - count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false, - /* checkpoint_callback */ nullptr); + count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false, + /* checkpoint_callback= */ nullptr); while (!gc_mark_stack_->IsEmpty()) { mirror::Object* to_ref = gc_mark_stack_->PopBack(); ProcessMarkStackRef(to_ref); @@ -1734,7 +1734,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) { CHECK(!region_space->IsInFromSpace(to_ref)) << "Scanning object " << to_ref << " in from space"; AssertToSpaceInvariant(nullptr, MemberOffset(0), to_ref); AssertToSpaceInvariantFieldVisitor visitor(this); - to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( + to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( visitor, visitor); } @@ -1769,7 +1769,7 @@ void ConcurrentCopying::SwitchToSharedMarkStackMode() { DisableWeakRefAccessCallback dwrac(this); // Process the thread local mark stacks one last time after switching to the shared mark stack // mode and disable weak ref accesses. - ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ true, &dwrac); + ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac); if (kVerboseMode) { LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access"; } @@ -1833,7 +1833,7 @@ void ConcurrentCopying::SweepSystemWeaks(Thread* self) { void ConcurrentCopying::Sweep(bool swap_bitmaps) { if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) { // Only sweep objects on the live stack. 
- SweepArray(heap_->GetLiveStack(), /* swap_bitmaps */ false); + SweepArray(heap_->GetLiveStack(), /* swap_bitmaps= */ false); } else { { TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings()); @@ -2060,7 +2060,7 @@ void ConcurrentCopying::ReclaimPhase() { { WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); - Sweep(/* swap_bitmaps */ false); + Sweep(/* swap_bitmaps= */ false); SwapBitmaps(); heap_->UnBindBitmaps(); @@ -2171,7 +2171,7 @@ void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:"; region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT)); PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT); - MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true); + MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true); LOG(FATAL) << "Invalid reference " << ref << " referenced from object " << obj << " at offset " << offset; } @@ -2264,12 +2264,12 @@ void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source, LOG(FATAL_WITHOUT_ABORT) << "Non-free regions:"; region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT)); PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT); - MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true); + MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true); LOG(FATAL) << "Invalid reference " << ref; } } else { // Check to-space invariant in non-moving space. - AssertToSpaceInvariantInNonMovingSpace(/* obj */ nullptr, ref); + AssertToSpaceInvariantInNonMovingSpace(/* obj= */ nullptr, ref); } } } @@ -2440,7 +2440,7 @@ class ConcurrentCopying::RefFieldsVisitor { void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) { - collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root); + collector_->MarkRoot</*kGrayImmuneObject=*/false>(thread_, root); } private: @@ -2462,7 +2462,7 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) { DCHECK_EQ(Thread::Current(), thread_running_gc_); RefFieldsVisitor<kNoUnEvac> visitor(this, thread_running_gc_); // Disable the read barrier for a performance reason. - to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( + to_ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( visitor, visitor); if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) { thread_running_gc_->ModifyDebugDisallowReadBarrier(-1); @@ -2476,10 +2476,10 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) DCHECK_EQ(Thread::Current(), thread_running_gc_); mirror::Object* ref = obj->GetFieldObject< mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset); - mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, kNoUnEvac, /*kFromGCThread*/true>( + mirror::Object* to_ref = Mark</*kGrayImmuneObject=*/false, kNoUnEvac, /*kFromGCThread=*/true>( thread_running_gc_, ref, - /*holder*/ obj, + /*holder=*/ obj, offset); if (to_ref == ref) { return; @@ -2553,7 +2553,7 @@ inline void ConcurrentCopying::VisitRoots( mirror::CompressedReference<mirror::Object>* const root = roots[i]; if (!root->IsNull()) { // kGrayImmuneObject is true because this is used for the thread flip. 
- MarkRoot</*kGrayImmuneObject*/true>(self, root); + MarkRoot</*kGrayImmuneObject=*/true>(self, root); } } } @@ -2702,7 +2702,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self, if (UNLIKELY(klass == nullptr)) { // Remove memory protection from the region space and log debugging information. region_space_->Unprotect(); - heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal */ true); + heap_->GetVerification()->LogHeapCorruption(holder, offset, from_ref, /* fatal= */ true); } // There must not be a read barrier to avoid nested RB that might violate the to-space invariant. // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta @@ -2716,7 +2716,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self, size_t bytes_allocated = 0U; size_t dummy; bool fall_back_to_non_moving = false; - mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>( + mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac=*/ true>( region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy); bytes_allocated = region_space_bytes_allocated; if (LIKELY(to_ref != nullptr)) { @@ -2790,7 +2790,7 @@ mirror::Object* ConcurrentCopying::Copy(Thread* const self, DCHECK(region_space_->IsInToSpace(to_ref)); if (bytes_allocated > space::RegionSpace::kRegionSize) { // Free the large alloc. - region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated); + region_space_->FreeLarge</*kForEvac=*/ true>(to_ref, bytes_allocated); } else { // Record the lost copy for later reuse. heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed); @@ -3017,7 +3017,7 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self, // AtomicSetReadBarrierState since it will fault if the address is not // valid. region_space_->Unprotect(); - heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal */ true); + heap_->GetVerification()->LogHeapCorruption(holder, offset, ref, /* fatal= */ true); } // Not marked nor on the allocation stack. Try to mark it. // This may or may not succeed, which is ok. @@ -3131,7 +3131,7 @@ bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror } while (!field->CasWeakRelaxed(from_ref, to_ref)); } else { // TODO: Why is this seq_cst when the above is relaxed? Document memory ordering. - field->Assign</* kIsVolatile */ true>(to_ref); + field->Assign</* kIsVolatile= */ true>(to_ref); } } return true; @@ -3151,7 +3151,7 @@ void ConcurrentCopying::ProcessReferences(Thread* self) { // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps. WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); GetHeap()->GetReferenceProcessor()->ProcessReferences( - true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); + /*concurrent=*/ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this); } void ConcurrentCopying::RevokeAllThreadLocalBuffers() { @@ -3169,7 +3169,8 @@ mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* c ScopedTrace tr(__FUNCTION__); const uint64_t start_time = measure_read_barrier_slow_path_ ?
NanoTime() : 0u; mirror::Object* ret = - Mark</*kGrayImmuneObject*/true, /*kNoUnEvac*/false, /*kFromGCThread*/false>(self, from_ref); + Mark</*kGrayImmuneObject=*/true, /*kNoUnEvac=*/false, /*kFromGCThread=*/false>(self, + from_ref); if (measure_read_barrier_slow_path_) { rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed); } diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc index 3b5961899f..3c20e5156f 100644 --- a/runtime/gc/collector/immune_spaces.cc +++ b/runtime/gc/collector/immune_spaces.cc @@ -57,7 +57,7 @@ void ImmuneSpaces::CreateLargestImmuneRegion() { if (image_oat_file != nullptr) { intervals.push_back(Interval(reinterpret_cast<uintptr_t>(image_oat_file->Begin()), reinterpret_cast<uintptr_t>(image_oat_file->End()), - /*image*/false)); + /*image=*/false)); } } intervals.push_back(Interval(space_begin, space_end, /*is_heap*/true)); diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc index 3f85c711e1..0e5fac123e 100644 --- a/runtime/gc/collector/immune_spaces_test.cc +++ b/runtime/gc/collector/immune_spaces_test.cc @@ -32,7 +32,7 @@ namespace collector { class DummyOatFile : public OatFile { public: - DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*is_executable*/ false) { + DummyOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) { begin_ = begin; end_ = end; } @@ -45,7 +45,7 @@ class DummyImageSpace : public space::ImageSpace { std::unique_ptr<DummyOatFile>&& oat_file, MemMap&& oat_map) : ImageSpace("DummyImageSpace", - /*image_location*/"", + /*image_location=*/"", std::move(map), std::move(live_bitmap), map.End()), @@ -87,7 +87,7 @@ class ImmuneSpacesTest : public CommonRuntimeTest { image_begin, image_size, PROT_READ | PROT_WRITE, - /*low_4gb*/true, + /*low_4gb=*/true, &error_str); if (!map.IsValid()) { LOG(ERROR) << error_str; @@ -100,7 +100,7 @@ class ImmuneSpacesTest : public CommonRuntimeTest { oat_begin, oat_size, PROT_READ | PROT_WRITE, - /*low_4gb*/true, + /*low_4gb=*/true, &error_str); if (!oat_map.IsValid()) { LOG(ERROR) << error_str; @@ -110,23 +110,23 @@ class ImmuneSpacesTest : public CommonRuntimeTest { // Create image header. ImageSection sections[ImageHeader::kSectionCount]; new (map.Begin()) ImageHeader( - /*image_begin*/PointerToLowMemUInt32(map.Begin()), - /*image_size*/map.Size(), + /*image_begin=*/PointerToLowMemUInt32(map.Begin()), + /*image_size=*/map.Size(), sections, - /*image_roots*/PointerToLowMemUInt32(map.Begin()) + 1, - /*oat_checksum*/0u, + /*image_roots=*/PointerToLowMemUInt32(map.Begin()) + 1, + /*oat_checksum=*/0u, // The oat file data in the header is always right after the image space. 
- /*oat_file_begin*/PointerToLowMemUInt32(oat_begin), - /*oat_data_begin*/PointerToLowMemUInt32(oat_begin), - /*oat_data_end*/PointerToLowMemUInt32(oat_begin + oat_size), - /*oat_file_end*/PointerToLowMemUInt32(oat_begin + oat_size), - /*boot_image_begin*/0u, - /*boot_image_size*/0u, - /*boot_oat_begin*/0u, - /*boot_oat_size*/0u, - /*pointer_size*/sizeof(void*), + /*oat_file_begin=*/PointerToLowMemUInt32(oat_begin), + /*oat_data_begin=*/PointerToLowMemUInt32(oat_begin), + /*oat_data_end=*/PointerToLowMemUInt32(oat_begin + oat_size), + /*oat_file_end=*/PointerToLowMemUInt32(oat_begin + oat_size), + /*boot_image_begin=*/0u, + /*boot_image_size=*/0u, + /*boot_oat_begin=*/0u, + /*boot_oat_size=*/0u, + /*pointer_size=*/sizeof(void*), ImageHeader::kStorageModeUncompressed, - /*storage_size*/0u); + /*data_size=*/0u); return new DummyImageSpace(std::move(map), std::move(live_bitmap), std::move(oat_file), @@ -138,10 +138,10 @@ class ImmuneSpacesTest : public CommonRuntimeTest { static uint8_t* GetContinuousMemoryRegion(size_t size) { std::string error_str; MemMap map = MemMap::MapAnonymous("reserve", - /* addr */ nullptr, + /* addr= */ nullptr, size, PROT_READ | PROT_WRITE, - /*low_4gb*/ true, + /*low_4gb=*/ true, &error_str); if (!map.IsValid()) { LOG(ERROR) << "Failed to allocate memory region " << error_str; @@ -163,7 +163,7 @@ class DummySpace : public space::ContinuousSpace { space::kGcRetentionPolicyNeverCollect, begin, end, - /*limit*/end) {} + /*limit=*/end) {} space::SpaceType GetType() const override { return space::kSpaceTypeMallocSpace; diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 5f44a72e19..399f9ff301 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -105,10 +105,10 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre std::string error_msg; sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous( "mark sweep sweep array free buffer", - /* addr */ nullptr, + /* addr= */ nullptr, RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize), PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(sweep_array_free_buffer_mem_map_.IsValid()) << "Couldn't allocate sweep array free buffer: " << error_msg; @@ -283,9 +283,9 @@ void MarkSweep::MarkingPhase() { // cards (during the call to Heap::ProcessCard) are not reordered // *after* marking actually starts? heap_->ProcessCards(GetTimings(), - /* use_rem_sets */ false, - /* process_alloc_space_cards */ true, - /* clear_alloc_space_cards */ GetGcType() != kGcTypeSticky); + /* use_rem_sets= */ false, + /* process_alloc_space_cards= */ true, + /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky); WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); MarkRoots(self); MarkReachableObjects(); @@ -446,7 +446,7 @@ class MarkSweep::MarkObjectSlowPath { !large_object_space->Contains(obj)))) { // Lowest priority logging first: PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT); - MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true); + MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true); // Buffer the output in the string stream since it is more important than the stack traces // and we want it to have log priority. The stack traces are printed from Runtime::Abort // which is called from LOG(FATAL) but before the abort message. 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index c58b59de48..19b1fc7878 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -728,7 +728,7 @@ void SemiSpace::ScanObject(Object* obj) { DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space"; MarkObjectVisitor visitor(this); // Turn off read barrier. ZygoteCompactingCollector doesn't use it (even in the CC build.) - obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( + obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>( visitor, visitor); } diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index af9000b27a..e253dfb868 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -129,10 +129,10 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, if (!self->IsExceptionPending()) { // AllocObject will pick up the new allocator type, and instrumented as true is the safe // default. - return AllocObject</*kInstrumented*/true>(self, - klass, - byte_count, - pre_fence_visitor); + return AllocObject</*kInstrumented=*/true>(self, + klass, + byte_count, + pre_fence_visitor); } return nullptr; } diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 78e8422887..a31cbe755f 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -433,8 +433,8 @@ Heap::Heap(size_t initial_size, request_begin, capacity_, PROT_READ | PROT_WRITE, - /* low_4gb */ true, - /* reuse */ false, + /* low_4gb= */ true, + /* reuse= */ false, heap_reservation.IsValid() ? &heap_reservation : nullptr, &error_str); } @@ -463,7 +463,7 @@ Heap::Heap(size_t initial_size, initial_size, size, size, - /* can_move_objects */ false); + /* can_move_objects= */ false); CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space " << non_moving_space_mem_map_begin; non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity()); @@ -505,11 +505,11 @@ Heap::Heap(size_t initial_size, // Create bump pointer spaces instead of a backup space. main_mem_map_2.Reset(); bump_pointer_space_ = space::BumpPointerSpace::Create( - "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr); + "Bump pointer space 1", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr); CHECK(bump_pointer_space_ != nullptr); AddSpace(bump_pointer_space_); temp_space_ = space::BumpPointerSpace::Create( - "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin */ nullptr); + "Bump pointer space 2", kGSSBumpPointerSpaceCapacity, /* requested_begin= */ nullptr); CHECK(temp_space_ != nullptr); AddSpace(temp_space_); } else if (main_mem_map_2.IsValid()) { @@ -519,7 +519,7 @@ Heap::Heap(size_t initial_size, growth_limit_, capacity_, name, - /* can_move_objects */ true)); + /* can_move_objects= */ true)); CHECK(main_space_backup_.get() != nullptr); // Add the space so its accounted for in the heap_begin and heap_end. 
AddSpace(main_space_backup_.get()); @@ -634,13 +634,13 @@ Heap::Heap(size_t initial_size, } if (MayUseCollector(kCollectorTypeCC)) { concurrent_copying_collector_ = new collector::ConcurrentCopying(this, - /*young_gen*/false, + /*young_gen=*/false, "", measure_gc_performance); if (kEnableGenerationalConcurrentCopyingCollection) { young_concurrent_copying_collector_ = new collector::ConcurrentCopying( this, - /*young_gen*/true, + /*young_gen=*/true, "young", measure_gc_performance); } @@ -671,7 +671,7 @@ Heap::Heap(size_t initial_size, bool no_gap = MemMap::CheckNoGaps(*first_space->GetMemMap(), *non_moving_space_->GetMemMap()); if (!no_gap) { PrintFileToLog("/proc/self/maps", LogSeverity::ERROR); - MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse */ true); + MemMap::DumpMaps(LOG_STREAM(ERROR), /* terse= */ true); LOG(FATAL) << "There's a gap between the image space and the non-moving space"; } } @@ -696,7 +696,7 @@ MemMap Heap::MapAnonymousPreferredAddress(const char* name, request_begin, capacity, PROT_READ | PROT_WRITE, - /* low_4gb*/ true, + /* low_4gb=*/ true, out_error_str); if (map.IsValid() || request_begin == nullptr) { return map; @@ -1323,7 +1323,7 @@ void Heap::DoPendingCollectorTransition() { // Invoke CC full compaction. CollectGarbageInternal(collector::kGcTypeFull, kGcCauseCollectorTransition, - /*clear_soft_references*/false); + /*clear_soft_references=*/false); } else { VLOG(gc) << "CC background compaction ignored due to jank perceptible process state"; } @@ -1783,7 +1783,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, break; } // Try to transition the heap if the allocation failure was due to the space being full. - if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) { + if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) { // If we aren't out of memory then the OOM was probably from the non moving space being // full. Attempt to disable compaction and turn the main space into a non moving space. DisableMovingGc(); @@ -3870,7 +3870,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { // Trigger another GC because there have been enough native bytes // allocated since the last GC. 
if (IsGcConcurrent()) { - RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full*/true); + RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true); } else { CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false); } @@ -3916,7 +3916,7 @@ void Heap::CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte << " IsVariableSize=" << c->IsVariableSize() << " ObjectSize=" << c->GetObjectSize() << " sizeof(Class)=" << sizeof(mirror::Class) - << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass"); + << " " << verification_->DumpObjectInfo(c.Ptr(), /*tag=*/ "klass"); CHECK_GE(byte_count, sizeof(mirror::Object)); } @@ -4012,7 +4012,7 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) { { static constexpr size_t kMaxFrames = 16u; FixedSizeBacktrace<kMaxFrames> backtrace; - backtrace.Collect(/* skip_frames */ 2); + backtrace.Collect(/* skip_count= */ 2); uint64_t hash = backtrace.Hash(); MutexLock mu(self, *backtrace_lock_); new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end(); @@ -4023,7 +4023,7 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) { if (new_backtrace) { StackHandleScope<1> hs(self); auto h = hs.NewHandleWrapper(obj); - CollectGarbage(/* clear_soft_references */ false); + CollectGarbage(/* clear_soft_references= */ false); unique_backtrace_count_.fetch_add(1); } else { seen_backtrace_count_.fetch_add(1); diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc index 05a04f21db..a133a1058c 100644 --- a/runtime/gc/heap_test.cc +++ b/runtime/gc/heap_test.cc @@ -37,7 +37,7 @@ class HeapTest : public CommonRuntimeTest { gc::Heap::kPreferredAllocSpaceBegin, 16 * KB, PROT_READ, - /*low_4gb*/ true, + /*low_4gb=*/ true, &error_msg); ASSERT_TRUE(reserved_.IsValid()) << error_msg; CommonRuntimeTest::SetUp(); @@ -77,7 +77,7 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) { } } } - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); } TEST_F(HeapTest, HeapBitmapCapacityTest) { @@ -91,7 +91,7 @@ TEST_F(HeapTest, HeapBitmapCapacityTest) { } TEST_F(HeapTest, DumpGCPerformanceOnShutdown) { - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); Runtime::Current()->SetDumpGCPerformanceOnShutdown(true); } diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc index c212bad530..d4af117e46 100644 --- a/runtime/gc/reference_processor.cc +++ b/runtime/gc/reference_processor.cc @@ -60,16 +60,16 @@ static inline MemberOffset GetSlowPathFlagOffset(ObjPtr<mirror::Class> reference static inline void SetSlowPathFlag(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr<mirror::Class> reference_class = GetClassRoot<mirror::Reference>(); MemberOffset slow_path_offset = GetSlowPathFlagOffset(reference_class); - reference_class->SetFieldBoolean</* kTransactionActive */ false, /* kCheckTransaction */ false>( + reference_class->SetFieldBoolean</* kTransactionActive= */ false, /* kCheckTransaction= */ false>( slow_path_offset, enabled ? 
1 : 0); } void ReferenceProcessor::EnableSlowPath() { - SetSlowPathFlag(/* enabled */ true); + SetSlowPathFlag(/* enabled= */ true); } void ReferenceProcessor::DisableSlowPath(Thread* self) { - SetSlowPathFlag(/* enabled */ false); + SetSlowPathFlag(/* enabled= */ false); condition_.Broadcast(self); } @@ -238,13 +238,13 @@ void ReferenceProcessor::DelayReferenceReferent(ObjPtr<mirror::Class> klass, mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr(); // do_atomic_update needs to be true because this happens outside of the reference processing // phase. - if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update*/true)) { + if (!collector->IsNullOrMarkedHeapReference(referent, /*do_atomic_update=*/true)) { if (UNLIKELY(collector->IsTransactionActive())) { // In transaction mode, keep the referent alive and avoid any reference processing to avoid the // issue of rolling back reference processing. do_atomic_update needs to be true because this // happens outside of the reference processing phase. if (!referent->IsNull()) { - collector->MarkHeapReference(referent, /*do_atomic_update*/ true); + collector->MarkHeapReference(referent, /*do_atomic_update=*/ true); } return; } diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc index e25e279ea6..5c11e502c2 100644 --- a/runtime/gc/reference_queue.cc +++ b/runtime/gc/reference_queue.cc @@ -136,7 +136,7 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references, mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr(); // do_atomic_update is false because this happens during the reference processing phase where // Reference.clear() would block. - if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) { + if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) { // Referent is white, clear it. if (Runtime::Current()->IsActiveTransaction()) { ref->ClearReferent<true>(); @@ -158,7 +158,7 @@ void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_referenc mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr(); // do_atomic_update is false because this happens during the reference processing phase where // Reference.clear() would block. - if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) { + if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update=*/false)) { ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr()); // Move the updated referent to the zombie field. if (Runtime::Current()->IsActiveTransaction()) { @@ -187,7 +187,7 @@ void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) { if (referent_addr->AsMirrorPtr() != nullptr) { // do_atomic_update is false because mutators can't access the referent due to the weak ref // access blocking. 
- visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false); + visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ false); } ref = ref->GetPendingNext(); } while (LIKELY(ref != head)); diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc index 80af7001ff..497a0c2e5f 100644 --- a/runtime/gc/space/bump_pointer_space.cc +++ b/runtime/gc/space/bump_pointer_space.cc @@ -32,7 +32,7 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac requested_begin, capacity, PROT_READ | PROT_WRITE, - /* low_4gb */ true, + /* low_4gb= */ true, &error_msg); if (!mem_map.IsValid()) { LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size " diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index 36d2161262..73582a00c0 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -54,7 +54,7 @@ DlMallocSpace::DlMallocSpace(MemMap&& mem_map, end, limit, growth_limit, - /* create_bitmaps */ true, + /* create_bitmaps= */ true, can_move_objects, starting_size, initial_size), mspace_(mspace) { diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index b783cfecbb..a7f82f6e36 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -137,10 +137,10 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_tl_bulk_allocated) { std::string error_msg; MemMap mem_map = MemMap::MapAnonymous("large object space allocation", - /* addr */ nullptr, + /* addr= */ nullptr, num_bytes, PROT_READ | PROT_WRITE, - /* low_4gb */ true, + /* low_4gb= */ true, &error_msg); if (UNLIKELY(!mem_map.IsValid())) { LOG(WARNING) << "Large object allocation failed: " << error_msg; @@ -353,7 +353,7 @@ FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested requested_begin, size, PROT_READ | PROT_WRITE, - /* low_4gb */ true, + /* low_4gb= */ true, &error_msg); CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg; return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End()); @@ -372,10 +372,10 @@ FreeListSpace::FreeListSpace(const std::string& name, std::string error_msg; allocation_info_map_ = MemMap::MapAnonymous("large object free list space allocation info map", - /* addr */ nullptr, + /* addr= */ nullptr, alloc_info_size, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg; allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin()); diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc index 445560ad8d..be75efec6d 100644 --- a/runtime/gc/space/malloc_space.cc +++ b/runtime/gc/space/malloc_space.cc @@ -109,7 +109,7 @@ MemMap MallocSpace::CreateMemMap(const std::string& name, requested_begin, *capacity, PROT_READ | PROT_WRITE, - /* low_4gb */ true, + /* low_4gb= */ true, &error_msg); if (!mem_map.IsValid()) { LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size " diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index bda1f1c561..8cb079d53a 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -409,7 +409,7 @@ inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_alloc } 
else { DCHECK(reg->IsLargeTail()); } - reg->Clear(/*zero_and_release_pages*/true); + reg->Clear(/*zero_and_release_pages=*/true); if (kForEvac) { --num_evac_regions_; } else { diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index eba6faccb1..31bbfb8f00 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -58,7 +58,7 @@ MemMap RegionSpace::CreateMemMap(const std::string& name, requested_begin, capacity + kRegionSize, PROT_READ | PROT_WRITE, - /* low_4gb */ true, + /* low_4gb= */ true, &error_msg); if (mem_map.IsValid() || requested_begin == nullptr) { break; @@ -393,7 +393,7 @@ void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes, uint8_t* clear_block_begin = nullptr; uint8_t* clear_block_end = nullptr; auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) { - r->Clear(/*zero_and_release_pages*/false); + r->Clear(/*zero_and_release_pages=*/false); if (clear_block_end != r->Begin()) { // Region `r` is not adjacent to the current clear block; zero and release // pages within the current block and restart a new clear block at the @@ -656,7 +656,7 @@ void RegionSpace::Clear() { if (!r->IsFree()) { --num_non_free_regions_; } - r->Clear(/*zero_and_release_pages*/true); + r->Clear(/*zero_and_release_pages=*/true); } SetNonFreeRegionLimit(0); DCHECK_EQ(num_non_free_regions_, 0u); @@ -735,7 +735,7 @@ bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) { RevokeThreadLocalBuffersLocked(self); // Retain sufficient free regions for full evacuation. - Region* r = AllocateRegion(/*for_evac*/ false); + Region* r = AllocateRegion(/*for_evac=*/ false); if (r != nullptr) { r->is_a_tlab_ = true; r->thread_ = self; diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index 5af1dd3cf7..cc371b8d01 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h @@ -206,12 +206,12 @@ class RegionSpace final : public ContinuousMemMapAllocSpace { // Go through all of the blocks and visit the continuous objects. template <typename Visitor> ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) { - WalkInternal<false /* kToSpaceOnly */>(visitor); + WalkInternal</* kToSpaceOnly= */ false>(visitor); } template <typename Visitor> ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_) { - WalkInternal<true /* kToSpaceOnly */>(visitor); + WalkInternal</* kToSpaceOnly= */ true>(visitor); } accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override { diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc index f16ed2d835..4fe8027c2d 100644 --- a/runtime/gc/system_weak_test.cc +++ b/runtime/gc/system_weak_test.cc @@ -145,7 +145,7 @@ TEST_F(SystemWeakTest, Keep) { cswh.Set(GcRoot<mirror::Object>(s.Get())); // Trigger a GC. - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); // Expect the holder to have been called. EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_); @@ -166,7 +166,7 @@ TEST_F(SystemWeakTest, Discard) { cswh.Set(GcRoot<mirror::Object>(mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABC"))); // Trigger a GC. - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); // Expect the holder to have been called. 
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_); @@ -190,7 +190,7 @@ TEST_F(SystemWeakTest, Remove) { cswh.Set(GcRoot<mirror::Object>(s.Get())); // Trigger a GC. - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); // Expect the holder to have been called. ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_); @@ -205,7 +205,7 @@ TEST_F(SystemWeakTest, Remove) { Runtime::Current()->RemoveSystemWeakHolder(&cswh); // Trigger another GC. - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); // Expectation: no change in the numbers. EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_); diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc index 0281eeedb9..47c54bd189 100644 --- a/runtime/gc/verification.cc +++ b/runtime/gc/verification.cc @@ -87,7 +87,7 @@ void Verification::LogHeapCorruption(ObjPtr<mirror::Object> holder, bool fatal) const { // Lowest priority logging first: PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT); - MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true); + MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true); Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT)); // Buffer the output in the string stream since it is more important than the stack traces // and we want it to have log priority. The stack traces are printed from Runtime::Abort diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h index d091e7f371..f61c700a36 100644 --- a/runtime/handle_scope-inl.h +++ b/runtime/handle_scope-inl.h @@ -199,7 +199,7 @@ inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(ObjPtr<Mirr inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self) : BaseHandleScope(self->GetTopHandleScope()), self_(self) { - current_scope_ = new LocalScopeType(/*link*/ nullptr); + current_scope_ = new LocalScopeType(/*link=*/ nullptr); self_->PushHandleScope(this); } diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h index 580224e439..c16e7f347e 100644 --- a/runtime/hidden_api.h +++ b/runtime/hidden_api.h @@ -242,9 +242,9 @@ inline Action GetMemberAction(T* member, AccessMethod access_method) REQUIRES_SHARED(Locks::mutator_lock_) { bool is_caller_trusted = - detail::IsCallerTrusted(/* caller */ nullptr, caller_class_loader, caller_dex_cache); + detail::IsCallerTrusted(/* caller= */ nullptr, caller_class_loader, caller_dex_cache); return GetMemberAction(member, - /* thread */ nullptr, + /* thread= */ nullptr, [is_caller_trusted] (Thread*) { return is_caller_trusted; }, access_method); } diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index d20522574b..6db47903b2 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -80,10 +80,10 @@ IndirectReferenceTable::IndirectReferenceTable(size_t max_count, const size_t table_bytes = max_count * sizeof(IrtEntry); table_mem_map_ = MemMap::MapAnonymous("indirect ref table", - /* addr */ nullptr, + /* addr= */ nullptr, table_bytes, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, error_msg); if (!table_mem_map_.IsValid() && error_msg->empty()) { *error_msg = "Unable to map memory for indirect ref table"; @@ -223,10 +223,10 
@@ bool IndirectReferenceTable::Resize(size_t new_size, std::string* error_msg) { const size_t table_bytes = new_size * sizeof(IrtEntry); MemMap new_map = MemMap::MapAnonymous("indirect ref table", - /* addr */ nullptr, + /* addr= */ nullptr, table_bytes, PROT_READ | PROT_WRITE, - /* is_low_4gb */ false, + /* low_4gb= */ false, error_msg); if (!new_map.IsValid()) { return false; diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 5c7b0aeeaf..d533054f6a 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -1495,8 +1495,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method); self->PushDeoptimizationContext(return_value, return_shorty == 'L' || return_shorty == '[', - nullptr /* no pending exception */, - false /* from_code */, + /* exception= */ nullptr , + /* from_code= */ false, deopt_method_type); return GetTwoWordSuccessValue(*return_pc, reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint())); diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc index 31cfeb6af5..d97368931a 100644 --- a/runtime/instrumentation_test.cc +++ b/runtime/instrumentation_test.cc @@ -509,9 +509,9 @@ TEST_F(InstrumentationTest, MethodEntryEvent) { ASSERT_TRUE(method->IsDirect()); ASSERT_TRUE(method->GetDeclaringClass() == klass); TestEvent(instrumentation::Instrumentation::kMethodEntered, - /*event_method*/ method, - /*event_field*/ nullptr, - /*with_object*/ true); + /*event_method=*/ method, + /*event_field=*/ nullptr, + /*with_object=*/ true); } TEST_F(InstrumentationTest, MethodExitObjectEvent) { @@ -529,9 +529,9 @@ TEST_F(InstrumentationTest, MethodExitObjectEvent) { ASSERT_TRUE(method->IsDirect()); ASSERT_TRUE(method->GetDeclaringClass() == klass); TestEvent(instrumentation::Instrumentation::kMethodExited, - /*event_method*/ method, - /*event_field*/ nullptr, - /*with_object*/ true); + /*event_method=*/ method, + /*event_field=*/ nullptr, + /*with_object=*/ true); } TEST_F(InstrumentationTest, MethodExitPrimEvent) { @@ -548,9 +548,9 @@ TEST_F(InstrumentationTest, MethodExitPrimEvent) { ASSERT_TRUE(method->IsDirect()); ASSERT_TRUE(method->GetDeclaringClass() == klass); TestEvent(instrumentation::Instrumentation::kMethodExited, - /*event_method*/ method, - /*event_field*/ nullptr, - /*with_object*/ false); + /*event_method=*/ method, + /*event_field=*/ nullptr, + /*with_object=*/ false); } TEST_F(InstrumentationTest, MethodUnwindEvent) { @@ -582,9 +582,9 @@ TEST_F(InstrumentationTest, FieldWriteObjectEvent) { ASSERT_TRUE(field != nullptr); TestEvent(instrumentation::Instrumentation::kFieldWritten, - /*event_method*/ nullptr, - /*event_field*/ field, - /*with_object*/ true); + /*event_method=*/ nullptr, + /*event_field=*/ field, + /*with_object=*/ true); } TEST_F(InstrumentationTest, FieldWritePrimEvent) { @@ -600,9 +600,9 @@ TEST_F(InstrumentationTest, FieldWritePrimEvent) { ASSERT_TRUE(field != nullptr); TestEvent(instrumentation::Instrumentation::kFieldWritten, - /*event_method*/ nullptr, - /*event_field*/ field, - /*with_object*/ false); + /*event_method=*/ nullptr, + /*event_field=*/ field, + /*with_object=*/ false); } TEST_F(InstrumentationTest, ExceptionHandledEvent) { diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 8a31985a97..b37a2781b5 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -587,8 +587,8 @@ void 
EnterInterpreterFromDeoptimize(Thread* self, accessor, *shadow_frame, value, - /* stay_in_interpreter */ true, - /* from_deoptimize */ true); + /* stay_in_interpreter= */ true, + /* from_deoptimize= */ true); } ShadowFrame* old_frame = shadow_frame; shadow_frame = shadow_frame->GetLink(); diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index cb91953458..2cee8137b2 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -714,12 +714,12 @@ bool DoMethodHandleInvokeExact(Thread* self, if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) { static const bool kIsRange = false; return DoMethodHandleInvokeCommon<kIsRange>( - self, shadow_frame, true /* is_exact */, inst, inst_data, result); + self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result); } else { DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE); static const bool kIsRange = true; return DoMethodHandleInvokeCommon<kIsRange>( - self, shadow_frame, true /* is_exact */, inst, inst_data, result); + self, shadow_frame, /* invoke_exact= */ true, inst, inst_data, result); } } @@ -731,12 +731,12 @@ bool DoMethodHandleInvoke(Thread* self, if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) { static const bool kIsRange = false; return DoMethodHandleInvokeCommon<kIsRange>( - self, shadow_frame, false /* is_exact */, inst, inst_data, result); + self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result); } else { DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_POLYMORPHIC_RANGE); static const bool kIsRange = true; return DoMethodHandleInvokeCommon<kIsRange>( - self, shadow_frame, false /* is_exact */, inst, inst_data, result); + self, shadow_frame, /* invoke_exact= */ false, inst, inst_data, result); } } diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index 26bfba9ffc..7055e8aa90 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -290,7 +290,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame, if (jit != nullptr) { jit->InvokeVirtualOrInterface( receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method); - jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false); + jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/false); } // No need to check since we've been quickened. 
return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index d9f76eee6e..4757b57707 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -167,7 +167,7 @@ namespace interpreter { #define HOTNESS_UPDATE() \ do { \ if (jit != nullptr) { \ - jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/ true); \ + jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges=*/ true); \ } \ } while (false) @@ -1754,7 +1754,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) case Instruction::INVOKE_POLYMORPHIC: { PREAMBLE(); DCHECK(Runtime::Current()->IsMethodHandlesEnabled()); - bool success = DoInvokePolymorphic<false /* is_range */>( + bool success = DoInvokePolymorphic</* is_range= */ false>( self, shadow_frame, inst, inst_data, &result_register); POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success); break; @@ -1762,7 +1762,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) case Instruction::INVOKE_POLYMORPHIC_RANGE: { PREAMBLE(); DCHECK(Runtime::Current()->IsMethodHandlesEnabled()); - bool success = DoInvokePolymorphic<true /* is_range */>( + bool success = DoInvokePolymorphic</* is_range= */ true>( self, shadow_frame, inst, inst_data, &result_register); POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success); break; @@ -1770,7 +1770,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) case Instruction::INVOKE_CUSTOM: { PREAMBLE(); DCHECK(Runtime::Current()->IsMethodHandlesEnabled()); - bool success = DoInvokeCustom<false /* is_range */>( + bool success = DoInvokeCustom</* is_range= */ false>( self, shadow_frame, inst, inst_data, &result_register); POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success); break; @@ -1778,7 +1778,7 @@ ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) case Instruction::INVOKE_CUSTOM_RANGE: { PREAMBLE(); DCHECK(Runtime::Current()->IsMethodHandlesEnabled()); - bool success = DoInvokeCustom<true /* is_range */>( + bool success = DoInvokeCustom</* is_range= */ true>( self, shadow_frame, inst, inst_data, &result_register); POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success); break; diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index c9a8adc826..4b6f430e65 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ -220,7 +220,7 @@ extern "C" size_t MterpInvokeCustom(Thread* self, REQUIRES_SHARED(Locks::mutator_lock_) { JValue* result_register = shadow_frame->GetResultRegister(); const Instruction* inst = Instruction::At(dex_pc_ptr); - return DoInvokeCustom<false /* is_range */>( + return DoInvokeCustom</* is_range= */ false>( self, *shadow_frame, inst, inst_data, result_register); } @@ -231,7 +231,7 @@ extern "C" size_t MterpInvokePolymorphic(Thread* self, REQUIRES_SHARED(Locks::mutator_lock_) { JValue* result_register = shadow_frame->GetResultRegister(); const Instruction* inst = Instruction::At(dex_pc_ptr); - return DoInvokePolymorphic<false /* is_range */>( + return DoInvokePolymorphic</* is_range= */ false>( self, *shadow_frame, inst, inst_data, result_register); } @@ -297,7 +297,7 @@ extern "C" size_t MterpInvokeCustomRange(Thread* self, REQUIRES_SHARED(Locks::mutator_lock_) { JValue* result_register = 
shadow_frame->GetResultRegister(); const Instruction* inst = Instruction::At(dex_pc_ptr); - return DoInvokeCustom<true /* is_range */>(self, *shadow_frame, inst, inst_data, result_register); + return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register); } extern "C" size_t MterpInvokePolymorphicRange(Thread* self, @@ -307,7 +307,7 @@ extern "C" size_t MterpInvokePolymorphicRange(Thread* self, REQUIRES_SHARED(Locks::mutator_lock_) { JValue* result_register = shadow_frame->GetResultRegister(); const Instruction* inst = Instruction::At(dex_pc_ptr); - return DoInvokePolymorphic<true /* is_range */>( + return DoInvokePolymorphic</* is_range= */ true>( self, *shadow_frame, inst, inst_data, result_register); } @@ -375,8 +375,8 @@ extern "C" size_t MterpConstClass(uint32_t index, ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(index), shadow_frame->GetMethod(), self, - /* can_run_clinit */ false, - /* verify_access */ false); + /* can_run_clinit= */ false, + /* verify_access= */ false); if (UNLIKELY(c == nullptr)) { return true; } @@ -463,8 +463,8 @@ extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()), shadow_frame->GetMethod(), self, - /* can_run_clinit */ false, - /* verify_access */ false); + /* can_run_clinit= */ false, + /* verify_access= */ false); if (LIKELY(c != nullptr)) { if (UNLIKELY(c->IsStringClass())) { gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator(); @@ -682,8 +682,8 @@ ALWAYS_INLINE void MterpFieldAccess(Instruction* inst, if (kIsPrimitive) { if (kIsRead) { PrimType value = UNLIKELY(is_volatile) - ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset) - : obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset); + ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset) + : obj->GetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset); if (sizeof(PrimType) == sizeof(uint64_t)) { shadow_frame->SetVRegLong(vRegA, value); // Set two consecutive registers. } else { @@ -694,9 +694,9 @@ ALWAYS_INLINE void MterpFieldAccess(Instruction* inst, ? shadow_frame->GetVRegLong(vRegA) : shadow_frame->GetVReg(vRegA); if (UNLIKELY(is_volatile)) { - obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset, value); + obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ true>(offset, value); } else { - obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset, value); + obj->SetFieldPrimitive<PrimType, /*kIsVolatile=*/ false>(offset, value); } } } else { // Object. @@ -708,9 +708,9 @@ ALWAYS_INLINE void MterpFieldAccess(Instruction* inst, } else { // Write. ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA); if (UNLIKELY(is_volatile)) { - obj->SetFieldObjectVolatile</*kTransactionActive*/ false>(offset, value); + obj->SetFieldObjectVolatile</*kTransactionActive=*/ false>(offset, value); } else { - obj->SetFieldObject</*kTransactionActive*/ false>(offset, value); + obj->SetFieldObject</*kTransactionActive=*/ false>(offset, value); } } } @@ -729,7 +729,7 @@ NO_INLINE bool MterpFieldAccessSlow(Instruction* inst, shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst)); ArtMethod* referrer = shadow_frame->GetMethod(); uint32_t field_idx = kIsStatic ? 
inst->VRegB_21c() : inst->VRegC_22c(); - ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>( + ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>( field_idx, referrer, self, sizeof(PrimType)); if (UNLIKELY(field == nullptr)) { DCHECK(self->IsExceptionPending()); @@ -770,7 +770,7 @@ ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst, : tls_value; if (kIsDebugBuild) { uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c(); - ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>( + ArtField* field = FindFieldFromCode<kAccessType, /* access_checks= */ false>( field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType)); DCHECK_EQ(offset, field->GetOffset().SizeValue()); } @@ -779,7 +779,7 @@ ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst, : MakeObjPtr(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data))); if (LIKELY(obj != nullptr)) { MterpFieldAccess<PrimType, kAccessType>( - inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile */ false); + inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile= */ false); return true; } } @@ -798,7 +798,7 @@ ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst, if (LIKELY(field != nullptr)) { bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized(); if (LIKELY(initialized)) { - DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks */ false>( + DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>( field_idx, referrer, self, sizeof(PrimType)))); ObjPtr<mirror::Object> obj = kIsStatic ? field->GetDeclaringClass().Ptr() @@ -930,7 +930,7 @@ extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method, jit::Jit* jit = Runtime::Current()->GetJit(); if (jit != nullptr) { int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown(); - jit->AddSamples(self, method, count, /*with_backedges*/ true); + jit->AddSamples(self, method, count, /*with_backedges=*/ true); } return MterpSetUpHotnessCountdown(method, shadow_frame, self); } @@ -955,7 +955,7 @@ extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self, osr_countdown = jit::Jit::kJitRecheckOSRThreshold; if (offset <= 0) { // Keep updating hotness in case a compilation request was dropped. Eventually it will retry. - jit->AddSamples(self, method, osr_countdown, /*with_backedges*/ true); + jit->AddSamples(self, method, osr_countdown, /*with_backedges=*/ true); } did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result); } diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index 38ecc5a53d..07afba4cce 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -865,10 +865,10 @@ void UnstartedRuntime::UnstartedSystemArraycopy( // checking version, however, does. 
if (Runtime::Current()->IsActiveTransaction()) { dst->AssignableCheckingMemcpy<true>( - dst_pos, src, src_pos, length, true /* throw_exception */); + dst_pos, src, src_pos, length, /* throw_exception= */ true); } else { dst->AssignableCheckingMemcpy<false>( - dst_pos, src, src_pos, length, true /* throw_exception */); + dst_pos, src, src_pos, length, /* throw_exception= */ true); } } } else if (src_type->IsPrimitiveByte()) { @@ -1478,9 +1478,9 @@ void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject( reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset)); ReadBarrier::Barrier< mirror::Object, - /* kIsVolatile */ false, + /* kIsVolatile= */ false, kWithReadBarrier, - /* kAlwaysUpdateField */ true>( + /* kAlwaysUpdateField= */ true>( obj, MemberOffset(offset), field_addr); diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc index bd2705d530..3fafc31e21 100644 --- a/runtime/interpreter/unstarted_runtime_test.cc +++ b/runtime/interpreter/unstarted_runtime_test.cc @@ -695,7 +695,7 @@ TEST_F(UnstartedRuntimeTest, Ceil) { { ld2, ld2 } }; - TestCeilFloor(true /* ceil */, self, tmp.get(), test_pairs, arraysize(test_pairs)); + TestCeilFloor(/* ceil= */ true, self, tmp.get(), test_pairs, arraysize(test_pairs)); } TEST_F(UnstartedRuntimeTest, Floor) { @@ -722,7 +722,7 @@ TEST_F(UnstartedRuntimeTest, Floor) { { ld2, ld2 } }; - TestCeilFloor(false /* floor */, self, tmp.get(), test_pairs, arraysize(test_pairs)); + TestCeilFloor(/* ceil= */ false, self, tmp.get(), test_pairs, arraysize(test_pairs)); } TEST_F(UnstartedRuntimeTest, ToLowerUpper) { diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index c1f69b8712..ef893eec30 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -599,12 +599,12 @@ class JitCompileTask final : public Task { void Run(Thread* self) override { ScopedObjectAccess soa(self); if (kind_ == kCompile) { - Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ false); + Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ false); } else if (kind_ == kCompileOsr) { - Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr */ true); + Runtime::Current()->GetJit()->CompileMethod(method_, self, /* osr= */ true); } else { DCHECK(kind_ == kAllocateProfile); - if (ProfilingInfo::Create(self, method_, /* retry_allocation */ true)) { + if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) { VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_); } } @@ -673,7 +673,7 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_ if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) { if ((new_count >= WarmMethodThreshold()) && (method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) { - bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false); + bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false); if (success) { VLOG(jit) << "Start profiling " << method->PrettyMethod(); } @@ -741,7 +741,7 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) { if (np_method->IsCompilable()) { if (!np_method->IsNative()) { // The compiler requires a ProfilingInfo object for non-native methods. 
- ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true); + ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true); } JitCompileTask compile_task(method, JitCompileTask::kCompile); // Fake being in a runtime thread so that class-load behavior will be the same as normal jit. @@ -761,7 +761,7 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) { Runtime::Current()->GetInstrumentation()->UpdateMethodsCode( method, profiling_info->GetSavedEntryPoint()); } else { - AddSamples(thread, method, 1, /* with_backedges */false); + AddSamples(thread, method, 1, /* with_backedges= */false); } } diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 63cb6a4593..8600b41c71 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -221,7 +221,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity, unique_fd mem_fd; // Bionic supports memfd_create, but the call may fail on older kernels. - mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0)); + mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0)); if (mem_fd.get() < 0) { VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno); @@ -281,8 +281,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity, kProtRW, base_flags, mem_fd, - /* start */ 0, - /* low_4gb */ true, + /* start= */ 0, + /* low_4gb= */ true, "data-code-cache", &error_str); } else { @@ -303,12 +303,12 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity, base_flags = MAP_PRIVATE | MAP_ANON; data_pages = MemMap::MapAnonymous( "data-code-cache", - /* addr */ nullptr, + /* addr= */ nullptr, data_capacity + exec_capacity, kProtRW, - /* low_4gb */ true, - /* reuse */ false, - /* reservation */ nullptr, + /* low_4gb= */ true, + /* reuse= */ false, + /* reservation= */ nullptr, &error_str); } @@ -347,8 +347,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity, kProtR, base_flags, mem_fd, - /* start */ data_capacity, - /* low_4GB */ false, + /* start= */ data_capacity, + /* low_4GB= */ false, "jit-code-cache-rw", &error_str); if (!non_exec_pages.IsValid()) { @@ -1008,7 +1008,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, // Simply discard the compiled code. Clear the counter so that it may be recompiled later. // Hopefully the class hierarchy will be more stable when compilation is retried. single_impl_still_valid = false; - ClearMethodCounter(method, /*was_warm*/ false); + ClearMethodCounter(method, /*was_warm=*/ false); break; } } @@ -1156,7 +1156,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) { // method. The compiled code for the method (if there is any) must not be in any threads call stack. void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) { MutexLock mu(Thread::Current(), lock_); - RemoveMethodLocked(method, /* release_memory */ true); + RemoveMethodLocked(method, /* release_memory= */ true); } // This invalidates old_method. Once this function returns one can no longer use old_method to @@ -1314,7 +1314,7 @@ class MarkCodeClosure final : public Closure { // its stack frame, it is not the method owning return_pc_. We just pass null to // LookupMethodHeader: the method is only checked against in debug builds. 
OatQuickMethodHeader* method_header = - code_cache_->LookupMethodHeader(frame.return_pc_, /* method */ nullptr); + code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr); if (method_header != nullptr) { const void* code = method_header->GetCode(); CHECK(code_cache_->GetLiveBitmap()->Test(FromCodeToAllocation(code))); @@ -1438,7 +1438,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) { << PrettySize(CodeCacheSize()) << ", data=" << PrettySize(DataCacheSize()); - DoCollection(self, /* collect_profiling_info */ do_full_collection); + DoCollection(self, /* collect_profiling_info= */ do_full_collection); VLOG(jit) << "After code cache collection, code=" << PrettySize(CodeCacheSize()) @@ -1551,7 +1551,7 @@ void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) { info->SetSavedEntryPoint(nullptr); // We are going to move this method back to interpreter. Clear the counter now to // give it a chance to be hot again. - ClearMethodCounter(info->GetMethod(), /*was_warm*/ true); + ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true); } } } else if (kIsDebugBuild) { @@ -1933,7 +1933,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled"; // Because the counter is not atomic, there are some rare cases where we may not hit the // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this. - ClearMethodCounter(method, /*was_warm*/ false); + ClearMethodCounter(method, /*was_warm=*/ false); return false; } @@ -2009,7 +2009,7 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method, // and clear the counter to get the method Jitted again. Runtime::Current()->GetInstrumentation()->UpdateMethodsCode( method, GetQuickToInterpreterBridge()); - ClearMethodCounter(method, /*was_warm*/ profiling_info != nullptr); + ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr); } else { MutexLock mu(Thread::Current(), lock_); auto it = osr_code_map_.find(method); diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 9043f267fb..e3248eaf24 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -129,7 +129,7 @@ void ProfileSaver::Run() { } total_ms_of_sleep_ += options_.GetSaveResolvedClassesDelayMs(); } - FetchAndCacheResolvedClassesAndMethods(/*startup*/ true); + FetchAndCacheResolvedClassesAndMethods(/*startup=*/ true); // When we save without waiting for JIT notifications we use a simple @@ -183,7 +183,7 @@ void ProfileSaver::Run() { uint16_t number_of_new_methods = 0; uint64_t start_work = NanoTime(); - bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods); + bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save=*/false, &number_of_new_methods); // Update the notification counter based on result. Note that there might be contention on this // but we don't care about to be 100% precise. if (!profile_saved_to_disk) { @@ -501,7 +501,7 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number // We only need to do this once, not once per dex location. // TODO: Figure out a way to only do it when stuff has changed? It takes 30-50ms. 
- FetchAndCacheResolvedClassesAndMethods(/*startup*/ false); + FetchAndCacheResolvedClassesAndMethods(/*startup=*/ false); for (const auto& it : tracked_locations) { if (!force_save && ShuttingDown(Thread::Current())) { @@ -521,7 +521,7 @@ bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number } { ProfileCompilationInfo info(Runtime::Current()->GetArenaPool()); - if (!info.Load(filename, /*clear_if_invalid*/ true)) { + if (!info.Load(filename, /*clear_if_invalid=*/ true)) { LOG(WARNING) << "Could not forcefully load profile " << filename; continue; } @@ -607,9 +607,9 @@ void* ProfileSaver::RunProfileSaverThread(void* arg) { Runtime* runtime = Runtime::Current(); bool attached = runtime->AttachCurrentThread("Profile Saver", - /*as_daemon*/true, + /*as_daemon=*/true, runtime->GetSystemThreadGroup(), - /*create_peer*/true); + /*create_peer=*/true); if (!attached) { CHECK(runtime->IsShuttingDown(Thread::Current())); return nullptr; @@ -751,7 +751,7 @@ void ProfileSaver::Stop(bool dump_info) { // Force save everything before destroying the thread since we want profiler_pthread_ to remain // valid. - instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr); + instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr); // Wait for the saver thread to stop. CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown"); @@ -838,7 +838,7 @@ void ProfileSaver::ForceProcessProfiles() { // but we only use this in testing when we now this won't happen. // Refactor the way we handle the instance so that we don't end up in this situation. if (saver != nullptr) { - saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr); + saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr); } } @@ -846,7 +846,7 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile, bool hot, MethodRef MutexLock mu(Thread::Current(), *Locks::profiler_lock_); if (instance_ != nullptr) { ProfileCompilationInfo info(Runtime::Current()->GetArenaPool()); - if (!info.Load(profile, /*clear_if_invalid*/false)) { + if (!info.Load(profile, /*clear_if_invalid=*/false)) { return false; } ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref); diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h index a3dae8330a..f6139bb82b 100644 --- a/runtime/jit/profiling_info.h +++ b/runtime/jit/profiling_info.h @@ -125,7 +125,7 @@ class ProfilingInfo { } bool IsInUseByCompiler() const { - return IsMethodBeingCompiled(/*osr*/ true) || IsMethodBeingCompiled(/*osr*/ false) || + return IsMethodBeingCompiled(/*osr=*/ true) || IsMethodBeingCompiled(/*osr=*/ false) || (current_inline_uses_ > 0); } diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc index 6f61f5e37c..48f99815fd 100644 --- a/runtime/jni/check_jni.cc +++ b/runtime/jni/check_jni.cc @@ -286,7 +286,7 @@ bool CheckAttachedThread(const char* function_name) { // to get reasonable stacks and environment, rather than relying on // tombstoned. 
JNIEnv* env; - Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thread_args */ nullptr); + Runtime::Current()->GetJavaVM()->AttachCurrentThread(&env, /* thr_args= */ nullptr); std::string tmp = android::base::StringPrintf( "a thread (tid %" PRId64 " is making JNI calls without being attached", diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc index 5200607e9b..52509fde66 100644 --- a/runtime/jni/jni_internal.cc +++ b/runtime/jni/jni_internal.cc @@ -82,7 +82,7 @@ namespace art { static constexpr bool kWarnJniAbort = false; static bool IsCallerTrusted(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { - return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames */ 1)); + return hiddenapi::IsCallerTrusted(GetCallingClass(self, /* num_frames= */ 1)); } template<typename T> @@ -106,9 +106,9 @@ static void NotifySetObjectField(ArtField* field, jobject obj, jobject jval) instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); if (UNLIKELY(instrumentation->HasFieldWriteListeners())) { Thread* self = Thread::Current(); - ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr, - /*check_suspended*/ true, - /*abort_on_error*/ false); + ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr, + /*check_suspended=*/ true, + /*abort_on_error=*/ false); if (cur_method == nullptr) { // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all @@ -133,9 +133,9 @@ static void NotifySetPrimitiveField(ArtField* field, jobject obj, JValue val) instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); if (UNLIKELY(instrumentation->HasFieldWriteListeners())) { Thread* self = Thread::Current(); - ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr, - /*check_suspended*/ true, - /*abort_on_error*/ false); + ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr, + /*check_suspended=*/ true, + /*abort_on_error=*/ false); if (cur_method == nullptr) { // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all @@ -157,9 +157,9 @@ static void NotifyGetField(ArtField* field, jobject obj) instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); if (UNLIKELY(instrumentation->HasFieldReadListeners())) { Thread* self = Thread::Current(); - ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc*/ nullptr, - /*check_suspended*/ true, - /*abort_on_error*/ false); + ArtMethod* cur_method = self->GetCurrentMethod(/*dex_pc=*/ nullptr, + /*check_suspended=*/ true, + /*abort_on_error=*/ false); if (cur_method == nullptr) { // Set/Get Fields can be issued without a method during runtime startup/teardown. Ignore all diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc index 4ad4c14e0b..57346b7f78 100644 --- a/runtime/jni/jni_internal_test.cc +++ b/runtime/jni/jni_internal_test.cc @@ -962,11 +962,11 @@ TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) { // Make sure we can actually use it. 
jstring s = env_->NewStringUTF("poop"); if (mirror::kUseStringCompression) { - ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible */ true), + ASSERT_EQ(mirror::String::GetFlaggedCount(4, /* compressible= */ true), env_->GetIntField(s, fid2)); // Create incompressible string jstring s_16 = env_->NewStringUTF("\u0444\u0444"); - ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible */ false), + ASSERT_EQ(mirror::String::GetFlaggedCount(2, /* compressible= */ false), env_->GetIntField(s_16, fid2)); } else { ASSERT_EQ(4, env_->GetIntField(s, fid2)); @@ -1485,7 +1485,7 @@ TEST_F(JniInternalTest, StaleWeakGlobal) { ASSERT_NE(weak_global, nullptr); env_->DeleteLocalRef(local_ref); // GC should clear the weak global. - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); jobject new_global_ref = env_->NewGlobalRef(weak_global); EXPECT_EQ(new_global_ref, nullptr); jobject new_local_ref = env_->NewLocalRef(weak_global); diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc index 570fc48272..86ad32ef9b 100644 --- a/runtime/method_handles.cc +++ b/runtime/method_handles.cc @@ -745,7 +745,7 @@ bool DoInvokePolymorphicMethod(Thread* self, callee_type, self, shadow_frame, - method_handle /* receiver */, + /* receiver= */ method_handle, operands, result); } else { @@ -1103,7 +1103,7 @@ static inline bool MethodHandleInvokeInternal(Thread* self, if (IsInvokeVarHandle(handle_kind)) { return DoVarHandleInvokeTranslation(self, shadow_frame, - /*invokeExact*/ false, + /*invokeExact=*/ false, method_handle, callsite_type, operands, @@ -1155,7 +1155,7 @@ static inline bool MethodHandleInvokeExactInternal( } else if (IsInvokeVarHandle(handle_kind)) { return DoVarHandleInvokeTranslation(self, shadow_frame, - /*invokeExact*/ true, + /*invokeExact=*/ true, method_handle, callsite_type, operands, diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 31bc5e4399..50b1b903ab 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -1073,8 +1073,8 @@ inline void Class::FixupNativePointer( T old_value = GetFieldPtrWithSize<T, kVerifyFlags>(member_offset, pointer_size); T new_value = visitor(old_value, address); if (old_value != new_value) { - dest->SetFieldPtrWithSize</* kTransactionActive */ false, - /* kCheckTransaction */ true, + dest->SetFieldPtrWithSize</* kTransactionActive= */ false, + /* kCheckTransaction= */ true, kVerifyNone>(member_offset, new_value, pointer_size); } } diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 26dba024c6..6a378f0ca5 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -83,7 +83,7 @@ ObjPtr<mirror::Class> Class::GetPrimitiveClass(ObjPtr<mirror::String> name) { Thread* self = Thread::Current(); if (name == nullptr) { // Note: ThrowNullPointerException() requires a message which we deliberately want to omit. 
- self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg */ nullptr); + self->ThrowNewException("Ljava/lang/NullPointerException;", /* msg= */ nullptr); } else { self->ThrowNewException("Ljava/lang/ClassNotFoundException;", name->ToModifiedUtf8().c_str()); } diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc index e9e7ca8688..36c5ae2ab9 100644 --- a/runtime/mirror/dex_cache_test.cc +++ b/runtime/mirror/dex_cache_test.cc @@ -108,7 +108,7 @@ TEST_F(DexCacheTest, TestResolvedFieldAccess) { EXPECT_NE(klass1->NumStaticFields(), 0u); for (ArtField& field : klass2->GetSFields()) { EXPECT_FALSE( - klass1->ResolvedFieldAccessTest</*throw_on_failure*/ false>( + klass1->ResolvedFieldAccessTest</*throw_on_failure=*/ false>( klass2.Get(), &field, klass1->GetDexCache(), diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index fbe002a9f0..8ae79a8c66 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -80,11 +80,11 @@ inline uint32_t Object::GetLockOwnerThreadId() { } inline mirror::Object* Object::MonitorEnter(Thread* self) { - return Monitor::MonitorEnter(self, this, /*trylock*/false); + return Monitor::MonitorEnter(self, this, /*trylock=*/false); } inline mirror::Object* Object::MonitorTryEnter(Thread* self) { - return Monitor::MonitorEnter(self, this, /*trylock*/true); + return Monitor::MonitorEnter(self, this, /*trylock=*/true); } inline bool Object::MonitorExit(Thread* self) { @@ -738,7 +738,7 @@ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVer inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value) { VerifyTransaction<kTransactionActive, kCheckTransaction>(); - VerifyCAS<kVerifyFlags>(new_value, /*old_value*/ nullptr); + VerifyCAS<kVerifyFlags>(new_value, /*old_value=*/ nullptr); uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value)); uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value(); diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h index 8689e4dadb..ee84997fe6 100644 --- a/runtime/mirror/object-readbarrier-inl.h +++ b/runtime/mirror/object-readbarrier-inl.h @@ -131,7 +131,7 @@ inline uint32_t Object::GetReadBarrierState() { UNREACHABLE(); } DCHECK(kUseBakerReadBarrier); - LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile*/false>(MonitorOffset())); + LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset())); uint32_t rb_state = lw.ReadBarrierState(); DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state; return rb_state; diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h index 8fa2c6cf7f..3752d6dde9 100644 --- a/runtime/mirror/string-inl.h +++ b/runtime/mirror/string-inl.h @@ -237,7 +237,7 @@ inline String* String::Alloc(Thread* self, int32_t utf16_length_with_flag, template <bool kIsInstrumented> inline String* String::AllocEmptyString(Thread* self, gc::AllocatorType allocator_type) { - const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible */ true); + const int32_t length_with_flag = String::GetFlaggedCount(0, /* compressible= */ true); SetStringCountVisitor visitor(length_with_flag); return Alloc<kIsInstrumented>(self, length_with_flag, allocator_type, visitor); } diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 02aa1a823a..0f0a378142 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -289,7 +289,7 @@ struct 
NthCallerWithDexPcVisitor final : public StackVisitor { // Is this the requested frame? if (current_frame_number_ == wanted_frame_number_) { method_ = m; - dex_pc_ = GetDexPc(false /* abort_on_error*/); + dex_pc_ = GetDexPc(/* abort_on_failure=*/ false); return false; } @@ -385,7 +385,7 @@ bool Monitor::TryLockLocked(Thread* self) { } else { return false; } - AtraceMonitorLock(self, GetObject(), false /* is_wait */); + AtraceMonitorLock(self, GetObject(), /* is_wait= */ false); return true; } @@ -777,7 +777,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, AtraceMonitorUnlock(); // For the implict Unlock() just above. This will only end the deepest // nesting, but that is enough for the visualization, and corresponds to // the single Lock() we do afterwards. - AtraceMonitorLock(self, GetObject(), true /* is_wait */); + AtraceMonitorLock(self, GetObject(), /* is_wait= */ true); bool was_interrupted = false; bool timed_out = false; @@ -1042,7 +1042,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr // No ordering required for preceding lockword read, since we retest. LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState())); if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) { - AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); + AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false); return h_obj.Get(); // Success! } continue; // Go again. @@ -1060,8 +1060,8 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr // Only this thread pays attention to the count. Thus there is no need for stronger // than relaxed memory ordering. if (!kUseReadBarrier) { - h_obj->SetLockWord(thin_locked, false /* volatile */); - AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); + h_obj->SetLockWord(thin_locked, /* as_volatile= */ false); + AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false); return h_obj.Get(); // Success! } else { // Use CAS to preserve the read barrier state. @@ -1069,7 +1069,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr thin_locked, CASMode::kWeak, std::memory_order_relaxed)) { - AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); + AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false); return h_obj.Get(); // Success! } } diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc index 0b168f86f4..8610899b9b 100644 --- a/runtime/monitor_test.cc +++ b/runtime/monitor_test.cc @@ -361,7 +361,7 @@ TEST_F(MonitorTest, TestTryLock) { thread_pool.AddTask(self, new TryLockTask(obj1)); thread_pool.StartWorkers(self); ScopedThreadSuspension sts(self, kSuspended); - thread_pool.Wait(Thread::Current(), /*do_work*/false, /*may_hold_locks*/false); + thread_pool.Wait(Thread::Current(), /*do_work=*/false, /*may_hold_locks=*/false); } // Test that the trylock actually locks the object. 
{ diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index 6becd36bf1..69f7648dcb 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -174,10 +174,10 @@ static MemMap AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) { std::string error_message; size_t length = static_cast<size_t>(end - start); MemMap dex_mem_map = MemMap::MapAnonymous("DEX data", - /* addr */ nullptr, + /* addr= */ nullptr, length, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_message); if (!dex_mem_map.IsValid()) { ScopedObjectAccess soa(env); @@ -196,8 +196,8 @@ static const DexFile* CreateDexFile(JNIEnv* env, MemMap&& dex_mem_map) { std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location, 0, std::move(dex_mem_map), - /* verify */ true, - /* verify_location */ true, + /* verify= */ true, + /* verify_checksum= */ true, &error_message)); if (dex_file == nullptr) { ScopedObjectAccess soa(env); @@ -551,7 +551,7 @@ static jstring DexFile_getDexFileStatus(JNIEnv* env, } OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set, - false /* load_executable */); + /* load_executable= */ false); return env->NewStringUTF(oat_file_assistant.GetStatusDump().c_str()); } @@ -774,7 +774,7 @@ static jobjectArray DexFile_getDexFileOutputPaths(JNIEnv* env, OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set, - false /* load_executable */); + /* load_executable= */ false); std::unique_ptr<OatFile> best_oat_file = oat_file_assistant.GetBestOatFile(); if (best_oat_file == nullptr) { diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index 96f15ded2a..24c8d14a96 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -367,7 +367,7 @@ static jobjectArray VMDebug_getInstancesOfClasses(JNIEnv* env, VariableSizedHandleScope hs2(soa.Self()); std::vector<Handle<mirror::Object>> raw_instances; - heap->GetInstances(hs2, h_class, includeAssignable, /* max_count */ 0, raw_instances); + heap->GetInstances(hs2, h_class, includeAssignable, /* max_count= */ 0, raw_instances); jobjectArray array = env->NewObjectArray(raw_instances.size(), WellKnownClasses::java_lang_Object, nullptr); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 861d1db880..2a3ea46225 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -404,7 +404,7 @@ static void PreloadDexCachesResolveField(ObjPtr<mirror::DexCache> dex_cache, const DexFile* dex_file = dex_cache->GetDexFile(); const DexFile::FieldId& field_id = dex_file->GetFieldId(field_idx); ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType( - field_id.class_idx_, dex_cache, /* class_loader */ nullptr); + field_id.class_idx_, dex_cache, /* class_loader= */ nullptr); if (klass == nullptr) { return; } @@ -432,12 +432,12 @@ static void PreloadDexCachesResolveMethod(ObjPtr<mirror::DexCache> dex_cache, ui ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ObjPtr<mirror::Class> klass = class_linker->LookupResolvedType( - method_id.class_idx_, dex_cache, /* class_loader */ nullptr); + method_id.class_idx_, dex_cache, /* class_loader= */ nullptr); if (klass == nullptr) { return; } // Call FindResolvedMethod to populate the dex cache. 
- class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader */ nullptr, method_idx); + class_linker->FindResolvedMethod(klass, dex_cache, /* class_loader= */ nullptr, method_idx); } struct DexCacheStats { diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc index e3932df5c0..32733a8409 100644 --- a/runtime/native/dalvik_system_VMStack.cc +++ b/runtime/native/dalvik_system_VMStack.cc @@ -59,7 +59,7 @@ static ResultT GetThreadStack(const ScopedFastNativeObjectAccess& soa, ThreadList* thread_list = Runtime::Current()->GetThreadList(); bool timed_out; Thread* thread = thread_list->SuspendThreadByPeer(peer, - /* request_suspension */ true, + /* request_suspension= */ true, SuspendReason::kInternal, &timed_out); if (thread != nullptr) { diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc index 72dae4791c..f54bf87216 100644 --- a/runtime/native/dalvik_system_ZygoteHooks.cc +++ b/runtime/native/dalvik_system_ZygoteHooks.cc @@ -152,7 +152,8 @@ static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) { // Drop the shared mutator lock. ScopedThreadSuspension sts(self, art::ThreadState::kNative); // Get exclusive mutator lock with suspend all. - ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false); + ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", + /*long_suspend=*/false); MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes); } @@ -399,7 +400,7 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, env, is_system_server, Runtime::NativeBridgeAction::kUnload, - /*isa*/ nullptr, + /*isa=*/ nullptr, profile_system_server); } } diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index f5039d1929..6d94fa1922 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -647,7 +647,7 @@ static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis) ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array = mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), annotation_array_class, - /* length */ 0); + /* length= */ 0); return soa.AddLocalReference<jobjectArray>(empty_array); } return soa.AddLocalReference<jobjectArray>(annotations::GetAnnotationsForClass(klass)); diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc index b7f0a7aabc..67ad0a47b8 100644 --- a/runtime/native/java_lang_Thread.cc +++ b/runtime/native/java_lang_Thread.cc @@ -147,7 +147,7 @@ static void Thread_setNativeName(JNIEnv* env, jobject peer, jstring java_name) { bool timed_out; // Take suspend thread lock to avoid races with threads trying to suspend this one. 
Thread* thread = thread_list->SuspendThreadByPeer(peer, - /* request_suspension */ true, + /* request_suspension= */ true, SuspendReason::kInternal, &timed_out); if (thread != nullptr) { diff --git a/runtime/native/java_lang_invoke_MethodHandleImpl.cc b/runtime/native/java_lang_invoke_MethodHandleImpl.cc index 1f2bf09f0e..0b26bd7c4a 100644 --- a/runtime/native/java_lang_invoke_MethodHandleImpl.cc +++ b/runtime/native/java_lang_invoke_MethodHandleImpl.cc @@ -48,7 +48,7 @@ static jobject MethodHandleImpl_getMemberInternal(JNIEnv* env, jobject thiz) { if (handle_kind >= mirror::MethodHandle::kFirstAccessorKind) { ArtField* const field = handle->GetTargetField(); h_object.Assign(mirror::Field::CreateFromArtField<kRuntimePointerSize, false>( - soa.Self(), field, false /* force_resolve */)); + soa.Self(), field, /* force_resolve= */ false)); } else { ArtMethod* const method = handle->GetTargetMethod(); if (method->IsConstructor()) { diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc index 46444808d7..e021b77dae 100644 --- a/runtime/native/sun_misc_Unsafe.cc +++ b/runtime/native/sun_misc_Unsafe.cc @@ -74,8 +74,8 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb mirror::HeapReference<mirror::Object>* field_addr = reinterpret_cast<mirror::HeapReference<mirror::Object>*>( reinterpret_cast<uint8_t*>(obj.Ptr()) + static_cast<size_t>(offset)); - ReadBarrier::Barrier<mirror::Object, /* kIsVolatile */ false, kWithReadBarrier, - /* kAlwaysUpdateField */ true>( + ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier, + /* kAlwaysUpdateField= */ true>( obj.Ptr(), MemberOffset(offset), field_addr); diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index f16c46bdea..7c320d8101 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -269,7 +269,7 @@ bool OatFileBase::LoadVdex(const std::string& vdex_filename, vdex_filename, writable, low_4gb, - /* unquicken*/ false, + /* unquicken=*/ false, error_msg); if (vdex_.get() == nullptr) { *error_msg = StringPrintf("Failed to load vdex file '%s' %s", diff --git a/runtime/oat_file.h b/runtime/oat_file.h index ba08e5e38b..4294baf23a 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -226,12 +226,12 @@ class OatFile { // A representation of an invalid OatClass, used when an OatClass can't be found. // See FindOatClass(). 
static OatClass Invalid() { - return OatClass(/* oat_file */ nullptr, + return OatClass(/* oat_file= */ nullptr, ClassStatus::kErrorUnresolved, kOatClassNoneCompiled, - /* bitmap_size */ 0, - /* bitmap_pointer */ nullptr, - /* methods_pointer */ nullptr); + /* bitmap_size= */ 0, + /* bitmap_pointer= */ nullptr, + /* methods_pointer= */ nullptr); } private: diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 754aa406db..a06be4c719 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -91,8 +91,8 @@ OatFileAssistant::OatFileAssistant(const char* dex_location, : isa_(isa), load_executable_(load_executable), only_load_system_executable_(only_load_system_executable), - odex_(this, /*is_oat_location*/ false), - oat_(this, /*is_oat_location*/ true), + odex_(this, /*is_oat_location=*/ false), + oat_(this, /*is_oat_location=*/ true), zip_fd_(zip_fd) { CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location"; @@ -700,9 +700,9 @@ OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() { } } else { vdex = VdexFile::Open(vdex_filename, - false /*writeable*/, - false /*low_4gb*/, - false /*unquicken*/, + /*writable=*/ false, + /*low_4gb=*/ false, + /*unquicken=*/ false, &error_msg); } if (vdex == nullptr) { diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index aba2eaeb69..521e419d2f 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -182,8 +182,8 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) { EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded( CompilerFilter::kDefaultCompilerFilter, - /* downgrade */ false, - /* profile_changed */ false, + /* profile_changed= */ false, + /* downgrade= */ false, relative_context.get())); } @@ -336,7 +336,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) { GenerateOatForTest(dex_location.c_str(), odex_location.c_str(), CompilerFilter::kSpeed, - /* with_alternate_image */ false); + /* with_alternate_image= */ false); android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC)); android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC)); @@ -375,7 +375,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) { GenerateOatForTest(dex_location.c_str(), odex_location.c_str(), CompilerFilter::kSpeed, - /* with_alternate_image */ false); + /* with_alternate_image= */ false); android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC)); android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC)); @@ -385,7 +385,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) { false, false, vdex_fd.get(), - -1 /* oat_fd */, + /* oat_fd= */ -1, zip_fd.get()); EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -408,7 +408,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) { GenerateOatForTest(dex_location.c_str(), odex_location.c_str(), CompilerFilter::kSpeed, - /* with_alternate_image */ false); + /* with_alternate_image= */ false); android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC)); android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC)); @@ -417,7 +417,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) { kRuntimeISA, false, false, - -1 /* vdex_fd */, + /* vdex_fd= */ -1, odex_fd.get(), 
zip_fd.get()); @@ -441,8 +441,8 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) { kRuntimeISA, false, false, - -1 /* vdex_fd */, - -1 /* oat_fd */, + /* vdex_fd= */ -1, + /* oat_fd= */ -1, zip_fd); EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -637,7 +637,7 @@ TEST_F(OatFileAssistantTest, StrippedMultiDexNonMainOutOfDate) { // Strip the dex file. Copy(GetStrippedDexSrc1(), dex_location); - OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable*/false); + OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, /*load_executable=*/false); // Because the dex file is stripped, the odex file is considered the source // of truth for the dex checksums. The oat file should be considered @@ -730,7 +730,7 @@ TEST_F(OatFileAssistantTest, OatImageOutOfDate) { Copy(GetDexSrc1(), dex_location); GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed, - /* with_alternate_image */ true); + /* with_alternate_image= */ true); ScopedNonWritable scoped_non_writable(dex_location); ASSERT_TRUE(scoped_non_writable.IsSuccessful()); @@ -765,7 +765,7 @@ TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) { Copy(GetDexSrc1(), dex_location); GenerateOatForTest(dex_location.c_str(), CompilerFilter::kExtract, - /* with_alternate_image */ true); + /* with_alternate_image= */ true); ScopedNonWritable scoped_non_writable(dex_location); ASSERT_TRUE(scoped_non_writable.IsSuccessful()); @@ -1167,7 +1167,7 @@ class RaceGenerateTask : public Task { dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat( dex_location_.c_str(), Runtime::Current()->GetSystemClassLoader(), - /*dex_elements*/nullptr, + /*dex_elements=*/nullptr, &oat_file, &error_msgs); CHECK(!dex_files.empty()) << android::base::Join(error_msgs, '\n'); @@ -1213,7 +1213,7 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) { tasks.push_back(std::move(task)); } thread_pool.StartWorkers(self); - thread_pool.Wait(self, /* do_work */ true, /* may_hold_locks */ false); + thread_pool.Wait(self, /* do_work= */ true, /* may_hold_locks= */ false); // Verify that tasks which got an oat file got a unique one. 
std::set<const OatFile*> oat_files; @@ -1335,8 +1335,8 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) { EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch, oat_file_assistant.GetDexOptNeeded( CompilerFilter::kDefaultCompilerFilter, - /* downgrade */ false, - /* profile_changed */ false, + /* profile_changed= */ false, + /* downgrade= */ false, updated_context.get())); } diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc index b9e9d384c2..7ac1ab40a2 100644 --- a/runtime/oat_file_manager.cc +++ b/runtime/oat_file_manager.cc @@ -181,7 +181,7 @@ class TypeIndexInfo { private: static BitVector GenerateTypeIndexes(const DexFile* dex_file) { - BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator()); + BitVector type_indexes(/*start_bits=*/0, /*expandable=*/true, Allocator::GetMallocAllocator()); for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) { const DexFile::ClassDef& class_def = dex_file->GetClassDef(i); uint16_t type_idx = class_def.class_idx_.index_; @@ -302,12 +302,12 @@ static bool CheckClassCollision(const OatFile* oat_file, std::priority_queue<DexFileAndClassPair> queue; for (size_t i = 0; i < dex_files_loaded.size(); ++i) { if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) { - queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true); + queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat=*/true); } } for (size_t i = 0; i < dex_files_unloaded.size(); ++i) { if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) { - queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false); + queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat=*/false); } } @@ -385,8 +385,8 @@ OatFileManager::CheckCollisionResult OatFileManager::CheckCollision( // the oat file without addition checks ClassLoaderContext::VerificationResult result = context->VerifyClassLoaderContextMatch( oat_file->GetClassLoaderContext(), - /*verify_names*/ true, - /*verify_checksums*/ true); + /*verify_names=*/ true, + /*verify_checksums=*/ true); switch (result) { case ClassLoaderContext::VerificationResult::kForcedToSkipChecks: return CheckCollisionResult::kSkippedClassLoaderContextSharedLibrary; diff --git a/runtime/proxy_test.h b/runtime/proxy_test.h index 411dc7af82..23e536d27e 100644 --- a/runtime/proxy_test.h +++ b/runtime/proxy_test.h @@ -47,7 +47,7 @@ ObjPtr<mirror::Class> GenerateProxyClass(ScopedObjectAccess& soa, // Builds the interfaces array. 
jobjectArray proxyClassInterfaces = - soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement */ nullptr); + soa.Env()->NewObjectArray(interfaces.size(), javaLangClass, /* initialElement= */ nullptr); soa.Self()->AssertNoPendingException(); for (size_t i = 0; i < interfaces.size(); ++i) { soa.Env()->SetObjectArrayElement(proxyClassInterfaces, i, @@ -62,7 +62,7 @@ ObjPtr<mirror::Class> GenerateProxyClass(ScopedObjectAccess& soa, jobjectArray proxyClassMethods = soa.Env()->NewObjectArray( methods_count, soa.AddLocalReference<jclass>(GetClassRoot<mirror::Method>()), - /* initialElement */ nullptr); + /* initialElement= */ nullptr); soa.Self()->AssertNoPendingException(); jsize array_index = 0; diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index 36a6b7fc47..afdfefaffa 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -126,7 +126,7 @@ class CatchBlockStackVisitor final : public StackVisitor { exception_handler_->SetHandlerDexPc(found_dex_pc); exception_handler_->SetHandlerQuickFramePc( GetCurrentOatQuickMethodHeader()->ToNativeQuickPc( - method, found_dex_pc, /* is_catch_handler */ true)); + method, found_dex_pc, /* is_for_catch_handler= */ true)); exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame()); exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader()); return false; // End stack walk. @@ -218,7 +218,10 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) { } // Walk the stack to find catch handler. - CatchBlockStackVisitor visitor(self_, context_, &exception_ref, this, /*skip*/already_popped); + CatchBlockStackVisitor visitor(self_, context_, + &exception_ref, + this, + /*skip_frames=*/already_popped); visitor.WalkStack(true); uint32_t new_pop_count = handler_frame_depth_; DCHECK_GE(new_pop_count, already_popped); @@ -606,7 +609,7 @@ void QuickExceptionHandler::DeoptimizeSingleFrame(DeoptimizationKind kind) { << deopt_method->PrettyMethod() << " due to " << GetDeoptimizationKindName(kind); - DumpFramesWithType(self_, /* details */ true); + DumpFramesWithType(self_, /* details= */ true); } if (Runtime::Current()->UseJitCompilation()) { Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor( diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 4d77b9d993..7fa5607582 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -305,15 +305,15 @@ Runtime::~Runtime() { // Very few things are actually capable of distinguishing between the peer & peerless states so // this should be fine. bool thread_attached = AttachCurrentThread("Shutdown thread", - /* as_daemon */ false, + /* as_daemon= */ false, GetSystemThreadGroup(), - /* Create peer */ IsStarted()); + /* create_peer= */ IsStarted()); if (UNLIKELY(!thread_attached)) { LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer."; CHECK(AttachCurrentThread("Shutdown thread (no java peer)", - /* as_daemon */ false, - /* thread_group*/ nullptr, - /* Create peer */ false)); + /* as_daemon= */ false, + /* thread_group=*/ nullptr, + /* create_peer= */ false)); } self = Thread::Current(); } else { @@ -614,7 +614,7 @@ bool Runtime::ParseOptions(const RuntimeOptions& raw_options, bool ignore_unrecognized, RuntimeArgumentMap* runtime_options) { Locks::Init(); - InitLogging(/* argv */ nullptr, Abort); // Calls Locks::Init() as a side effect. + InitLogging(/* argv= */ nullptr, Abort); // Calls Locks::Init() as a side effect. 
bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options); if (!parsed) { LOG(ERROR) << "Failed to parse options"; @@ -815,7 +815,7 @@ bool Runtime::Start() { ? NativeBridgeAction::kInitialize : NativeBridgeAction::kUnload; InitNonZygoteOrPostFork(self->GetJniEnv(), - /* is_system_server */ false, + /* is_system_server= */ false, action, GetInstructionSetString(kRuntimeISA)); } @@ -1002,9 +1002,9 @@ static bool OpenDexFilesFromImage(const std::string& image_location, std::string error_msg; std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename, - false /* writable */, - false /* low_4gb */, - false, /* unquicken */ + /* writable= */ false, + /* low_4gb= */ false, + /* unquicken= */ false, &error_msg)); if (vdex_file.get() == nullptr) { return false; @@ -1015,15 +1015,15 @@ static bool OpenDexFilesFromImage(const std::string& image_location, return false; } std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(), - false /* writable */, - false /* program_header_only */, - false /* low_4gb */, + /* writable= */ false, + /* program_header_only= */ false, + /* low_4gb= */ false, &error_msg)); if (elf_file.get() == nullptr) { return false; } std::unique_ptr<const OatFile> oat_file( - OatFile::OpenWithElfFile(/* zip_fd */ -1, + OatFile::OpenWithElfFile(/* zip_fd= */ -1, elf_file.release(), vdex_file.release(), oat_location, @@ -1117,7 +1117,7 @@ static inline void CreatePreAllocatedException(Thread* self, CHECK(klass != nullptr); gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator(); ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast( - klass->Alloc</* kIsInstrumented */ true>(self, allocator_type)); + klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type)); CHECK(exception_object != nullptr); *exception = GcRoot<mirror::Throwable>(exception_object); // Initialize the "detailMessage" field. @@ -1127,7 +1127,7 @@ static inline void CreatePreAllocatedException(Thread* self, ArtField* detailMessageField = throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;"); CHECK(detailMessageField != nullptr); - detailMessageField->SetObject</* kTransactionActive */ false>(exception->Read(), message); + detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message); } bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { @@ -1160,8 +1160,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { reinterpret_cast<uint8_t*>(kSentinelAddr), kPageSize, PROT_NONE, - /* low_4g */ true, - /* error_msg */ nullptr); + /* low_4gb= */ true, + /* error_msg= */ nullptr); if (!protected_fault_page_.IsValid()) { LOG(WARNING) << "Could not reserve sentinel fault page"; } else if (reinterpret_cast<uintptr_t>(protected_fault_page_.Begin()) != kSentinelAddr) { @@ -1371,13 +1371,13 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { arena_pool_.reset(new MallocArenaPool()); jit_arena_pool_.reset(new MallocArenaPool()); } else { - arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false)); - jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false, "CompilerMetadata")); + arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false)); + jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ false, "CompilerMetadata")); } if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) { // 4gb, no malloc. Explanation in header. 
- low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ true)); + low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb= */ true)); } linear_alloc_.reset(CreateLinearAlloc()); @@ -2148,7 +2148,7 @@ ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) { method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub()); } // Create empty conflict table. - method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc), + method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count=*/0u, linear_alloc), pointer_size); return method; } @@ -2280,7 +2280,7 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths, LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty."; return; } - if (!OS::FileExists(profile_output_filename.c_str(), false /*check_file_type*/)) { + if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) { LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits."; return; } @@ -2519,12 +2519,12 @@ void Runtime::FixupConflictTables() { const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize(); if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) { imt_unimplemented_method_->SetImtConflictTable( - ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size), + ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size), pointer_size); } if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) { imt_conflict_method_->SetImtConflictTable( - ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size), + ClassLinker::CreateImtConflictTable(/*count=*/0u, GetLinearAlloc(), pointer_size), pointer_size); } } diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc index 4bd3b3ae3a..55ba293f52 100644 --- a/runtime/runtime_android.cc +++ b/runtime/runtime_android.cc @@ -30,8 +30,8 @@ void HandleUnexpectedSignalAndroid(int signal_number, siginfo_t* info, void* raw HandleUnexpectedSignalCommon(signal_number, info, raw_context, - /* handle_timeout_signal */ false, - /* dump_on_stderr */ false); + /* handle_timeout_signal= */ false, + /* dump_on_stderr= */ false); // Run the old signal handler. old_action.sa_sigaction(signal_number, info, raw_context); @@ -44,7 +44,7 @@ void Runtime::InitPlatformSignalHandlers() { if (android_root != nullptr && strcmp(android_root, "/system") != 0) { InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid, &old_action, - /* handle_timeout_signal */ false); + /* handle_timeout_signal= */ false); } } diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc index 89f312457a..20b33277b3 100644 --- a/runtime/runtime_callbacks_test.cc +++ b/runtime/runtime_callbacks_test.cc @@ -191,10 +191,10 @@ TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackJava) TEST_F(ThreadLifecycleCallbackRuntimeCallbacksTest, ThreadLifecycleCallbackAttach) { std::string error_msg; MemMap stack = MemMap::MapAnonymous("ThreadLifecycleCallback Thread", - /* addr */ nullptr, + /* addr= */ nullptr, 128 * kPageSize, // Just some small stack. 
PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); ASSERT_TRUE(stack.IsValid()) << error_msg; @@ -505,10 +505,10 @@ TEST_F(MonitorWaitCallbacksTest, WaitUnlocked) { self, // Just a random class soa.Decode<mirror::Class>(WellKnownClasses::java_util_Collections).Ptr(), - /*ms*/0, - /*ns*/0, - /*interruptShouldThrow*/false, - /*why*/kWaiting); + /*ms=*/0, + /*ns=*/0, + /*interruptShouldThrow=*/false, + /*why=*/kWaiting); } } ASSERT_TRUE(cb_.saw_wait_start_); diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc index 6313553255..cfa8ea6342 100644 --- a/runtime/runtime_linux.cc +++ b/runtime/runtime_linux.cc @@ -31,8 +31,8 @@ void HandleUnexpectedSignalLinux(int signal_number, siginfo_t* info, void* raw_c HandleUnexpectedSignalCommon(signal_number, info, raw_context, - /* handle_timeout_signal */ true, - /* dump_on_stderr */ true); + /* handle_timeout_signal= */ true, + /* dump_on_stderr= */ true); if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) { pid_t tid = GetTid(); @@ -77,7 +77,7 @@ void Runtime::InitPlatformSignalHandlers() { // On the host, we don't have debuggerd to dump a stack for us when something unexpected happens. InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux, nullptr, - /* handle_timeout_signal */ true); + /* handle_timeout_signal= */ true); } } // namespace art diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc index f4a27b8397..38ea9cc3b8 100644 --- a/runtime/signal_catcher.cc +++ b/runtime/signal_catcher.cc @@ -118,7 +118,7 @@ void SignalCatcher::Output(const std::string& s) { ScopedThreadStateChange tsc(Thread::Current(), kWaitingForSignalCatcherOutput); - std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage */)); + std::unique_ptr<File> file(new File(output_fd.release(), true /* check_usage= */)); bool success = file->WriteFully(s.data(), s.size()); if (success) { success = file->FlushCloseOrErase() == 0; @@ -169,7 +169,7 @@ void SignalCatcher::HandleSigQuit() { void SignalCatcher::HandleSigUsr1() { LOG(INFO) << "SIGUSR1 forcing GC (no HPROF) and profile save"; - Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false); + Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false); ProfileSaver::ForceProcessProfiles(); } diff --git a/runtime/stack_map.h b/runtime/stack_map.h index 5f44286089..811e23b2d2 100644 --- a/runtime/stack_map.h +++ b/runtime/stack_map.h @@ -358,7 +358,7 @@ class CodeInfo { ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const { if (stack_map.HasDexRegisterMap()) { DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid()); - DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register */ 0, &map); + DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register= */ 0, &map); return map; } return DexRegisterMap(0, DexRegisterLocation::None()); diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h index aac547eb78..106c7f1f75 100644 --- a/runtime/subtype_check.h +++ b/runtime/subtype_check.h @@ -237,7 +237,7 @@ struct SubtypeCheck { static SubtypeCheckInfo::State EnsureInitialized(ClassPtr klass) REQUIRES(Locks::subtype_check_lock_) REQUIRES_SHARED(Locks::mutator_lock_) { - return InitializeOrAssign(klass, /*assign*/false).GetState(); + return InitializeOrAssign(klass, /*assign=*/false).GetState(); } // Force this class's SubtypeCheckInfo state into Assigned|Overflowed. 
@@ -250,7 +250,7 @@ struct SubtypeCheck { static SubtypeCheckInfo::State EnsureAssigned(ClassPtr klass) REQUIRES(Locks::subtype_check_lock_) REQUIRES_SHARED(Locks::mutator_lock_) { - return InitializeOrAssign(klass, /*assign*/true).GetState(); + return InitializeOrAssign(klass, /*assign=*/true).GetState(); } // Resets the SubtypeCheckInfo into the Uninitialized state. @@ -398,7 +398,7 @@ struct SubtypeCheck { // Force all ancestors to Assigned | Overflowed. ClassPtr parent_klass = GetParentClass(klass); - size_t parent_depth = InitializeOrAssign(parent_klass, /*assign*/true).GetDepth(); + size_t parent_depth = InitializeOrAssign(parent_klass, /*assign=*/true).GetDepth(); if (kIsDebugBuild) { SubtypeCheckInfo::State parent_state = GetSubtypeCheckInfo(parent_klass).GetState(); DCHECK(parent_state == SubtypeCheckInfo::kAssigned || @@ -542,17 +542,17 @@ struct SubtypeCheck { int32_t new_value) REQUIRES_SHARED(Locks::mutator_lock_) { if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) { - return klass->template CasField32</*kTransactionActive*/true>(offset, - old_value, - new_value, - CASMode::kWeak, - std::memory_order_seq_cst); - } else { - return klass->template CasField32</*kTransactionActive*/false>(offset, + return klass->template CasField32</*kTransactionActive=*/true>(offset, old_value, new_value, CASMode::kWeak, std::memory_order_seq_cst); + } else { + return klass->template CasField32</*kTransactionActive=*/false>(offset, + old_value, + new_value, + CASMode::kWeak, + std::memory_order_seq_cst); } } diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h index 462f203978..23d8ac371e 100644 --- a/runtime/subtype_check_bits.h +++ b/runtime/subtype_check_bits.h @@ -56,9 +56,9 @@ namespace art { * * See subtype_check.h and subtype_check_info.h for more details. */ -BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u) - BitStructField<BitString, /*lsb*/ 0> bitstring_; - BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_; +BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size=*/ BitString::BitStructSizeOf() + 1u) + BitStructField<BitString, /*lsb=*/ 0> bitstring_; + BitStructUint</*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1> overflow_; BITSTRUCT_DEFINE_END(SubtypeCheckBits); } // namespace art diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h index 321a723985..eec6e21832 100644 --- a/runtime/subtype_check_bits_and_status.h +++ b/runtime/subtype_check_bits_and_status.h @@ -68,11 +68,11 @@ static constexpr size_t NonNumericBitSizeOf() { static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast)); static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits."); BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>()) - BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_; + BitStructField<SubtypeCheckBits, /*lsb=*/ 0> subtype_check_info_; BitStructField<ClassStatus, - /*lsb*/ SubtypeCheckBits::BitStructSizeOf(), - /*width*/ kClassStatusBitSize> status_; - BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_; + /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(), + /*width=*/ kClassStatusBitSize> status_; + BitStructInt</*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()> int32_alias_; BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus); // Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data. 
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc index 9bd135e4c2..44a2a6933e 100644 --- a/runtime/subtype_check_info_test.cc +++ b/runtime/subtype_check_info_test.cc @@ -87,7 +87,7 @@ BitString SetBitStringCharAt(BitString bit_string, size_t i, size_t val) { struct SubtypeCheckInfoTest : public ::testing::Test { protected: void SetUp() override { - android::base::InitLogging(/*argv*/nullptr); + android::base::InitLogging(/*argv=*/nullptr); } void TearDown() override { @@ -158,33 +158,33 @@ TEST_F(SubtypeCheckInfoTest, IllegalValues) { // Illegal values during construction would cause a Dcheck failure and crash. ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}), - /*next*/MakeBitStringChar(0), - /*overflow*/false, - /*depth*/0u), + /*next=*/MakeBitStringChar(0), + /*overflow=*/false, + /*depth=*/0u), GetExpectedMessageForDeathTest("Path was too long for the depth")); ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({1u, 1u}), - /*overflow*/false, - /*depth*/0u), + /*overflow=*/false, + /*depth=*/0u), GetExpectedMessageForDeathTest("Bitstring too long for depth")); ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({1u}), - /*next*/MakeBitStringChar(0), - /*overflow*/false, - /*depth*/1u), + /*next=*/MakeBitStringChar(0), + /*overflow=*/false, + /*depth=*/1u), GetExpectedMessageForDeathTest("Expected \\(Assigned\\|Initialized\\) " "state to have >0 Next value")); ASSERT_DEATH(MakeSubtypeCheckInfoInfused(MakeBitString({0u, 2u, 1u}), - /*overflow*/false, - /*depth*/2u), + /*overflow=*/false, + /*depth=*/2u), GetExpectedMessageForDeathTest("Path to root had non-0s following 0s")); ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 2u}), - /*next*/MakeBitStringChar(1u), - /*overflow*/false, - /*depth*/2u), + /*next=*/MakeBitStringChar(1u), + /*overflow=*/false, + /*depth=*/2u), GetExpectedMessageForDeathTest("Path to root had non-0s following 0s")); ASSERT_DEATH(MakeSubtypeCheckInfo(MakeBitString({0u, 1u, 1u}), - /*next*/MakeBitStringChar(0), - /*overflow*/false, - /*depth*/3u), + /*next=*/MakeBitStringChar(0), + /*overflow=*/false, + /*depth=*/3u), GetExpectedMessageForDeathTest("Path to root had non-0s following 0s")); // These are really slow (~1sec per death test on host), @@ -194,62 +194,62 @@ TEST_F(SubtypeCheckInfoTest, IllegalValues) { TEST_F(SubtypeCheckInfoTest, States) { EXPECT_EQ(SubtypeCheckInfo::kUninitialized, MakeSubtypeCheckInfo().GetState()); EXPECT_EQ(SubtypeCheckInfo::kInitialized, - MakeSubtypeCheckInfo(/*path*/{}, /*next*/MakeBitStringChar(1)).GetState()); + MakeSubtypeCheckInfo(/*path_to_root=*/{}, /*next=*/MakeBitStringChar(1)).GetState()); EXPECT_EQ(SubtypeCheckInfo::kOverflowed, - MakeSubtypeCheckInfo(/*path*/{}, - /*next*/MakeBitStringChar(1), - /*overflow*/true, - /*depth*/1u).GetState()); + MakeSubtypeCheckInfo(/*path_to_root=*/{}, + /*next=*/MakeBitStringChar(1), + /*overflow=*/true, + /*depth=*/1u).GetState()); EXPECT_EQ(SubtypeCheckInfo::kAssigned, - MakeSubtypeCheckInfo(/*path*/MakeBitString({1u}), - /*next*/MakeBitStringChar(1), - /*overflow*/false, - /*depth*/1u).GetState()); + MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitString({1u}), + /*next=*/MakeBitStringChar(1), + /*overflow=*/false, + /*depth=*/1u).GetState()); // Test edge conditions: depth == BitString::kCapacity (No Next value). 
EXPECT_EQ(SubtypeCheckInfo::kAssigned, - MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(), - /*next*/MakeBitStringChar(0), - /*overflow*/false, - /*depth*/BitString::kCapacity).GetState()); + MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(), + /*next=*/MakeBitStringChar(0), + /*overflow=*/false, + /*depth=*/BitString::kCapacity).GetState()); EXPECT_EQ(SubtypeCheckInfo::kInitialized, - MakeSubtypeCheckInfo(/*path*/MakeBitStringMax<BitString::kCapacity - 1u>(), - /*next*/MakeBitStringChar(0), - /*overflow*/false, - /*depth*/BitString::kCapacity).GetState()); + MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax<BitString::kCapacity - 1u>(), + /*next=*/MakeBitStringChar(0), + /*overflow=*/false, + /*depth=*/BitString::kCapacity).GetState()); // Test edge conditions: depth > BitString::kCapacity (Must overflow). EXPECT_EQ(SubtypeCheckInfo::kOverflowed, - MakeSubtypeCheckInfo(/*path*/MakeBitStringMax(), - /*next*/MakeBitStringChar(0), - /*overflow*/true, - /*depth*/BitString::kCapacity + 1u).GetState()); + MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(), + /*next=*/MakeBitStringChar(0), + /*overflow=*/true, + /*depth=*/BitString::kCapacity + 1u).GetState()); } TEST_F(SubtypeCheckInfoTest, NextValue) { // Validate "Next" is correctly aliased as the Bitstring[Depth] character. EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}), - /*overflow*/false, - /*depth*/0u).GetNext()); + /*overflow=*/false, + /*depth=*/0u).GetNext()); EXPECT_EQ(MakeBitStringChar(2u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}), - /*overflow*/false, - /*depth*/1u).GetNext()); + /*overflow=*/false, + /*depth=*/1u).GetNext()); EXPECT_EQ(MakeBitStringChar(3u), MakeSubtypeCheckInfoUnchecked(MakeBitString({1u, 2u, 3u}), - /*overflow*/false, - /*depth*/2u).GetNext()); + /*overflow=*/false, + /*depth=*/2u).GetNext()); EXPECT_EQ(MakeBitStringChar(1u), MakeSubtypeCheckInfoUnchecked(MakeBitString({0u, 2u, 1u}), - /*overflow*/false, - /*depth*/2u).GetNext()); + /*overflow=*/false, + /*depth=*/2u).GetNext()); // Test edge conditions: depth == BitString::kCapacity (No Next value). EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(), - /*overflow*/false, - /*depth*/BitString::kCapacity))); + /*overflow=*/false, + /*depth=*/BitString::kCapacity))); // Anything with depth >= BitString::kCapacity has no next value. EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<BitString::kCapacity>(), - /*overflow*/false, - /*depth*/BitString::kCapacity + 1u))); + /*overflow=*/false, + /*depth=*/BitString::kCapacity + 1u))); EXPECT_FALSE(HasNext(MakeSubtypeCheckInfoUnchecked(MakeBitStringMax(), - /*overflow*/false, - /*depth*/std::numeric_limits<size_t>::max()))); + /*overflow=*/false, + /*depth=*/std::numeric_limits<size_t>::max()))); } template <size_t kPos = BitString::kCapacity> @@ -259,10 +259,10 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) { using StorageType = BitString::StorageType; SubtypeCheckInfo sci = - MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(), - /*next*/BitStringChar{}, - /*overflow*/false, - /*depth*/BitString::kCapacity); + MakeSubtypeCheckInfo(/*path_to_root=*/MakeBitStringMax(), + /*next=*/BitStringChar{}, + /*overflow=*/false, + /*depth=*/BitString::kCapacity); // 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation. 
EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot()); @@ -275,8 +275,8 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) { SubtypeCheckInfo sci2 = MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(), - /*overflow*/false, - /*depth*/BitString::kCapacity); + /*overflow=*/false, + /*depth=*/BitString::kCapacity); #define MAKE_ENCODED_PATH(pos0, pos1, pos2) \ (((pos0) << 0) | \ @@ -290,8 +290,8 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) { SubtypeCheckInfo sci3 = MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(), - /*overflow*/false, - /*depth*/BitString::kCapacity - 1u); + /*overflow=*/false, + /*depth=*/BitString::kCapacity - 1u); EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0), sci3.GetEncodedPathToRoot()); @@ -300,8 +300,8 @@ TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) { SubtypeCheckInfo sci4 = MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}), - /*overflow*/false, - /*depth*/BitString::kCapacity - 2u); + /*overflow=*/false, + /*depth=*/BitString::kCapacity - 2u); EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot()); EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0), @@ -320,7 +320,7 @@ TEST_F(SubtypeCheckInfoTest, CopyCleared) { SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot(); EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); - SubtypeCheckInfo childC = root.CreateChild(/*assign*/true); + SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState()); EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign. EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC)); @@ -331,7 +331,7 @@ TEST_F(SubtypeCheckInfoTest, CopyCleared) { // CopyCleared is just a thin wrapper around value-init and providing the depth. SubtypeCheckInfo cleared_copy_value = - SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth*/1u); + SubtypeCheckInfo::Create(SubtypeCheckBits{}, /*depth=*/1u); EXPECT_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy_value.GetState()); EXPECT_EQ(MakeBitString({}), GetPathToRoot(cleared_copy_value)); } @@ -340,7 +340,7 @@ TEST_F(SubtypeCheckInfoTest, NewForChild2) { SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot(); EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); - SubtypeCheckInfo childC = root.CreateChild(/*assign*/true); + SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState()); EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign. EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC)); @@ -350,17 +350,17 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) { SubtypeCheckInfo root = SubtypeCheckInfo::CreateRoot(); EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); - SubtypeCheckInfo childA = root.CreateChild(/*assign*/false); + SubtypeCheckInfo childA = root.CreateChild(/*assign_next=*/false); EXPECT_EQ(SubtypeCheckInfo::kInitialized, childA.GetState()); EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); // Next unchanged for Initialize. EXPECT_EQ(MakeBitString({}), GetPathToRoot(childA)); - SubtypeCheckInfo childB = root.CreateChild(/*assign*/false); + SubtypeCheckInfo childB = root.CreateChild(/*assign_next=*/false); EXPECT_EQ(SubtypeCheckInfo::kInitialized, childB.GetState()); EXPECT_EQ(MakeBitStringChar(1u), root.GetNext()); // Next unchanged for Initialize. 
EXPECT_EQ(MakeBitString({}), GetPathToRoot(childB)); - SubtypeCheckInfo childC = root.CreateChild(/*assign*/true); + SubtypeCheckInfo childC = root.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kAssigned, childC.GetState()); EXPECT_EQ(MakeBitStringChar(2u), root.GetNext()); // Next incremented for Assign. EXPECT_EQ(MakeBitString({1u}), GetPathToRoot(childC)); @@ -369,19 +369,19 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) { size_t cur_depth = 1u; SubtypeCheckInfo latest_child = childC; while (cur_depth != BitString::kCapacity) { - latest_child = latest_child.CreateChild(/*assign*/true); + latest_child = latest_child.CreateChild(/*assign_next=*/true); ASSERT_EQ(SubtypeCheckInfo::kAssigned, latest_child.GetState()); ASSERT_EQ(cur_depth + 1u, GetPathToRoot(latest_child).Length()); cur_depth++; } // Future assignments will result in a too-deep overflow. - SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign*/true); + SubtypeCheckInfo child_of_deep = latest_child.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep.GetState()); EXPECT_EQ(GetPathToRoot(latest_child), GetPathToRoot(child_of_deep)); // Assignment of too-deep overflow also causes overflow. - SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign*/true); + SubtypeCheckInfo child_of_deep_2 = child_of_deep.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of_deep_2.GetState()); EXPECT_EQ(GetPathToRoot(child_of_deep), GetPathToRoot(child_of_deep_2)); } @@ -393,7 +393,7 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) { break; } - SubtypeCheckInfo child = root.CreateChild(/*assign*/true); + SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/true); ASSERT_EQ(SubtypeCheckInfo::kAssigned, child.GetState()); ASSERT_EQ(MakeBitStringChar(cur_next+1u), root.GetNext()); ASSERT_EQ(MakeBitString({cur_next}), GetPathToRoot(child)); @@ -403,20 +403,20 @@ TEST_F(SubtypeCheckInfoTest, NewForChild) { // Now the root will be in a state that further assigns will be too-wide overflow. // Initialization still succeeds. - SubtypeCheckInfo child = root.CreateChild(/*assign*/false); + SubtypeCheckInfo child = root.CreateChild(/*assign_next=*/false); EXPECT_EQ(SubtypeCheckInfo::kInitialized, child.GetState()); EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext()); EXPECT_EQ(MakeBitString({}), GetPathToRoot(child)); // Assignment goes to too-wide Overflow. - SubtypeCheckInfo child_of = root.CreateChild(/*assign*/true); + SubtypeCheckInfo child_of = root.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of.GetState()); EXPECT_EQ(MakeBitStringChar(cur_next), root.GetNext()); EXPECT_EQ(MakeBitString({}), GetPathToRoot(child_of)); // Assignment of overflowed child still succeeds. // The path to root is the same. 
- SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign*/true); + SubtypeCheckInfo child_of2 = child_of.CreateChild(/*assign_next=*/true); EXPECT_EQ(SubtypeCheckInfo::kOverflowed, child_of2.GetState()); EXPECT_EQ(GetPathToRoot(child_of), GetPathToRoot(child_of2)); } diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc index 9aa30325c2..719e5d917c 100644 --- a/runtime/subtype_check_test.cc +++ b/runtime/subtype_check_test.cc @@ -302,7 +302,7 @@ struct MockScopedLockMutator { struct SubtypeCheckTest : public ::testing::Test { protected: void SetUp() override { - android::base::InitLogging(/*argv*/nullptr); + android::base::InitLogging(/*argv=*/nullptr); CreateRootedTree(BitString::kCapacity + 2u, BitString::kCapacity + 2u); } @@ -312,8 +312,8 @@ struct SubtypeCheckTest : public ::testing::Test { void CreateRootedTree(size_t width, size_t height) { all_classes_.clear(); - root_ = CreateClassFor(/*parent*/nullptr, /*x*/0, /*y*/0); - CreateTreeFor(root_, /*width*/width, /*depth*/height); + root_ = CreateClassFor(/*parent=*/nullptr, /*x=*/0, /*y=*/0); + CreateTreeFor(root_, /*width=*/width, /*levels=*/height); } MockClass* CreateClassFor(MockClass* parent, size_t x, size_t y) { @@ -681,7 +681,7 @@ void EnsureStateChangedTest( const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) { ASSERT_EQ(depth, transitions.size()); - EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions); + EnsureStateChangedTestRecursive(root, /*cur_depth=*/0u, depth, transitions); } TEST_F(SubtypeCheckTest, EnsureInitialized_NoOverflow) { @@ -869,8 +869,8 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide) { { // Create too-wide siblings at the kTargetDepth level. - MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u); - CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1); + MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u); + CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1); ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()); ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild()); // Leave the rest of the tree as the default. @@ -914,15 +914,15 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide_TooWide) { { // Create too-wide siblings at the kTargetDepth level. - MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1); - CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1); + MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1); + CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1); ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()) << *child; ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild()); // Leave the rest of the tree as the default. // Create too-wide children for a too-wide parent. - MockClass* child_subchild = child->FindChildAt(/*x*/0, kTargetDepth); - CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*depth*/1); + MockClass* child_subchild = child->FindChildAt(/*x=*/0, kTargetDepth); + CreateTreeFor(child_subchild, kMaxWidthCutOffSub*2, /*levels=*/1); ASSERT_LE(kMaxWidthCutOffSub*2, child_subchild->GetNumberOfChildren()) << *child_subchild; ASSERT_TRUE(IsTooWide(child_subchild->GetMaxChild())) << *(child_subchild->GetMaxChild()); } @@ -1035,8 +1035,8 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide_TooDeep) { { // Create too-wide siblings at the kTargetDepth level. 
- MockClass* child = root_->FindChildAt(/*x*/0, kTargetDepth - 1u); - CreateTreeFor(child, kMaxWidthCutOff*2, /*depth*/1); + MockClass* child = root_->FindChildAt(/*x=*/0, kTargetDepth - 1u); + CreateTreeFor(child, kMaxWidthCutOff*2, /*levels=*/1); ASSERT_LE(kMaxWidthCutOff*2, child->GetNumberOfChildren()); ASSERT_TRUE(IsTooWide(child->GetMaxChild())) << *(child->GetMaxChild()); // Leave the rest of the tree as the default. @@ -1045,7 +1045,7 @@ TEST_F(SubtypeCheckTest, EnsureInitialized_TooWide_TooDeep) { MockClass* child_subchild = child->GetMaxChild(); ASSERT_TRUE(child_subchild != nullptr); ASSERT_EQ(0u, child_subchild->GetNumberOfChildren()) << *child_subchild; - CreateTreeFor(child_subchild, /*width*/1, /*levels*/kTooDeepTargetDepth); + CreateTreeFor(child_subchild, /*width=*/1, /*levels=*/kTooDeepTargetDepth); MockClass* too_deep_child = child_subchild->FindChildAt(0, kTooDeepTargetDepth + 2); ASSERT_TRUE(too_deep_child != nullptr) << child_subchild->ToDotGraph(); ASSERT_TRUE(IsTooWide(too_deep_child)) << *(too_deep_child); diff --git a/runtime/thread.cc b/runtime/thread.cc index 2e04e0c559..a3de4e2215 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -152,7 +152,7 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active); void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) { CHECK(kUseReadBarrier); tls32_.is_gc_marking = is_marking; - UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking); + UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking); ResetQuickAllocEntryPointsForThread(is_marking); } @@ -579,7 +579,7 @@ void Thread::InstallImplicitProtection() { VLOG(threads) << "installing stack protected region at " << std::hex << static_cast<void*>(pregion) << " to " << static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); - if (ProtectStack(/* fatal_on_error */ false)) { + if (ProtectStack(/* fatal_on_error= */ false)) { // Tell the kernel that we won't be needing these pages any more. // NB. madvise will probably write zeroes into the memory (on linux it does). uint32_t unwanted_size = stack_top - pregion - kPageSize; @@ -648,7 +648,7 @@ void Thread::InstallImplicitProtection() { static_cast<void*>(pregion + kStackOverflowProtectedSize - 1); // Protect the bottom of the stack to prevent read/write to it. - ProtectStack(/* fatal_on_error */ true); + ProtectStack(/* fatal_on_error= */ true); // Tell the kernel that we won't be needing these pages any more. // NB. madvise will probably write zeroes into the memory (on linux it does). @@ -2014,13 +2014,13 @@ void Thread::DumpStack(std::ostream& os, DumpKernelStack(os, GetTid(), " kernel: ", false); ArtMethod* method = GetCurrentMethod(nullptr, - /*check_suspended*/ !force_dump_stack, - /*abort_on_error*/ !(dump_for_abort || force_dump_stack)); + /*check_suspended=*/ !force_dump_stack, + /*abort_on_error=*/ !(dump_for_abort || force_dump_stack)); DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method); } DumpJavaStack(os, - /*check_suspended*/ !force_dump_stack, - /*dump_locks*/ !force_dump_stack); + /*check_suspended=*/ !force_dump_stack, + /*dump_locks=*/ !force_dump_stack); } else { os << "Not able to dump stack of thread that isn't suspended"; } @@ -2911,8 +2911,8 @@ jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRu // Make sure the AnnotatedStackTraceElement.class is initialized, b/76208924 . 
class_linker->EnsureInitialized(soa.Self(), h_aste_class, - /* can_init_fields */ true, - /* can_init_parents */ true); + /* can_init_fields= */ true, + /* can_init_parents= */ true); if (soa.Self()->IsExceptionPending()) { // This should not fail in a healthy runtime. return nullptr; @@ -3429,9 +3429,9 @@ void Thread::QuickDeliverException() { } PushDeoptimizationContext( JValue(), - false /* is_reference */, + /* is_reference= */ false, (force_deopt ? nullptr : exception), - false /* from_code */, + /* from_code= */ false, method_type); artDeoptimize(this); UNREACHABLE(); @@ -3557,7 +3557,7 @@ class ReferenceMapVisitor : public StackVisitor { } } // Mark lock count map required for structured locking checks. - shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg */ -1, this); + shadow_frame->GetLockCountData().VisitMonitors(visitor_, /* vreg= */ -1, this); } private: @@ -3573,7 +3573,7 @@ class ReferenceMapVisitor : public StackVisitor { if (kVerifyImageObjectsMarked) { gc::Heap* const heap = Runtime::Current()->GetHeap(); gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass, - /*fail_ok*/true); + /*fail_ok=*/true); if (space != nullptr && space->IsImageSpace()) { bool failed = false; if (!space->GetLiveBitmap()->Test(klass.Ptr())) { @@ -3595,7 +3595,7 @@ class ReferenceMapVisitor : public StackVisitor { } } mirror::Object* new_ref = klass.Ptr(); - visitor_(&new_ref, /* vreg */ -1, this); + visitor_(&new_ref, /* vreg= */ -1, this); if (new_ref != klass) { method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); } @@ -3668,7 +3668,7 @@ class ReferenceMapVisitor : public StackVisitor { mirror::Object* ref = ref_addr->AsMirrorPtr(); if (ref != nullptr) { mirror::Object* new_ref = ref; - visitor_(&new_ref, /* vreg */ -1, this); + visitor_(&new_ref, /* vreg= */ -1, this); if (ref != new_ref) { ref_addr->Assign(new_ref); } @@ -3861,9 +3861,9 @@ void Thread::VisitRoots(RootVisitor* visitor) { void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) { if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) { - VisitRoots</* kPrecise */ true>(visitor); + VisitRoots</* kPrecise= */ true>(visitor); } else { - VisitRoots</* kPrecise */ false>(visitor); + VisitRoots</* kPrecise= */ false>(visitor); } } @@ -4078,7 +4078,7 @@ mirror::Object* Thread::GetPeerFromOtherThread() const { void Thread::SetReadBarrierEntrypoints() { // Make sure entrypoints aren't null. - UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true); + UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active=*/ true); } void Thread::ClearAllInterpreterCaches() { diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index f8c90b1637..d21b600566 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -438,7 +438,7 @@ void ThreadList::RunEmptyCheckpoint() { // Wake up the threads blocking for weak ref access so that they will respond to the empty // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state. Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self); - Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true); + Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint=*/true); { ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun); uint64_t total_wait_time = 0; @@ -491,9 +491,9 @@ void ThreadList::RunEmptyCheckpoint() { // Found a runnable thread that hasn't responded to the empty checkpoint request. 
// Assume it's stuck and safe to dump its stack. thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT), - /*dump_native_stack*/ true, - /*backtrace_map*/ nullptr, - /*force_dump_stack*/ true); + /*dump_native_stack=*/ true, + /*backtrace_map=*/ nullptr, + /*force_dump_stack=*/ true); } } } diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc index f1c808bb35..a245f659d7 100644 --- a/runtime/thread_pool.cc +++ b/runtime/thread_pool.cc @@ -47,10 +47,10 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n stack_size += kPageSize; std::string error_msg; stack_ = MemMap::MapAnonymous(name.c_str(), - /* addr */ nullptr, + /* addr= */ nullptr, stack_size, PROT_READ | PROT_WRITE, - /* low_4gb */ false, + /* low_4gb= */ false, &error_msg); CHECK(stack_.IsValid()) << error_msg; CHECK_ALIGNED(stack_.Begin(), kPageSize); diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc index 2600f55695..9e7c44a078 100644 --- a/runtime/thread_pool_test.cc +++ b/runtime/thread_pool_test.cc @@ -119,7 +119,7 @@ TEST_F(ThreadPoolTest, StopWait) { // Drain the task list. Note: we have to restart here, as no tasks will be finished when // the pool is stopped. thread_pool.StartWorkers(self); - thread_pool.Wait(self, /* do_work */ true, false); + thread_pool.Wait(self, /* do_work= */ true, false); } class TreeTask : public Task { diff --git a/runtime/trace.cc b/runtime/trace.cc index 4ee983db21..ad58c2ea99 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -435,7 +435,7 @@ void Trace::Start(std::unique_ptr<File>&& trace_file_in, // want to use the trampolines anyway since it is faster. It makes the story with disabling // jit-gc more complex though. runtime->GetInstrumentation()->EnableMethodTracing( - kTracerInstrumentationKey, /*needs_interpreter*/!runtime->IsJavaDebuggable()); + kTracerInstrumentationKey, /*needs_interpreter=*/!runtime->IsJavaDebuggable()); } } } diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc index 452cd8e359..bd59e73192 100644 --- a/runtime/vdex_file.cc +++ b/runtime/vdex_file.cc @@ -150,11 +150,11 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr, (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ, unquicken ? MAP_PRIVATE : MAP_SHARED, file_fd, - /* start */ 0u, + /* start= */ 0u, low_4gb, vdex_filename.c_str(), mmap_reuse, - /* reservation */ nullptr, + /* reservation= */ nullptr, error_msg); if (!mmap.IsValid()) { *error_msg = "Failed to mmap file " + vdex_filename + " : " + *error_msg; @@ -173,7 +173,7 @@ std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr, return nullptr; } vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files), - /* decompile_return_instruction */ false); + /* decompile_return_instruction= */ false); // Update the quickening info size to pretend there isn't any. 
size_t offset = vdex->GetDexSectionHeaderOffset(); reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0; @@ -213,13 +213,13 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_ std::unique_ptr<const DexFile> dex(dex_file_loader.OpenWithDataSection( dex_file_start, size, - /*data_base*/ nullptr, - /*data_size*/ 0u, + /*data_base=*/ nullptr, + /*data_size=*/ 0u, location, GetLocationChecksum(i), - nullptr /*oat_dex_file*/, - false /*verify*/, - false /*verify_checksum*/, + /*oat_dex_file=*/ nullptr, + /*verify=*/ false, + /*verify_checksum=*/ false, error_msg)); if (dex == nullptr) { return false; diff --git a/runtime/vdex_file_test.cc b/runtime/vdex_file_test.cc index ced6e28577..9d92b42140 100644 --- a/runtime/vdex_file_test.cc +++ b/runtime/vdex_file_test.cc @@ -34,14 +34,14 @@ TEST_F(VdexFileTest, OpenEmptyVdex) { std::unique_ptr<VdexFile> vdex = VdexFile::Open(tmp.GetFd(), 0, tmp.GetFilename(), - /*writable*/false, - /*low_4gb*/false, - /*quicken*/false, + /*writable=*/false, + /*low_4gb=*/false, + /*unquicken=*/false, &error_msg); EXPECT_TRUE(vdex == nullptr); vdex = VdexFile::Open( - tmp.GetFilename(), /*writable*/false, /*low_4gb*/false, /*quicken*/ false, &error_msg); + tmp.GetFilename(), /*writable=*/false, /*low_4gb=*/false, /*unquicken=*/ false, &error_msg); EXPECT_TRUE(vdex == nullptr); } diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 5fce892ee6..7b07389057 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -242,7 +242,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self, *previous_idx = method_idx; const InvokeType type = method.GetInvokeType(class_def.access_flags_); ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( - method_idx, dex_cache, class_loader, /* referrer */ nullptr, type); + method_idx, dex_cache, class_loader, /* referrer= */ nullptr, type); if (resolved_method == nullptr) { DCHECK(self->IsExceptionPending()); // We couldn't resolve the method, but continue regardless. 
@@ -263,7 +263,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self, callbacks, allow_soft_failures, log_level, - /*need_precise_constants*/ false, + /*need_precise_constants=*/ false, api_level, &hard_failure_msg); if (result.kind == FailureKind::kHardFailure) { @@ -340,11 +340,11 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self, method_idx, method, method_access_flags, - true /* can_load_classes */, + /* can_load_classes= */ true, allow_soft_failures, need_precise_constants, - false /* verify to dump */, - true /* allow_thread_suspension */, + /* verify to dump */ false, + /* allow_thread_suspension= */ true, api_level); if (verifier.Verify()) { // Verification completed, however failures may be pending that didn't cause the verification @@ -475,11 +475,11 @@ MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self, dex_method_idx, method, method_access_flags, - true /* can_load_classes */, - true /* allow_soft_failures */, - true /* need_precise_constants */, - true /* verify_to_dump */, - true /* allow_thread_suspension */, + /* can_load_classes= */ true, + /* allow_soft_failures= */ true, + /* need_precise_constants= */ true, + /* verify_to_dump= */ true, + /* allow_thread_suspension= */ true, api_level); verifier->Verify(); verifier->DumpFailures(vios->Stream()); @@ -570,11 +570,11 @@ void MethodVerifier::FindLocksAtDexPc( m->GetDexMethodIndex(), m, m->GetAccessFlags(), - false /* can_load_classes */, - true /* allow_soft_failures */, - false /* need_precise_constants */, - false /* verify_to_dump */, - false /* allow_thread_suspension */, + /* can_load_classes= */ false, + /* allow_soft_failures= */ true, + /* need_precise_constants= */ false, + /* verify_to_dump= */ false, + /* allow_thread_suspension= */ false, api_level); verifier.interesting_dex_pc_ = dex_pc; verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs; diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc index cedc583986..7519257cae 100644 --- a/runtime/verifier/method_verifier_test.cc +++ b/runtime/verifier/method_verifier_test.cc @@ -42,7 +42,7 @@ class MethodVerifierTest : public CommonRuntimeTest { // Verify the class std::string error_msg; FailureKind failure = MethodVerifier::VerifyClass( - self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level */ 0u, &error_msg); + self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level= */ 0u, &error_msg); if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) { ASSERT_TRUE(failure == FailureKind::kSoftFailure || diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index 4a3f9e6365..91be00d34a 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -756,13 +756,13 @@ const RegType& RegType::Merge(const RegType& incoming_type, VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(), join_class, GetClass(), - /* strict */ true, - /* is_assignable */ true); + /* is_strict= */ true, + /* is_assignable= */ true); VerifierDeps::MaybeRecordAssignability(verifier->GetDexFile(), join_class, incoming_type.GetClass(), - /* strict */ true, - /* is_assignable */ true); + /* is_strict= */ true, + /* is_assignable= */ true); } if (GetClass() == join_class && !IsPreciseReference()) { return *this; @@ -771,7 +771,7 @@ const RegType& RegType::Merge(const RegType& incoming_type, } else { std::string temp; const char* descriptor = join_class->GetDescriptor(&temp); - return reg_types->FromClass(descriptor, join_class, /* 
precise */ false); + return reg_types->FromClass(descriptor, join_class, /* precise= */ false); } } } else { diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h index 9f87adfa31..f62e8b6f54 100644 --- a/runtime/verifier/reg_type_cache-inl.h +++ b/runtime/verifier/reg_type_cache-inl.h @@ -126,7 +126,7 @@ inline const ImpreciseConstType& RegTypeCache::PosShortConstant() { inline const PreciseReferenceType& RegTypeCache::JavaLangClass() { const RegType* result = &FromClass("Ljava/lang/Class;", GetClassRoot<mirror::Class>(), - /* precise */ true); + /* precise= */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); } @@ -135,7 +135,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangString() { // String is final and therefore always precise. const RegType* result = &FromClass("Ljava/lang/String;", GetClassRoot<mirror::String>(), - /* precise */ true); + /* precise= */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); } @@ -143,7 +143,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangString() { inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() { const RegType* result = &FromClass("Ljava/lang/invoke/MethodHandle;", GetClassRoot<mirror::MethodHandle>(), - /* precise */ true); + /* precise= */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); } @@ -151,7 +151,7 @@ inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodHandle() { inline const PreciseReferenceType& RegTypeCache::JavaLangInvokeMethodType() { const RegType* result = &FromClass("Ljava/lang/invoke/MethodType;", GetClassRoot<mirror::MethodType>(), - /* precise */ true); + /* precise= */ true); DCHECK(result->IsPreciseReference()); return *down_cast<const PreciseReferenceType*>(result); } diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index f1f3488a3c..ceba7484dd 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -438,14 +438,14 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, // Is the resolved part a primitive array? if (resolved_merged_is_array && !resolved_parts_merged.IsObjectArrayTypes()) { - return JavaLangObject(false /* precise */); + return JavaLangObject(/* precise= */ false); } // Is any part not an array (but exists)? if ((!left_unresolved_is_array && left_resolved != &left) || (!right_unresolved_is_array && right_resolved != &right) || !resolved_merged_is_array) { - return JavaLangObject(false /* precise */); + return JavaLangObject(/* precise= */ false); } } diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index 0430d205af..32243857f8 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -80,8 +80,8 @@ TEST_F(RegTypeTest, Pairs) { EXPECT_FALSE(precise_lo.CheckWidePair(precise_const)); EXPECT_TRUE(precise_lo.CheckWidePair(precise_hi)); // Test Merging. 
- EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier */ nullptr)).IsLongTypes()); - EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier */ nullptr)).IsLongHighTypes()); + EXPECT_TRUE((long_lo.Merge(precise_lo, &cache, /* verifier= */ nullptr)).IsLongTypes()); + EXPECT_TRUE((long_hi.Merge(precise_hi, &cache, /* verifier= */ nullptr)).IsLongHighTypes()); } TEST_F(RegTypeTest, Primitives) { @@ -429,7 +429,7 @@ TEST_F(RegTypeReferenceTest, Dump) { const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10); const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12); const RegType& unresolved_merged = cache.FromUnresolvedMerge( - unresolved_ref, unresolved_ref_another, /* verifier */ nullptr); + unresolved_ref, unresolved_ref_another, /* verifier= */ nullptr); std::string expected = "Unresolved Reference: java.lang.DoesNotExist"; EXPECT_EQ(expected, unresolved_ref.Dump()); @@ -490,14 +490,14 @@ TEST_F(RegTypeReferenceTest, Merging) { RegTypeCache cache_new(true, allocator); const RegType& string = cache_new.JavaLangString(); const RegType& Object = cache_new.JavaLangObject(true); - EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier */ nullptr).IsJavaLangObject()); + EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier= */ nullptr).IsJavaLangObject()); // Merge two unresolved types. const RegType& ref_type_0 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true); EXPECT_TRUE(ref_type_0.IsUnresolvedReference()); const RegType& ref_type_1 = cache_new.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistToo;", true); EXPECT_FALSE(ref_type_0.Equals(ref_type_1)); - const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier */ nullptr); + const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsUnresolvedMergedReference()); RegType& merged_nonconst = const_cast<RegType&>(merged); @@ -520,22 +520,22 @@ TEST_F(RegTypeTest, MergingFloat) { const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false); { // float MERGE precise cst => float. - const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier */ nullptr); + const RegType& merged = float_type.Merge(precise_cst, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsFloat()); } { // precise cst MERGE float => float. - const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr); + const RegType& merged = precise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsFloat()); } { // float MERGE imprecise cst => float. - const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier */ nullptr); + const RegType& merged = float_type.Merge(imprecise_cst, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsFloat()); } { // imprecise cst MERGE float => float. - const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier */ nullptr); + const RegType& merged = imprecise_cst.Merge(float_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsFloat()); } } @@ -556,46 +556,46 @@ TEST_F(RegTypeTest, MergingLong) { const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false); { // lo MERGE precise cst lo => lo. 
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier */ nullptr); + const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // precise cst lo MERGE lo => lo. - const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier */ nullptr); + const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // lo MERGE imprecise cst lo => lo. const RegType& merged = long_lo_type.Merge( - imprecise_cst_lo, &cache_new, /* verifier */ nullptr); + imprecise_cst_lo, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // imprecise cst lo MERGE lo => lo. const RegType& merged = imprecise_cst_lo.Merge( - long_lo_type, &cache_new, /* verifier */ nullptr); + long_lo_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongLo()); } { // hi MERGE precise cst hi => hi. - const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier */ nullptr); + const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } { // precise cst hi MERGE hi => hi. - const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier */ nullptr); + const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } { // hi MERGE imprecise cst hi => hi. const RegType& merged = long_hi_type.Merge( - imprecise_cst_hi, &cache_new, /* verifier */ nullptr); + imprecise_cst_hi, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } { // imprecise cst hi MERGE hi => hi. const RegType& merged = imprecise_cst_hi.Merge( - long_hi_type, &cache_new, /* verifier */ nullptr); + long_hi_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsLongHi()); } } @@ -617,49 +617,49 @@ TEST_F(RegTypeTest, MergingDouble) { { // lo MERGE precise cst lo => lo. const RegType& merged = double_lo_type.Merge( - precise_cst_lo, &cache_new, /* verifier */ nullptr); + precise_cst_lo, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // precise cst lo MERGE lo => lo. const RegType& merged = precise_cst_lo.Merge( - double_lo_type, &cache_new, /* verifier */ nullptr); + double_lo_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // lo MERGE imprecise cst lo => lo. const RegType& merged = double_lo_type.Merge( - imprecise_cst_lo, &cache_new, /* verifier */ nullptr); + imprecise_cst_lo, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // imprecise cst lo MERGE lo => lo. const RegType& merged = imprecise_cst_lo.Merge( - double_lo_type, &cache_new, /* verifier */ nullptr); + double_lo_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleLo()); } { // hi MERGE precise cst hi => hi. const RegType& merged = double_hi_type.Merge( - precise_cst_hi, &cache_new, /* verifier */ nullptr); + precise_cst_hi, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } { // precise cst hi MERGE hi => hi. const RegType& merged = precise_cst_hi.Merge( - double_hi_type, &cache_new, /* verifier */ nullptr); + double_hi_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } { // hi MERGE imprecise cst hi => hi. 
const RegType& merged = double_hi_type.Merge( - imprecise_cst_hi, &cache_new, /* verifier */ nullptr); + imprecise_cst_hi, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } { // imprecise cst hi MERGE hi => hi. const RegType& merged = imprecise_cst_hi.Merge( - double_hi_type, &cache_new, /* verifier */ nullptr); + double_hi_type, &cache_new, /* verifier= */ nullptr); EXPECT_TRUE(merged.IsDoubleHi()); } } diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc index fb91976781..b666c1582c 100644 --- a/runtime/verifier/verifier_deps.cc +++ b/runtime/verifier/verifier_deps.cc @@ -43,7 +43,7 @@ VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, bool ou } VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files) - : VerifierDeps(dex_files, /*output_only*/ true) {} + : VerifierDeps(dex_files, /*output_only=*/ true) {} void VerifierDeps::MergeWith(const VerifierDeps& other, const std::vector<const DexFile*>& dex_files) { @@ -439,7 +439,7 @@ void VerifierDeps::AddAssignability(const DexFile& dex_file, AddAssignability(dex_file, destination_component, source_component, - /* is_strict */ true, + /* is_strict= */ true, is_assignable); return; } @@ -707,7 +707,7 @@ void VerifierDeps::Encode(const std::vector<const DexFile*>& dex_files, VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<const uint8_t> data) - : VerifierDeps(dex_files, /*output_only*/ false) { + : VerifierDeps(dex_files, /*output_only=*/ false) { if (data.empty()) { // Return eagerly, as the first thing we expect from VerifierDeps data is // the number of created strings, even if there is no dependency. @@ -1089,9 +1089,9 @@ bool VerifierDeps::VerifyDexFile(Handle<mirror::ClassLoader> class_loader, const DexFileDeps& deps, Thread* self) const { bool result = VerifyAssignability( - class_loader, dex_file, deps.assignable_types_, /* expected_assignability */ true, self); + class_loader, dex_file, deps.assignable_types_, /* expected_assignability= */ true, self); result = result && VerifyAssignability( - class_loader, dex_file, deps.unassignable_types_, /* expected_assignability */ false, self); + class_loader, dex_file, deps.unassignable_types_, /* expected_assignability= */ false, self); result = result && VerifyClasses(class_loader, dex_file, deps.classes_, self); result = result && VerifyFields(class_loader, dex_file, deps.fields_, self); |