Use C++17's [[maybe_unused]] attribute in ART

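Replace the compiler-specific ATTRIBUTE_UNUSED macro with the standard
C++17 [[maybe_unused]] attribute throughout the runtime. The mechanical
pattern is sketched below; ATTRIBUTE_UNUSED's definition is not part of
this diff, so the expansion shown is an assumption modeled on the usual
GCC/Clang wrapper:

  // Assumed old macro, roughly:
  //   #define ATTRIBUTE_UNUSED __attribute__((__unused__))
  // Before: the macro trailed the parameter name.
  //   bool Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context);
  // After: the standard attribute precedes the declaration it applies to.
  //   bool Action([[maybe_unused]] int sig, siginfo_t* info, void* context);

  // Minimal self-contained C++17 illustration of the new form:
  static bool Handle([[maybe_unused]] int sig, void* context) {
    // 'sig' is intentionally ignored; [[maybe_unused]] suppresses
    // -Wunused-parameter without a compiler-specific extension.
    return context != nullptr;
  }

  int main() {
    int dummy = 0;
    return Handle(/*sig=*/0, &dummy) ? 0 : 1;
  }

The attribute applies equally to otherwise-unused data members (see the
RosAlloc padding_ fields below), where the old macro trailed the member
name in the same way.
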
Bug: 169680875
Test: mmm art
Change-Id: Ic0cc320891c42b07a2b5520a584d2b62052e7235
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 30a19c8..be4ab2b 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -39,11 +39,11 @@
   void SetSdkChecker(std::unique_ptr<SdkChecker>&& sdk_checker_);
   const SdkChecker* GetSdkChecker() const;
 
-  bool DenyAccessBasedOnPublicSdk(ArtMethod* art_method ATTRIBUTE_UNUSED) const override
+  bool DenyAccessBasedOnPublicSdk([[maybe_unused]] ArtMethod* art_method) const override
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool DenyAccessBasedOnPublicSdk(ArtField* art_field ATTRIBUTE_UNUSED) const override
+  bool DenyAccessBasedOnPublicSdk([[maybe_unused]] ArtField* art_field) const override
       REQUIRES_SHARED(Locks::mutator_lock_);
-  bool DenyAccessBasedOnPublicSdk(const char* type_descriptor ATTRIBUTE_UNUSED) const override;
+  bool DenyAccessBasedOnPublicSdk([[maybe_unused]] const char* type_descriptor) const override;
   void SetEnablePublicSdkChecks(bool enabled) override;
 
  protected:
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index f02ec65..bf3eaa7 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -45,7 +45,7 @@
   return instr_size;
 }
 
-uintptr_t FaultManager::GetFaultPc(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context) {
+uintptr_t FaultManager::GetFaultPc([[maybe_unused]] siginfo_t* siginfo, void* context) {
   ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
   mcontext_t* mc = reinterpret_cast<mcontext_t*>(&uc->uc_mcontext);
   if (mc->arm_sp == 0) {
@@ -61,7 +61,7 @@
   return mc->arm_sp;
 }
 
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action([[maybe_unused]] int sig, siginfo_t* info, void* context) {
   uintptr_t fault_address = reinterpret_cast<uintptr_t>(info->si_addr);
   if (!IsValidFaultAddress(fault_address)) {
     return false;
@@ -115,7 +115,8 @@
 // The offset from r9 is Thread::ThreadSuspendTriggerOffset().
 // To check for a suspend check, we examine the instructions that caused
 // the fault (at PC-4 and PC).
-bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+bool SuspensionHandler::Action([[maybe_unused]] int sig,
+                               [[maybe_unused]] siginfo_t* info,
                                void* context) {
   // These are the instructions to check for.  The first one is the ldr r0,[r9,#xxx]
   // where xxx is the offset of the suspend trigger.
@@ -186,7 +187,8 @@
 // If we determine this is a stack overflow we need to move the stack pointer
 // to the overflow region below the protected region.
 
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+bool StackOverflowHandler::Action([[maybe_unused]] int sig,
+                                  [[maybe_unused]] siginfo_t* info,
                                   void* context) {
   ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
   mcontext_t* mc = reinterpret_cast<mcontext_t*>(&uc->uc_mcontext);
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 749476b..3309523 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -243,9 +243,9 @@
 // A signal handler called by a fault for an illegal instruction.  We record the fact in r0
 // and then increment the PC in the signal context to return to the next instruction.  We know the
 // instruction is 4 bytes long.
-static void bad_instr_handle(int signo ATTRIBUTE_UNUSED,
-                            siginfo_t* si ATTRIBUTE_UNUSED,
-                            void* data) {
+static void bad_instr_handle([[maybe_unused]] int signo,
+                             [[maybe_unused]] siginfo_t* si,
+                             void* data) {
 #if defined(__arm__)
   ucontext_t* uc = reinterpret_cast<ucontext_t*>(data);
   mcontext_t* mc = &uc->uc_mcontext;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 3878b57..cebff9b 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -62,7 +62,7 @@
   return mc->sp;
 }
 
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action([[maybe_unused]] int sig, siginfo_t* info, void* context) {
   uintptr_t fault_address = reinterpret_cast<uintptr_t>(info->si_addr);
   if (!IsValidFaultAddress(fault_address)) {
     return false;
@@ -96,7 +96,8 @@
 // A suspend check is done using the following instruction:
 //      0x...: f94002b5  ldr x21, [x21, #0]
 // To check for a suspend check, we examine the instruction that caused the fault (at PC).
-bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+bool SuspensionHandler::Action([[maybe_unused]] int sig,
+                               [[maybe_unused]] siginfo_t* info,
                                void* context) {
   constexpr uint32_t kSuspendCheckRegister = 21;
   constexpr uint32_t checkinst =
@@ -128,7 +129,8 @@
   return true;
 }
 
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+bool StackOverflowHandler::Action([[maybe_unused]] int sig,
+                                  [[maybe_unused]] siginfo_t* info,
                                   void* context) {
   ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
   mcontext_t* mc = reinterpret_cast<mcontext_t*>(&uc->uc_mcontext);
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index be7adc7..efeacd6 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -90,9 +90,7 @@
 
   // Set `new_value` to the physical register containing the dex PC pointer in
   // an nterp frame.
-  virtual void SetNterpDexPC(uintptr_t new_value ATTRIBUTE_UNUSED) {
-    abort();
-  }
+  virtual void SetNterpDexPC([[maybe_unused]] uintptr_t new_value) { abort(); }
 
   // Switches execution of the executing context to this context
   NO_RETURN virtual void DoLongJump() = 0;
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index d88c544..17b9dc3 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -313,7 +313,7 @@
 }
 
 std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::AddRuntimeDetectedFeatures(
-    const InstructionSetFeatures *features ATTRIBUTE_UNUSED) const {
+    [[maybe_unused]] const InstructionSetFeatures* features) const {
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
   UNREACHABLE();
 }
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index 1f41b39..1cb0fbb 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -146,8 +146,8 @@
                                  std::string* error_msg) const = 0;
 
   // Add run-time detected architecture specific features in sub-classes.
-  virtual std::unique_ptr<const InstructionSetFeatures>
-      AddRuntimeDetectedFeatures(const InstructionSetFeatures *features ATTRIBUTE_UNUSED) const;
+  virtual std::unique_ptr<const InstructionSetFeatures> AddRuntimeDetectedFeatures(
+      [[maybe_unused]] const InstructionSetFeatures* features) const;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(InstructionSetFeatures);
diff --git a/runtime/arch/riscv64/instruction_set_features_riscv64.cc b/runtime/arch/riscv64/instruction_set_features_riscv64.cc
index 2ef4f84..544b717 100644
--- a/runtime/arch/riscv64/instruction_set_features_riscv64.cc
+++ b/runtime/arch/riscv64/instruction_set_features_riscv64.cc
@@ -30,7 +30,7 @@
 }
 
 Riscv64FeaturesUniquePtr Riscv64InstructionSetFeatures::FromVariant(
-    const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
+    const std::string& variant, [[maybe_unused]] std::string* error_msg) {
   if (variant != "generic") {
     LOG(WARNING) << "Unexpected CPU variant for Riscv64 using defaults: " << variant;
   }
@@ -90,8 +90,8 @@
 
 std::unique_ptr<const InstructionSetFeatures>
 Riscv64InstructionSetFeatures::AddFeaturesFromSplitString(
-    const std::vector<std::string>& features ATTRIBUTE_UNUSED,
-    std::string* error_msg ATTRIBUTE_UNUSED) const {
+    [[maybe_unused]] const std::vector<std::string>& features,
+    [[maybe_unused]] std::string* error_msg) const {
   UNIMPLEMENTED(WARNING);
   return std::unique_ptr<const InstructionSetFeatures>(new Riscv64InstructionSetFeatures(bits_));
 }
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index cd2d38f..efc5249 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -259,7 +259,7 @@
 #undef FETCH_OR_SKIP_BYTE
 }
 
-uintptr_t FaultManager::GetFaultPc(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context) {
+uintptr_t FaultManager::GetFaultPc([[maybe_unused]] siginfo_t* siginfo, void* context) {
   ucontext_t* uc = reinterpret_cast<ucontext_t*>(context);
   if (uc->CTX_ESP == 0) {
     VLOG(signals) << "Missing SP";
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index f11aca9..6976e9c 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -119,9 +119,9 @@
   }
 }
 
-X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(
-    const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED,
-    bool x86_64) {
+X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(const std::string& variant,
+                                                            [[maybe_unused]] std::string* error_msg,
+                                                            bool x86_64) {
   const bool is_runtime_isa =
       kRuntimeISA == (x86_64 ? InstructionSet::kX86_64 : InstructionSet::kX86);
   if (is_runtime_isa && variant == "default") {
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 3ea5130..7353b14 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -52,7 +52,7 @@
 
 template <> struct ShortyTraits<'V'> {
   using Type = void;
-  static Type Get(const JValue& value ATTRIBUTE_UNUSED) {}
+  static Type Get([[maybe_unused]] const JValue& value) {}
   // `kVRegCount` and `Set()` are not defined.
 };
 
@@ -152,8 +152,8 @@
 }
 
 template <char... ArgType>
-inline ALWAYS_INLINE void FillVRegs(uint32_t* vregs ATTRIBUTE_UNUSED,
-                                    typename ShortyTraits<ArgType>::Type... args ATTRIBUTE_UNUSED)
+inline ALWAYS_INLINE void FillVRegs([[maybe_unused]] uint32_t* vregs,
+                                    [[maybe_unused]] typename ShortyTraits<ArgType>::Type... args)
     REQUIRES_SHARED(Locks::mutator_lock_) {}
 
 template <char FirstArgType, char... ArgType>
diff --git a/runtime/base/quasi_atomic.h b/runtime/base/quasi_atomic.h
index 5aa4dde..95d7bb2 100644
--- a/runtime/base/quasi_atomic.h
+++ b/runtime/base/quasi_atomic.h
@@ -46,7 +46,7 @@
 // quasiatomic operations that are performed on partially-overlapping
 // memory.
 class QuasiAtomic {
-  static constexpr bool NeedSwapMutexes(InstructionSet isa ATTRIBUTE_UNUSED) {
+  static constexpr bool NeedSwapMutexes([[maybe_unused]] InstructionSet isa) {
     // TODO: Remove this function now that mips support has been removed.
     return false;
   }
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index fca86a5..f3562a4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1496,16 +1496,14 @@
   // Visit Class Fields
   void operator()(ObjPtr<mirror::Object> obj,
                   MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+                  [[maybe_unused]] bool is_static) const REQUIRES_SHARED(Locks::mutator_lock_) {
     // References within image or across images don't need a read barrier.
     ObjPtr<mirror::Object> referred_obj =
         obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
     TestObject(referred_obj);
   }
 
-  void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
-                  ObjPtr<mirror::Reference> ref) const
+  void operator()([[maybe_unused]] ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
   }
@@ -3340,7 +3338,7 @@
     return Finish(h_klass);
   }
 
-  ObjPtr<mirror::Class> Finish(nullptr_t np ATTRIBUTE_UNUSED)
+  ObjPtr<mirror::Class> Finish([[maybe_unused]] nullptr_t np)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ScopedNullHandle<mirror::Class> snh;
     return Finish(snh);
@@ -7361,8 +7359,8 @@
 
   class VTableIndexCheckerRelease {
    protected:
-    explicit VTableIndexCheckerRelease(size_t vtable_length ATTRIBUTE_UNUSED) {}
-    void CheckIndex(uint32_t index ATTRIBUTE_UNUSED) const {}
+    explicit VTableIndexCheckerRelease([[maybe_unused]] size_t vtable_length) {}
+    void CheckIndex([[maybe_unused]] uint32_t index) const {}
   };
 
   using VTableIndexChecker =
@@ -10927,27 +10925,27 @@
       Runtime::Current()->GetJavaVM()->DecodeWeakGlobalAsStrong(result));
 }
 
-bool ClassLinker::DenyAccessBasedOnPublicSdk(ArtMethod* art_method ATTRIBUTE_UNUSED) const
+bool ClassLinker::DenyAccessBasedOnPublicSdk([[maybe_unused]] ArtMethod* art_method) const
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Should not be called on ClassLinker, only on AotClassLinker that overrides this.
   LOG(FATAL) << "UNREACHABLE";
   UNREACHABLE();
 }
 
-bool ClassLinker::DenyAccessBasedOnPublicSdk(ArtField* art_field ATTRIBUTE_UNUSED) const
+bool ClassLinker::DenyAccessBasedOnPublicSdk([[maybe_unused]] ArtField* art_field) const
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Should not be called on ClassLinker, only on AotClassLinker that overrides this.
   LOG(FATAL) << "UNREACHABLE";
   UNREACHABLE();
 }
 
-bool ClassLinker::DenyAccessBasedOnPublicSdk(const char* type_descriptor ATTRIBUTE_UNUSED) const {
+bool ClassLinker::DenyAccessBasedOnPublicSdk([[maybe_unused]] const char* type_descriptor) const {
   // Should not be called on ClassLinker, only on AotClassLinker that overrides this.
   LOG(FATAL) << "UNREACHABLE";
   UNREACHABLE();
 }
 
-void ClassLinker::SetEnablePublicSdkChecks(bool enabled ATTRIBUTE_UNUSED) {
+void ClassLinker::SetEnablePublicSdkChecks([[maybe_unused]] bool enabled) {
   // Should not be called on ClassLinker, only on AotClassLinker that overrides this.
   LOG(FATAL) << "UNREACHABLE";
   UNREACHABLE();
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d14e46a..6fdd94d 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -1414,13 +1414,13 @@
   //       different object. It is the listener's responsibility to handle this.
   // Note: This callback is rarely useful so a default implementation has been given that does
   //       nothing.
-  virtual void ClassPreDefine(const char* descriptor ATTRIBUTE_UNUSED,
-                              Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
-                              Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
-                              const DexFile& initial_dex_file ATTRIBUTE_UNUSED,
-                              const dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
-                              /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
-                              /*out*/dex::ClassDef const** final_class_def ATTRIBUTE_UNUSED)
+  virtual void ClassPreDefine([[maybe_unused]] const char* descriptor,
+                              [[maybe_unused]] Handle<mirror::Class> klass,
+                              [[maybe_unused]] Handle<mirror::ClassLoader> class_loader,
+                              [[maybe_unused]] const DexFile& initial_dex_file,
+                              [[maybe_unused]] const dex::ClassDef& initial_class_def,
+                              [[maybe_unused]] /*out*/ DexFile const** final_dex_file,
+                              [[maybe_unused]] /*out*/ dex::ClassDef const** final_class_def)
       REQUIRES_SHARED(Locks::mutator_lock_) {}
 
   // A class has been loaded.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 95b224f..981f5ea 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -443,7 +443,7 @@
 
   class TestRootVisitor : public SingleRootVisitor {
    public:
-    void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED) override {
+    void VisitRoot(mirror::Object* root, [[maybe_unused]] const RootInfo& info) override {
       EXPECT_TRUE(root != nullptr);
     }
   };
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 85c48a2..0f82c4b 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -136,7 +136,7 @@
 
  protected:
  // Allow subclasses such as CommonCompilerTest to add extra options.
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
+  virtual void SetUpRuntimeOptions([[maybe_unused]] RuntimeOptions* options) {}
 
   // Called before the runtime is created.
   virtual void PreRuntimeCreate() {}
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index f76ee66..3fa2fa3 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -52,25 +52,25 @@
   virtual void ClassRejected(ClassReference ref) = 0;
 
   virtual verifier::VerifierDeps* GetVerifierDeps() const = 0;
-  virtual void SetVerifierDeps(verifier::VerifierDeps* deps ATTRIBUTE_UNUSED) {}
+  virtual void SetVerifierDeps([[maybe_unused]] verifier::VerifierDeps* deps) {}
 
   // Return the class status of a previous stage of the compilation. This can be used, for example,
   // when class unloading is enabled during multidex compilation.
-  virtual ClassStatus GetPreviousClassState(ClassReference ref ATTRIBUTE_UNUSED) {
+  virtual ClassStatus GetPreviousClassState([[maybe_unused]] ClassReference ref) {
     return ClassStatus::kNotReady;
   }
 
-  virtual void SetDoesClassUnloading(bool does_class_unloading ATTRIBUTE_UNUSED,
-                                     CompilerDriver* compiler_driver ATTRIBUTE_UNUSED) {}
+  virtual void SetDoesClassUnloading([[maybe_unused]] bool does_class_unloading,
+                                     [[maybe_unused]] CompilerDriver* compiler_driver) {}
 
   bool IsBootImage() {
     return mode_ == CallbackMode::kCompileBootImage;
   }
 
-  virtual void UpdateClassState(ClassReference ref ATTRIBUTE_UNUSED,
-                                ClassStatus state ATTRIBUTE_UNUSED) {}
+  virtual void UpdateClassState([[maybe_unused]] ClassReference ref,
+                                [[maybe_unused]] ClassStatus state) {}
 
-  virtual bool CanUseOatStatusForVerification(mirror::Class* klass ATTRIBUTE_UNUSED)
+  virtual bool CanUseOatStatusForVerification([[maybe_unused]] mirror::Class* klass)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     return false;
   }
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 0b4f1f3..2ceda12 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1076,7 +1076,7 @@
 }
 
 static InstructionSet GetInstructionSetFromELF(uint16_t e_machine,
-                                               uint32_t e_flags ATTRIBUTE_UNUSED) {
+                                               [[maybe_unused]] uint32_t e_flags) {
   switch (e_machine) {
     case EM_ARM:
       return InstructionSet::kArm;
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index e2fc232..fb32c95 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -420,7 +420,7 @@
   return ReadBarrier::Mark(obj);
 }
 
-extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUSED,
+extern "C" mirror::Object* artReadBarrierSlow([[maybe_unused]] mirror::Object* ref,
                                               mirror::Object* obj,
                                               uint32_t offset) {
   // Used only in connection with non-volatile loads.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7e96f29..905cee2 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1634,9 +1634,8 @@
   }
 
   virtual void WalkHeader(
-      BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-  }
+      [[maybe_unused]] BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
+      REQUIRES_SHARED(Locks::mutator_lock_) {}
 
   void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
     BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
@@ -2174,10 +2173,8 @@
 }
 
 extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
-    uint32_t method_idx,
-    mirror::Object* this_object ATTRIBUTE_UNUSED,
-    Thread* self,
-    ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint32_t method_idx, [[maybe_unused]] mirror::Object* this_object, Thread* self, ArtMethod** sp)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   // For static, this_object is not required and may be random garbage. Don't pass it down so that
  // it doesn't trigger the ObjPtr alignment failure check.
   return artInvokeCommon<kStatic>(method_idx, nullptr, self, sp);
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index a3c1f3b..7e5e745 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -223,7 +223,7 @@
   return false;
 }
 
-bool FaultManager::HandleSigbusFault(int sig, siginfo_t* info, void* context ATTRIBUTE_UNUSED) {
+bool FaultManager::HandleSigbusFault(int sig, siginfo_t* info, [[maybe_unused]] void* context) {
   DCHECK_EQ(sig, SIGBUS);
   if (VLOG_IS_ON(signals)) {
     PrintSignalInfo(VLOG_STREAM(signals) << "Handling SIGBUS fault:\n", info);
@@ -578,7 +578,7 @@
   manager_->AddHandler(this, false);
 }
 
-bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo, void* context) {
+bool JavaStackTraceHandler::Action([[maybe_unused]] int sig, siginfo_t* siginfo, void* context) {
   // Make sure that we are in the generated code, but we may not have a dex pc.
   bool in_generated_code = manager_->IsInGeneratedCode(siginfo, context);
   if (in_generated_code) {
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 4a84799..85f7164 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -43,7 +43,7 @@
 
   inline void operator()(uint8_t* card,
                          uint8_t expected_value,
-                         uint8_t new_value ATTRIBUTE_UNUSED) const {
+                         [[maybe_unused]] uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       cleared_cards_->insert(card);
     }
@@ -60,7 +60,7 @@
 
   inline void operator()(uint8_t* card,
                          uint8_t expected_value,
-                         uint8_t new_value ATTRIBUTE_UNUSED) const {
+                         [[maybe_unused]] uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       // We want the address the card represents, not the address of the card.
       bitmap_->Set(reinterpret_cast<uintptr_t>(card_table_->AddrFromCard(card)));
@@ -78,7 +78,7 @@
       : cleared_cards_(cleared_cards) {
   }
 
-  void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card ATTRIBUTE_UNUSED) const {
+  void operator()(uint8_t* card, uint8_t expected_card, [[maybe_unused]] uint8_t new_card) const {
     if (expected_card == CardTable::kCardDirty) {
       cleared_cards_->push_back(card);
     }
@@ -100,7 +100,7 @@
       contains_reference_to_other_space_(contains_reference_to_other_space) {}
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+  void operator()(mirror::Object* obj, MemberOffset offset, [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     MarkReference(obj->GetFieldObjectReferenceAddr(offset));
   }
@@ -195,7 +195,7 @@
         has_target_reference_(has_target_reference) {}
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+  void operator()(mirror::Object* obj, MemberOffset offset, [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     mirror::Object* ref = ref_ptr->AsMirrorPtr();
@@ -270,7 +270,7 @@
         references_(references) {}
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+  void operator()(mirror::Object* obj, MemberOffset offset, [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (ref != nullptr &&
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 3f38f50..f1f10d2 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -100,7 +100,7 @@
  public:
   explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
   void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                         bool do_atomic_update ATTRIBUTE_UNUSED) override
+                         [[maybe_unused]] bool do_atomic_update) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(ref != nullptr);
     MarkObject(ref->AsMirrorPtr());
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index fba62c3..e4ee305 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -42,7 +42,7 @@
   explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
       : dirty_cards_(dirty_cards) {}
 
-  void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value ATTRIBUTE_UNUSED) const {
+  void operator()(uint8_t* card, uint8_t expected_value, [[maybe_unused]] uint8_t new_value) const {
     if (expected_value == CardTable::kCardDirty) {
       dirty_cards_->insert(card);
     }
@@ -69,8 +69,7 @@
 
   void operator()(ObjPtr<mirror::Object> obj,
                   MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+                  [[maybe_unused]] bool is_static) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 8fcf102..72fcef0 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -134,9 +134,7 @@
  public:
   explicit SimpleCounter(size_t* counter) : count_(counter) {}
 
-  void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
-    (*count_)++;
-  }
+  void operator()([[maybe_unused]] mirror::Object* obj) const { (*count_)++; }
 
   size_t* const count_;
 };
@@ -203,9 +201,7 @@
                           uintptr_t range_end,
                           size_t manual_count) {
     size_t count = 0;
-    auto count_fn = [&count](mirror::Object* obj ATTRIBUTE_UNUSED) {
-      count++;
-    };
+    auto count_fn = [&count]([[maybe_unused]] mirror::Object* obj) { count++; };
     space_bitmap->VisitMarkedRange(range_begin, range_end, count_fn);
     EXPECT_EQ(count, manual_count);
   };
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index 376b524..f286c6c 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -54,9 +54,9 @@
   // PreObjectAlloc and the newly allocated object being visible to heap-walks.
   //
   // This can also be used to make any last-minute changes to the type or size of the allocation.
-  virtual void PreObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
-                                  MutableHandle<mirror::Class> type ATTRIBUTE_UNUSED,
-                                  size_t* byte_count ATTRIBUTE_UNUSED)
+  virtual void PreObjectAllocated([[maybe_unused]] Thread* self,
+                                  [[maybe_unused]] MutableHandle<mirror::Class> type,
+                                  [[maybe_unused]] size_t* byte_count)
       REQUIRES(!Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) {}
   // Fast check if we want to get the PreObjectAllocated callback, to avoid the expense of creating
   // handles. Defaults to false.
diff --git a/runtime/gc/allocator/art-dlmalloc.cc b/runtime/gc/allocator/art-dlmalloc.cc
index de0c85a..6296acd 100644
--- a/runtime/gc/allocator/art-dlmalloc.cc
+++ b/runtime/gc/allocator/art-dlmalloc.cc
@@ -83,8 +83,8 @@
   }
 }
 
-extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
-                                               void* end ATTRIBUTE_UNUSED,
+extern "C" void DlmallocBytesAllocatedCallback([[maybe_unused]] void* start,
+                                               [[maybe_unused]] void* end,
                                                size_t used_bytes,
                                                void* arg) {
   if (used_bytes == 0) {
@@ -94,8 +94,8 @@
   *bytes_allocated += used_bytes + sizeof(size_t);
 }
 
-extern "C" void DlmallocObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED,
-                                                 void* end ATTRIBUTE_UNUSED,
+extern "C" void DlmallocObjectsAllocatedCallback([[maybe_unused]] void* start,
+                                                 [[maybe_unused]] void* end,
                                                  size_t used_bytes,
                                                  void* arg) {
   if (used_bytes == 0) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 320440d..656e29d 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1720,8 +1720,10 @@
   DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]);
 }
 
-void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
-                                      size_t used_bytes, void* arg) {
+void RosAlloc::BytesAllocatedCallback([[maybe_unused]] void* start,
+                                      [[maybe_unused]] void* end,
+                                      size_t used_bytes,
+                                      void* arg) {
   if (used_bytes == 0) {
     return;
   }
@@ -1729,8 +1731,10 @@
   *bytes_allocated += used_bytes;
 }
 
-void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
-                                        size_t used_bytes, void* arg) {
+void RosAlloc::ObjectsAllocatedCallback([[maybe_unused]] void* start,
+                                        [[maybe_unused]] void* end,
+                                        size_t used_bytes,
+                                        void* arg) {
   if (used_bytes == 0) {
     return;
   }
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 9a09c88..bb2f426 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -303,7 +303,7 @@
     // The number of slots in the list. This is used to make it fast to check if a free list is all
     // free without traversing the whole free list.
     uint32_t size_;
-    uint32_t padding_ ATTRIBUTE_UNUSED;
+    [[maybe_unused]] uint32_t padding_;
     friend class RosAlloc;
   };
 
@@ -354,7 +354,7 @@
     uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
     bool to_be_bulk_freed_;             // Used within BulkFree() to flag a run that's involved with
                                         // a bulk free.
-    uint32_t padding_ ATTRIBUTE_UNUSED;
+    [[maybe_unused]] uint32_t padding_;
     // Use a tailless free list for free_list_ so that the alloc fast path does not manage the tail.
     SlotFreeList<false> free_list_;
     SlotFreeList<true> bulk_free_list_;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 1f123aa..3e95871 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -300,7 +300,7 @@
   explicit ActivateReadBarrierEntrypointsCallback(ConcurrentCopying* concurrent_copying)
       : concurrent_copying_(concurrent_copying) {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
+  void Run([[maybe_unused]] Thread* self) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a race with ThreadList::Register().
     CHECK(!concurrent_copying_->is_using_read_barrier_entrypoints_);
@@ -509,7 +509,7 @@
 
   void VisitRoots(mirror::Object*** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) override
+                  [[maybe_unused]] const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     for (size_t i = 0; i < count; ++i) {
@@ -526,7 +526,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) override
+                  [[maybe_unused]] const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     for (size_t i = 0; i < count; ++i) {
@@ -700,7 +700,7 @@
 
   void operator()(ObjPtr<mirror::Object> obj,
                   MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
+                  [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
      CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
@@ -952,7 +952,7 @@
 
   void VisitRoots(mirror::Object*** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) override
+                  [[maybe_unused]] const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mirror::Object** root = roots[i];
@@ -965,7 +965,7 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) override
+                  [[maybe_unused]] const RootInfo& info) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mirror::CompressedReference<mirror::Object>* const root = roots[i];
@@ -1770,7 +1770,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
+  void Run([[maybe_unused]] Thread* self) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a race with ThreadList::Register().
     CHECK(concurrent_copying_->is_marking_);
@@ -1941,8 +1941,8 @@
     }
   }
 
-  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void VisitRoot(mirror::Object* root, [[maybe_unused]] const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(root != nullptr);
     operator()(root);
   }
@@ -1958,7 +1958,7 @@
 
   void operator()(ObjPtr<mirror::Object> obj,
                   MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
+                  [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
@@ -2053,13 +2053,13 @@
 
   void operator()(ObjPtr<mirror::Object> obj,
                   MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
+                  [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     mirror::Object* ref =
         obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
     collector_->AssertToSpaceInvariant(obj.Ptr(), offset, ref);
   }
-  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<mirror::Class> klass, [[maybe_unused]] ObjPtr<mirror::Reference> ref) const
       REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
     CHECK(klass->IsTypeOfReferenceClass());
   }
@@ -2417,7 +2417,7 @@
       : concurrent_copying_(concurrent_copying) {
   }
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES(Locks::thread_list_lock_) {
+  void Run([[maybe_unused]] Thread* self) override REQUIRES(Locks::thread_list_lock_) {
     // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
     // to avoid a deadlock b/31500969.
     CHECK(concurrent_copying_->weak_ref_access_enabled_);
@@ -3266,8 +3266,9 @@
 }
 
 // Process some roots.
-inline void ConcurrentCopying::VisitRoots(
-    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
+inline void ConcurrentCopying::VisitRoots(mirror::Object*** roots,
+                                          size_t count,
+                                          [[maybe_unused]] const RootInfo& info) {
   Thread* const self = Thread::Current();
   for (size_t i = 0; i < count; ++i) {
     mirror::Object** root = roots[i];
@@ -3308,9 +3309,9 @@
   }
 }
 
-inline void ConcurrentCopying::VisitRoots(
-    mirror::CompressedReference<mirror::Object>** roots, size_t count,
-    const RootInfo& info ATTRIBUTE_UNUSED) {
+inline void ConcurrentCopying::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                                          size_t count,
+                                          [[maybe_unused]] const RootInfo& info) {
   Thread* const self = Thread::Current();
   for (size_t i = 0; i < count; ++i) {
     mirror::CompressedReference<mirror::Object>* const root = roots[i];
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index f9a4e72..ea303ec 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -625,7 +625,7 @@
  public:
   explicit FlipCallback(MarkCompact* collector) : collector_(collector) {}
 
-  void Run(Thread* thread ATTRIBUTE_UNUSED) override REQUIRES(Locks::mutator_lock_) {
+  void Run([[maybe_unused]] Thread* thread) override REQUIRES(Locks::mutator_lock_) {
     collector_->CompactionPause();
   }
 
@@ -851,7 +851,7 @@
   explicit ConcurrentCompactionGcTask(MarkCompact* collector, size_t idx)
       : collector_(collector), index_(idx) {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Run([[maybe_unused]] Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_) {
     if (collector_->CanCompactMovingSpaceWithMinorFault()) {
       collector_->ConcurrentCompaction<MarkCompact::kMinorFaultMode>(/*buf=*/nullptr);
     } else {
@@ -1331,9 +1331,10 @@
     DCHECK(!kCheckEnd || end != nullptr);
   }
 
-  void operator()(mirror::Object* old ATTRIBUTE_UNUSED, MemberOffset offset, bool /* is_static */)
-      const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+  void operator()([[maybe_unused]] mirror::Object* old,
+                  MemberOffset offset,
+                  [[maybe_unused]] bool is_static) const ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
     bool update = true;
     if (kCheckBegin || kCheckEnd) {
       uint8_t* ref = reinterpret_cast<uint8_t*>(obj_) + offset.Int32Value();
@@ -1348,12 +1349,11 @@
  // VisitReferences().
   // TODO: Optimize reference updating using SIMD instructions. Object arrays
   // are perfect as all references are tightly packed.
-  void operator()(mirror::Object* old ATTRIBUTE_UNUSED,
+  void operator()([[maybe_unused]] mirror::Object* old,
                   MemberOffset offset,
-                  bool /*is_static*/,
-                  bool /*is_obj_array*/)
-      const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+                  [[maybe_unused]] bool is_static,
+                  [[maybe_unused]] bool is_obj_array) const ALWAYS_INLINE
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
     collector_->UpdateRef(obj_, offset);
   }
 
@@ -1455,51 +1455,38 @@
                                   << " start_addr=" << static_cast<void*>(start_addr);
                              };
   obj = GetFromSpaceAddr(obj);
-  live_words_bitmap_->VisitLiveStrides(offset,
-                                       black_allocations_begin_,
-                                       kPageSize,
-                                       [&addr,
-                                        &last_stride,
-                                        &stride_count,
-                                        &last_stride_begin,
-                                        verify_obj_callback,
-                                        this] (uint32_t stride_begin,
-                                               size_t stride_size,
-                                               bool /*is_last*/)
-                                        REQUIRES_SHARED(Locks::mutator_lock_) {
-                                         const size_t stride_in_bytes = stride_size * kAlignment;
-                                         DCHECK_LE(stride_in_bytes, kPageSize);
-                                         last_stride_begin = stride_begin;
-                                         DCHECK(IsAligned<kAlignment>(addr));
-                                         memcpy(addr,
-                                                from_space_begin_ + stride_begin * kAlignment,
-                                                stride_in_bytes);
-                                         if (kIsDebugBuild) {
-                                           uint8_t* space_begin = bump_pointer_space_->Begin();
-                                           // We can interpret the first word of the stride as an
-                                           // obj only from second stride onwards, as the first
-                                           // stride's first-object may have started on previous
-                                           // page. The only exception is the first page of the
-                                           // moving space.
-                                           if (stride_count > 0
-                                               || stride_begin * kAlignment < kPageSize) {
-                                             mirror::Object* o =
-                                                reinterpret_cast<mirror::Object*>(space_begin
-                                                                                  + stride_begin
-                                                                                  * kAlignment);
-                                             CHECK(live_words_bitmap_->Test(o)) << "ref=" << o;
-                                             CHECK(moving_space_bitmap_->Test(o))
-                                                 << "ref=" << o
-                                                 << " bitmap: "
-                                                 << moving_space_bitmap_->DumpMemAround(o);
-                                             VerifyObject(reinterpret_cast<mirror::Object*>(addr),
-                                                          verify_obj_callback);
-                                           }
-                                         }
-                                         last_stride = addr;
-                                         addr += stride_in_bytes;
-                                         stride_count++;
-                                       });
+  live_words_bitmap_->VisitLiveStrides(
+      offset,
+      black_allocations_begin_,
+      kPageSize,
+      [&addr, &last_stride, &stride_count, &last_stride_begin, verify_obj_callback, this](
+          uint32_t stride_begin, size_t stride_size, [[maybe_unused]] bool is_last)
+          REQUIRES_SHARED(Locks::mutator_lock_) {
+            const size_t stride_in_bytes = stride_size * kAlignment;
+            DCHECK_LE(stride_in_bytes, kPageSize);
+            last_stride_begin = stride_begin;
+            DCHECK(IsAligned<kAlignment>(addr));
+            memcpy(addr, from_space_begin_ + stride_begin * kAlignment, stride_in_bytes);
+            if (kIsDebugBuild) {
+              uint8_t* space_begin = bump_pointer_space_->Begin();
+              // We can interpret the first word of the stride as an
+              // obj only from the second stride onwards, as the first
+              // stride's first object may have started on the previous
+              // page. The only exception is the first page of the
+              // moving space.
+              if (stride_count > 0 || stride_begin * kAlignment < kPageSize) {
+                mirror::Object* o =
+                    reinterpret_cast<mirror::Object*>(space_begin + stride_begin * kAlignment);
+                CHECK(live_words_bitmap_->Test(o)) << "ref=" << o;
+                CHECK(moving_space_bitmap_->Test(o))
+                    << "ref=" << o << " bitmap: " << moving_space_bitmap_->DumpMemAround(o);
+                VerifyObject(reinterpret_cast<mirror::Object*>(addr), verify_obj_callback);
+              }
+            }
+            last_stride = addr;
+            addr += stride_in_bytes;
+            stride_count++;
+          });
   DCHECK_LT(last_stride, start_addr + kPageSize);
   DCHECK_GT(stride_count, 0u);
   size_t obj_size = 0;
@@ -3580,9 +3567,10 @@
     Flush();
   }
 
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
+  void VisitRoots(mirror::Object*** roots,
+                  size_t count,
+                  [[maybe_unused]] const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; i++) {
       mirror::Object* obj = *roots[i];
       if (mark_compact_->MarkObjectNonNullNoPush</*kParallel*/true>(obj)) {
@@ -3593,9 +3581,8 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
+                  [[maybe_unused]] const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; i++) {
       mirror::Object* obj = roots[i]->AsMirrorPtr();
       if (mark_compact_->MarkObjectNonNullNoPush</*kParallel*/true>(obj)) {
@@ -3762,9 +3749,7 @@
                                accounting::CardTable* const card_table)
       : visitor_(mark_compact), bitmap_(bitmap), card_table_(card_table) {}
 
-  void operator()(uint8_t* card,
-                  uint8_t expected_value,
-                  uint8_t new_value ATTRIBUTE_UNUSED) const {
+  void operator()(uint8_t* card, uint8_t expected_value, [[maybe_unused]] uint8_t new_value) const {
     if (expected_value == accounting::CardTable::kCardDirty) {
       uintptr_t start = reinterpret_cast<uintptr_t>(card_table_->AddrFromCard(card));
       bitmap_->VisitMarkedRange(start, start + accounting::CardTable::kCardSize, visitor_);
@@ -3917,9 +3902,8 @@
 
   ALWAYS_INLINE void operator()(mirror::Object* obj,
                                 MemberOffset offset,
-                                bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES(Locks::heap_bitmap_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+                                [[maybe_unused]] bool is_static) const
+      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -4096,7 +4080,7 @@
 }
 
 void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
-                                    bool do_atomic_update ATTRIBUTE_UNUSED) {
+                                    [[maybe_unused]] bool do_atomic_update) {
   MarkObject(obj->AsMirrorPtr(), nullptr, MemberOffset(0));
 }
 
@@ -4166,7 +4150,7 @@
 }
 
 bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
-                                              bool do_atomic_update ATTRIBUTE_UNUSED) {
+                                              [[maybe_unused]] bool do_atomic_update) {
   mirror::Object* ref = obj->AsMirrorPtr();
   if (ref == nullptr) {
     return true;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 4fefe65..5209354 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -416,7 +416,7 @@
 }
 
 bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
+                                            [[maybe_unused]] bool do_atomic_update) {
   mirror::Object* obj = ref->AsMirrorPtr();
   if (obj == nullptr) {
     return true;
@@ -558,7 +558,7 @@
 }
 
 void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
-                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
+                                  [[maybe_unused]] bool do_atomic_update) {
   MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
 }
 
@@ -588,7 +588,7 @@
 
 void MarkSweep::VisitRoots(mirror::Object*** roots,
                            size_t count,
-                           const RootInfo& info ATTRIBUTE_UNUSED) {
+                           [[maybe_unused]] const RootInfo& info) {
   for (size_t i = 0; i < count; ++i) {
     MarkObjectNonNull(*roots[i]);
   }
@@ -596,7 +596,7 @@
 
 void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                            size_t count,
-                           const RootInfo& info ATTRIBUTE_UNUSED) {
+                           [[maybe_unused]] const RootInfo& info) {
   for (size_t i = 0; i < count; ++i) {
     MarkObjectNonNull(roots[i]->AsMirrorPtr());
   }
@@ -698,8 +698,8 @@
         : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
 
     ALWAYS_INLINE void operator()(mirror::Object* obj,
-                    MemberOffset offset,
-                    bool is_static ATTRIBUTE_UNUSED) const
+                                  MemberOffset offset,
+                                  [[maybe_unused]] bool is_static) const
         REQUIRES_SHARED(Locks::mutator_lock_) {
       Mark(obj->GetFieldObject<mirror::Object>(offset));
     }
@@ -793,8 +793,7 @@
   }
 
   // Scans all of the objects
-  void Run(Thread* self ATTRIBUTE_UNUSED) override
-      REQUIRES(Locks::heap_bitmap_lock_)
+  void Run([[maybe_unused]] Thread* self) override REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ScanObjectParallelVisitor visitor(this);
     // TODO: Tune this.
@@ -1142,9 +1141,10 @@
             revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
   }
 
-  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
+  void VisitRoots(mirror::Object*** roots,
+                  size_t count,
+                  [[maybe_unused]] const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
     }
@@ -1152,9 +1152,8 @@
 
   void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                   size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(Locks::heap_bitmap_lock_) {
+                  [[maybe_unused]] const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
     for (size_t i = 0; i < count; ++i) {
       mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
     }
@@ -1352,9 +1351,8 @@
 
   ALWAYS_INLINE void operator()(mirror::Object* obj,
                                 MemberOffset offset,
-                                bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES(Locks::heap_bitmap_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+                                [[maybe_unused]] bool is_static) const
+      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index acd4807..a7e2b59 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -467,12 +467,13 @@
 }
 
 void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
+                                  [[maybe_unused]] bool do_atomic_update) {
   MarkObject(obj_ptr);
 }
 
-void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
-                           const RootInfo& info ATTRIBUTE_UNUSED) {
+void SemiSpace::VisitRoots(mirror::Object*** roots,
+                           size_t count,
+                           [[maybe_unused]] const RootInfo& info) {
   for (size_t i = 0; i < count; ++i) {
     auto* root = roots[i];
     auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
@@ -485,8 +486,9 @@
   }
 }
 
-void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
-                           const RootInfo& info ATTRIBUTE_UNUSED) {
+void SemiSpace::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                           size_t count,
+                           [[maybe_unused]] const RootInfo& info) {
   for (size_t i = 0; i < count; ++i) {
     MarkObjectIfNotInToSpace(roots[i]);
   }
@@ -610,7 +612,7 @@
 
 bool SemiSpace::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* object,
                                             // SemiSpace does the GC in a pause. No CAS needed.
-                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
+                                            [[maybe_unused]] bool do_atomic_update) {
   mirror::Object* obj = object->AsMirrorPtr();
   if (obj == nullptr) {
     return true;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index d93bd89..e1bd16c 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -73,7 +73,7 @@
       static_cast<VisitRootFlags>(flags | kVisitRootFlagClassLoader));
 }
 
-void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
+void StickyMarkSweep::Sweep([[maybe_unused]] bool swap_bitmaps) {
   SweepArray(GetHeap()->GetLiveStack(), false);
 }
 
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 922b588..0f1a44f 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -441,7 +441,7 @@
   return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
 }
 
-inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type ATTRIBUTE_UNUSED,
+inline bool Heap::IsOutOfMemoryOnAllocation([[maybe_unused]] AllocatorType allocator_type,
                                             size_t alloc_size,
                                             bool grow) {
   size_t old_target = target_footprint_.load(std::memory_order_relaxed);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f27bddb..381271f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -132,7 +132,7 @@
 
 // Callback function used by Perfetto to disable the heap sampler.
 void DisableHeapSamplerCallback(void* disable_ptr,
-                                const AHeapProfileDisableCallbackInfo* info_ptr ATTRIBUTE_UNUSED) {
+                                [[maybe_unused]] const AHeapProfileDisableCallbackInfo* info_ptr) {
   HeapSampler* sampler_self = reinterpret_cast<HeapSampler*>(disable_ptr);
   sampler_self->DisableHeapSampler();
 }
@@ -2342,7 +2342,7 @@
     }
   }
 
-  bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
+  bool ShouldSweepSpace([[maybe_unused]] space::ContinuousSpace* space) const override {
     // Don't sweep any spaces since we probably blasted the internal accounting of the free list
     // allocator.
     return false;
@@ -2986,7 +2986,7 @@
     CHECK_EQ(self_, Thread::Current());
   }
 
-  void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const
+  void operator()([[maybe_unused]] ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (verify_referent_) {
       VerifyReference(ref.Ptr(), ref->GetReferent(), mirror::Reference::ReferentOffset());
@@ -2995,8 +2995,7 @@
 
   void operator()(ObjPtr<mirror::Object> obj,
                   MemberOffset offset,
-                  bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+                  [[maybe_unused]] bool is_static) const REQUIRES_SHARED(Locks::mutator_lock_) {
     VerifyReference(obj.Ptr(), obj->GetFieldObject<mirror::Object>(offset), offset);
   }
 
@@ -3251,9 +3250,9 @@
   }
 
   // There are no card marks for native roots on a class.
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-      const {}
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+  void VisitRootIfNonNull(
+      [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
+  void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
 
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS; requires support for
   // annotalysis on visitors.
@@ -3502,7 +3501,7 @@
   }
 }
 
-void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc ATTRIBUTE_UNUSED) {
+void Heap::PrePauseRosAllocVerification([[maybe_unused]] collector::GarbageCollector* gc) {
   // TODO: Add a new runtime option for this?
   if (verify_pre_gc_rosalloc_) {
     RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 56efcab..f4af50f 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -332,7 +332,7 @@
   }
 
   template <typename T>
-  T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
+  T* operator()(T* ptr, [[maybe_unused]] void** dest_addr) const {
     return (ptr != nullptr) ? native_visitor_(ptr) : nullptr;
   }
 
@@ -373,9 +373,9 @@
     this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
   }
   // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-      const {}
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+  void VisitRootIfNonNull(
+      [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
+  void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
 
   template <typename T> void VisitNativeDexCacheArray(mirror::NativeArray<T>* array)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -516,8 +516,8 @@
   // Visitor for VisitReferences().
   ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> object,
                                 MemberOffset field_offset,
-                                bool is_static ATTRIBUTE_UNUSED)
-      const REQUIRES_SHARED(Locks::mutator_lock_) {
+                                [[maybe_unused]] bool is_static) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     ObjPtr<mirror::Object> old_value =
         object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(field_offset);
     if (old_value != nullptr &&
@@ -538,9 +538,9 @@
     this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
   }
   // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-      const {}
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+  void VisitRootIfNonNull(
+      [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
+  void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
 
  private:
   mirror::Class* GetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1179,15 +1179,14 @@
 
     // Fix up separately since we also need to fix up method entrypoints.
     ALWAYS_INLINE void VisitRootIfNonNull(
-        mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+        [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
 
-    ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-        const {}
+    ALWAYS_INLINE void VisitRoot(
+        [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
 
     ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
                                   MemberOffset offset,
-                                  bool is_static ATTRIBUTE_UNUSED) const
-        NO_THREAD_SAFETY_ANALYSIS {
+                                  [[maybe_unused]] bool is_static) const NO_THREAD_SAFETY_ANALYSIS {
       // Space is not yet added to the heap, don't do a read barrier.
       mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
           offset);
@@ -1898,7 +1897,7 @@
     // TODO: Rewrite ProfileCompilationInfo to provide a better interface and
     // to store the dex locations in the uncompressed section of the file.
     auto collect_fn = [&dex_locations](const std::string& dex_location,
-                                       uint32_t checksum ATTRIBUTE_UNUSED) {
+                                       [[maybe_unused]] uint32_t checksum) {
       dex_locations.insert(dex_location);  // Just collect locations.
       return false;                        // Do not read the profile data.
     };
@@ -2188,8 +2187,8 @@
                                                  bool allow_in_memory_compilation,
                                                  /*out*/ std::string* error_msg) {
   auto filename_fn = [image_isa](const std::string& location,
-                                 /*out*/std::string* filename,
-                                 /*out*/std::string* err_msg ATTRIBUTE_UNUSED) {
+                                 /*out*/ std::string* filename,
+                                 [[maybe_unused]] /*out*/ std::string* err_msg) {
     *filename = GetSystemImageFilename(location.c_str(), image_isa);
     return true;
   };
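
The two lambda rewrites above rely on parameter attributes working inside lambda parameter lists
exactly as in ordinary functions. A self-contained sketch, with invented names and data:

#include <cstdint>
#include <set>
#include <string>

int main() {
  std::set<std::string> dex_locations;
  // The checksum parameter exists only to satisfy the expected callback
  // signature; it is deliberately ignored, so it is annotated.
  auto collect_fn = [&dex_locations](const std::string& dex_location,
                                     [[maybe_unused]] uint32_t checksum) {
    dex_locations.insert(dex_location);
    return false;  // do not read further data
  };
  collect_fn("core-oj.jar!classes.dex", 0u);
  return dex_locations.size() == 1 ? 0 : 1;
}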
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index f1df45f..80ed9b3 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -585,7 +585,7 @@
   }
 }
 
-bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
+bool FreeListSpace::IsZygoteLargeObject([[maybe_unused]] Thread* self, mirror::Object* obj) const {
   const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
   DCHECK(info != nullptr);
   return info->IsZygoteObject();
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index 33bddfa..ce72b5b 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -48,7 +48,7 @@
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) override {}
+  void RegisterRecentFree([[maybe_unused]] mirror::Object* ptr) override {}
 
   size_t MaxBytesBulkAllocatedFor(size_t num_bytes) override;
 
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 1026f42..4376137 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -26,7 +26,7 @@
 namespace gc {
 namespace space {
 
-inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
+inline mirror::Object* RegionSpace::Alloc([[maybe_unused]] Thread* self,
                                           size_t num_bytes,
                                           /* out */ size_t* bytes_allocated,
                                           /* out */ size_t* usable_size,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index c5e3a70..f40061f 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -34,9 +34,7 @@
   explicit CountObjectsAllocated(size_t* objects_allocated)
       : objects_allocated_(objects_allocated) {}
 
-  void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
-    ++*objects_allocated_;
-  }
+  void operator()([[maybe_unused]] mirror::Object* obj) const { ++*objects_allocated_; }
 
  private:
   size_t* const objects_allocated_;
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index 77b9548..57d593c 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -62,7 +62,7 @@
     allow_new_system_weak_ = false;
   }
 
-  void Broadcast(bool broadcast_for_checkpoint ATTRIBUTE_UNUSED) override
+  void Broadcast([[maybe_unused]] bool broadcast_for_checkpoint) override
       REQUIRES(!allow_disallow_lock_) {
     MutexLock mu(Thread::Current(), allow_disallow_lock_);
     new_weak_condition_.Broadcast(Thread::Current());
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 7cb678b..a3666e0 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -105,7 +105,7 @@
   TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
      : HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
   }
-  void Run(Thread* thread ATTRIBUTE_UNUSED) override {
+  void Run([[maybe_unused]] Thread* thread) override {
     ASSERT_EQ(*counter_, expected_counter_);
     ++*counter_;
   }
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 195986f..ad04860 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -133,7 +133,7 @@
  public:
   explicit BFSFindReachable(ObjectSet* visited) : visited_(visited) {}
 
-  void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+  void operator()(mirror::Object* obj, MemberOffset offset, [[maybe_unused]] bool is_static) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     ArtField* field = obj->FindFieldByOffset(offset);
     Visit(obj->GetFieldObject<mirror::Object>(offset),
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index a43e889..0c50312 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -122,9 +122,7 @@
   ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
 
   // Offset of link within HandleScope, used by generated code.
-  static constexpr size_t LinkOffset(PointerSize pointer_size ATTRIBUTE_UNUSED) {
-    return 0;
-  }
+  static constexpr size_t LinkOffset([[maybe_unused]] PointerSize pointer_size) { return 0; }
 
   // Offset of length within handle scope, used by generated code.
   static constexpr size_t NumberOfReferencesOffset(PointerSize pointer_size) {
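
The LinkOffset rewrite above also shows the attribute in a constexpr function: the parameter exists
only so all of the offset helpers share one shape. A compilable sketch, with PointerSize reduced to
a stub for illustration:

#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // stub, not the ART type

// The link always sits at offset 0 regardless of pointer width; the parameter
// keeps the signature uniform with the other offset helpers.
constexpr size_t LinkOffset([[maybe_unused]] PointerSize pointer_size) {
  return 0;
}
static_assert(LinkOffset(PointerSize::k64) == 0, "link is the first field");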
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 5e4a5f3..ea64cc1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -237,7 +237,7 @@
     HandleU4List(values, count);
     length_ += count * sizeof(uint32_t);
   }
-  virtual void UpdateU4(size_t offset, uint32_t new_value ATTRIBUTE_UNUSED) {
+  virtual void UpdateU4(size_t offset, [[maybe_unused]] uint32_t new_value) {
     DCHECK_LE(offset, length_ - 4);
   }
   void AddU8List(const uint64_t* values, size_t count) {
@@ -271,21 +271,16 @@
   }
 
  protected:
-  virtual void HandleU1List(const uint8_t* values ATTRIBUTE_UNUSED,
-                            size_t count ATTRIBUTE_UNUSED) {
+  virtual void HandleU1List([[maybe_unused]] const uint8_t* values, [[maybe_unused]] size_t count) {
   }
-  virtual void HandleU1AsU2List(const uint8_t* values ATTRIBUTE_UNUSED,
-                                size_t count ATTRIBUTE_UNUSED) {
-  }
-  virtual void HandleU2List(const uint16_t* values ATTRIBUTE_UNUSED,
-                            size_t count ATTRIBUTE_UNUSED) {
-  }
-  virtual void HandleU4List(const uint32_t* values ATTRIBUTE_UNUSED,
-                            size_t count ATTRIBUTE_UNUSED) {
-  }
-  virtual void HandleU8List(const uint64_t* values ATTRIBUTE_UNUSED,
-                            size_t count ATTRIBUTE_UNUSED) {
-  }
+  virtual void HandleU1AsU2List([[maybe_unused]] const uint8_t* values,
+                                [[maybe_unused]] size_t count) {}
+  virtual void HandleU2List([[maybe_unused]] const uint16_t* values,
+                            [[maybe_unused]] size_t count) {}
+  virtual void HandleU4List([[maybe_unused]] const uint32_t* values,
+                            [[maybe_unused]] size_t count) {}
+  virtual void HandleU8List([[maybe_unused]] const uint64_t* values,
+                            [[maybe_unused]] size_t count) {}
   virtual void HandleEndRecord() {
   }
 
@@ -382,7 +377,7 @@
     buffer_.clear();
   }
 
-  virtual void HandleFlush(const uint8_t* buffer ATTRIBUTE_UNUSED, size_t length ATTRIBUTE_UNUSED) {
+  virtual void HandleFlush([[maybe_unused]] const uint8_t* buffer, [[maybe_unused]] size_t length) {
   }
 
   std::vector<uint8_t> buffer_;
@@ -743,7 +738,7 @@
     }
   }
 
-  bool DumpToDdmsBuffered(size_t overall_size ATTRIBUTE_UNUSED, size_t max_length ATTRIBUTE_UNUSED)
+  bool DumpToDdmsBuffered([[maybe_unused]] size_t overall_size, [[maybe_unused]] size_t max_length)
       REQUIRES(Locks::mutator_lock_) {
     LOG(FATAL) << "Unimplemented";
     UNREACHABLE();
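
The handlers above show the pattern behind most of these rewrites: default no-op virtual methods
keep their parameter names for documentation while subclasses override them and actually consume
the arguments. A trimmed sketch, with class names loosely borrowed from this file:

#include <cstddef>
#include <cstdint>

class EndianOutput {
 public:
  virtual ~EndianOutput() = default;
 protected:
  // The base implementation intentionally drops the data; the names stay for
  // readability and [[maybe_unused]] keeps -Wunused-parameter quiet.
  virtual void HandleU1List([[maybe_unused]] const uint8_t* values,
                            [[maybe_unused]] size_t count) {}
};

class CountingOutput : public EndianOutput {
 protected:
  // An override that uses its arguments needs no annotation.
  void HandleU1List(const uint8_t* values, size_t count) override {
    if (count != 0u) last_byte_ = values[count - 1];
    bytes_ += count;
  }
 private:
  size_t bytes_ = 0;
  uint8_t last_byte_ = 0;
};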
diff --git a/runtime/image.h b/runtime/image.h
index 324cd3c..5580e27 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -274,7 +274,7 @@
     kSectionCount,  // Number of elements in enum.
   };
 
-  static size_t NumberOfImageRoots(bool app_image ATTRIBUTE_UNUSED) {
+  static size_t NumberOfImageRoots([[maybe_unused]] bool app_image) {
     // At the moment, boot image and app image have the same number of roots,
     // though the meaning of kSpecialRoots is different.
     return kImageRootsMax;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 5ce2b10..ba0d63d 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1073,7 +1073,7 @@
   }
 }
 
-static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
+static void ResetQuickAllocEntryPointsForThread(Thread* thread, [[maybe_unused]] void* arg) {
   thread->ResetQuickAllocEntryPointsForThread();
 }
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 144ee09..7676080 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -149,8 +149,8 @@
   // Callback for when a shadow_frame with the needs_notify_pop_ boolean set is popped off the
   // stack by either a return or an exception. Normally instrumentation listeners should ensure
   // that there are shadow frames by deoptimizing stacks.
-  virtual void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
-                               const ShadowFrame& frame ATTRIBUTE_UNUSED)
+  virtual void WatchedFramePop([[maybe_unused]] Thread* thread,
+                               [[maybe_unused]] const ShadowFrame& frame)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
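
A subtlety in the pure-virtual WatchedFramePop above: -Wunused-parameter fires only where there is
a body for the parameter to be unused in, so on a bodiless declaration the attribute is consistency
and documentation rather than a necessity. A sketch, with the frame type simplified to an int:

struct Listener {
  virtual ~Listener() = default;
  // Declaration only: nothing can warn here; the attribute documents that
  // implementations are free to ignore the argument.
  virtual void WatchedFramePop([[maybe_unused]] int frame) = 0;
};

struct CountingListener : Listener {
  int pops = 0;
  // This override does ignore the frame, so here the attribute earns its keep.
  void WatchedFramePop([[maybe_unused]] int frame) override { ++pops; }
};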
 
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index fc05298..1e98e57 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -55,93 +55,93 @@
 
   virtual ~TestInstrumentationListener() {}
 
-  void MethodEntered(Thread* thread ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED) override
+  void MethodEntered([[maybe_unused]] Thread* thread, [[maybe_unused]] ArtMethod* method) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_enter_event = true;
   }
 
-  void MethodExited(Thread* thread ATTRIBUTE_UNUSED,
-                    ArtMethod* method ATTRIBUTE_UNUSED,
-                    instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
-                    MutableHandle<mirror::Object>& return_value ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void MethodExited([[maybe_unused]] Thread* thread,
+                    [[maybe_unused]] ArtMethod* method,
+                    [[maybe_unused]] instrumentation::OptionalFrame frame,
+                    [[maybe_unused]] MutableHandle<mirror::Object>& return_value) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_object_event = true;
   }
 
-  void MethodExited(Thread* thread ATTRIBUTE_UNUSED,
-                    ArtMethod* method ATTRIBUTE_UNUSED,
-                    instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
-                    JValue& return_value ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void MethodExited([[maybe_unused]] Thread* thread,
+                    [[maybe_unused]] ArtMethod* method,
+                    [[maybe_unused]] instrumentation::OptionalFrame frame,
+                    [[maybe_unused]] JValue& return_value) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_event = true;
   }
 
-  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
-                    ArtMethod* method ATTRIBUTE_UNUSED,
-                    uint32_t dex_pc ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void MethodUnwind([[maybe_unused]] Thread* thread,
+                    [[maybe_unused]] ArtMethod* method,
+                    [[maybe_unused]] uint32_t dex_pc) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_unwind_event = true;
   }
 
-  void DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
-                  Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                  ArtMethod* method ATTRIBUTE_UNUSED,
-                  uint32_t new_dex_pc ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void DexPcMoved([[maybe_unused]] Thread* thread,
+                  [[maybe_unused]] Handle<mirror::Object> this_object,
+                  [[maybe_unused]] ArtMethod* method,
+                  [[maybe_unused]] uint32_t new_dex_pc) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_dex_pc_moved_event = true;
   }
 
-  void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
-                 Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                 ArtMethod* method ATTRIBUTE_UNUSED,
-                 uint32_t dex_pc ATTRIBUTE_UNUSED,
-                 ArtField* field ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void FieldRead([[maybe_unused]] Thread* thread,
+                 [[maybe_unused]] Handle<mirror::Object> this_object,
+                 [[maybe_unused]] ArtMethod* method,
+                 [[maybe_unused]] uint32_t dex_pc,
+                 [[maybe_unused]] ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_read_event = true;
   }
 
-  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
-                    Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                    ArtMethod* method ATTRIBUTE_UNUSED,
-                    uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    ArtField* field ATTRIBUTE_UNUSED,
-                    Handle<mirror::Object> field_value ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void FieldWritten([[maybe_unused]] Thread* thread,
+                    [[maybe_unused]] Handle<mirror::Object> this_object,
+                    [[maybe_unused]] ArtMethod* method,
+                    [[maybe_unused]] uint32_t dex_pc,
+                    [[maybe_unused]] ArtField* field,
+                    [[maybe_unused]] Handle<mirror::Object> field_value) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_written_object_event = true;
   }
 
-  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
-                    Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                    ArtMethod* method ATTRIBUTE_UNUSED,
-                    uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    ArtField* field ATTRIBUTE_UNUSED,
-                    const JValue& field_value ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void FieldWritten([[maybe_unused]] Thread* thread,
+                    [[maybe_unused]] Handle<mirror::Object> this_object,
+                    [[maybe_unused]] ArtMethod* method,
+                    [[maybe_unused]] uint32_t dex_pc,
+                    [[maybe_unused]] ArtField* field,
+                    [[maybe_unused]] const JValue& field_value) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_field_written_event = true;
   }
 
-  void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
-                       Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void ExceptionThrown([[maybe_unused]] Thread* thread,
+                       [[maybe_unused]] Handle<mirror::Throwable> exception_object) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_exception_thrown_event = true;
   }
 
-  void ExceptionHandled(Thread* self ATTRIBUTE_UNUSED,
-                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void ExceptionHandled([[maybe_unused]] Thread* self,
+                        [[maybe_unused]] Handle<mirror::Throwable> throwable) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_exception_handled_event = true;
   }
 
-  void Branch(Thread* thread ATTRIBUTE_UNUSED,
-              ArtMethod* method ATTRIBUTE_UNUSED,
-              uint32_t dex_pc ATTRIBUTE_UNUSED,
-              int32_t dex_pc_offset ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void Branch([[maybe_unused]] Thread* thread,
+              [[maybe_unused]] ArtMethod* method,
+              [[maybe_unused]] uint32_t dex_pc,
+              [[maybe_unused]] int32_t dex_pc_offset) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_branch_event = true;
   }
 
-  void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void WatchedFramePop([[maybe_unused]] Thread* thread,
+                       [[maybe_unused]] const ShadowFrame& frame) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     received_watched_frame_pop = true;
   }
 
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 32ed430..8d3d2d6 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -813,8 +813,10 @@
   }
 }
 
-void UnstartedRuntime::UnstartedSystemArraycopy(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
+void UnstartedRuntime::UnstartedSystemArraycopy(Thread* self,
+                                                ShadowFrame* shadow_frame,
+                                                [[maybe_unused]] JValue* result,
+                                                size_t arg_offset) {
   // Special case array copying without initializing System.
   jint src_pos = shadow_frame->GetVReg(arg_offset + 1);
   jint dst_pos = shadow_frame->GetVReg(arg_offset + 3);
@@ -930,9 +932,10 @@
   UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
 }
 
-void UnstartedRuntime::UnstartedSystemGetSecurityManager(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
-    JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedSystemGetSecurityManager([[maybe_unused]] Thread* self,
+                                                         [[maybe_unused]] ShadowFrame* shadow_frame,
+                                                         JValue* result,
+                                                         [[maybe_unused]] size_t arg_offset) {
   result->SetL(nullptr);
 }
 
@@ -1089,8 +1092,10 @@
   return nullptr;
 }
 
-void UnstartedRuntime::UnstartedThreadLocalGet(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedThreadLocalGet(Thread* self,
+                                               ShadowFrame* shadow_frame,
+                                               JValue* result,
+                                               [[maybe_unused]] size_t arg_offset) {
   if (CheckCallers(shadow_frame, { "jdk.internal.math.FloatingDecimal$BinaryToASCIIBuffer "
                                        "jdk.internal.math.FloatingDecimal.getBinaryToASCIIBuffer()" })) {
     result->SetL(CreateInstanceOf(self, "Ljdk/internal/math/FloatingDecimal$BinaryToASCIIBuffer;"));
@@ -1101,8 +1106,10 @@
   }
 }
 
-void UnstartedRuntime::UnstartedThreadCurrentThread(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedThreadCurrentThread(Thread* self,
+                                                    ShadowFrame* shadow_frame,
+                                                    JValue* result,
+                                                    [[maybe_unused]] size_t arg_offset) {
   if (CheckCallers(shadow_frame,
                    { "void java.lang.Thread.<init>(java.lang.ThreadGroup, java.lang.Runnable, "
                          "java.lang.String, long, java.security.AccessControlContext, boolean)",
@@ -1131,8 +1138,10 @@
   }
 }
 
-void UnstartedRuntime::UnstartedThreadGetNativeState(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedThreadGetNativeState(Thread* self,
+                                                     ShadowFrame* shadow_frame,
+                                                     JValue* result,
+                                                     [[maybe_unused]] size_t arg_offset) {
   if (CheckCallers(shadow_frame,
                    { "java.lang.Thread$State java.lang.Thread.getState()",
                      "java.lang.ThreadGroup java.lang.Thread.getThreadGroup()",
@@ -1154,45 +1163,61 @@
   }
 }
 
-void UnstartedRuntime::UnstartedMathCeil(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMathCeil([[maybe_unused]] Thread* self,
+                                         ShadowFrame* shadow_frame,
+                                         JValue* result,
+                                         size_t arg_offset) {
   result->SetD(ceil(shadow_frame->GetVRegDouble(arg_offset)));
 }
 
-void UnstartedRuntime::UnstartedMathFloor(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMathFloor([[maybe_unused]] Thread* self,
+                                          ShadowFrame* shadow_frame,
+                                          JValue* result,
+                                          size_t arg_offset) {
   result->SetD(floor(shadow_frame->GetVRegDouble(arg_offset)));
 }
 
-void UnstartedRuntime::UnstartedMathSin(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMathSin([[maybe_unused]] Thread* self,
+                                        ShadowFrame* shadow_frame,
+                                        JValue* result,
+                                        size_t arg_offset) {
   result->SetD(sin(shadow_frame->GetVRegDouble(arg_offset)));
 }
 
-void UnstartedRuntime::UnstartedMathCos(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMathCos([[maybe_unused]] Thread* self,
+                                        ShadowFrame* shadow_frame,
+                                        JValue* result,
+                                        size_t arg_offset) {
   result->SetD(cos(shadow_frame->GetVRegDouble(arg_offset)));
 }
 
-void UnstartedRuntime::UnstartedMathPow(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMathPow([[maybe_unused]] Thread* self,
+                                        ShadowFrame* shadow_frame,
+                                        JValue* result,
+                                        size_t arg_offset) {
   result->SetD(pow(shadow_frame->GetVRegDouble(arg_offset),
                    shadow_frame->GetVRegDouble(arg_offset + 2)));
 }
 
-void UnstartedRuntime::UnstartedMathTan(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMathTan([[maybe_unused]] Thread* self,
+                                        ShadowFrame* shadow_frame,
+                                        JValue* result,
+                                        size_t arg_offset) {
   result->SetD(tan(shadow_frame->GetVRegDouble(arg_offset)));
 }
 
-void UnstartedRuntime::UnstartedObjectHashCode(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedObjectHashCode([[maybe_unused]] Thread* self,
+                                               ShadowFrame* shadow_frame,
+                                               JValue* result,
+                                               size_t arg_offset) {
   mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
   result->SetI(obj->IdentityHashCode());
 }
 
-void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits([[maybe_unused]] Thread* self,
+                                                          ShadowFrame* shadow_frame,
+                                                          JValue* result,
+                                                          size_t arg_offset) {
   double in = shadow_frame->GetVRegDouble(arg_offset);
   result->SetJ(bit_cast<int64_t, double>(in));
 }
@@ -1240,23 +1265,31 @@
   UNREACHABLE();
 }
 
-void UnstartedRuntime::UnstartedMemoryPeekByte(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMemoryPeekByte([[maybe_unused]] Thread* self,
+                                               ShadowFrame* shadow_frame,
+                                               JValue* result,
+                                               size_t arg_offset) {
   UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset);
 }
 
-void UnstartedRuntime::UnstartedMemoryPeekShort(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMemoryPeekShort([[maybe_unused]] Thread* self,
+                                                ShadowFrame* shadow_frame,
+                                                JValue* result,
+                                                size_t arg_offset) {
   UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset);
 }
 
-void UnstartedRuntime::UnstartedMemoryPeekInt(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMemoryPeekInt([[maybe_unused]] Thread* self,
+                                              ShadowFrame* shadow_frame,
+                                              JValue* result,
+                                              size_t arg_offset) {
   UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset);
 }
 
-void UnstartedRuntime::UnstartedMemoryPeekLong(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMemoryPeekLong([[maybe_unused]] Thread* self,
+                                               ShadowFrame* shadow_frame,
+                                               JValue* result,
+                                               size_t arg_offset) {
   UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset);
 }
 
@@ -1309,14 +1342,18 @@
   UNREACHABLE();
 }
 
-void UnstartedRuntime::UnstartedMemoryPeekByteArray(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
+void UnstartedRuntime::UnstartedMemoryPeekByteArray(Thread* self,
+                                                    ShadowFrame* shadow_frame,
+                                                    [[maybe_unused]] JValue* result,
+                                                    size_t arg_offset) {
   UnstartedMemoryPeekArray(Primitive::kPrimByte, self, shadow_frame, arg_offset);
 }
 
 // This allows reading the new style of String objects during compilation.
-void UnstartedRuntime::UnstartedStringGetCharsNoCheck(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset) {
+void UnstartedRuntime::UnstartedStringGetCharsNoCheck(Thread* self,
+                                                      ShadowFrame* shadow_frame,
+                                                      [[maybe_unused]] JValue* result,
+                                                      size_t arg_offset) {
   jint start = shadow_frame->GetVReg(arg_offset + 1);
   jint end = shadow_frame->GetVReg(arg_offset + 2);
   jint index = shadow_frame->GetVReg(arg_offset + 4);
@@ -1477,8 +1514,10 @@
 // where we can predict the behavior (somewhat).
 // Note: this is required (instead of lazy initialization) as these classes are used in the static
 //       initialization of other classes, so they will *use* the value.
-void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(Thread* self,
+                                                           ShadowFrame* shadow_frame,
+                                                           JValue* result,
+                                                           [[maybe_unused]] size_t arg_offset) {
   if (CheckCallers(shadow_frame, { "void java.util.concurrent.SynchronousQueue.<clinit>()" })) {
     // SynchronousQueue really only distinguishes between the single- and multiprocessor cases.
     // Return 8 as a conservative upper approximation.
@@ -1628,8 +1667,10 @@
   result->SetL(value);
 }
 
-void UnstartedRuntime::UnstartedJdkUnsafePutObjectVolatile(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+void UnstartedRuntime::UnstartedJdkUnsafePutObjectVolatile(Thread* self,
+                                                           ShadowFrame* shadow_frame,
+                                                           [[maybe_unused]] JValue* result,
+                                                           size_t arg_offset)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Argument 0 is the Unsafe instance, skip.
   mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
@@ -1650,8 +1691,10 @@
   }
 }
 
-void UnstartedRuntime::UnstartedJdkUnsafePutOrderedObject(
-    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+void UnstartedRuntime::UnstartedJdkUnsafePutOrderedObject(Thread* self,
+                                                          ShadowFrame* shadow_frame,
+                                                          [[maybe_unused]] JValue* result,
+                                                          size_t arg_offset)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Argument 0 is the Unsafe instance, skip.
   mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
@@ -1799,8 +1842,10 @@
   }
 }
 
-void UnstartedRuntime::UnstartedSystemIdentityHashCode(
-    Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+void UnstartedRuntime::UnstartedSystemIdentityHashCode([[maybe_unused]] Thread* self,
+                                                       ShadowFrame* shadow_frame,
+                                                       JValue* result,
+                                                       size_t arg_offset)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
   result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
@@ -1810,9 +1855,11 @@
 // java.lang.invoke.VarHandle clinit. The clinit determines sets of
 // available VarHandle accessors and these differ based on machine
 // word size.
-void UnstartedRuntime::UnstartedJNIVMRuntimeIs64Bit(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+void UnstartedRuntime::UnstartedJNIVMRuntimeIs64Bit([[maybe_unused]] Thread* self,
+                                                    [[maybe_unused]] ArtMethod* method,
+                                                    [[maybe_unused]] mirror::Object* receiver,
+                                                    [[maybe_unused]] uint32_t* args,
+                                                    JValue* result) {
   PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
   jboolean is64bit = (pointer_size == PointerSize::k64) ? JNI_TRUE : JNI_FALSE;
   result->SetZ(is64bit);
@@ -1820,8 +1867,8 @@
 
 void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
     Thread* self,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
     uint32_t* args,
     JValue* result) {
   int32_t length = args[1];
@@ -1841,14 +1888,19 @@
 }
 
 void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+    [[maybe_unused]] Thread* self,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
+    [[maybe_unused]] uint32_t* args,
+    JValue* result) {
   result->SetL(nullptr);
 }
 
-void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+void UnstartedRuntime::UnstartedJNIVMStackGetStackClass2(Thread* self,
+                                                         [[maybe_unused]] ArtMethod* method,
+                                                         [[maybe_unused]] mirror::Object* receiver,
+                                                         [[maybe_unused]] uint32_t* args,
+                                                         JValue* result) {
   NthCallerVisitor visitor(self, 3);
   visitor.WalkStack();
   if (visitor.caller != nullptr) {
@@ -1856,75 +1908,91 @@
   }
 }
 
-void UnstartedRuntime::UnstartedJNIMathLog(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIMathLog([[maybe_unused]] Thread* self,
+                                           [[maybe_unused]] ArtMethod* method,
+                                           [[maybe_unused]] mirror::Object* receiver,
+                                           uint32_t* args,
+                                           JValue* result) {
   JValue value;
   value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
   result->SetD(log(value.GetD()));
 }
 
-void UnstartedRuntime::UnstartedJNIMathExp(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIMathExp([[maybe_unused]] Thread* self,
+                                           [[maybe_unused]] ArtMethod* method,
+                                           [[maybe_unused]] mirror::Object* receiver,
+                                           uint32_t* args,
+                                           JValue* result) {
   JValue value;
   value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
   result->SetD(exp(value.GetD()));
 }
 
 void UnstartedRuntime::UnstartedJNIAtomicLongVMSupportsCS8(
-    Thread* self ATTRIBUTE_UNUSED,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args ATTRIBUTE_UNUSED,
+    [[maybe_unused]] Thread* self,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
+    [[maybe_unused]] uint32_t* args,
     JValue* result) {
   result->SetZ(QuasiAtomic::LongAtomicsUseMutexes(Runtime::Current()->GetInstructionSet())
                    ? 0
                    : 1);
 }
 
-void UnstartedRuntime::UnstartedJNIClassGetNameNative(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
-    uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+void UnstartedRuntime::UnstartedJNIClassGetNameNative(Thread* self,
+                                                      [[maybe_unused]] ArtMethod* method,
+                                                      mirror::Object* receiver,
+                                                      [[maybe_unused]] uint32_t* args,
+                                                      JValue* result) {
   StackHandleScope<1> hs(self);
   result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
 }
 
-void UnstartedRuntime::UnstartedJNIDoubleLongBitsToDouble(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIDoubleLongBitsToDouble([[maybe_unused]] Thread* self,
+                                                          [[maybe_unused]] ArtMethod* method,
+                                                          [[maybe_unused]] mirror::Object* receiver,
+                                                          uint32_t* args,
+                                                          JValue* result) {
   uint64_t long_input = args[0] | (static_cast<uint64_t>(args[1]) << 32);
   result->SetD(bit_cast<double>(long_input));
 }
 
-void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits([[maybe_unused]] Thread* self,
+                                                          [[maybe_unused]] ArtMethod* method,
+                                                          [[maybe_unused]] mirror::Object* receiver,
+                                                          uint32_t* args,
+                                                          JValue* result) {
   result->SetI(args[0]);
 }
 
-void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIFloatIntBitsToFloat([[maybe_unused]] Thread* self,
+                                                       [[maybe_unused]] ArtMethod* method,
+                                                       [[maybe_unused]] mirror::Object* receiver,
+                                                       uint32_t* args,
+                                                       JValue* result) {
   result->SetI(args[0]);
 }
 
-void UnstartedRuntime::UnstartedJNIObjectInternalClone(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
-    uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+void UnstartedRuntime::UnstartedJNIObjectInternalClone(Thread* self,
+                                                       [[maybe_unused]] ArtMethod* method,
+                                                       mirror::Object* receiver,
+                                                       [[maybe_unused]] uint32_t* args,
+                                                       JValue* result) {
   StackHandleScope<1> hs(self);
   Handle<mirror::Object> h_receiver = hs.NewHandle(receiver);
   result->SetL(mirror::Object::Clone(h_receiver, self));
 }
 
-void UnstartedRuntime::UnstartedJNIObjectNotifyAll(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
-    uint32_t* args ATTRIBUTE_UNUSED, JValue* result ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedJNIObjectNotifyAll(Thread* self,
+                                                   [[maybe_unused]] ArtMethod* method,
+                                                   mirror::Object* receiver,
+                                                   [[maybe_unused]] uint32_t* args,
+                                                   [[maybe_unused]] JValue* result) {
   receiver->NotifyAll(self);
 }
 
 void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
-                                                   ArtMethod* method ATTRIBUTE_UNUSED,
+                                                   [[maybe_unused]] ArtMethod* method,
                                                    mirror::Object* receiver,
                                                    uint32_t* args,
                                                    JValue* result) {
@@ -1936,9 +2004,11 @@
   result->SetI(receiver->AsString()->CompareTo(rhs->AsString()));
 }
 
-void UnstartedRuntime::UnstartedJNIStringFillBytesLatin1(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver, uint32_t* args, JValue* ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedJNIStringFillBytesLatin1(Thread* self,
+                                                         [[maybe_unused]] ArtMethod* method,
+                                                         mirror::Object* receiver,
+                                                         uint32_t* args,
+                                                         [[maybe_unused]] JValue*) {
   StackHandleScope<2> hs(self);
   Handle<mirror::String> h_receiver(hs.NewHandle(
       reinterpret_cast<mirror::String*>(receiver)->AsString()));
@@ -1948,9 +2018,11 @@
   h_receiver->FillBytesLatin1(h_buffer, index);
 }
 
-void UnstartedRuntime::UnstartedJNIStringFillBytesUTF16(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver, uint32_t* args, JValue* ATTRIBUTE_UNUSED) {
+void UnstartedRuntime::UnstartedJNIStringFillBytesUTF16(Thread* self,
+                                                        [[maybe_unused]] ArtMethod* method,
+                                                        mirror::Object* receiver,
+                                                        uint32_t* args,
+                                                        [[maybe_unused]] JValue*) {
   StackHandleScope<2> hs(self);
   Handle<mirror::String> h_receiver(hs.NewHandle(
       reinterpret_cast<mirror::String*>(receiver)->AsString()));
@@ -1960,24 +2032,30 @@
   h_receiver->FillBytesUTF16(h_buffer, index);
 }
 
-void UnstartedRuntime::UnstartedJNIStringIntern(
-    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
-    uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+void UnstartedRuntime::UnstartedJNIStringIntern([[maybe_unused]] Thread* self,
+                                                [[maybe_unused]] ArtMethod* method,
+                                                mirror::Object* receiver,
+                                                [[maybe_unused]] uint32_t* args,
+                                                JValue* result) {
   result->SetL(receiver->AsString()->Intern());
 }
 
-void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIArrayCreateMultiArray(Thread* self,
+                                                         [[maybe_unused]] ArtMethod* method,
+                                                         [[maybe_unused]] mirror::Object* receiver,
+                                                         uint32_t* args,
+                                                         JValue* result) {
   StackHandleScope<2> hs(self);
   auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
   auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
   result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions));
 }
 
-void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args, JValue* result) {
+void UnstartedRuntime::UnstartedJNIArrayCreateObjectArray(Thread* self,
+                                                          [[maybe_unused]] ArtMethod* method,
+                                                          [[maybe_unused]] mirror::Object* receiver,
+                                                          uint32_t* args,
+                                                          JValue* result) {
   int32_t length = static_cast<int32_t>(args[1]);
   if (length < 0) {
     ThrowNegativeArraySizeException(length);
@@ -1998,8 +2076,11 @@
 }
 
 void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(
-    Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
+    Thread* self,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
+    [[maybe_unused]] uint32_t* args,
+    JValue* result) {
   ScopedObjectAccessUnchecked soa(self);
   ScopedLocalRef<jobject> stack_trace(self->GetJniEnv(), self->CreateInternalStackTrace(soa));
   result->SetL(soa.Decode<mirror::Object>(stack_trace.get()));
@@ -2048,19 +2129,18 @@
   UnstartedJNIJdkUnsafeGetArrayIndexScaleForComponentType(self, method, receiver, args, result);
 }
 
-void UnstartedRuntime::UnstartedJNIJdkUnsafeAddressSize(
-    Thread* self ATTRIBUTE_UNUSED,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED,
-    uint32_t* args ATTRIBUTE_UNUSED,
-    JValue* result) {
+void UnstartedRuntime::UnstartedJNIJdkUnsafeAddressSize([[maybe_unused]] Thread* self,
+                                                        [[maybe_unused]] ArtMethod* method,
+                                                        [[maybe_unused]] mirror::Object* receiver,
+                                                        [[maybe_unused]] uint32_t* args,
+                                                        JValue* result) {
   result->SetI(sizeof(void*));
 }
 
 void UnstartedRuntime::UnstartedJNIJdkUnsafeCompareAndSwapInt(
     Thread* self,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
     uint32_t* args,
     JValue* result) {
   ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
@@ -2101,11 +2181,12 @@
   UnstartedJNIJdkUnsafeCompareAndSwapInt(self, method, receiver, args, result);
 }
 
-void UnstartedRuntime::UnstartedJNIJdkUnsafeGetIntVolatile(Thread* self,
-                                                        ArtMethod* method ATTRIBUTE_UNUSED,
-                                                        mirror::Object* receiver ATTRIBUTE_UNUSED,
-                                                        uint32_t* args,
-                                                        JValue* result) {
+void UnstartedRuntime::UnstartedJNIJdkUnsafeGetIntVolatile(
+    Thread* self,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
+    uint32_t* args,
+    JValue* result) {
   ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
   if (obj == nullptr) {
     AbortTransactionOrFail(self, "Unsafe.compareAndSwapIntVolatile with null object.");
@@ -2117,10 +2198,10 @@
 }
 
 void UnstartedRuntime::UnstartedJNIJdkUnsafePutObject(Thread* self,
-                                                   ArtMethod* method ATTRIBUTE_UNUSED,
-                                                   mirror::Object* receiver ATTRIBUTE_UNUSED,
-                                                   uint32_t* args,
-                                                   JValue* result ATTRIBUTE_UNUSED) {
+                                                      [[maybe_unused]] ArtMethod* method,
+                                                      [[maybe_unused]] mirror::Object* receiver,
+                                                      uint32_t* args,
+                                                      [[maybe_unused]] JValue* result) {
   ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
   if (obj == nullptr) {
     AbortTransactionOrFail(self, "Unsafe.putObject with null object.");
@@ -2141,8 +2222,8 @@
 
 void UnstartedRuntime::UnstartedJNIJdkUnsafeGetArrayBaseOffsetForComponentType(
     Thread* self,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
     uint32_t* args,
     JValue* result) {
   ObjPtr<mirror::Object> component = reinterpret_cast32<mirror::Object*>(args[0]);
@@ -2156,8 +2237,8 @@
 
 void UnstartedRuntime::UnstartedJNIJdkUnsafeGetArrayIndexScaleForComponentType(
     Thread* self,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    [[maybe_unused]] ArtMethod* method,
+    [[maybe_unused]] mirror::Object* receiver,
     uint32_t* args,
     JValue* result) {
   ObjPtr<mirror::Object> component = reinterpret_cast32<mirror::Object*>(args[0]);
@@ -2169,23 +2250,21 @@
   result->SetI(Primitive::ComponentSize(primitive_type));
 }
 
-void UnstartedRuntime::UnstartedJNIFieldGetArtField(
-    Thread* self ATTRIBUTE_UNUSED,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver,
-    uint32_t* args ATTRIBUTE_UNUSED,
-    JValue* result) {
+void UnstartedRuntime::UnstartedJNIFieldGetArtField([[maybe_unused]] Thread* self,
+                                                    [[maybe_unused]] ArtMethod* method,
+                                                    mirror::Object* receiver,
+                                                    [[maybe_unused]] uint32_t* args,
+                                                    JValue* result) {
   ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(receiver);
   ArtField* art_field = field->GetArtField();
   result->SetJ(reinterpret_cast<int64_t>(art_field));
 }
 
-void UnstartedRuntime::UnstartedJNIFieldGetNameInternal(
-    Thread* self ATTRIBUTE_UNUSED,
-    ArtMethod* method ATTRIBUTE_UNUSED,
-    mirror::Object* receiver,
-    uint32_t* args ATTRIBUTE_UNUSED,
-    JValue* result) {
+void UnstartedRuntime::UnstartedJNIFieldGetNameInternal([[maybe_unused]] Thread* self,
+                                                        [[maybe_unused]] ArtMethod* method,
+                                                        mirror::Object* receiver,
+                                                        [[maybe_unused]] uint32_t* args,
+                                                        JValue* result) {
   ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(receiver);
   ArtField* art_field = field->GetArtField();
   result->SetL(art_field->ResolveNameString());
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index b231cce..ea475b5 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -866,7 +866,7 @@
   explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
       : dex_files_(dex_files) {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override {
+  void Run([[maybe_unused]] Thread* self) override {
     // Madvise DONTNEED dex files now that we're done compiling methods.
     for (const DexFile* dex_file : dex_files_) {
       if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
@@ -890,7 +890,7 @@
  public:
   JitZygoteDoneCompilingTask() {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override {
+  void Run([[maybe_unused]] Thread* self) override {
     DCHECK(Runtime::Current()->IsZygote());
     Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
         ZygoteCompilationState::kDone);
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 410bf70..cbfd39a 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -590,8 +590,8 @@
   return fd;
 }
 
-bool JitMemoryRegion::ProtectZygoteMemory(int fd ATTRIBUTE_UNUSED,
-                                          std::string* error_msg ATTRIBUTE_UNUSED) {
+bool JitMemoryRegion::ProtectZygoteMemory([[maybe_unused]] int fd,
+                                          [[maybe_unused]] std::string* error_msg) {
   return true;
 }
 
diff --git a/runtime/jit/jit_memory_region_test.cc b/runtime/jit/jit_memory_region_test.cc
index 2a79777..a77ea81 100644
--- a/runtime/jit/jit_memory_region_test.cc
+++ b/runtime/jit/jit_memory_region_test.cc
@@ -39,8 +39,7 @@
 // These globals are only set in child processes.
 void* gAddrToFaultOn = nullptr;
 
-[[noreturn]]
-void handler(int ATTRIBUTE_UNUSED, siginfo_t* info, void* ATTRIBUTE_UNUSED) {
+[[noreturn]] void handler([[maybe_unused]] int, siginfo_t* info, [[maybe_unused]] void*) {
   CHECK_EQ(info->si_addr, gAddrToFaultOn);
   exit(kReturnFromFault);
 }
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index eb54f98..3dc9b9f 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -1617,8 +1617,10 @@
    * Perform the array "release" operation, which may or may not copy data
    * back into the managed heap, and may or may not release the underlying storage.
    */
-  static void* ReleaseGuardedPACopy(const char* function_name, JNIEnv* env,
-                                    jarray java_array ATTRIBUTE_UNUSED, void* embedded_buf,
+  static void* ReleaseGuardedPACopy(const char* function_name,
+                                    JNIEnv* env,
+                                    [[maybe_unused]] jarray java_array,
+                                    void* embedded_buf,
                                     int mode) {
     ScopedObjectAccess soa(env);
     if (!GuardedCopy::Check(function_name, embedded_buf, true)) {
@@ -1635,7 +1637,6 @@
     return original_ptr;
   }
 
-
   /*
    * Free up the guard buffer, scrub it, and return the original pointer.
    */
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index cae33b5..e7295aa 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -62,7 +62,7 @@
 static bool gSmallStack = false;
 static bool gAsDaemon = false;
 
-static void* attach_current_thread_callback(void* arg ATTRIBUTE_UNUSED) {
+static void* attach_current_thread_callback([[maybe_unused]] void* arg) {
   JavaVM* vms_buf[1];
   jsize num_vms;
   JNIEnv* env;
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index bef0fd3..fcf38ba 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -289,7 +289,7 @@
   }
 }
 
-void ThreadResetFunctionTable(Thread* thread, void* arg ATTRIBUTE_UNUSED)
+void ThreadResetFunctionTable(Thread* thread, [[maybe_unused]] void* arg)
     REQUIRES(Locks::jni_function_table_lock_) {
   JNIEnvExt* env = thread->GetJniEnv();
   bool check_jni = env->IsCheckJniEnabled();
diff --git a/runtime/jni/jni_id_manager.cc b/runtime/jni/jni_id_manager.cc
index e556f61..5af1a78 100644
--- a/runtime/jni/jni_id_manager.cc
+++ b/runtime/jni/jni_id_manager.cc
@@ -100,7 +100,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_);
 
 template <>
-bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtMethod* t ATTRIBUTE_UNUSED) {
+bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, [[maybe_unused]] ArtMethod* t) {
   ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
   if (ext.IsNull()) {
     return true;
@@ -176,7 +176,7 @@
 size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtType* t, PointerSize pointer_size)
     REQUIRES_SHARED(Locks::mutator_lock_);
 template <>
-size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtField* f, PointerSize ptr_size ATTRIBUTE_UNUSED) {
+size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtField* f, [[maybe_unused]] PointerSize ptr_size) {
   return f->IsStatic() ? k->GetStaticFieldIdOffset(f) : k->GetInstanceFieldIdOffset(f);
 }
 template <>
@@ -208,7 +208,7 @@
 template <typename ArtType>
 bool CanUseIdArrays(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
 template <>
-bool CanUseIdArrays(ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
+bool CanUseIdArrays([[maybe_unused]] ReflectiveHandle<ArtField> t) {
   return true;
 }
 template <>
@@ -264,7 +264,7 @@
 }
 template <>
 size_t JniIdManager::GetLinearSearchStartId<ArtField>(
-    ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
+    [[maybe_unused]] ReflectiveHandle<ArtField> t) {
   return deferred_allocation_field_id_start_;
 }
 
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index ad2efc5..71bed2e 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -162,7 +162,7 @@
   NewStringUTFVisitor(const char* utf, size_t utf8_length, int32_t count, bool has_bad_char)
       : utf_(utf), utf8_length_(utf8_length), count_(count), has_bad_char_(has_bad_char) {}
 
-  void operator()(ObjPtr<mirror::Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<mirror::Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     ObjPtr<mirror::String> string = ObjPtr<mirror::String>::DownCast(obj);
@@ -226,7 +226,7 @@
 jsize GetUncompressedStringUTFLength(const uint16_t* chars, size_t length) {
   jsize byte_count = 0;
   ConvertUtf16ToUtf8<kUtfUseShortZero, kUtfUse4ByteSequence, kUtfReplaceBadSurrogates>(
-      chars, length, [&](char c ATTRIBUTE_UNUSED) { ++byte_count; });
+      chars, length, [&]([[maybe_unused]] char c) { ++byte_count; });
   return byte_count;
 }
 
@@ -2830,7 +2830,7 @@
     return static_cast<jlong>(WellKnownClasses::java_nio_Buffer_capacity->GetInt(buffer.Get()));
   }
 
-  static jobjectRefType GetObjectRefType(JNIEnv* env ATTRIBUTE_UNUSED, jobject java_object) {
+  static jobjectRefType GetObjectRefType([[maybe_unused]] JNIEnv* env, jobject java_object) {
     if (java_object == nullptr) {
       return JNIInvalidRefType;
     }
diff --git a/runtime/jni/local_reference_table.h b/runtime/jni/local_reference_table.h
index 900e4c3..22fa4a9 100644
--- a/runtime/jni/local_reference_table.h
+++ b/runtime/jni/local_reference_table.h
@@ -333,7 +333,7 @@
 
   void SetSegmentState(LRTSegmentState new_state);
 
-  static Offset SegmentStateOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
+  static Offset SegmentStateOffset([[maybe_unused]] size_t pointer_size) {
     // Note: Currently segment_state_ is at offset 0. We're testing the expected value in
     //       jni_internal_test to make sure it stays correct. It is not OFFSETOF_MEMBER, as that
     //       is not pointer-size-safe.
diff --git a/runtime/metrics/reporter_test.cc b/runtime/metrics/reporter_test.cc
index 848a74e..a61f8be 100644
--- a/runtime/metrics/reporter_test.cc
+++ b/runtime/metrics/reporter_test.cc
@@ -65,10 +65,10 @@
     current_report_->data.Put(counter_type, value);
   }
 
-  void ReportHistogram(DatumId histogram_type ATTRIBUTE_UNUSED,
-                       int64_t low_value ATTRIBUTE_UNUSED,
-                       int64_t high_value ATTRIBUTE_UNUSED,
-                       const std::vector<uint32_t>& buckets ATTRIBUTE_UNUSED) override {
+  void ReportHistogram([[maybe_unused]] DatumId histogram_type,
+                       [[maybe_unused]] int64_t low_value,
+                       [[maybe_unused]] int64_t high_value,
+                       [[maybe_unused]] const std::vector<uint32_t>& buckets) override {
     // TODO: nothing yet. We should implement and test histograms as well.
   }
 
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index 7c0d91a..1e434a1 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -39,7 +39,7 @@
 
  private:
   // We only use the field indirectly using the FlagOffset() method.
-  uint8_t flag_ ATTRIBUTE_UNUSED;
+  [[maybe_unused]] uint8_t flag_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(AccessibleObject);
 };
diff --git a/runtime/mirror/array-alloc-inl.h b/runtime/mirror/array-alloc-inl.h
index c1e0175..32840d4 100644
--- a/runtime/mirror/array-alloc-inl.h
+++ b/runtime/mirror/array-alloc-inl.h
@@ -67,7 +67,7 @@
   explicit SetLengthVisitor(int32_t length) : length_(length) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsArray as object is not yet in live bitmap or allocation stack.
     ObjPtr<Array> array = ObjPtr<Array>::DownCast(obj);
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 5d64167..e7cfb92 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -140,10 +140,10 @@
 
   // The number of array elements.
   // We only use the field indirectly using the LengthOffset() method.
-  int32_t length_ ATTRIBUTE_UNUSED;
+  [[maybe_unused]] int32_t length_;
   // Marker for the data (used by generated code)
   // We only use the field indirectly using the DataOffset() method.
-  uint32_t first_element_[0] ATTRIBUTE_UNUSED;
+  [[maybe_unused]] uint32_t first_element_[0];
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
 };
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 6458613..296eeed 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1605,9 +1605,9 @@
 
 class ReadBarrierOnNativeRootsVisitor {
  public:
-  void operator()(ObjPtr<Object> obj ATTRIBUTE_UNUSED,
-                  MemberOffset offset ATTRIBUTE_UNUSED,
-                  bool is_static ATTRIBUTE_UNUSED) const {}
+  void operator()([[maybe_unused]] ObjPtr<Object> obj,
+                  [[maybe_unused]] MemberOffset offset,
+                  [[maybe_unused]] bool is_static) const {}
 
   void VisitRootIfNonNull(CompressedReference<Object>* root) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1644,7 +1644,7 @@
         copy_bytes_(copy_bytes), imt_(imt), pointer_size_(pointer_size) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     StackHandleScope<1> hs(self_);
     Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 197172c..8a7ab88 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -81,7 +81,7 @@
   HeapReference<ClassLoader> parent_;
   HeapReference<Object> proxyCache_;
   // Native pointer to class table, need to zero this out when image writing.
-  uint32_t padding_ ATTRIBUTE_UNUSED;
+  [[maybe_unused]] uint32_t padding_;
   uint64_t allocator_;
   uint64_t class_table_;
 
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index dc4ec95..079efc3 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -64,7 +64,7 @@
   uint8_t has_real_parameter_data_;
 
   // Padding required for matching alignment with the Java peer.
-  uint8_t padding_[2] ATTRIBUTE_UNUSED;
+  [[maybe_unused]] uint8_t padding_[2];
 
   HeapReference<mirror::Class> declaring_class_;
   HeapReference<mirror::Class> declaring_class_of_overridden_method_;
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 5016c20..940b82d 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -66,9 +66,9 @@
   }
 
   // Unused since we don't copy class native roots.
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-      const {}
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+  void VisitRootIfNonNull(
+      [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
+  void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {}
 
  private:
   const ObjPtr<Object> dest_obj_;
@@ -144,7 +144,7 @@
   CopyObjectVisitor(Handle<Object>* orig, size_t num_bytes)
       : orig_(orig), num_bytes_(num_bytes) {}
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     Object::CopyObject(obj, orig_->Get(), num_bytes_);
   }
diff --git a/runtime/mirror/string-alloc-inl.h b/runtime/mirror/string-alloc-inl.h
index cb2dcb2..9c2529c 100644
--- a/runtime/mirror/string-alloc-inl.h
+++ b/runtime/mirror/string-alloc-inl.h
@@ -41,7 +41,7 @@
   explicit SetStringCountVisitor(int32_t count) : count_(count) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
@@ -61,7 +61,7 @@
       : count_(count), src_array_(src_array), offset_(offset), high_byte_(high_byte) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
@@ -96,7 +96,7 @@
       : count_(count), src_array_(src_array), offset_(offset) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
@@ -132,7 +132,7 @@
     count_(count), src_array_(src_array), offset_(offset) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
@@ -163,7 +163,7 @@
     count_(count), src_string_(src_string), offset_(offset) {
   }
 
-  void operator()(ObjPtr<Object> obj, size_t usable_size ATTRIBUTE_UNUSED) const
+  void operator()(ObjPtr<Object> obj, [[maybe_unused]] size_t usable_size) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     // Avoid AsString as object is not yet in live bitmap or allocation stack.
     ObjPtr<String> string = ObjPtr<String>::DownCast(obj);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 9f0c216..f602f73 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -368,8 +368,8 @@
 static jobject DexFile_openDexFileNative(JNIEnv* env,
                                          jclass,
                                          jstring javaSourceName,
-                                         jstring javaOutputName ATTRIBUTE_UNUSED,
-                                         jint flags ATTRIBUTE_UNUSED,
+                                         [[maybe_unused]] jstring javaOutputName,
+                                         [[maybe_unused]] jint flags,
                                          jobject class_loader,
                                          jobjectArray dex_elements) {
   ScopedUtfChars sourceName(env, javaSourceName);
@@ -758,8 +758,8 @@
 }
 
 static jboolean DexFile_isValidCompilerFilter(JNIEnv* env,
-                                            jclass javeDexFileClass ATTRIBUTE_UNUSED,
-                                            jstring javaCompilerFilter) {
+                                              [[maybe_unused]] jclass javaDexFileClass,
+                                              jstring javaCompilerFilter) {
   ScopedUtfChars compiler_filter(env, javaCompilerFilter);
   if (env->ExceptionCheck()) {
     return -1;
@@ -771,7 +771,7 @@
 }
 
 static jboolean DexFile_isProfileGuidedCompilerFilter(JNIEnv* env,
-                                                      jclass javeDexFileClass ATTRIBUTE_UNUSED,
+                                                      [[maybe_unused]] jclass javaDexFileClass,
                                                       jstring javaCompilerFilter) {
   ScopedUtfChars compiler_filter(env, javaCompilerFilter);
   if (env->ExceptionCheck()) {
@@ -786,7 +786,7 @@
 }
 
 static jboolean DexFile_isVerifiedCompilerFilter(JNIEnv* env,
-                                                 jclass javeDexFileClass ATTRIBUTE_UNUSED,
+                                                 [[maybe_unused]] jclass javaDexFileClass,
                                                  jstring javaCompilerFilter) {
   ScopedUtfChars compiler_filter(env, javaCompilerFilter);
   if (env->ExceptionCheck()) {
@@ -801,7 +801,7 @@
 }
 
 static jboolean DexFile_isOptimizedCompilerFilter(JNIEnv* env,
-                                                  jclass javeDexFileClass ATTRIBUTE_UNUSED,
+                                                  [[maybe_unused]] jclass javaDexFileClass,
                                                   jstring javaCompilerFilter) {
   ScopedUtfChars compiler_filter(env, javaCompilerFilter);
   if (env->ExceptionCheck()) {
@@ -816,12 +816,12 @@
 }
 
 static jboolean DexFile_isReadOnlyJavaDclEnforced(JNIEnv* env,
-                                                  jclass javeDexFileClass ATTRIBUTE_UNUSED) {
+                                                  [[maybe_unused]] jclass javaDexFileClass) {
   return (isReadOnlyJavaDclChecked() && isReadOnlyJavaDclEnforced(env)) ? JNI_TRUE : JNI_FALSE;
 }
 
 static jstring DexFile_getNonProfileGuidedCompilerFilter(JNIEnv* env,
-                                                         jclass javeDexFileClass ATTRIBUTE_UNUSED,
+                                                         [[maybe_unused]] jclass javaDexFileClass,
                                                          jstring javaCompilerFilter) {
   ScopedUtfChars compiler_filter(env, javaCompilerFilter);
   if (env->ExceptionCheck()) {
@@ -846,7 +846,7 @@
 }
 
 static jstring DexFile_getSafeModeCompilerFilter(JNIEnv* env,
-                                                 jclass javeDexFileClass ATTRIBUTE_UNUSED,
+                                                 [[maybe_unused]] jclass javaDexFileClass,
                                                  jstring javaCompilerFilter) {
   ScopedUtfChars compiler_filter(env, javaCompilerFilter);
   if (env->ExceptionCheck()) {
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 3653a83..65d131a 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -90,7 +90,7 @@
 
 static void VMDebug_startMethodTracingFd(JNIEnv* env,
                                          jclass,
-                                         jstring javaTraceFilename ATTRIBUTE_UNUSED,
+                                         [[maybe_unused]] jstring javaTraceFilename,
                                          jint javaFd,
                                          jint bufferSize,
                                          jint flags,
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 9e2e8b9..1ffb7ce 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -237,8 +237,8 @@
   return down_cast<JNIEnvExt*>(env)->GetVm()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
 }
 
-static jint VMRuntime_getSdkVersionNative(JNIEnv* env ATTRIBUTE_UNUSED,
-                                          jclass klass ATTRIBUTE_UNUSED,
+static jint VMRuntime_getSdkVersionNative([[maybe_unused]] JNIEnv* env,
+                                          [[maybe_unused]] jclass klass,
                                           jint default_sdk_version) {
   return android::base::GetIntProperty("ro.build.version.sdk",
                                        default_sdk_version);
@@ -355,8 +355,7 @@
   Runtime::Current()->GetHeap()->GetTaskProcessor()->RunAllTasks(Thread::ForEnv(env));
 }
 
-static void VMRuntime_preloadDexCaches(JNIEnv* env ATTRIBUTE_UNUSED, jobject) {
-}
+static void VMRuntime_preloadDexCaches([[maybe_unused]] JNIEnv* env, jobject) {}
 
 /*
  * This is called by the framework after it loads a code path on behalf of the app.
@@ -364,7 +363,7 @@
  * for more precise telemetry (e.g. is the split apk odex up to date?) and debugging.
  */
 static void VMRuntime_registerAppInfo(JNIEnv* env,
-                                      jclass clazz ATTRIBUTE_UNUSED,
+                                      [[maybe_unused]] jclass clazz,
                                       jstring package_name,
                                       jstring cur_profile_file,
                                       jstring ref_profile_file,
@@ -418,8 +417,8 @@
   return env->NewStringUTF(GetInstructionSetString(kRuntimeISA));
 }
 
-static void VMRuntime_setSystemDaemonThreadPriority(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                    jclass klass ATTRIBUTE_UNUSED) {
+static void VMRuntime_setSystemDaemonThreadPriority([[maybe_unused]] JNIEnv* env,
+                                                    [[maybe_unused]] jclass klass) {
 #ifdef ART_TARGET_ANDROID
   Thread* self = Thread::Current();
   DCHECK(self != nullptr);
@@ -435,14 +434,14 @@
 #endif
 }
 
-static void VMRuntime_setDedupeHiddenApiWarnings(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                 jclass klass ATTRIBUTE_UNUSED,
+static void VMRuntime_setDedupeHiddenApiWarnings([[maybe_unused]] JNIEnv* env,
+                                                 [[maybe_unused]] jclass klass,
                                                  jboolean dedupe) {
   Runtime::Current()->SetDedupeHiddenApiWarnings(dedupe);
 }
 
 static void VMRuntime_setProcessPackageName(JNIEnv* env,
-                                            jclass klass ATTRIBUTE_UNUSED,
+                                            [[maybe_unused]] jclass klass,
                                             jstring java_package_name) {
   ScopedUtfChars package_name(env, java_package_name);
   Runtime::Current()->SetProcessPackageName(package_name.c_str());
@@ -453,8 +452,7 @@
   Runtime::Current()->SetProcessDataDirectory(data_dir.c_str());
 }
 
-static void VMRuntime_bootCompleted(JNIEnv* env ATTRIBUTE_UNUSED,
-                                    jclass klass ATTRIBUTE_UNUSED) {
+static void VMRuntime_bootCompleted([[maybe_unused]] JNIEnv* env, [[maybe_unused]] jclass klass) {
   jit::Jit* jit = Runtime::Current()->GetJit();
   if (jit != nullptr) {
     jit->BootCompleted();
@@ -482,14 +480,14 @@
   }
 };
 
-static void VMRuntime_resetJitCounters(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+static void VMRuntime_resetJitCounters(JNIEnv* env, [[maybe_unused]] jclass klass) {
   ScopedObjectAccess soa(env);
   ClearJitCountersVisitor visitor;
   Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
 }
 
 static jboolean VMRuntime_isValidClassLoaderContext(JNIEnv* env,
-                                                    jclass klass ATTRIBUTE_UNUSED,
+                                                    [[maybe_unused]] jclass klass,
                                                     jstring jencoded_class_loader_context) {
   if (UNLIKELY(jencoded_class_loader_context == nullptr)) {
     ScopedFastNativeObjectAccess soa(env);
@@ -500,7 +498,7 @@
   return ClassLoaderContext::IsValidEncoding(encoded_class_loader_context.c_str());
 }
 
-static jobject VMRuntime_getBaseApkOptimizationInfo(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+static jobject VMRuntime_getBaseApkOptimizationInfo(JNIEnv* env, [[maybe_unused]] jclass klass) {
   AppInfo* app_info = Runtime::Current()->GetAppInfo();
   DCHECK(app_info != nullptr);
 
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 3c73cc5..5ea6d3f 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -266,8 +266,8 @@
   Runtime::Current()->PostZygoteFork();
 }
 
-static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                   jclass klass ATTRIBUTE_UNUSED,
+static void ZygoteHooks_nativePostForkSystemServer([[maybe_unused]] JNIEnv* env,
+                                                   [[maybe_unused]] jclass klass,
                                                    jint runtime_flags) {
   // Reload the current flags first. In case we need to take actions based on them.
   Runtime::Current()->ReloadAllFlags(__FUNCTION__);
@@ -441,18 +441,18 @@
   }
 }
 
-static void ZygoteHooks_startZygoteNoThreadCreation(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                    jclass klass ATTRIBUTE_UNUSED) {
+static void ZygoteHooks_startZygoteNoThreadCreation([[maybe_unused]] JNIEnv* env,
+                                                    [[maybe_unused]] jclass klass) {
   Runtime::Current()->SetZygoteNoThreadSection(true);
 }
 
-static void ZygoteHooks_stopZygoteNoThreadCreation(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                   jclass klass ATTRIBUTE_UNUSED) {
+static void ZygoteHooks_stopZygoteNoThreadCreation([[maybe_unused]] JNIEnv* env,
+                                                   [[maybe_unused]] jclass klass) {
   Runtime::Current()->SetZygoteNoThreadSection(false);
 }
 
-static jboolean ZygoteHooks_nativeZygoteLongSuspendOk(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                    jclass klass ATTRIBUTE_UNUSED) {
+static jboolean ZygoteHooks_nativeZygoteLongSuspendOk([[maybe_unused]] JNIEnv* env,
+                                                      [[maybe_unused]] jclass klass) {
   // Indefinite thread suspensions are not OK if we're supposed to be JIT-compiling for other
   // processes.  We only care about JIT compilation that affects other processes.  The zygote
   // itself doesn't run appreciable amounts of Java code when running single-threaded, so
@@ -464,7 +464,6 @@
   return (isJitZygote || explicitlyDisabled) ? JNI_FALSE : JNI_TRUE;
 }
 
-
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
   NATIVE_METHOD(ZygoteHooks, nativePostZygoteFork, "()V"),
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 4b2cc43..98afddc 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -120,11 +120,13 @@
   return javaReceiver;
 }
 
-static jobject Constructor_newInstanceFromSerialization(JNIEnv* env, jclass unused ATTRIBUTE_UNUSED,
-                                                        jclass ctorClass, jclass allocClass) {
-    jmethodID ctor = env->GetMethodID(ctorClass, "<init>", "()V");
-    DCHECK(ctor != nullptr);
-    return env->NewObject(allocClass, ctor);
+static jobject Constructor_newInstanceFromSerialization(JNIEnv* env,
+                                                        [[maybe_unused]] jclass unused,
+                                                        jclass ctorClass,
+                                                        jclass allocClass) {
+  jmethodID ctor = env->GetMethodID(ctorClass, "<init>", "()V");
+  DCHECK(ctor != nullptr);
+  return env->NewObject(allocClass, ctor);
 }
 
 static JNINativeMethod gMethods[] = {
diff --git a/runtime/native/jdk_internal_misc_Unsafe.cc b/runtime/native/jdk_internal_misc_Unsafe.cc
index 6e2f558..9b2021d 100644
--- a/runtime/native/jdk_internal_misc_Unsafe.cc
+++ b/runtime/native/jdk_internal_misc_Unsafe.cc
@@ -261,11 +261,11 @@
   return Primitive::ComponentSize(primitive_type);
 }
 
-static jint Unsafe_addressSize(JNIEnv* env ATTRIBUTE_UNUSED, jobject ob ATTRIBUTE_UNUSED) {
+static jint Unsafe_addressSize([[maybe_unused]] JNIEnv* env, [[maybe_unused]] jobject ob) {
   return sizeof(void*);
 }
 
-static jint Unsafe_pageSize(JNIEnv* env ATTRIBUTE_UNUSED, jobject ob ATTRIBUTE_UNUSED) {
+static jint Unsafe_pageSize([[maybe_unused]] JNIEnv* env, [[maybe_unused]] jobject ob) {
   return sysconf(_SC_PAGESIZE);
 }
 
@@ -288,73 +288,80 @@
   return reinterpret_cast<uintptr_t>(mem);
 }
 
-static void Unsafe_freeMemory(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static void Unsafe_freeMemory([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   free(reinterpret_cast<void*>(static_cast<uintptr_t>(address)));
 }
 
-static void Unsafe_setMemory(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jlong bytes, jbyte value) {
+static void Unsafe_setMemory(
+    [[maybe_unused]] JNIEnv* env, jobject, jlong address, jlong bytes, jbyte value) {
   memset(reinterpret_cast<void*>(static_cast<uintptr_t>(address)), value, bytes);
 }
 
-static jbyte Unsafe_getByteJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jbyte Unsafe_getByteJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jbyte*>(address);
 }
 
-static void Unsafe_putByteJB(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jbyte value) {
+static void Unsafe_putByteJB([[maybe_unused]] JNIEnv* env, jobject, jlong address, jbyte value) {
   *reinterpret_cast<jbyte*>(address) = value;
 }
 
-static jshort Unsafe_getShortJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jshort Unsafe_getShortJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jshort*>(address);
 }
 
-static void Unsafe_putShortJS(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jshort value) {
+static void Unsafe_putShortJS([[maybe_unused]] JNIEnv* env, jobject, jlong address, jshort value) {
   *reinterpret_cast<jshort*>(address) = value;
 }
 
-static jchar Unsafe_getCharJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jchar Unsafe_getCharJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jchar*>(address);
 }
 
-static void Unsafe_putCharJC(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jchar value) {
+static void Unsafe_putCharJC([[maybe_unused]] JNIEnv* env, jobject, jlong address, jchar value) {
   *reinterpret_cast<jchar*>(address) = value;
 }
 
-static jint Unsafe_getIntJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jint Unsafe_getIntJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jint*>(address);
 }
 
-static void Unsafe_putIntJI(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jint value) {
+static void Unsafe_putIntJI([[maybe_unused]] JNIEnv* env, jobject, jlong address, jint value) {
   *reinterpret_cast<jint*>(address) = value;
 }
 
-static jlong Unsafe_getLongJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jlong Unsafe_getLongJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jlong*>(address);
 }
 
-static void Unsafe_putLongJJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jlong value) {
+static void Unsafe_putLongJJ([[maybe_unused]] JNIEnv* env, jobject, jlong address, jlong value) {
   *reinterpret_cast<jlong*>(address) = value;
 }
 
-static jfloat Unsafe_getFloatJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jfloat Unsafe_getFloatJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jfloat*>(address);
 }
 
-static void Unsafe_putFloatJF(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jfloat value) {
+static void Unsafe_putFloatJF([[maybe_unused]] JNIEnv* env, jobject, jlong address, jfloat value) {
   *reinterpret_cast<jfloat*>(address) = value;
 }
-static jdouble Unsafe_getDoubleJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jdouble Unsafe_getDoubleJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jdouble*>(address);
 }
 
-static void Unsafe_putDoubleJD(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jdouble value) {
+static void Unsafe_putDoubleJD([[maybe_unused]] JNIEnv* env,
+                               jobject,
+                               jlong address,
+                               jdouble value) {
   *reinterpret_cast<jdouble*>(address) = value;
 }
 
-static void Unsafe_copyMemory0(JNIEnv *env, jobject unsafe ATTRIBUTE_UNUSED,
-                              jobject srcObj, jlong srcOffset,
-                              jobject dstObj, jlong dstOffset,
-                              jlong size) {
+static void Unsafe_copyMemory0(JNIEnv* env,
+                               [[maybe_unused]] jobject unsafe,
+                               jobject srcObj,
+                               jlong srcOffset,
+                               jobject dstObj,
+                               jlong dstOffset,
+                               jlong size) {
   ScopedFastNativeObjectAccess soa(env);
   if (size == 0) {
     return;
diff --git a/runtime/native/libcore_util_CharsetUtils.cc b/runtime/native/libcore_util_CharsetUtils.cc
index c53fd6e..46f8993 100644
--- a/runtime/native/libcore_util_CharsetUtils.cc
+++ b/runtime/native/libcore_util_CharsetUtils.cc
@@ -113,7 +113,7 @@
     utf8_length = length;
   } else {
     const uint16_t* utf16 = string->GetValue() + offset;
-    auto count_length = [&utf8_length](jbyte c ATTRIBUTE_UNUSED) ALWAYS_INLINE { ++utf8_length; };
+    auto count_length = [&utf8_length]([[maybe_unused]] jbyte c) ALWAYS_INLINE { ++utf8_length; };
     ConvertUtf16ToUtf8</*kUseShortZero=*/ true,
                        /*kUse4ByteSequence=*/ true,
                        /*kReplaceBadSurrogates=*/ true>(utf16, length, count_length);
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 8a203ce..f1e47ee 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -219,11 +219,11 @@
   return Primitive::ComponentSize(primitive_type);
 }
 
-static jint Unsafe_addressSize(JNIEnv* env ATTRIBUTE_UNUSED, jobject ob ATTRIBUTE_UNUSED) {
+static jint Unsafe_addressSize([[maybe_unused]] JNIEnv* env, [[maybe_unused]] jobject ob) {
   return sizeof(void*);
 }
 
-static jint Unsafe_pageSize(JNIEnv* env ATTRIBUTE_UNUSED, jobject ob ATTRIBUTE_UNUSED) {
+static jint Unsafe_pageSize([[maybe_unused]] JNIEnv* env, [[maybe_unused]] jobject ob) {
   return sysconf(_SC_PAGESIZE);
 }
 
@@ -242,71 +242,75 @@
   return (uintptr_t) mem;
 }
 
-static void Unsafe_freeMemory(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static void Unsafe_freeMemory([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   free(reinterpret_cast<void*>(static_cast<uintptr_t>(address)));
 }
 
-static void Unsafe_setMemory(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jlong bytes, jbyte value) {
+static void Unsafe_setMemory(
+    [[maybe_unused]] JNIEnv* env, jobject, jlong address, jlong bytes, jbyte value) {
   memset(reinterpret_cast<void*>(static_cast<uintptr_t>(address)), value, bytes);
 }
 
-static jbyte Unsafe_getByteJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jbyte Unsafe_getByteJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jbyte*>(address);
 }
 
-static void Unsafe_putByteJB(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jbyte value) {
+static void Unsafe_putByteJB([[maybe_unused]] JNIEnv* env, jobject, jlong address, jbyte value) {
   *reinterpret_cast<jbyte*>(address) = value;
 }
 
-static jshort Unsafe_getShortJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jshort Unsafe_getShortJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jshort*>(address);
 }
 
-static void Unsafe_putShortJS(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jshort value) {
+static void Unsafe_putShortJS([[maybe_unused]] JNIEnv* env, jobject, jlong address, jshort value) {
   *reinterpret_cast<jshort*>(address) = value;
 }
 
-static jchar Unsafe_getCharJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jchar Unsafe_getCharJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jchar*>(address);
 }
 
-static void Unsafe_putCharJC(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jchar value) {
+static void Unsafe_putCharJC([[maybe_unused]] JNIEnv* env, jobject, jlong address, jchar value) {
   *reinterpret_cast<jchar*>(address) = value;
 }
 
-static jint Unsafe_getIntJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jint Unsafe_getIntJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jint*>(address);
 }
 
-static void Unsafe_putIntJI(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jint value) {
+static void Unsafe_putIntJI([[maybe_unused]] JNIEnv* env, jobject, jlong address, jint value) {
   *reinterpret_cast<jint*>(address) = value;
 }
 
-static jlong Unsafe_getLongJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jlong Unsafe_getLongJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jlong*>(address);
 }
 
-static void Unsafe_putLongJJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jlong value) {
+static void Unsafe_putLongJJ([[maybe_unused]] JNIEnv* env, jobject, jlong address, jlong value) {
   *reinterpret_cast<jlong*>(address) = value;
 }
 
-static jfloat Unsafe_getFloatJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jfloat Unsafe_getFloatJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jfloat*>(address);
 }
 
-static void Unsafe_putFloatJF(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jfloat value) {
+static void Unsafe_putFloatJF([[maybe_unused]] JNIEnv* env, jobject, jlong address, jfloat value) {
   *reinterpret_cast<jfloat*>(address) = value;
 }
-static jdouble Unsafe_getDoubleJ(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address) {
+static jdouble Unsafe_getDoubleJ([[maybe_unused]] JNIEnv* env, jobject, jlong address) {
   return *reinterpret_cast<jdouble*>(address);
 }
 
-static void Unsafe_putDoubleJD(JNIEnv* env ATTRIBUTE_UNUSED, jobject, jlong address, jdouble value) {
+static void Unsafe_putDoubleJD([[maybe_unused]] JNIEnv* env,
+                               jobject,
+                               jlong address,
+                               jdouble value) {
   *reinterpret_cast<jdouble*>(address) = value;
 }
 
-static void Unsafe_copyMemory(JNIEnv *env, jobject unsafe ATTRIBUTE_UNUSED, jlong src,
-                              jlong dst, jlong size) {
+static void Unsafe_copyMemory(
+    JNIEnv* env, [[maybe_unused]] jobject unsafe, jlong src, jlong dst, jlong size) {
   if (size == 0) {
     return;
   }
@@ -347,8 +351,8 @@
   }
 }
 
-static void Unsafe_copyMemoryToPrimitiveArray(JNIEnv *env,
-                                              jobject unsafe ATTRIBUTE_UNUSED,
+static void Unsafe_copyMemoryToPrimitiveArray(JNIEnv* env,
+                                              [[maybe_unused]] jobject unsafe,
                                               jlong srcAddr,
                                               jobject dstObj,
                                               jlong dstOffset,
@@ -382,8 +386,8 @@
   }
 }
 
-static void Unsafe_copyMemoryFromPrimitiveArray(JNIEnv *env,
-                                                jobject unsafe ATTRIBUTE_UNUSED,
+static void Unsafe_copyMemoryFromPrimitiveArray(JNIEnv* env,
+                                                [[maybe_unused]] jobject unsafe,
                                                 jobject srcObj,
                                                 jlong srcOffset,
                                                 jlong dstAddr,
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index d6a0fae..bda912e 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -431,22 +431,20 @@
 
 #elif defined(__APPLE__)
 
-void DumpNativeStack(std::ostream& os ATTRIBUTE_UNUSED,
-                     pid_t tid ATTRIBUTE_UNUSED,
-                     const char* prefix ATTRIBUTE_UNUSED,
-                     ArtMethod* current_method ATTRIBUTE_UNUSED,
-                     void* ucontext_ptr ATTRIBUTE_UNUSED,
-                     bool skip_frames ATTRIBUTE_UNUSED) {
-}
+void DumpNativeStack([[maybe_unused]] std::ostream& os,
+                     [[maybe_unused]] pid_t tid,
+                     [[maybe_unused]] const char* prefix,
+                     [[maybe_unused]] ArtMethod* current_method,
+                     [[maybe_unused]] void* ucontext_ptr,
+                     [[maybe_unused]] bool skip_frames) {}
 
-void DumpNativeStack(std::ostream& os ATTRIBUTE_UNUSED,
-                     unwindstack::AndroidLocalUnwinder& existing_map ATTRIBUTE_UNUSED,
-                     pid_t tid ATTRIBUTE_UNUSED,
-                     const char* prefix ATTRIBUTE_UNUSED,
-                     ArtMethod* current_method ATTRIBUTE_UNUSED,
-                     void* ucontext_ptr ATTRIBUTE_UNUSED,
-                     bool skip_frames ATTRIBUTE_UNUSED) {
-}
+void DumpNativeStack([[maybe_unused]] std::ostream& os,
+                     [[maybe_unused]] unwindstack::AndroidLocalUnwinder& existing_map,
+                     [[maybe_unused]] pid_t tid,
+                     [[maybe_unused]] const char* prefix,
+                     [[maybe_unused]] ArtMethod* current_method,
+                     [[maybe_unused]] void* ucontext_ptr,
+                     [[maybe_unused]] bool skip_frames) {}
 
 #else
 #error "Unsupported architecture for native stack dumps."
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index aed0014..1e4e701 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -26,9 +26,9 @@
   NoopCompilerCallbacks() : CompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp) {}
   ~NoopCompilerCallbacks() {}
 
-  void AddUncompilableMethod(MethodReference ref ATTRIBUTE_UNUSED) override {}
-  void AddUncompilableClass(ClassReference ref ATTRIBUTE_UNUSED) override {}
-  void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
+  void AddUncompilableMethod([[maybe_unused]] MethodReference ref) override {}
+  void AddUncompilableClass([[maybe_unused]] ClassReference ref) override {}
+  void ClassRejected([[maybe_unused]] ClassReference ref) override {}
 
   verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
 
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 5f74584..c75a9ec 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1159,12 +1159,12 @@
             /*inout*/MemMap* reservation,  // Where to load if not null.
             /*out*/std::string* error_msg) override;
 
-  bool Load(int oat_fd ATTRIBUTE_UNUSED,
-            bool writable ATTRIBUTE_UNUSED,
-            bool executable ATTRIBUTE_UNUSED,
-            bool low_4gb ATTRIBUTE_UNUSED,
-            /*inout*/MemMap* reservation ATTRIBUTE_UNUSED,
-            /*out*/std::string* error_msg ATTRIBUTE_UNUSED) override {
+  bool Load([[maybe_unused]] int oat_fd,
+            [[maybe_unused]] bool writable,
+            [[maybe_unused]] bool executable,
+            [[maybe_unused]] bool low_4gb,
+            [[maybe_unused]] /*inout*/ MemMap* reservation,
+            [[maybe_unused]] /*out*/ std::string* error_msg) override {
     return false;
   }
 
@@ -1211,8 +1211,8 @@
 #else
   // Count the entries in dl_iterate_phdr we get at this point in time.
   struct dl_iterate_context {
-    static int callback(dl_phdr_info* info ATTRIBUTE_UNUSED,
-                        size_t size ATTRIBUTE_UNUSED,
+    static int callback([[maybe_unused]] dl_phdr_info* info,
+                        [[maybe_unused]] size_t size,
                         void* data) {
       reinterpret_cast<dl_iterate_context*>(data)->count++;
       return 0;  // Continue iteration.
@@ -1335,7 +1335,7 @@
     if (reservation != nullptr && dlopen_handle_ != nullptr) {
       // Find used pages from the reservation.
       struct dl_iterate_context {
-        static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
+        static int callback(dl_phdr_info* info, [[maybe_unused]] size_t size, void* data) {
           auto* context = reinterpret_cast<dl_iterate_context*>(data);
           static_assert(std::is_same<Elf32_Half, Elf64_Half>::value, "Half must match");
           using Elf_Half = Elf64_Half;
@@ -1433,7 +1433,7 @@
     size_t memsz;
   };
   struct dl_iterate_context {
-    static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
+    static int callback(dl_phdr_info* info, [[maybe_unused]] size_t size, void* data) {
       auto* context = reinterpret_cast<dl_iterate_context*>(data);
       static_assert(std::is_same<Elf32_Half, Elf64_Half>::value, "Half must match");
       using Elf_Half = Elf64_Half;
@@ -1597,8 +1597,7 @@
             /*inout*/MemMap* reservation,  // Where to load if not null.
             /*out*/std::string* error_msg) override;
 
-  void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {
-  }
+  void PreSetup([[maybe_unused]] const std::string& elf_filename) override {}
 
  private:
   bool ElfFileOpen(File* file,
@@ -1853,29 +1852,29 @@
  protected:
   void PreLoad() override {}
 
-  bool Load(const std::string& elf_filename ATTRIBUTE_UNUSED,
-            bool writable ATTRIBUTE_UNUSED,
-            bool executable ATTRIBUTE_UNUSED,
-            bool low_4gb ATTRIBUTE_UNUSED,
-            MemMap* reservation ATTRIBUTE_UNUSED,
-            std::string* error_msg ATTRIBUTE_UNUSED) override {
+  bool Load([[maybe_unused]] const std::string& elf_filename,
+            [[maybe_unused]] bool writable,
+            [[maybe_unused]] bool executable,
+            [[maybe_unused]] bool low_4gb,
+            [[maybe_unused]] MemMap* reservation,
+            [[maybe_unused]] std::string* error_msg) override {
     LOG(FATAL) << "Unsupported";
     UNREACHABLE();
   }
 
-  bool Load(int oat_fd ATTRIBUTE_UNUSED,
-            bool writable ATTRIBUTE_UNUSED,
-            bool executable ATTRIBUTE_UNUSED,
-            bool low_4gb ATTRIBUTE_UNUSED,
-            MemMap* reservation ATTRIBUTE_UNUSED,
-            std::string* error_msg ATTRIBUTE_UNUSED) override {
+  bool Load([[maybe_unused]] int oat_fd,
+            [[maybe_unused]] bool writable,
+            [[maybe_unused]] bool executable,
+            [[maybe_unused]] bool low_4gb,
+            [[maybe_unused]] MemMap* reservation,
+            [[maybe_unused]] std::string* error_msg) override {
     LOG(FATAL) << "Unsupported";
     UNREACHABLE();
   }
 
-  void PreSetup(const std::string& elf_filename ATTRIBUTE_UNUSED) override {}
+  void PreSetup([[maybe_unused]] const std::string& elf_filename) override {}
 
-  const uint8_t* FindDynamicSymbolAddress(const std::string& symbol_name ATTRIBUTE_UNUSED,
+  const uint8_t* FindDynamicSymbolAddress([[maybe_unused]] const std::string& symbol_name,
                                           std::string* error_msg) const override {
     *error_msg = "Unsupported";
     return nullptr;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 56d4c70..1a5c57f 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -1504,7 +1504,7 @@
         lock_(lock),
         loaded_oat_file_(nullptr) {}
 
-  void Run(Thread* self ATTRIBUTE_UNUSED) override {
+  void Run([[maybe_unused]] Thread* self) override {
     // Load the dex files, and save a pointer to the loaded oat file, so that
     // we can verify only one oat file was loaded for the dex location.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 6f0b8a1..2ba73cf 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -88,7 +88,7 @@
 
 class ThreadLifecycleCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
  public:
-  static void* PthreadsCallback(void* arg ATTRIBUTE_UNUSED) {
+  static void* PthreadsCallback([[maybe_unused]] void* arg) {
     // Attach.
     Runtime* runtime = Runtime::Current();
     CHECK(runtime->AttachCurrentThread("ThreadLifecycle test thread", true, nullptr, false));
@@ -260,12 +260,12 @@
 
   struct Callback : public ClassLoadCallback {
     void ClassPreDefine(const char* descriptor,
-                        Handle<mirror::Class> klass ATTRIBUTE_UNUSED,
-                        Handle<mirror::ClassLoader> class_loader ATTRIBUTE_UNUSED,
+                        [[maybe_unused]] Handle<mirror::Class> klass,
+                        [[maybe_unused]] Handle<mirror::ClassLoader> class_loader,
                         const DexFile& initial_dex_file,
-                        const dex::ClassDef& initial_class_def ATTRIBUTE_UNUSED,
-                        /*out*/DexFile const** final_dex_file ATTRIBUTE_UNUSED,
-                        /*out*/dex::ClassDef const** final_class_def ATTRIBUTE_UNUSED) override
+                        [[maybe_unused]] const dex::ClassDef& initial_class_def,
+                        [[maybe_unused]] /*out*/ DexFile const** final_dex_file,
+                        [[maybe_unused]] /*out*/ dex::ClassDef const** final_class_def) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       const std::string& location = initial_dex_file.GetLocation();
       std::string event =
@@ -468,20 +468,20 @@
       ref_ = { &k->GetDexFile(), k->GetDexClassDefIndex() };
     }
 
-    void MonitorContendedLocking(Monitor* mon ATTRIBUTE_UNUSED) override
-        REQUIRES_SHARED(Locks::mutator_lock_) { }
+    void MonitorContendedLocking([[maybe_unused]] Monitor* mon) override
+        REQUIRES_SHARED(Locks::mutator_lock_) {}
 
-    void MonitorContendedLocked(Monitor* mon ATTRIBUTE_UNUSED) override
-        REQUIRES_SHARED(Locks::mutator_lock_) { }
+    void MonitorContendedLocked([[maybe_unused]] Monitor* mon) override
+        REQUIRES_SHARED(Locks::mutator_lock_) {}
 
-    void ObjectWaitStart(Handle<mirror::Object> obj, int64_t millis ATTRIBUTE_UNUSED) override
+    void ObjectWaitStart(Handle<mirror::Object> obj, [[maybe_unused]] int64_t millis) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       if (IsInterestingObject(obj.Get())) {
         saw_wait_start_ = true;
       }
     }
 
-    void MonitorWaitFinished(Monitor* m, bool timed_out ATTRIBUTE_UNUSED) override
+    void MonitorWaitFinished(Monitor* m, [[maybe_unused]] bool timed_out) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       if (IsInterestingObject(m->GetObject())) {
         saw_wait_finished_ = true;
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index f41d4c9..9be1d5e 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -668,7 +668,7 @@
     explicit NativePointerVisitor(RuntimeImageHelper* helper) : helper_(helper) {}
 
     template <typename T>
-    T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const {
+    T* operator()(T* ptr, [[maybe_unused]] void** dest_addr) const {
       return helper_->NativeLocationInImage(ptr, /* must_have_relocation= */ true);
     }
 
@@ -1186,11 +1186,11 @@
         : image_(image), copy_offset_(copy_offset) {}
 
     // We do not visit native roots. These are handled with other logic.
-    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
-        const {
+    void VisitRootIfNonNull(
+        [[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {
       LOG(FATAL) << "UNREACHABLE";
     }
-    void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {
+    void VisitRoot([[maybe_unused]] mirror::CompressedReference<mirror::Object>* root) const {
       LOG(FATAL) << "UNREACHABLE";
     }
 
@@ -1209,9 +1209,8 @@
     }
 
     // java.lang.ref.Reference visitor.
-    void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
-                    ObjPtr<mirror::Reference> ref) const
-        REQUIRES_SHARED(Locks::mutator_lock_) {
+    void operator()([[maybe_unused]] ObjPtr<mirror::Class> klass,
+                    ObjPtr<mirror::Reference> ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
       operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
     }
 
diff --git a/runtime/startup_completed_task.cc b/runtime/startup_completed_task.cc
index 9358d48..a9a06bb 100644
--- a/runtime/startup_completed_task.cc
+++ b/runtime/startup_completed_task.cc
@@ -82,7 +82,7 @@
     // - accessing the image space metadata section when we madvise it
     // - accessing dex caches when we free them
     static struct EmptyClosure : Closure {
-      void Run(Thread* thread ATTRIBUTE_UNUSED) override {}
+      void Run([[maybe_unused]] Thread* thread) override {}
     } closure;
 
     runtime->GetThreadList()->RunCheckpoint(&closure);
diff --git a/runtime/string_builder_append.cc b/runtime/string_builder_append.cc
index 0083b91..2071733 100644
--- a/runtime/string_builder_append.cc
+++ b/runtime/string_builder_append.cc
@@ -492,7 +492,7 @@
 }
 
 inline void StringBuilderAppend::Builder::operator()(ObjPtr<mirror::Object> obj,
-                                                     size_t usable_size ATTRIBUTE_UNUSED) const {
+                                                     [[maybe_unused]] size_t usable_size) const {
   ObjPtr<mirror::String> new_string = ObjPtr<mirror::String>::DownCast(obj);
   new_string->SetCount(length_with_flag_);
   if (mirror::String::IsCompressed(length_with_flag_)) {
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 719e5d9..5960bcc 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -89,8 +89,8 @@
   bool CasField32(art::MemberOffset offset,
                   int32_t old_value,
                   int32_t new_value,
-                  CASMode mode ATTRIBUTE_UNUSED,
-                  std::memory_order memory_order ATTRIBUTE_UNUSED)
+                  [[maybe_unused]] CASMode mode,
+                  [[maybe_unused]] std::memory_order memory_order)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     UNUSED(offset);
     if (old_value == GetField32Volatile(offset)) {
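Note: the hunk above keeps ART's UNUSED() helper alongside the new attribute, and offset is in fact read on the very next line, so that UNUSED(offset) call is redundant (though harmless). The two idioms differ only in where they attach; a standalone sketch with a hypothetical function, using a plain void cast rather than ART's helper:

    // Attribute form: annotate the parameter's declaration.
    void Cas([[maybe_unused]] int mode, int order) {
      // Expression form: consume the value at a use site instead.
      (void)order;
    }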
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6b1934c..00a1468 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -810,7 +810,7 @@
       // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
       // auto-initialize this local variable).
       volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
-      char sink ATTRIBUTE_UNUSED = space[zero];  // NOLINT
+      [[maybe_unused]] char sink = space[zero];
       // Remove tag from the pointer. Nop in non-hwasan builds.
       uintptr_t addr = reinterpret_cast<uintptr_t>(
           __hwasan_tag_pointer != nullptr ? __hwasan_tag_pointer(space, 0) : space);
@@ -2148,8 +2148,7 @@
 
   static constexpr size_t kMaxRepetition = 3u;
 
-  VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
-      override
+  VisitMethodResult StartMethod(ArtMethod* m, [[maybe_unused]] size_t frame_nr) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
     ObjPtr<mirror::DexCache> dex_cache = m->GetDexCache();
@@ -2194,12 +2193,11 @@
     return VisitMethodResult::kContinueMethod;
   }
 
-  VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
+  VisitMethodResult EndMethod([[maybe_unused]] ArtMethod* m) override {
     return VisitMethodResult::kContinueMethod;
   }
 
-  void VisitWaitingObject(ObjPtr<mirror::Object> obj, ThreadState state ATTRIBUTE_UNUSED)
-      override
+  void VisitWaitingObject(ObjPtr<mirror::Object> obj, [[maybe_unused]] ThreadState state) override
       REQUIRES_SHARED(Locks::mutator_lock_) {
     PrintObject(obj, "  - waiting on ", ThreadList::kInvalidThreadId);
   }
@@ -2531,8 +2529,8 @@
   explicit MonitorExitVisitor(Thread* self) : self_(self) { }
 
   // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
-  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
-      override NO_THREAD_SAFETY_ANALYSIS {
+  void VisitRoot(mirror::Object* entered_monitor,
+                 [[maybe_unused]] const RootInfo& info) override NO_THREAD_SAFETY_ANALYSIS {
     if (self_->HoldsLock(entered_monitor)) {
       LOG(WARNING) << "Calling MonitorExit on object "
                    << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
@@ -3345,8 +3343,7 @@
           soaa_(soaa_in) {}
 
    protected:
-    VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
-        override
+    VisitMethodResult StartMethod(ArtMethod* m, [[maybe_unused]] size_t frame_nr) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
           soaa_, m, GetDexPc(/* abort on error */ false));
@@ -3357,7 +3354,7 @@
       return VisitMethodResult::kContinueMethod;
     }
 
-    VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) override {
+    VisitMethodResult EndMethod([[maybe_unused]] ArtMethod* m) override {
       lock_objects_.push_back({});
       lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
 
@@ -3366,8 +3363,7 @@
       return VisitMethodResult::kContinueMethod;
     }
 
-    void VisitWaitingObject(ObjPtr<mirror::Object> obj, ThreadState state ATTRIBUTE_UNUSED)
-        override
+    void VisitWaitingObject(ObjPtr<mirror::Object> obj, [[maybe_unused]] ThreadState state) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
@@ -3377,9 +3373,8 @@
       wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
     void VisitBlockedOnObject(ObjPtr<mirror::Object> obj,
-                              ThreadState state ATTRIBUTE_UNUSED,
-                              uint32_t owner_tid ATTRIBUTE_UNUSED)
-        override
+                              [[maybe_unused]] ThreadState state,
+                              [[maybe_unused]] uint32_t owner_tid) override
         REQUIRES_SHARED(Locks::mutator_lock_) {
       block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
     }
@@ -4271,26 +4266,23 @@
 
   void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
     struct UndefinedVRegInfo {
-      UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
-                        const CodeInfo& code_info ATTRIBUTE_UNUSED,
-                        const StackMap& map ATTRIBUTE_UNUSED,
+      UndefinedVRegInfo([[maybe_unused]] ArtMethod* method,
+                        [[maybe_unused]] const CodeInfo& code_info,
+                        [[maybe_unused]] const StackMap& map,
                         RootVisitor& _visitor)
-          : visitor(_visitor) {
-      }
+          : visitor(_visitor) {}
 
       ALWAYS_INLINE
       void VisitStack(mirror::Object** ref,
-                      size_t stack_index ATTRIBUTE_UNUSED,
-                      const StackVisitor* stack_visitor)
-          REQUIRES_SHARED(Locks::mutator_lock_) {
+                      [[maybe_unused]] size_t stack_index,
+                      const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
         visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
       }
 
       ALWAYS_INLINE
       void VisitRegister(mirror::Object** ref,
-                         size_t register_index ATTRIBUTE_UNUSED,
-                         const StackVisitor* stack_visitor)
-          REQUIRES_SHARED(Locks::mutator_lock_) {
+                         [[maybe_unused]] size_t register_index,
+                         const StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
         visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
       }
 
@@ -4541,8 +4533,8 @@
 
 class VerifyRootVisitor : public SingleRootVisitor {
  public:
-  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
+  void VisitRoot(mirror::Object* root, [[maybe_unused]] const RootInfo& info) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     VerifyObject(root);
   }
 };
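Note: the attribute also works on locals, replacing the trailing "var ATTRIBUTE_UNUSED" placement used for the stack-probe sink above; the old // NOLINT escape on that line goes away with it. A standalone sketch with a hypothetical function:

    #include <cstddef>

    void TouchPage(volatile char* page, std::size_t index) {
      // Force a read from the page; the value itself is deliberately discarded.
      [[maybe_unused]] char sink = page[index];
    }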
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 2e9f998..9045f50 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -349,7 +349,7 @@
   the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
 }
 
-static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg ATTRIBUTE_UNUSED) {
+static void ClearThreadStackTraceAndClockBase(Thread* thread, [[maybe_unused]] void* arg) {
   thread->SetTraceClockBase(0);
   std::vector<ArtMethod*>* stack_trace = thread->GetStackTraceSample();
   thread->SetStackTraceSample(nullptr);
@@ -489,7 +489,7 @@
   auto deleter = [](File* file) {
     if (file != nullptr) {
       file->MarkUnchecked();  // Don't deal with flushing requirements.
-      int result ATTRIBUTE_UNUSED = file->Close();
+      [[maybe_unused]] int result = file->Close();
       delete file;
     }
   };
@@ -916,8 +916,8 @@
   }
 }
 
-void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
-                       Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+void Trace::DexPcMoved([[maybe_unused]] Thread* thread,
+                       [[maybe_unused]] Handle<mirror::Object> this_object,
                        ArtMethod* method,
                        uint32_t new_dex_pc) {
   // We're not registered to listen to this kind of event, so complain.
@@ -925,23 +925,22 @@
              << " " << new_dex_pc;
 }
 
-void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
-                      Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+void Trace::FieldRead([[maybe_unused]] Thread* thread,
+                      [[maybe_unused]] Handle<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc,
-                      ArtField* field ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+                      [[maybe_unused]] ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) {
   // We're not registered to listen to this kind of event, so complain.
   LOG(ERROR) << "Unexpected field read event in tracing " << ArtMethod::PrettyMethod(method)
              << " " << dex_pc;
 }
 
-void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
-                         Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
+void Trace::FieldWritten([[maybe_unused]] Thread* thread,
+                         [[maybe_unused]] Handle<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t dex_pc,
-                         ArtField* field ATTRIBUTE_UNUSED,
-                         const JValue& field_value ATTRIBUTE_UNUSED)
+                         [[maybe_unused]] ArtField* field,
+                         [[maybe_unused]] const JValue& field_value)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // We're not registered to listen to this kind of event, so complain.
   LOG(ERROR) << "Unexpected field write event in tracing " << ArtMethod::PrettyMethod(method)
@@ -957,31 +956,29 @@
 
 void Trace::MethodExited(Thread* thread,
                          ArtMethod* method,
-                         instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
-                         JValue& return_value ATTRIBUTE_UNUSED) {
+                         [[maybe_unused]] instrumentation::OptionalFrame frame,
+                         [[maybe_unused]] JValue& return_value) {
   uint32_t thread_clock_diff = 0;
   uint64_t timestamp_counter = 0;
   ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
   LogMethodTraceEvent(thread, method, kTraceMethodExit, thread_clock_diff, timestamp_counter);
 }
 
-void Trace::MethodUnwind(Thread* thread,
-                         ArtMethod* method,
-                         uint32_t dex_pc ATTRIBUTE_UNUSED) {
+void Trace::MethodUnwind(Thread* thread, ArtMethod* method, [[maybe_unused]] uint32_t dex_pc) {
   uint32_t thread_clock_diff = 0;
   uint64_t timestamp_counter = 0;
   ReadClocks(thread, &thread_clock_diff, &timestamp_counter);
   LogMethodTraceEvent(thread, method, kTraceUnroll, thread_clock_diff, timestamp_counter);
 }
 
-void Trace::ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
-                            Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
+void Trace::ExceptionThrown([[maybe_unused]] Thread* thread,
+                            [[maybe_unused]] Handle<mirror::Throwable> exception_object)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   LOG(ERROR) << "Unexpected exception thrown event in tracing";
 }
 
-void Trace::ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
-                             Handle<mirror::Throwable> exception_object ATTRIBUTE_UNUSED)
+void Trace::ExceptionHandled([[maybe_unused]] Thread* thread,
+                             [[maybe_unused]] Handle<mirror::Throwable> exception_object)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   LOG(ERROR) << "Unexpected exception thrown event in tracing";
 }
@@ -992,8 +989,8 @@
   LOG(ERROR) << "Unexpected branch event in tracing" << ArtMethod::PrettyMethod(method);
 }
 
-void Trace::WatchedFramePop(Thread* self ATTRIBUTE_UNUSED,
-                            const ShadowFrame& frame ATTRIBUTE_UNUSED) {
+void Trace::WatchedFramePop([[maybe_unused]] Thread* self,
+                            [[maybe_unused]] const ShadowFrame& frame) {
   LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
 }
 
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index bdead55..d55876d 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -57,7 +57,7 @@
   return (memcmp(vdex_version_, kVdexVersion, sizeof(kVdexVersion)) == 0);
 }
 
-VdexFile::VdexFileHeader::VdexFileHeader(bool has_dex_section ATTRIBUTE_UNUSED)
+VdexFile::VdexFileHeader::VdexFileHeader([[maybe_unused]] bool has_dex_section)
     : number_of_sections_(static_cast<uint32_t>(VdexSection::kNumberOfSections)) {
   memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
   memcpy(vdex_version_, kVdexVersion, sizeof(kVdexVersion));
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 965bbaf..c13784c 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -312,7 +312,7 @@
         cache_id_(cache_id) {}
 
   template <typename Class>
-  void CheckConstructorInvariants(Class* this_ ATTRIBUTE_UNUSED) const
+  void CheckConstructorInvariants([[maybe_unused]] Class* this_) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     static_assert(std::is_final<Class>::value, "Class must be final.");
     if (kIsDebugBuild) {
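Note: CheckConstructorInvariants above is the limiting case — the parameter is never read and exists only so the compiler can deduce Class. A standalone sketch of the pattern (hypothetical name CheckFinal):

    #include <type_traits>

    template <typename Class>
    void CheckFinal([[maybe_unused]] Class* self) {
      // 'self' is never read; it exists only so the compiler can deduce Class.
      static_assert(std::is_final<Class>::value, "Class must be final.");
    }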
diff --git a/runtime/write_barrier-inl.h b/runtime/write_barrier-inl.h
index af8c1be..ee6b336 100644
--- a/runtime/write_barrier-inl.h
+++ b/runtime/write_barrier-inl.h
@@ -28,7 +28,7 @@
 
 template <WriteBarrier::NullCheck kNullCheck>
 inline void WriteBarrier::ForFieldWrite(ObjPtr<mirror::Object> dst,
-                                        MemberOffset offset ATTRIBUTE_UNUSED,
+                                        [[maybe_unused]] MemberOffset offset,
                                         ObjPtr<mirror::Object> new_value) {
   if (kNullCheck == kWithNullCheck && new_value == nullptr) {
     return;
@@ -38,8 +38,8 @@
 }
 
 inline void WriteBarrier::ForArrayWrite(ObjPtr<mirror::Object> dst,
-                                        int start_offset ATTRIBUTE_UNUSED,
-                                        size_t length ATTRIBUTE_UNUSED) {
+                                        [[maybe_unused]] int start_offset,
+                                        [[maybe_unused]] size_t length) {
   GetCardTable()->MarkCard(dst.Ptr());
 }
 
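Note: in write_barrier.h below, new_value is declared [[maybe_unused]] even though the inline definition above does read it for the null check. That is well-formed: the attribute is advisory and attaches to a particular declaration; it permits a parameter to go unused but never forbids using it. A standalone sketch with hypothetical names:

    struct Barrier {
      static void Write([[maybe_unused]] int offset, [[maybe_unused]] void* value);
    };

    inline void Barrier::Write(int offset, void* value) {
      if (value == nullptr) {
        return;  // 'value' is read here despite the attribute on the declaration.
      }
      (void)offset;  // 'offset' really is unused.
    }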
diff --git a/runtime/write_barrier.h b/runtime/write_barrier.h
index 112154e..8080b0d 100644
--- a/runtime/write_barrier.h
+++ b/runtime/write_barrier.h
@@ -38,15 +38,15 @@
   // safe-point. The call is not needed if null is stored in the field.
   template <NullCheck kNullCheck = kWithNullCheck>
   ALWAYS_INLINE static void ForFieldWrite(ObjPtr<mirror::Object> dst,
-                                          MemberOffset offset ATTRIBUTE_UNUSED,
-                                          ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED)
+                                          [[maybe_unused]] MemberOffset offset,
+                                          [[maybe_unused]] ObjPtr<mirror::Object> new_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Must be called if a reference element of an ObjectArray in the heap changes, and before any GC
   // safe-point. The call is not needed if null is stored in the array.
   ALWAYS_INLINE static void ForArrayWrite(ObjPtr<mirror::Object> dst,
-                                          int start_offset ATTRIBUTE_UNUSED,
-                                          size_t length ATTRIBUTE_UNUSED)
+                                          [[maybe_unused]] int start_offset,
+                                          [[maybe_unused]] size_t length)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Write barrier for every reference field in an object.