Update language to comply with Android’s inclusive language guidance

See https://source.android.com/setup/contribute/respectful-code for
reference

Bug: 161896447
Bug: 161850439
Bug: 161336379
Test: m -j checkbuild cts docs tests
Change-Id: I32d869c274a5d9a3dac63221e25874fe685d38c4
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index 01806d4..86e3b2c 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -484,8 +484,8 @@
       return *this;
     }
 
-    // Finish building the parser; performs sanity checks. Return value is moved, not copied.
-    // Do not call this more than once.
+    // Finish building the parser; performs validity checks. Return value is moved, not
+    // copied. Do not call this more than once.
     CmdlineParser Build() {
       assert(!built_);
       built_ = true;
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 4591d0b..f5cbda9 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -189,7 +189,7 @@
   // Mark the argument definition as completed, do not mutate the object anymore after this
   // call is done.
   //
-  // Performs several sanity checks and token calculations.
+  // Performs several validity checks and token calculations.
   void CompleteArgument() {
     assert(names_.size() >= 1);
     assert(!is_completed_);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 913a3ba..e7dd6cf 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -573,8 +573,8 @@
         // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
         __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue());
       } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) {
-        // Sanity check: If the return value is passed on the stack for some reason,
-        // then make sure the size matches.
+        // Check that if the return value is passed on the stack for some reason,
+        // the size matches.
         CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue());
       }
     }
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index f4f44a0..c0441b0 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -552,7 +552,7 @@
       HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
       HLessThan cmp_lt(cst_lhs, cst_rhs);
       if_block->AddInstruction(&cmp_lt);
-      // We insert a dummy instruction to separate the HIf from the HLessThan
+      // We insert a fake instruction to separate the HIf from the HLessThan
       // and force the materialization of the condition.
       HMemoryBarrier force_materialization(MemBarrierKind::kAnyAny, 0);
       if_block->AddInstruction(&force_materialization);
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index ece88a0..44de54e 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -63,7 +63,7 @@
 size_t GraphChecker::Run(bool pass_change, size_t last_size) {
   size_t current_size = GetGraph()->GetReversePostOrder().size();
   if (!pass_change) {
-    // Nothing changed for certain. Do a quick sanity check on that assertion
+    // Nothing changed for certain. Do a quick validity check on that assertion
     // for anything other than the first call (when last size was still 0).
     if (last_size != 0) {
       if (current_size != last_size) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index d1db40b..f635142 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -685,7 +685,7 @@
         RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
     register_allocator->AllocateRegisters();
 
-    // Sanity check that in normal conditions, the register should be hinted to 0 (EAX).
+    // Check that in normal conditions, the register is hinted to 0 (EAX).
     ASSERT_EQ(field->GetLiveInterval()->GetRegister(), 0);
   }
 
@@ -753,7 +753,7 @@
         RegisterAllocator::Create(GetScopedAllocator(), &codegen, liveness, strategy);
     register_allocator->AllocateRegisters();
 
-    // Sanity check that in normal conditions, the registers are the same.
+    // Check that in normal conditions, the registers are the same.
     ASSERT_EQ(first_sub->GetLiveInterval()->GetRegister(), 1);
     ASSERT_EQ(second_sub->GetLiveInterval()->GetRegister(), 1);
   }
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 1199602..e863b9a 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -229,7 +229,7 @@
 
   // When building the C++ tests, assertion code is enabled. To allow
   // asserting that the user of the assembler buffer has ensured the
-  // capacity needed for emitting, we add a dummy method in non-debug mode.
+  // capacity needed for emitting, we add a placeholder method in non-debug mode.
   bool HasEnsuredCapacity() const { return true; }
 
 #endif
diff --git a/dex2oat/dex/dex_to_dex_compiler.cc b/dex2oat/dex/dex_to_dex_compiler.cc
index de66c1e..caecf70 100644
--- a/dex2oat/dex/dex_to_dex_compiler.cc
+++ b/dex2oat/dex/dex_to_dex_compiler.cc
@@ -581,8 +581,8 @@
       CHECK(inserted) << "Failed to insert " << dex_file.PrettyMethod(method_idx);
     }
 
-    // Easy sanity check is to check that the existing stuff matches by re-quickening using the
-    // newly produced quicken data.
+    // An easy validity check is to verify that the existing stuff matches by re-quickening using
+    // the newly produced quicken data.
     // Note that this needs to be behind the lock for this case since we may unquicken in another
     // thread.
     if (kIsDebugBuild) {
@@ -594,8 +594,8 @@
     CompilationState state(this, unit, compilation_level, /*quicken_data*/ nullptr);
     quicken_data = state.Compile();
 
-    // Easy sanity check is to check that the existing stuff matches by re-quickening using the
-    // newly produced quicken data.
+    // An easy validity check is to verify that the existing stuff matches by re-quickening using
+    // the newly produced quicken data.
     if (kIsDebugBuild) {
       CompilationState state2(this, unit, compilation_level, &quicken_data);
       std::vector<uint8_t> new_data = state2.Compile();
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index adc7b9e..02302d3 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -211,7 +211,7 @@
     std::cout << "All methods and classes sizes " << everything_sizes << std::endl;
     // Putting all classes as image classes should increase art size
     EXPECT_GE(everything_sizes.art_size, base_sizes.art_size);
-    // Sanity check that dex is the same size.
+    // Check that the dex is the same size.
     EXPECT_EQ(everything_sizes.vdex_size, base_sizes.vdex_size);
   }
   static size_t kMethodFrequency = 3;
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 35f76a9..79da7bf 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1992,7 +1992,7 @@
 
   // Generate a quickened dex by using the input dm file to verify.
   generate_and_check(CompilerFilter::Filter::kQuicken);
-  // Use verify compiler filter to sanity check that FastVerify works for that filter too.
+  // Use the verify compiler filter to check that FastVerify works for that filter too.
   generate_and_check(CompilerFilter::Filter::kVerify);
 }
 
diff --git a/dex2oat/driver/compiler_driver.cc b/dex2oat/driver/compiler_driver.cc
index cb4cb27..ef0677d 100644
--- a/dex2oat/driver/compiler_driver.cc
+++ b/dex2oat/driver/compiler_driver.cc
@@ -2014,7 +2014,7 @@
       } else if (failure_kind == verifier::FailureKind::kSoftFailure) {
         manager_->GetCompiler()->AddSoftVerifierFailure();
       } else {
-        // Force a soft failure for the VerifierDeps. This is a sanity measure, as
+        // Force a soft failure for the VerifierDeps. This is a precautionary measure, as
         // the vdex file already records that the class hasn't been resolved. It avoids
         // trying to do future verification optimizations when processing the vdex file.
         DCHECK(failure_kind == verifier::FailureKind::kNoFailure ||
@@ -2319,8 +2319,8 @@
             VLOG(compiler) << "Initializing: " << descriptor;
             // TODO multithreading support. We should ensure the current compilation thread has
             // exclusive access to the runtime and the transaction. To achieve this, we could use
-            // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
-            // checks in Thread::AssertThreadSuspensionIsAllowable.
+            // a ReaderWriterMutex but we're holding the mutator lock so we fail the mutex validity
+            // checks in Thread::AssertThreadSuspensionIsAllowable.
 
             // Resolve and initialize the exception type before enabling the transaction in case
             // the transaction aborts and cannot resolve the type.
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 30b6187..2a58d02 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -3558,7 +3558,7 @@
     const uint8_t* raw_dex_file = vdex_begin_ + oat_dex_file.dex_file_offset_;
 
     if (kIsDebugBuild) {
-      // Sanity check our input files.
+      // Check the validity of the input files.
       // Note that ValidateDexFileHeader() logs error messages.
       CHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file.GetLocation()))
           << "Failed to verify written dex file header!"
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index 1971e5a..56e0dc1 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -214,7 +214,7 @@
   }
 
   bool CheckLinkedMethod(MethodReference method_ref, const ArrayRef<const uint8_t>& expected_code) {
-    // Sanity check: original code size must match linked_code.size().
+    // Check that the original code size matches linked_code.size().
     size_t idx = 0u;
     for (auto ref : compiled_method_refs_) {
       if (ref == method_ref) {
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 61cbf9d..94ac8a8 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -358,7 +358,7 @@
   ~ImgObjectVisitor() override { }
 
   void Visit(mirror::Object* object) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    // Sanity check that we are reading a real mirror::Object
+    // Check that we are reading a real mirror::Object
     CHECK(object->GetClass() != nullptr) << "Image object at address "
                                          << object
                                          << " has null class";
@@ -1471,7 +1471,7 @@
       return false;
     }
     backtrace_map_t boot_map = maybe_boot_map.value_or(backtrace_map_t{});
-    // Sanity check boot_map_.
+    // Check the validity of boot_map_.
     CHECK(boot_map.end >= boot_map.start);
 
     // Adjust the `end` of the mapping. Some other mappings may have been
diff --git a/libartbase/base/bit_struct.h b/libartbase/base/bit_struct.h
index eca8780..4cc222c 100644
--- a/libartbase/base/bit_struct.h
+++ b/libartbase/base/bit_struct.h
@@ -277,7 +277,7 @@
 #define BITSTRUCT_UINT(bit_offset, bit_width)                                  \
     BitStructUint<(bit_offset), (bit_width), StorageType>
 
-// End the definition of a bitstruct, and insert a sanity check
+// End the definition of a bitstruct, and insert a check
 // to ensure that the bitstruct did not exceed the specified size.
 //
 // See top of file for usage example.
diff --git a/libartbase/base/hiddenapi_flags.h b/libartbase/base/hiddenapi_flags.h
index 1c17385..10e58ff 100644
--- a/libartbase/base/hiddenapi_flags.h
+++ b/libartbase/base/hiddenapi_flags.h
@@ -123,7 +123,7 @@
   static_assert(kValueBitSize >= MinimumBitsToStore(helper::ToUint(Value::kMax)),
                 "Not enough bits to store all ApiList values");
 
-  // Sanity checks that all Values are covered by kValueBitMask.
+  // Checks that all Values are covered by kValueBitMask.
   static_assert(helper::MatchesBitMask(Value::kMin, kValueBitMask));
   static_assert(helper::MatchesBitMask(Value::kMax, kValueBitMask));
 
diff --git a/libartpalette/apex/palette.cc b/libartpalette/apex/palette.cc
index 041fe7a..e0697c6 100644
--- a/libartpalette/apex/palette.cc
+++ b/libartpalette/apex/palette.cc
@@ -93,7 +93,7 @@
     return reinterpret_cast<void*>(PaletteMethodNotSupported);
   }
   // TODO(oth): consider new GetMethodSignature() in the Palette API which
-  // would allow sanity checking the type signatures.
+  // would allow checking the validity of the type signatures.
   return method;
 }
 
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index baa2323..efa87ff 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -667,7 +667,7 @@
   uint32_t data_items_left = header_->data_size_;
   uint32_t used_bits = 0;
 
-  // Sanity check the size of the map list.
+  // Check the validity of the map list size.
   if (!CheckListSize(item, count, sizeof(dex::MapItem), "map size")) {
     return false;
   }
@@ -2269,7 +2269,7 @@
   size_t data_start = header_->data_off_;
   size_t data_end = data_start + header_->data_size_;
 
-  // Sanity check the offset of the section.
+  // Check the validity of the section offset.
   if (UNLIKELY((offset < data_start) || (offset > data_end))) {
     ErrorStringPrintf("Bad offset for data subsection: %zx", offset);
     return false;
diff --git a/libelffile/elf/elf_builder.h b/libelffile/elf/elf_builder.h
index 07f0d00..a76bf92 100644
--- a/libelffile/elf/elf_builder.h
+++ b/libelffile/elf/elf_builder.h
@@ -733,7 +733,7 @@
     // Buckets.  Having just one makes it linear search.
     hash.push_back(1);  // Point to first non-NULL symbol.
     // Chains.  This creates linked list of symbols.
-    hash.push_back(0);  // Dummy entry for the NULL symbol.
+    hash.push_back(0);  // Placeholder entry for the NULL symbol.
     for (int i = 1; i < count - 1; i++) {
       hash.push_back(i + 1);  // Each symbol points to the next one.
     }
diff --git a/libnativebridge/tests/NativeBridgeVersion_test.cpp b/libnativebridge/tests/NativeBridgeVersion_test.cpp
index d3f9a80..22aecad 100644
--- a/libnativebridge/tests/NativeBridgeVersion_test.cpp
+++ b/libnativebridge/tests/NativeBridgeVersion_test.cpp
@@ -24,7 +24,7 @@
     // When a bridge isn't loaded, we expect 0.
     EXPECT_EQ(NativeBridgeGetVersion(), 0U);
 
-    // After our dummy bridge has been loaded, we expect 1.
+    // After our fake bridge has been loaded, we expect 1.
     ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
     EXPECT_EQ(NativeBridgeGetVersion(), 1U);
 
diff --git a/libnativebridge/tests/PreInitializeNativeBridge_test.cpp b/libnativebridge/tests/PreInitializeNativeBridge_test.cpp
index cd5a8e2..149b05e 100644
--- a/libnativebridge/tests/PreInitializeNativeBridge_test.cpp
+++ b/libnativebridge/tests/PreInitializeNativeBridge_test.cpp
@@ -38,7 +38,7 @@
 
     // Try to create our mount namespace.
     if (unshare(CLONE_NEWNS) != -1) {
-        // Create a dummy file.
+        // Create a placeholder file.
         FILE* cpuinfo = fopen("./cpuinfo", "w");
         ASSERT_NE(nullptr, cpuinfo) << strerror(errno);
         fprintf(cpuinfo, kTestData);
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index d6ae8a2..a3ff8fc 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -1479,7 +1479,7 @@
       EXPECT_FALSE(info.GetHotMethodInfo(MethodReference(dex1, i)) != nullptr);
     }
 
-    // Sanity check that methods cannot be found with a non existing annotation.
+    // Check that methods cannot be found with a non-existing annotation.
     MethodReference ref(dex1, 0);
     ProfileSampleAnnotation not_exisiting("A");
     EXPECT_FALSE(info.GetMethodHotness(ref, not_exisiting).IsInProfile());
@@ -1541,7 +1541,7 @@
       EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(i)));
     }
 
-    // Sanity check that classes cannot be found with a non existing annotation.
+    // Check that classes cannot be found with a non-existing annotation.
     EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(0), ProfileSampleAnnotation("new_test")));
   };
 
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 4d6b41a..c22f38f 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -661,7 +661,7 @@
   // spec says these should not be reported.
   if (klass->IsArrayClass()) {
     *interface_count_ptr = 0;
-    *interfaces_ptr = nullptr;  // TODO: Should we allocate a dummy here?
+    *interfaces_ptr = nullptr;  // TODO: Should we allocate a placeholder here?
     return ERR(NONE);
   }
 
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index cc8f1fa..cca9a91 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -135,7 +135,7 @@
   // Allow easy indirection back to Thread*.
   tlsPtr_.self = this;
 
-  // Sanity check that reads from %fs point to this Thread*.
+  // Check that reads from %fs point to this Thread*.
   Thread* self_check;
   CHECK_EQ(THREAD_SELF_OFFSET, SelfOffset<PointerSize::k32>().Int32Value());
   __asm__ __volatile__("movl %%fs:(%1), %0"
@@ -144,7 +144,7 @@
       :);  // clobber
   CHECK_EQ(self_check, this);
 
-  // Sanity check other offsets.
+  // Check the other offsets.
   CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
   CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
   CHECK_EQ(THREAD_ID_OFFSET, ThinLockIdOffset<PointerSize::k32>().Int32Value());
@@ -153,7 +153,7 @@
 void Thread::CleanupCpu() {
   MutexLock mu(this, *Locks::modify_ldt_lock_);
 
-  // Sanity check that reads from %fs point to this Thread*.
+  // Check that reads from %fs point to this Thread*.
   Thread* self_check;
   __asm__ __volatile__("movl %%fs:(%1), %0"
       : "=r"(self_check)  // output
diff --git a/runtime/arch/x86_64/thread_x86_64.cc b/runtime/arch/x86_64/thread_x86_64.cc
index 5c0446f..b01a1d3 100644
--- a/runtime/arch/x86_64/thread_x86_64.cc
+++ b/runtime/arch/x86_64/thread_x86_64.cc
@@ -58,7 +58,7 @@
   // Allow easy indirection back to Thread*.
   tlsPtr_.self = this;
 
-  // Sanity check that reads from %gs point to this Thread*.
+  // Check that reads from %gs point to this Thread*.
   Thread* self_check;
   __asm__ __volatile__("movq %%gs:(%1), %0"
       : "=r"(self_check)  // output
@@ -68,7 +68,7 @@
 }
 
 void Thread::CleanupCpu() {
-  // Sanity check that reads from %gs point to this Thread*.
+  // Check that reads from %gs point to this Thread*.
   Thread* self_check;
   __asm__ __volatile__("movq %%gs:(%1), %0"
       : "=r"(self_check)  // output
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 821c75d..2a1a08d 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -57,8 +57,8 @@
 }
 
 static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
-  // The check below enumerates the cases where we expect not to be able to sanity check locks
-  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
+  // The check below enumerates the cases where we expect not to be able to check the validity of
+  // locks on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
   // TODO: tighten this check.
   if (kDebugLocking) {
     CHECK(!Locks::IsSafeToCallAbortRacy() ||
@@ -227,7 +227,7 @@
   DCHECK(self == nullptr || self == Thread::Current());
   bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
   if (kDebugLocking) {
-    // Sanity debug check that if we think it is locked we have it in our held mutexes.
+    // Debug check that if we think it is locked we have it in our held mutexes.
     if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
       if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) {
         CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this);
@@ -257,7 +257,7 @@
   DCHECK(self == nullptr || self == Thread::Current());
   bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
   if (kDebugLocking) {
-    // Sanity that if the pthread thinks we own the lock the Thread agrees.
+    // Verify that if the pthread thinks we own the lock, the Thread agrees.
     if (self != nullptr && result)  {
       CHECK_EQ(self->GetHeldMutex(level_), this);
     }
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index a7561de..ef85191 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -1768,7 +1768,7 @@
   // in the top parent.
   VerifyClassResolution("LDefinedInAC;", class_loader_d, class_loader_a);
 
-  // Sanity check that we don't find an undefined class.
+  // Check that we don't find an undefined class.
   VerifyClassResolution("LNotDefined;", class_loader_d, nullptr, /*should_find=*/ false);
 }
 
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index b7d37e2..c854dc5 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -266,7 +266,7 @@
     }
   }
   const size_t ret = combined.WriteToMemory(ptr);
-  // Sanity check.
+  // Validity check.
   if (kIsDebugBuild && ptr != nullptr) {
     size_t read_count;
     ClassSet class_set(ptr, /*make copy*/false, &read_count);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index f218c4e..d2cb939 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -510,7 +510,7 @@
     Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
     Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
     // [u4]: length of piece, in allocation units
-    // We won't know this until we're done, so save the offset and stuff in a dummy value.
+    // We won't know this until we're done, so save the offset and stuff in a placeholder value.
     pieceLenField_ = p_;
     Write4BE(&p_, 0x55555555);
     needHeader_ = false;
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 88992f1..3c6fd85 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -549,33 +549,32 @@
 
 template <typename ElfTypes>
 typename ElfTypes::Ehdr& ElfFileImpl<ElfTypes>::GetHeader() const {
-  CHECK(header_ != nullptr);  // Header has been checked in SetMap. This is a sanity check.
+  CHECK(header_ != nullptr);  // Header has been checked in SetMap.
   return *header_;
 }
 
 template <typename ElfTypes>
 uint8_t* ElfFileImpl<ElfTypes>::GetProgramHeadersStart() const {
-  CHECK(program_headers_start_ != nullptr);  // Header has been set in Setup. This is a sanity
-                                             // check.
+  CHECK(program_headers_start_ != nullptr);  // Header has been set in Setup.
   return program_headers_start_;
 }
 
 template <typename ElfTypes>
 uint8_t* ElfFileImpl<ElfTypes>::GetSectionHeadersStart() const {
   CHECK(!program_header_only_);              // Only used in "full" mode.
-  CHECK(section_headers_start_ != nullptr);  // Is checked in CheckSectionsExist. Sanity check.
+  CHECK(section_headers_start_ != nullptr);  // Is checked in CheckSectionsExist.
   return section_headers_start_;
 }
 
 template <typename ElfTypes>
 typename ElfTypes::Phdr& ElfFileImpl<ElfTypes>::GetDynamicProgramHeader() const {
-  CHECK(dynamic_program_header_ != nullptr);  // Is checked in CheckSectionsExist. Sanity check.
+  CHECK(dynamic_program_header_ != nullptr);  // Is checked in CheckSectionsExist.
   return *dynamic_program_header_;
 }
 
 template <typename ElfTypes>
 typename ElfTypes::Dyn* ElfFileImpl<ElfTypes>::GetDynamicSectionStart() const {
-  CHECK(dynamic_section_start_ != nullptr);  // Is checked in CheckSectionsExist. Sanity check.
+  CHECK(dynamic_section_start_ != nullptr);  // Is checked in CheckSectionsExist.
   return dynamic_section_start_;
 }
 
@@ -678,7 +677,7 @@
 
 template <typename ElfTypes>
 typename ElfTypes::Phdr* ElfFileImpl<ElfTypes>::GetProgramHeader(Elf_Word i) const {
-  CHECK_LT(i, GetProgramHeaderNum()) << file_path_;  // Sanity check for caller.
+  CHECK_LT(i, GetProgramHeaderNum()) << file_path_;  // Validity check for caller.
   uint8_t* program_header = GetProgramHeadersStart() + (i * GetHeader().e_phentsize);
   CHECK_LT(program_header, End());
   return reinterpret_cast<Elf_Phdr*>(program_header);