Replace NULL with nullptr

Also fix some lines that were too long, and a few other minor
details.
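
For background, a minimal sketch (illustrative only, not code from this
patch) of why nullptr is preferred: NULL is an integral constant, so it
can steer C++ overload resolution the wrong way, whereas nullptr has
its own type (std::nullptr_t) and always behaves as a pointer:

    #include <cstddef>  // for NULL

    void Log(int) {}          // hypothetical overloads, for illustration
    void Log(const char*) {}

    int main() {
      Log(NULL);     // picks Log(int) when NULL is 0; ambiguous if NULL is 0L
      Log(nullptr);  // always picks Log(const char*)
      return 0;
    }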

Change-Id: I6efba5fb6e03eb5d0a300fddb2a75bf8e2f175cb
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 05cb8b4..5a9e04f 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -263,7 +263,7 @@
   mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
   CHECK(klass != nullptr) << "Class not found " << class_name;
   mirror::ArtMethod* method = klass->FindVirtualMethod(method_name, signature);
-  CHECK(method != NULL) << "Virtual method not found: "
+  CHECK(method != nullptr) << "Virtual method not found: "
       << class_name << "." << method_name << signature;
   CompileMethod(method);
 }
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 4f7a970..d1acada 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -108,7 +108,7 @@
     }
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h
index 6e25db6..83dfc28 100644
--- a/compiler/dex/dataflow_iterator-inl.h
+++ b/compiler/dex/dataflow_iterator-inl.h
@@ -23,7 +23,7 @@
 
 // Single forward pass over the nodes.
 inline BasicBlock* DataflowIterator::ForwardSingleNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we not yet at the end?
   if (idx_ < end_idx_) {
@@ -38,7 +38,7 @@
 
 // Repeat full forward passes over all nodes until no change occurs during a complete pass.
 inline BasicBlock* DataflowIterator::ForwardRepeatNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we at the end and have we changed something?
   if ((idx_ >= end_idx_) && changed_ == true) {
@@ -61,7 +61,7 @@
 
 // Single reverse pass over the nodes.
 inline BasicBlock* DataflowIterator::ReverseSingleNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we not yet at the end?
   if (idx_ >= 0) {
@@ -76,7 +76,7 @@
 
 // Repeat full backwards passes over all nodes until no change occurs during a complete pass.
 inline BasicBlock* DataflowIterator::ReverseRepeatNext() {
-  BasicBlock* res = NULL;
+  BasicBlock* res = nullptr;
 
   // Are we done, and did we change something during the last iteration?
   if ((idx_ < 0) && changed_) {
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 2a06cec..097c2a4 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -72,7 +72,7 @@
           : mir_graph_(mir_graph),
             start_idx_(start_idx),
             end_idx_(end_idx),
-            block_id_list_(NULL),
+            block_id_list_(nullptr),
             idx_(0),
             repeats_(0),
             changed_(false) {}
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ef94d8b..d1ddfda 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -301,7 +301,7 @@
                               art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
   UNUSED(invoke_type);
   if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
-    art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
+    art::DexCompilationUnit unit(nullptr, class_loader, art::Runtime::Current()->GetClassLinker(),
                                  dex_file, code_item, class_def_idx, method_idx, access_flags,
                                  driver.GetVerifiedMethod(&dex_file, method_idx));
     art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level);
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 3d7a640..9099e8a 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -968,7 +968,7 @@
    * edges until we reach an explicit branch or return.
    */
   BasicBlock* ending_bb = bb;
-  if (ending_bb->last_mir_insn != NULL) {
+  if (ending_bb->last_mir_insn != nullptr) {
     uint32_t ending_flags = kAnalysisAttributes[ending_bb->last_mir_insn->dalvikInsn.opcode];
     while ((ending_flags & kAnBranch) == 0) {
       ending_bb = GetBasicBlock(ending_bb->fall_through);
@@ -998,7 +998,7 @@
   bool done = false;
   while (!done) {
     tbb->visited = true;
-    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
+    for (MIR* mir = tbb->first_mir_insn; mir != nullptr; mir = mir->next) {
       if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
         // Skip any MIR pseudo-op.
         continue;
@@ -1195,7 +1195,7 @@
 
   ClearAllVisitedFlags();
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     AnalyzeBlock(bb, &stats);
   }
 
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index eaaf540..b4aec98 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -989,7 +989,7 @@
   MIR* mir;
   ArenaBitVector *use_v, *def_v, *live_in_v;
 
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == nullptr) return false;
 
   use_v = bb->data_flow_info->use_v =
       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
@@ -998,7 +998,7 @@
   live_in_v = bb->data_flow_info->live_in_v =
       new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
 
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
     MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
 
@@ -1188,7 +1188,7 @@
 
 /* Entry function to convert a block into SSA representation */
 bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == nullptr) return false;
 
   /*
    * Pruned SSA form: Insert phi nodes for each dalvik register marked in phi_node_blocks
@@ -1211,7 +1211,7 @@
     }
   }
 
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     mir->ssa_rep =
         static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
                                                               kArenaAllocDFInfo));
@@ -1402,8 +1402,8 @@
     return;
   }
   uint32_t weight = GetUseCountWeight(bb);
-  for (MIR* mir = bb->first_mir_insn; (mir != NULL); mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
+  for (MIR* mir = bb->first_mir_insn; (mir != nullptr); mir = mir->next) {
+    if (mir->ssa_rep == nullptr) {
       continue;
     }
     for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
@@ -1448,7 +1448,7 @@
 void MIRGraph::VerifyDataflow() {
     /* Verify if all blocks are connected as claimed */
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     VerifyPredInfo(bb);
   }
 }
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index 11773e7..e4570fd 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -124,7 +124,7 @@
   uint16_t declaring_field_idx_;
   // The type index of the class declaring the field, 0 if unresolved.
   uint16_t declaring_class_idx_;
-  // The dex file that defines the class containing the field and the field, nullptr if unresolved.
+  // The dex file that defines the class containing the field and the field, null if unresolved.
   const DexFile* declaring_dex_file_;
 };
 
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 7d0729f..b5c42f1 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -81,15 +81,15 @@
 };
 
 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
-    : reg_location_(NULL),
+    : reg_location_(nullptr),
       block_id_map_(std::less<unsigned int>(), arena->Adapter()),
       cu_(cu),
       ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
       ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
-      vreg_to_ssa_map_(NULL),
-      ssa_last_defs_(NULL),
-      is_constant_v_(NULL),
-      constant_values_(NULL),
+      vreg_to_ssa_map_(nullptr),
+      ssa_last_defs_(nullptr),
+      is_constant_v_(nullptr),
+      constant_values_(nullptr),
       use_counts_(arena->Adapter()),
       raw_use_counts_(arena->Adapter()),
       num_reachable_blocks_(0),
@@ -106,24 +106,24 @@
       topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
       topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
       max_nested_loops_(0u),
-      i_dom_list_(NULL),
+      i_dom_list_(nullptr),
       temp_scoped_alloc_(),
       block_list_(arena->Adapter(kArenaAllocBBList)),
-      try_block_addr_(NULL),
-      entry_block_(NULL),
-      exit_block_(NULL),
-      current_code_item_(NULL),
+      try_block_addr_(nullptr),
+      entry_block_(nullptr),
+      exit_block_(nullptr),
+      current_code_item_(nullptr),
       m_units_(arena->Adapter()),
       method_stack_(arena->Adapter()),
       current_method_(kInvalidEntry),
       current_offset_(kInvalidEntry),
       def_count_(0),
-      opcode_count_(NULL),
+      opcode_count_(nullptr),
       num_ssa_regs_(0),
       extended_basic_blocks_(arena->Adapter()),
       method_sreg_(0),
       attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
-      checkstats_(NULL),
+      checkstats_(nullptr),
       arena_(arena),
       backward_branches_(0),
       forward_branches_(0),
@@ -185,13 +185,13 @@
                                  BasicBlock* orig_block, BasicBlock** immed_pred_block_p) {
   DCHECK_GT(code_offset, orig_block->start_offset);
   MIR* insn = orig_block->first_mir_insn;
-  MIR* prev = NULL;  // Will be set to instruction before split.
+  MIR* prev = nullptr;  // Will be set to instruction before split.
   while (insn) {
     if (insn->offset == code_offset) break;
     prev = insn;
     insn = insn->next;
   }
-  if (insn == NULL) {
+  if (insn == nullptr) {
     LOG(FATAL) << "Break split failed";
   }
   // Now insn is at the instruction where we want to split, namely
@@ -530,7 +530,7 @@
     size = switch_data[1];
     first_key = switch_data[2] | (switch_data[3] << 16);
     target_table = reinterpret_cast<const int*>(&switch_data[4]);
-    keyTable = NULL;        // Make the compiler happy.
+    keyTable = nullptr;        // Make the compiler happy.
   /*
    * Sparse switch data format:
    *  ushort ident = 0x0200   magic value
@@ -718,8 +718,8 @@
 
   // If this is the first method, set up default entry and exit blocks.
   if (current_method_ == 0) {
-    DCHECK(entry_block_ == NULL);
-    DCHECK(exit_block_ == NULL);
+    DCHECK(entry_block_ == nullptr);
+    DCHECK(exit_block_ == nullptr);
     DCHECK_EQ(GetNumBlocks(), 0U);
     // Use id 0 to represent a null block.
     BasicBlock* null_block = CreateNewBB(kNullBlock);
@@ -755,7 +755,7 @@
     insn->m_unit_index = current_method_;
     int width = ParseInsn(code_ptr, &insn->dalvikInsn);
     Instruction::Code opcode = insn->dalvikInsn.opcode;
-    if (opcode_count_ != NULL) {
+    if (opcode_count_ != nullptr) {
       opcode_count_[static_cast<int>(opcode)]++;
     }
 
@@ -879,7 +879,7 @@
 }
 
 void MIRGraph::ShowOpcodeStats() {
-  DCHECK(opcode_count_ != NULL);
+  DCHECK(opcode_count_ != nullptr);
   LOG(INFO) << "Opcode Count";
   for (int i = 0; i < kNumPackedOpcodes; i++) {
     if (opcode_count_[i] != 0) {
@@ -947,7 +947,7 @@
     return;
   }
   file = fopen(fpath.c_str(), "w");
-  if (file == NULL) {
+  if (file == nullptr) {
     PLOG(ERROR) << "Could not open " << fpath << " for DumpCFG.";
     return;
   }
@@ -961,7 +961,7 @@
   for (idx = 0; idx < num_blocks; idx++) {
     int block_idx = all_blocks ? idx : dfs_order_[idx];
     BasicBlock* bb = GetBasicBlock(block_idx);
-    if (bb == NULL) continue;
+    if (bb == nullptr) continue;
     if (bb->block_type == kDead) continue;
     if (bb->hidden) continue;
     if (bb->block_type == kEntryBlock) {
@@ -1501,8 +1501,8 @@
     }
     nop = true;
   }
-  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
+  int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
 
   if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
     // Note that this does not check the MIR's opcode in all cases. In cases where it
@@ -1530,7 +1530,7 @@
     for (int i = 0; i < uses; i++) {
       str.append(" ");
       str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
-      if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
+      if (!show_singles && (reg_location_ != nullptr) && reg_location_[i].wide) {
         // For the listing, skip the high sreg.
         i++;
       }
@@ -1623,7 +1623,7 @@
 
 // Similar to GetSSAName, but if ssa name represents an immediate show that as well.
 std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
-  if (reg_location_ == NULL) {
+  if (reg_location_ == nullptr) {
     // Pre-SSA - just use the standard name.
     return GetSSAName(ssa_reg);
   }
@@ -1716,7 +1716,7 @@
   CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
                                                         kArenaAllocMisc));
   MIR* move_result_mir = FindMoveResult(bb, mir);
-  if (move_result_mir == NULL) {
+  if (move_result_mir == nullptr) {
     info->result.location = kLocInvalid;
   } else {
     info->result = GetRawDest(move_result_mir);
@@ -2294,7 +2294,7 @@
 
 void BasicBlock::ResetOptimizationFlags(uint16_t reset_flags) {
   // Reset flags for all MIRs in bb.
-  for (MIR* mir = first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = first_mir_insn; mir != nullptr; mir = mir->next) {
     mir->optimization_flags &= (~reset_flags);
   }
 }
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d7e4dd9..0db54bf 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -602,7 +602,7 @@
 
   BasicBlock* GetBasicBlock(unsigned int block_id) const {
     DCHECK_LT(block_id, block_list_.size());  // NOTE: NullBasicBlockId is 0.
-    return (block_id == NullBasicBlockId) ? NULL : block_list_[block_id];
+    return (block_id == NullBasicBlockId) ? nullptr : block_list_[block_id];
   }
 
   size_t GetBasicBlockListCount() const {
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index 3706012..946c74b 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -88,7 +88,7 @@
   // The type index of the class declaring the method, 0 if unresolved.
   uint16_t declaring_class_idx_;
   // The dex file that defines the class containing the method and the method,
-  // nullptr if unresolved.
+  // null if unresolved.
   const DexFile* declaring_dex_file_;
 };
 
@@ -223,7 +223,7 @@
   uintptr_t direct_code_;
   uintptr_t direct_method_;
   // Before Resolve(), target_dex_file_ and target_method_idx_ hold the verification-based
-  // devirtualized invoke target if available, nullptr and 0u otherwise.
+  // devirtualized invoke target if available, null and 0u otherwise.
   // After Resolve() they hold the actual target method that will be called; it will be either
   // a devirtualized target method or the compilation unit's dex file and MethodIndex().
   const DexFile* target_dex_file_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 546e67a..467c14e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -55,7 +55,7 @@
 void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
   MIR* mir;
 
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     // Skip pass if BB has MIR without SSA representation.
     if (mir->ssa_rep == nullptr) {
        return;
@@ -116,11 +116,11 @@
 /* Advance to next strictly dominated MIR node in an extended basic block */
 MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
   BasicBlock* bb = *p_bb;
-  if (mir != NULL) {
+  if (mir != nullptr) {
     mir = mir->next;
-    while (mir == NULL) {
+    while (mir == nullptr) {
       bb = GetBasicBlock(bb->fall_through);
-      if ((bb == NULL) || Predecessors(bb) != 1) {
+      if ((bb == nullptr) || Predecessors(bb) != 1) {
         // mir is null and we cannot proceed further.
         break;
       } else {
@@ -134,7 +134,7 @@
 
 /*
  * To be used at an invoke mir.  If the logically next mir node represents
- * a move-result, return it.  Else, return NULL.  If a move-result exists,
+ * a move-result, return it.  Else, return nullptr.  If a move-result exists,
  * it is required to immediately follow the invoke with no intervening
  * opcodes or incoming arcs.  However, if the result of the invoke is not
  * used, a move-result may not be present.
@@ -142,7 +142,7 @@
 MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
   BasicBlock* tbb = bb;
   mir = AdvanceMIR(&tbb, mir);
-  while (mir != NULL) {
+  while (mir != nullptr) {
     if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
@@ -152,7 +152,7 @@
     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       mir = AdvanceMIR(&tbb, mir);
     } else {
-      mir = NULL;
+      mir = nullptr;
     }
   }
   return mir;
@@ -160,29 +160,29 @@
 
 BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
   if (bb->block_type == kDead) {
-    return NULL;
+    return nullptr;
   }
   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
       || (bb->block_type == kExitBlock));
   BasicBlock* bb_taken = GetBasicBlock(bb->taken);
   BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
-  if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
+  if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
       ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
     // Follow simple unconditional branches.
     bb = bb_taken;
   } else {
     // Follow simple fallthrough
-    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
+    bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
   }
-  if (bb == NULL || (Predecessors(bb) != 1)) {
-    return NULL;
+  if (bb == nullptr || (Predecessors(bb) != 1)) {
+    return nullptr;
   }
   DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
   return bb;
 }
 
 static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
       for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
         if (mir->ssa_rep->uses[i] == ssa_name) {
@@ -191,11 +191,11 @@
       }
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 static SelectInstructionKind SelectKind(MIR* mir) {
-  // Work with the case when mir is nullptr.
+  // Handle the case when mir is null.
   if (mir == nullptr) {
     return kSelectNone;
   }
@@ -256,7 +256,8 @@
   }
 
   // Calculate remaining ME temps available.
-  size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+  size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
+      reserved_temps_for_backend_;
 
   if (num_non_special_compiler_temps_ >= remaining_me_temps) {
     return 0;
@@ -347,7 +348,8 @@
     size_t available_temps = GetNumAvailableVRTemps();
     if (available_temps <= 0 || (available_temps <= 1 && wide)) {
       if (verbose) {
-        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
+            << " are available.";
       }
       return nullptr;
     }
@@ -365,8 +367,8 @@
   compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
 
   if (verbose) {
-    LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
-        << " and s" << compiler_temp->s_reg_low << " has been created.";
+    LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
+        << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
   }
 
   if (wide) {
@@ -478,8 +480,8 @@
     local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
                                                                  allocator.get()));
   }
-  while (bb != NULL) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  while (bb != nullptr) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       // TUNING: use the returned value number for CSE.
       if (use_lvn) {
         local_valnum->GetValueNumber(mir);
@@ -538,7 +540,7 @@
             // Bitcode doesn't allow this optimization.
             break;
           }
-          if (mir->next != NULL) {
+          if (mir->next != nullptr) {
             MIR* mir_next = mir->next;
             // Make sure result of cmp is used by next insn and nowhere else
             if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
@@ -594,12 +596,12 @@
            cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
           IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
         BasicBlock* ft = GetBasicBlock(bb->fall_through);
-        DCHECK(ft != NULL);
+        DCHECK(ft != nullptr);
         BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
         BasicBlock* ft_tk = GetBasicBlock(ft->taken);
 
         BasicBlock* tk = GetBasicBlock(bb->taken);
-        DCHECK(tk != NULL);
+        DCHECK(tk != nullptr);
         BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
         BasicBlock* tk_tk = GetBasicBlock(tk->taken);
 
@@ -608,7 +610,7 @@
          * transfers to the rejoin block and the fall_through edge goes to a block that
          * unconditionally falls through to the rejoin block.
          */
-        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
+        if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
             (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
           /*
            * Okay - we have the basic diamond shape.
@@ -628,7 +630,7 @@
             MIR* if_false = ft->first_mir_insn;
             // It's possible that the target of the select isn't used - skip those (rare) cases.
             MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
-            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
+            if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
               /*
                * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
                * Phi node in the merge block and delete it (while using the SSA name
@@ -712,7 +714,8 @@
         }
       }
     }
-    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
+    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
+        nullptr;
   }
   if (use_lvn && UNLIKELY(!global_valnum->Good())) {
     LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -723,9 +726,9 @@
 
 /* Collect stats on number of checks removed */
 void MIRGraph::CountChecks(class BasicBlock* bb) {
-  if (bb->data_flow_info != NULL) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-      if (mir->ssa_rep == NULL) {
+  if (bb->data_flow_info != nullptr) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      if (mir->ssa_rep == nullptr) {
         continue;
       }
       uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -926,7 +929,7 @@
   // reset MIR_MARK
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       mir->optimization_flags &= ~MIR_MARK;
     }
   }
@@ -1001,7 +1004,7 @@
   // no intervening uses.
 
   // Walk through the instructions in the block, updating as necessary.
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
 
     if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
@@ -1112,7 +1115,7 @@
   // converge MIR_MARK with MIR_IGNORE_NULL_CHECK
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
-    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
       static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
       uint16_t mirMarkAdjustedToIgnoreNullCheck =
@@ -1503,7 +1506,7 @@
   if (bb->block_type != kDalvikByteCode) {
     return;
   }
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       continue;
     }
@@ -1534,7 +1537,8 @@
             ->GenInline(this, bb, mir, target.dex_method_index)) {
       if (cu_->verbose || cu_->print_pass) {
         LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
-            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index, *target.dex_file)
+            << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
+                                                            *target.dex_file)
             << "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
             << "\" @0x" << std::hex << mir->offset;
       }
@@ -1558,7 +1562,7 @@
       static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
   checkstats_ = stats;
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     CountChecks(bb);
   }
   if (stats->null_checks > 0) {
@@ -1591,7 +1595,7 @@
   bool terminated_by_return = false;
   bool do_local_value_numbering = false;
   // Visit blocks strictly dominated by this head.
-  while (bb != NULL) {
+  while (bb != nullptr) {
     bb->visited = true;
     terminated_by_return |= bb->terminated_by_return;
     do_local_value_numbering |= bb->use_lvn;
@@ -1600,7 +1604,7 @@
   if (terminated_by_return || do_local_value_numbering) {
     // Do lvn for all blocks in this extended set.
     bb = start_bb;
-    while (bb != NULL) {
+    while (bb != nullptr) {
       bb->use_lvn = do_local_value_numbering;
       bb->dominates_return = terminated_by_return;
       bb = NextDominatedBlock(bb);
@@ -1623,7 +1627,7 @@
   if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
     ClearAllVisitedFlags();
     PreOrderDfsIterator iter2(this);
-    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+    for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
       BuildExtendedBBList(bb);
     }
     // Perform extended basic block optimizations.
@@ -1632,7 +1636,7 @@
     }
   } else {
     PreOrderDfsIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
       BasicBlockOpt(bb);
     }
   }
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 671bcec..8762b53 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -68,7 +68,7 @@
    * @return whether the pass was applied.
    */
   virtual bool RunPass(const char* pass_name) {
-    // Paranoid: c_unit cannot be nullptr and we need a pass name.
+    // Paranoid: c_unit cannot be null and we need a pass name.
     DCHECK(pass_name != nullptr);
     DCHECK_NE(pass_name[0], 0);
 
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
index 94eef22..cbe4a02 100644
--- a/compiler/dex/pass_driver_me.h
+++ b/compiler/dex/pass_driver_me.h
@@ -88,7 +88,7 @@
   }
 
   bool RunPass(const Pass* pass, bool time_split) OVERRIDE {
-    // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name
+    // Paranoid: c_unit and pass cannot be null, and the pass should have a name.
     DCHECK(pass != nullptr);
     DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
     CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
@@ -211,8 +211,9 @@
    * @param settings_to_fill Filled with the mapping from option name to the new
    * configuration.
    */
-  static void FillOverriddenPassSettings(const PassManagerOptions* options, const char* pass_name,
-                                         SafeMap<const std::string, const OptionContent>& settings_to_fill) {
+  static void FillOverriddenPassSettings(
+      const PassManagerOptions* options, const char* pass_name,
+      SafeMap<const std::string, const OptionContent>& settings_to_fill) {
     const std::string& settings = options->GetOverriddenPassOptions();
     const size_t settings_len = settings.size();
 
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index c5ac4c1..df4a9f2 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1055,7 +1055,7 @@
 // new_lir replaces orig_lir in the pcrel_fixup list.
 void ArmMir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     prev_lir->u.a.pcrel_next = new_lir;
@@ -1066,7 +1066,7 @@
 // new_lir is inserted before orig_lir in the pcrel_fixup list.
 void ArmMir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -1084,7 +1084,7 @@
 
 uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
   uint8_t* const write_buffer = write_pos;
-  for (; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = (write_pos - write_buffer);
     if (!lir->flags.is_nop) {
       int opcode = lir->opcode;
@@ -1258,8 +1258,8 @@
     generation ^= 1;
     // Note: nodes requiring possible fixup linked in ascending order.
     lir = first_fixup_;
-    prev_lir = NULL;
-    while (lir != NULL) {
+    prev_lir = nullptr;
+    while (lir != nullptr) {
       /*
        * NOTE: the lir being considered here will be encoded following the switch (so long as
        * we're not in a retry situation).  However, any new non-pc_rel instructions inserted
@@ -1506,7 +1506,7 @@
         case kFixupAdr: {
           const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[2]);
           LIR* target = lir->target;
-          int32_t target_disp = (tab_rec != NULL) ?  tab_rec->offset + offset_adjustment
+          int32_t target_disp = (tab_rec != nullptr) ?  tab_rec->offset + offset_adjustment
               : target->offset + ((target->flags.generation == lir->flags.generation) ? 0 :
               offset_adjustment);
           int32_t disp = target_disp - ((lir->offset + 4) & ~3);
@@ -1642,7 +1642,7 @@
 uint32_t ArmMir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
   LIR* end_lir = tail_lir->next;
 
-  LIR* last_fixup = NULL;
+  LIR* last_fixup = nullptr;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
     if (!lir->flags.is_nop) {
       if (lir->flags.fixup != kFixupNone) {
@@ -1658,8 +1658,8 @@
         }
         // Link into the fixup chain.
         lir->flags.use_def_invalid = true;
-        lir->u.a.pcrel_next = NULL;
-        if (first_fixup_ == NULL) {
+        lir->u.a.pcrel_next = nullptr;
+        if (first_fixup_ == nullptr) {
           first_fixup_ = lir;
         } else {
           last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3d18af6..6ba4016 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -124,7 +124,7 @@
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size-1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
 
   // Load the displacement from the switch table
   RegStorage disp_reg = AllocTemp();
@@ -156,7 +156,7 @@
     } else {
       // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
       if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
       }
     }
     Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
@@ -165,12 +165,12 @@
     MarkPossibleNullPointerException(opt_flags);
     // Zero out the read barrier bits.
     OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
-    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
     // r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
     OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
     NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
         mirror::Object::MonitorOffset().Int32Value() >> 2);
-    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
+    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
 
 
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
@@ -238,7 +238,7 @@
     } else {
       // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
       if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
+        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
       }
     }
     if (!kUseReadBarrier) {
@@ -252,16 +252,16 @@
     OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
     // Zero out all but the read barrier bits.
     OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
-    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
+    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
     GenMemBarrier(kAnyStore);
     LIR* unlock_success_branch;
     if (!kUseReadBarrier) {
       Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
-      unlock_success_branch = OpUnconditionalBranch(NULL);
+      unlock_success_branch = OpUnconditionalBranch(nullptr);
     } else {
       NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
               mirror::Object::MonitorOffset().Int32Value() >> 2);
-      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
     }
     LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
     slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 62903af..8d20f1b 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -138,10 +138,10 @@
   RegStorage t_reg = AllocTemp();
   LoadConstant(t_reg, -1);
   OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
-  LIR* branch1 = OpCondBranch(kCondLt, NULL);
-  LIR* branch2 = OpCondBranch(kCondGt, NULL);
+  LIR* branch1 = OpCondBranch(kCondLt, nullptr);
+  LIR* branch2 = OpCondBranch(kCondGt, nullptr);
   OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
-  LIR* branch3 = OpCondBranch(kCondEq, NULL);
+  LIR* branch3 = OpCondBranch(kCondEq, nullptr);
 
   LIR* it = OpIT(kCondHi, "E");
   NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
@@ -389,7 +389,7 @@
    * generate the long form in an attempt to avoid an extra assembly pass.
    * TODO: consider interspersing slowpaths in code following unconditional branches.
    */
-  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
+  bool skip = ((target != nullptr) && (target->opcode == kPseudoThrowTarget));
   skip &= ((mir_graph_->GetNumDalvikInsns() - current_dalvik_offset_) > 64);
   if (!skip && reg.Low8() && (check_value == 0)) {
     if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
@@ -1159,12 +1159,12 @@
 LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
 #ifdef ARM_R4_SUSPEND_FLAG
   NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
-  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
+  return OpCondBranch((target == nullptr) ? kCondEq : kCondNe, target);
 #else
   RegStorage t_reg = AllocTemp();
   LoadBaseDisp(rs_rARM_SELF, Thread::ThreadFlagsOffset<4>().Int32Value(),
     t_reg, kUnsignedHalf, kNotVolatile);
-  LIR* cmp_branch = OpCmpImmBranch((target == NULL) ? kCondNe : kCondEq, t_reg,
+  LIR* cmp_branch = OpCmpImmBranch((target == nullptr) ? kCondNe : kCondEq, t_reg,
     0, target);
   FreeTemp(t_reg);
   return cmp_branch;
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 25ea694..2ef92f8 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -90,7 +90,7 @@
     }
   }
   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWordData(&literal_list_, value);
   }
   ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -411,7 +411,7 @@
     return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
   } else {
     LOG(FATAL) << "Unexpected encoding operand count";
-    return NULL;
+    return nullptr;
   }
 }
 
@@ -695,7 +695,7 @@
 }
 
 LIR* ArmMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
-  LIR* res = NULL;
+  LIR* res = nullptr;
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   if (r_dest.IsFloat()) {
@@ -721,10 +721,10 @@
       LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
     }
   }
-  if (res == NULL) {
+  if (res == nullptr) {
     // No short form - load from the literal pool.
     LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-    if (data_target == NULL) {
+    if (data_target == nullptr) {
       data_target = AddWideData(&literal_list_, val_lo, val_hi);
     }
     ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
@@ -814,7 +814,7 @@
 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                   int scale, OpSize size) {
   bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
-  LIR* store = NULL;
+  LIR* store = nullptr;
   ArmOpcode opcode = kThumbBkpt;
   bool thumb_form = (all_low_regs && (scale == 0));
   RegStorage reg_ptr;
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 2f1ae66..b78fb80 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -663,7 +663,7 @@
 // new_lir replaces orig_lir in the pcrel_fixup list.
 void Arm64Mir2Lir::ReplaceFixup(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir->u.a.pcrel_next;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     prev_lir->u.a.pcrel_next = new_lir;
@@ -674,7 +674,7 @@
 // new_lir is inserted before orig_lir in the pcrel_fixup list.
 void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir) {
   new_lir->u.a.pcrel_next = orig_lir;
-  if (UNLIKELY(prev_lir == NULL)) {
+  if (UNLIKELY(prev_lir == nullptr)) {
     first_fixup_ = new_lir;
   } else {
     DCHECK(prev_lir->u.a.pcrel_next == orig_lir);
@@ -889,8 +889,8 @@
     generation ^= 1;
     // Note: nodes requiring possible fixup linked in ascending order.
     lir = first_fixup_;
-    prev_lir = NULL;
-    while (lir != NULL) {
+    prev_lir = nullptr;
+    while (lir != nullptr) {
       // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
       // the time of insertion.  Note that inserted instructions don't need use/def flags, but do
       // need size and pc-rel status properly updated.
@@ -1037,7 +1037,7 @@
               // Check that the instruction preceding the multiply-accumulate is a load or store.
               if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
                 // insert a NOP between the load/store and the multiply-accumulate.
-                LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, NULL);
+                LIR* new_lir = RawLIR(lir->dalvik_offset, kA64Nop0, 0, 0, 0, 0, 0, nullptr);
                 new_lir->offset = lir->offset;
                 new_lir->flags.fixup = kFixupNone;
                 new_lir->flags.size = EncodingMap[kA64Nop0].size;
@@ -1108,7 +1108,7 @@
 uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t offset) {
   LIR* end_lir = tail_lir->next;
 
-  LIR* last_fixup = NULL;
+  LIR* last_fixup = nullptr;
   for (LIR* lir = head_lir; lir != end_lir; lir = NEXT_LIR(lir)) {
     A64Opcode opcode = UNWIDE(lir->opcode);
     if (!lir->flags.is_nop) {
@@ -1123,8 +1123,8 @@
         }
         // Link into the fixup chain.
         lir->flags.use_def_invalid = true;
-        lir->u.a.pcrel_next = NULL;
-        if (first_fixup_ == NULL) {
+        lir->u.a.pcrel_next = nullptr;
+        if (first_fixup_ == nullptr) {
           first_fixup_ = lir;
         } else {
           last_fixup->u.a.pcrel_next = lir;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 4abbd77..9a7c2ad 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -127,7 +127,7 @@
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, key_reg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
 
   // Load the displacement from the switch table
   RegStorage disp_reg = AllocTemp();
@@ -167,7 +167,7 @@
   } else {
     // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
     }
   }
   Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -176,12 +176,12 @@
   MarkPossibleNullPointerException(opt_flags);
   // Zero out the read barrier bits.
   OpRegRegImm(kOpAnd, rs_w2, rs_w3, LockWord::kReadBarrierStateMaskShiftedToggled);
-  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, NULL);
+  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
   // w3 is zero except for the rb bits here. Copy the read barrier bits into w1.
   OpRegRegReg(kOpOr, rs_w1, rs_w1, rs_w3);
   OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
   NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
-  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
+  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr);
 
   LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
   not_unlocked_branch->target = slow_path_target;
@@ -220,7 +220,7 @@
   } else {
     // If the null-check fails it's handled by the slow-path to reduce exception-related meta-data.
     if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
-      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
+      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, nullptr);
     }
   }
   Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
@@ -235,16 +235,16 @@
   OpRegRegImm(kOpAnd, rs_w3, rs_w2, LockWord::kReadBarrierStateMaskShiftedToggled);
   // Zero out all but the read barrier bits.
   OpRegRegImm(kOpAnd, rs_w2, rs_w2, LockWord::kReadBarrierStateMaskShifted);
-  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, NULL);
+  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w3, rs_w1, nullptr);
   GenMemBarrier(kAnyStore);
   LIR* unlock_success_branch;
   if (!kUseReadBarrier) {
     Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
-    unlock_success_branch = OpUnconditionalBranch(NULL);
+    unlock_success_branch = OpUnconditionalBranch(nullptr);
   } else {
     OpRegRegImm(kOpAdd, rs_x3, rs_x0, mirror::Object::MonitorOffset().Int32Value());
     NewLIR3(kA64Stxr3wrX, rw1, rw2, rx3);
-    unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, NULL);
+    unlock_success_branch = OpCmpImmBranch(kCondEq, rs_w1, 0, nullptr);
   }
   LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
   slow_unlock_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index b7dbd0a..9340d01 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -803,7 +803,7 @@
   NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
   OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
-  LIR* early_exit = OpCondBranch(kCondNe, NULL);
+  LIR* early_exit = OpCondBranch(kCondNe, nullptr);
   NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
   NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
   DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index e9ad8ba..483231f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -121,7 +121,7 @@
   }
 
   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     // Wide, as we need 8B alignment.
     data_target = AddWideData(&literal_list_, value, 0);
   }
@@ -148,7 +148,7 @@
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWideData(&literal_list_, val_lo, val_hi);
   }
 
@@ -525,7 +525,7 @@
   int32_t val_lo = Low32Bits(value);
   int32_t val_hi = High32Bits(value);
   LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-  if (data_target == NULL) {
+  if (data_target == nullptr) {
     data_target = AddWideData(&literal_list_, val_lo, val_hi);
   }
 
@@ -624,7 +624,7 @@
   }
 
   LOG(FATAL) << "Unexpected encoding operand count";
-  return NULL;
+  return nullptr;
 }
 
 LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
@@ -658,7 +658,7 @@
   }
 
   LOG(FATAL) << "Unexpected encoding operand count";
-  return NULL;
+  return nullptr;
 }
 
 LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
@@ -1190,7 +1190,7 @@
  */
 LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                     OpSize size) {
-  LIR* load = NULL;
+  LIR* load = nullptr;
   A64Opcode opcode = kA64Brk1d;
   A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
@@ -1286,7 +1286,7 @@
 
 LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                      OpSize size) {
-  LIR* store = NULL;
+  LIR* store = nullptr;
   A64Opcode opcode = kA64Brk1d;
   A64Opcode alt_opcode = kA64Brk1d;
   int scale = 0;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9f4a318..fb68335 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1080,7 +1080,7 @@
   reginfo_map_.reserve(RegStorage::kMaxRegs);
   pointer_storage_.reserve(128);
   slow_paths_.reserve(32);
-  // Reserve pointer id 0 for nullptr.
+  // Reserve pointer id 0 for null.
   size_t null_idx = WrapPointer<void>(nullptr);
   DCHECK_EQ(null_idx, 0U);
 }
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index ca31dbf..f5e6c09 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -392,7 +392,7 @@
 
 DexFileMethodInliner::DexFileMethodInliner()
     : lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
-      dex_file_(NULL) {
+      dex_file_(nullptr) {
   static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
   static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
                 "bad arraysize for kClassCacheNames");
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1a72cd7..de5e041 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -87,7 +87,7 @@
     const RegStorage r_result_;
   };
 
-  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, NULL);
+  LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
   LIR* cont = NewLIR0(kPseudoTargetLabel);
 
   AddSlowPath(new (arena_) CallHelperImmMethodSlowPath(this, branch, cont, trampoline, imm,
@@ -113,10 +113,10 @@
     int32_t offset_of_field = ObjArray::OffsetOfElement(field_info.StorageIndex()).Int32Value();
     LoadRefDisp(r_base, offset_of_field, r_base, kNotVolatile);
   }
-  // r_base now points at static storage (Class*) or nullptr if the type is not yet resolved.
+  // r_base now points at static storage (Class*) or null if the type is not yet resolved.
   LIR* unresolved_branch = nullptr;
   if (!field_info.IsClassInDexCache() && (opt_flags & MIR_CLASS_IS_IN_DEX_CACHE) == 0) {
-    // Check if r_base is nullptr.
+    // Check if r_base is null.
     unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, nullptr);
   }
   LIR* uninit_branch = nullptr;
@@ -136,8 +136,8 @@
     class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
      public:
       // There are up to two branches to the static field slow path, the "unresolved" when the type
-      // entry in the dex cache is nullptr, and the "uninit" when the class is not yet initialized.
-      // At least one will be non-nullptr here, otherwise we wouldn't generate the slow path.
+      // entry in the dex cache is null, and the "uninit" when the class is not yet initialized.
+      // At least one will be non-null here, otherwise we wouldn't generate the slow path.
       StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
                           RegStorage r_base_in, RegStorage r_method_in)
           : LIRSlowPath(m2l, unresolved != nullptr ? unresolved : uninit, cont),
@@ -165,7 +165,7 @@
       }
 
      private:
-      // Second branch to the slow path, or nullptr if there's only one branch.
+      // Second branch to the slow path, or null if there's only one branch.
       LIR* const second_branch_;
 
       const int storage_index_;
@@ -173,7 +173,7 @@
       RegStorage r_method_;
     };
 
-    // The slow path is invoked if the r_base is nullptr or the class pointed
+    // The slow path is invoked if the r_base is null or the class pointed
     // to by it is not initialized.
     LIR* cont = NewLIR0(kPseudoTargetLabel);
     AddSlowPath(new (arena_) StaticFieldSlowPath(this, unresolved_branch, uninit_branch, cont,
@@ -319,7 +319,7 @@
 /* Perform an explicit null-check on a register.  */
 LIR* Mir2Lir::GenExplicitNullCheck(RegStorage m_reg, int opt_flags) {
   if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
-    return NULL;
+    return nullptr;
   }
   return GenNullCheck(m_reg);
 }
@@ -1188,7 +1188,7 @@
     DCHECK(!IsSameReg(result_reg, object.reg));
   }
   LoadConstant(result_reg, 0);     // assume false
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
 
   RegStorage check_class = AllocTypedTemp(false, kRefReg);
   RegStorage object_class = AllocTypedTemp(false, kRefReg);
@@ -1287,7 +1287,7 @@
     // On MIPS and x86_64 rArg0 != rl_result, place false in result if branch is taken.
     LoadConstant(rl_result.reg, 0);
   }
-  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+  LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, nullptr);
 
   /* load object->klass_ */
   RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg1 will hold the Class* of ref.
@@ -1295,7 +1295,7 @@
   LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(),
               ref_class_reg, kNotVolatile);
   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
-  LIR* branchover = NULL;
+  LIR* branchover = nullptr;
   if (type_known_final) {
     // rl_result == ref == class.
     GenSelectConst32(ref_class_reg, class_reg, kCondEq, 1, 0, rl_result.reg,
@@ -1320,7 +1320,7 @@
       if (!type_known_abstract) {
         /* Uses branchovers */
         LoadConstant(rl_result.reg, 1);     // assume true
-        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
+        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), nullptr);
       }
 
       OpRegCopy(TargetReg(kArg0, kRef), class_reg);    // .ne case - arg0 <= class
@@ -2129,7 +2129,7 @@
   }
   if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
     FlushAllRegs();
-    LIR* branch = OpTestSuspend(NULL);
+    LIR* branch = OpTestSuspend(nullptr);
     LIR* cont = NewLIR0(kPseudoTargetLabel);
     AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
   } else {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 54e5742..4215e8b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -46,7 +46,7 @@
   if (rl_src.location == kLocPhysReg) {
     OpRegCopy(r_dest, rl_src.reg);
   } else if (IsInexpensiveConstant(rl_src)) {
-    // On 64-bit targets, will sign extend.  Make sure constant reference is always NULL.
+    // On 64-bit targets, will sign extend.  Make sure constant reference is always null.
     DCHECK(!rl_src.ref || (mir_graph_->ConstantValue(rl_src) == 0));
     LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
   } else {
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 936ff42..f9b9684 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -613,7 +613,7 @@
       LOG(FATAL) << "Unexpected branch kind " << opcode;
       UNREACHABLE();
   }
-  LIR* hop_target = NULL;
+  LIR* hop_target = nullptr;
   if (!unconditional) {
     hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
     LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
@@ -650,7 +650,7 @@
   LIR *lir;
   AssemblerStatus res = kSuccess;  // Assume success.
 
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     if (lir->opcode < 0) {
       continue;
     }
@@ -668,7 +668,7 @@
          * (label2 - label1), where label1 is a standard
          * kPseudoTargetLabel and is stored in operands[2].
          * If operands[3] is null, then label2 is a kPseudoTargetLabel
-         * and is found in lir->target.  If operands[3] is non-NULL,
+         * and is found in lir->target.  If operands[3] is non-null,
          * then it is a Switch/Data table.
          */
         int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
@@ -863,7 +863,7 @@
   LIR* lir;
   int offset = 0;
 
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (LIKELY(lir->opcode >= 0)) {
       if (!lir->flags.is_nop) {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 05570e4..39b9cc7 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -112,7 +112,7 @@
   // Test loop.
   RegStorage r_key = AllocTemp();
   LIR* loop_label = NewLIR0(kPseudoTargetLabel);
-  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+  LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, nullptr);
   Load32Disp(r_base, 0, r_key);
   OpRegImm(kOpAdd, r_base, 8);
   OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
@@ -188,7 +188,7 @@
   tab_rec->anchor = base_label;
 
   // Bounds check - if < 0 or >= size continue following switch.
-  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, nullptr);
 
   // Materialize the table base pointer.
   RegStorage r_base = AllocPtrSizeTemp();
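
The sparse-switch code above emits a linear scan over packed records, comparing each key with the switch operand. A rough C++ model of the lookup the generated loop performs, with the record layout (two 32-bit words, key first) inferred from the 8-byte stride:

    #include <cstddef>
    #include <cstdint>

    // Illustrative model only; ART emits this as MIPS instructions, not C++.
    int32_t SparseSwitchLookup(const int32_t* table, size_t entries,
                               int32_t value, int32_t default_disp) {
      const int32_t* base = table;
      const int32_t* end = table + 2 * entries;
      while (base != end) {      // exit_branch: taken once r_base == r_end
        int32_t key = base[0];   // Load32Disp(r_base, 0, r_key)
        if (key == value) {
          return base[1];        // displacement stored beside the key
        }
        base += 2;               // OpRegImm(kOpAdd, r_base, 8)
      }
      return default_disp;       // no match: fall through past the table
    }
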
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1ca8bb6..9319c64 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -68,7 +68,7 @@
     NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
     NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
     NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
-    LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+    LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
     NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
     NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
     NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
@@ -128,7 +128,7 @@
       break;
     default:
       LOG(FATAL) << "No support for ConditionCode: " << cond;
-      return NULL;
+      return nullptr;
   }
   if (cmp_zero) {
     branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
@@ -278,7 +278,7 @@
   // Implement as a branch-over.
   // TODO: Conditional move?
   LoadConstant(rs_dest, true_val);
-  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, nullptr);
   LoadConstant(rs_dest, false_val);
   LIR* target_label = NewLIR0(kPseudoTargetLabel);
   ne_branchover->target = target_label;
@@ -447,7 +447,7 @@
 // Test suspend flag, return target of taken suspend branch.
 LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
   OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
-  return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
+  return OpCmpImmBranch((target == nullptr) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
 }
 
 // Decrement register and branch on condition.
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 8ab5422..95c61cd 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -566,7 +566,7 @@
 /* Load value from base + scaled index. */
 LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
-  LIR *first = NULL;
+  LIR *first = nullptr;
   LIR *res;
   MipsOpCode opcode = kMipsNop;
   bool is64bit = cu_->target64 && r_dest.Is64Bit();
@@ -640,7 +640,7 @@
 // Store value to base + scaled index.
 LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
-  LIR *first = NULL;
+  LIR *first = nullptr;
   MipsOpCode opcode = kMipsNop;
   RegStorage t_reg = AllocTemp();
 
@@ -696,8 +696,8 @@
  * rlp and then restore.
  */
   LIR *res;
-  LIR *load = NULL;
-  LIR *load2 = NULL;
+  LIR *load = nullptr;
+  LIR *load2 = nullptr;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
   bool is64bit = false;
@@ -857,8 +857,8 @@
 LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
   LIR *res;
-  LIR *store = NULL;
-  LIR *store2 = NULL;
+  LIR *store = nullptr;
+  LIR *store2 = nullptr;
   MipsOpCode opcode = kMipsNop;
   bool short_form = IS_SIMM16(displacement);
   bool is64bit = false;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 2deb727..e9e9161 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1219,7 +1219,7 @@
   block_label_list_[block_id].flags.fixup = kFixupLabel;
   AppendLIR(&block_label_list_[block_id]);
 
-  LIR* head_lir = NULL;
+  LIR* head_lir = nullptr;
 
   // If this is a catch block, export the start address.
   if (bb->catch_entry) {
@@ -1245,7 +1245,7 @@
     DCHECK_EQ(cfi_.GetCurrentCFAOffset(), frame_size_);
   }
 
-  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     ResetRegPool();
     if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
       ClobberAllTemps();
@@ -1269,7 +1269,7 @@
     GenPrintLabel(mir);
 
     // Remember the first LIR for this block.
-    if (head_lir == NULL) {
+    if (head_lir == nullptr) {
       head_lir = &block_label_list_[bb->id];
       // Set the first label as a scheduling barrier.
       DCHECK(!head_lir->flags.use_def_invalid);
@@ -1309,7 +1309,7 @@
   cu_->NewTimingSplit("SpecialMIR2LIR");
   // Find the first DalvikByteCode block.
   DCHECK_EQ(mir_graph_->GetNumReachableBlocks(), mir_graph_->GetDfsOrder().size());
-  BasicBlock*bb = NULL;
+  BasicBlock* bb = nullptr;
   for (BasicBlockId dfs_id : mir_graph_->GetDfsOrder()) {
     BasicBlock* candidate = mir_graph_->GetBasicBlock(dfs_id);
     if (candidate->block_type == kDalvikByteCode) {
@@ -1317,11 +1317,11 @@
       break;
     }
   }
-  if (bb == NULL) {
+  if (bb == nullptr) {
     return false;
   }
   DCHECK_EQ(bb->start_offset, 0);
-  DCHECK(bb->first_mir_insn != NULL);
+  DCHECK(bb->first_mir_insn != nullptr);
 
   // Get the first instruction.
   MIR* mir = bb->first_mir_insn;
@@ -1343,17 +1343,17 @@
   PreOrderDfsIterator iter(mir_graph_);
   BasicBlock* curr_bb = iter.Next();
   BasicBlock* next_bb = iter.Next();
-  while (curr_bb != NULL) {
+  while (curr_bb != nullptr) {
     MethodBlockCodeGen(curr_bb);
     // If the fall_through block is no longer laid out consecutively, drop in a branch.
     BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
-    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
+    if ((curr_bb_fall_through != nullptr) && (curr_bb_fall_through != next_bb)) {
       OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
     }
     curr_bb = next_bb;
     do {
       next_bb = iter.Next();
-    } while ((next_bb != NULL) && (next_bb->block_type == kDead));
+    } while ((next_bb != nullptr) && (next_bb->block_type == kDead));
   }
   HandleSlowPaths();
 }
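
The layout loop above emits blocks in pre-order DFS and drops in an unconditional branch whenever a fall-through successor is not the next live block, skipping kDead blocks. A compact model of that policy, with simplified types rather than ART's real classes:

    #include <cstddef>
    #include <vector>

    struct BlockSketch {
      int fall_through;  // successor block index, or -1 if none
      bool dead;
    };

    // Returns the blocks that need an explicit patch-up jump.
    std::vector<int> BlocksNeedingBranch(const std::vector<BlockSketch>& blocks,
                                         const std::vector<int>& order) {
      std::vector<int> need_branch;
      for (size_t i = 0; i < order.size(); ++i) {
        if (blocks[order[i]].dead) continue;
        size_t j = i + 1;
        while (j < order.size() && blocks[order[j]].dead) ++j;  // skip dead
        int next = (j < order.size()) ? order[j] : -1;
        int ft = blocks[order[i]].fall_through;
        if (ft != -1 && ft != next) {
          need_branch.push_back(order[i]);  // OpUnconditionalBranch(...)
        }
      }
      return need_branch;
    }
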
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f9efe37..8f08a51 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -388,7 +388,7 @@
       LIR* DefEnd() { return def_end_; }
       void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
       void ResetDefBody() { def_start_ = def_end_ = nullptr; }
-      // Find member of aliased set matching storage_used; return nullptr if none.
+      // Find member of aliased set matching storage_used; return null if none.
       RegisterInfo* FindMatchingView(uint32_t storage_used) {
         RegisterInfo* res = Master();
         for (; res != nullptr; res = res->GetAliasChain()) {
@@ -605,7 +605,7 @@
     char* ArenaStrdup(const char* str) {
       size_t len = strlen(str) + 1;
       char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
-      if (res != NULL) {
+      if (res != nullptr) {
         strncpy(res, str, len);
       }
       return res;
@@ -650,7 +650,7 @@
     void DumpPromotionMap();
     void CodegenDump();
     LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
-                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
+                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
     LIR* NewLIR0(int opcode);
     LIR* NewLIR1(int opcode, int dest);
     LIR* NewLIR2(int opcode, int dest, int src1);
@@ -1120,8 +1120,8 @@
      * @param base_reg The register holding the base address.
      * @param offset The offset from the base.
      * @param check_value The immediate to compare to.
-     * @param target branch target (or nullptr)
-     * @param compare output for getting LIR for comparison (or nullptr)
+     * @param target branch target (or null)
+     * @param compare output for getting LIR for comparison (or null)
      * @returns The branch instruction that was generated.
      */
     virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
@@ -1854,7 +1854,7 @@
     // to deduplicate the masks.
     ResourceMaskCache mask_cache_;
 
-    // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+    // Record the MIR that generated a given safepoint (null for prologue safepoints).
     ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
 
     // The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
@@ -1869,7 +1869,7 @@
     // For architectures that don't have true PC-relative addressing (see pc_rel_temp_
     // above) and also have a limited range of offsets for loads, it'd be useful to
     // know the minimum offset into the dex cache arrays, so we calculate that as well
-    // if pc_rel_temp_ isn't nullptr.
+    // if pc_rel_temp_ isn't null.
     uint32_t dex_cache_arrays_min_offset_;
 
     dwarf::LazyDebugFrameOpCodeWriter cfi_;
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 555d5b9..b3c7355 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -100,7 +100,7 @@
       }
     }
     m2l->AdjustSpillMask();
-    m2l->GenEntrySequence(NULL, m2l->LocCReturnRef());
+    m2l->GenEntrySequence(nullptr, m2l->LocCReturnRef());
     m2l->GenExitSequence();
     m2l->HandleSlowPaths();
     m2l->AssembleLIR();
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fc3e687..39eb117 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -102,7 +102,7 @@
 static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
               "kDisabledOpts unexpected");
 
-// Supported shorty types per instruction set. nullptr means that all are available.
+// Supported shorty types per instruction set. null means that all are available.
 // Z : boolean
 // B : byte
 // S : short
@@ -422,7 +422,7 @@
     Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
 };
 
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// Unsupported opcodes. null can be used when everything is supported. Size of the lists is
 // recorded below.
 static const int* kUnsupportedOpcodes[] = {
     // 0 = kNone.
@@ -515,7 +515,7 @@
 
   for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
     BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
-    if (bb == NULL) continue;
+    if (bb == nullptr) continue;
     if (bb->block_type == kDead) continue;
     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
       int opcode = mir->dalvikInsn.opcode;
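
The scan above is a whitelist check: compilation is refused if any instruction (or shorty character) of the method appears in the per-ISA unsupported list, with a null list meaning everything is supported. A schematic version of that null-as-sentinel check:

    #include <cstddef>

    // Schematic support check matching the comments above: a null list
    // means "no restrictions"; otherwise membership means "unsupported".
    bool IsOpcodeSupported(const int* unsupported, size_t count, int opcode) {
      if (unsupported == nullptr) {
        return true;
      }
      for (size_t i = 0; i < count; ++i) {
        if (unsupported[i] == opcode) {
          return false;
        }
      }
      return true;
    }
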
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index e779479..8ec86fa 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -935,7 +935,7 @@
       RegStorage my_reg = info->GetReg();
       RegStorage partner_reg = info->Partner();
       RegisterInfo* partner = GetRegInfo(partner_reg);
-      DCHECK(partner != NULL);
+      DCHECK(partner != nullptr);
       DCHECK(partner->IsWide());
       DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
       DCHECK(partner->IsLive());
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index af19f5e..eb33357 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1633,7 +1633,7 @@
   AssemblerStatus res = kSuccess;  // Assume success
 
   const bool kVerbosePcFixup = false;
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     if (IsPseudoLirOp(lir->opcode)) {
       continue;
     }
@@ -1646,7 +1646,7 @@
       switch (lir->opcode) {
         case kX86Jcc8: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           int delta = 0;
           CodeOffset pc;
           if (IS_SIMM8(lir->operands[0])) {
@@ -1679,7 +1679,7 @@
         }
         case kX86Jcc32: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
           CodeOffset target = target_lir->offset;
           int delta = target - pc;
@@ -1695,7 +1695,7 @@
         }
         case kX86Jecxz8: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset pc;
           pc = lir->offset + 2;  // opcode + rel8
           CodeOffset target = target_lir->offset;
@@ -1706,7 +1706,7 @@
         }
         case kX86Jmp8: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           int delta = 0;
           CodeOffset pc;
           if (IS_SIMM8(lir->operands[0])) {
@@ -1738,7 +1738,7 @@
         }
         case kX86Jmp32: {
           LIR *target_lir = lir->target;
-          DCHECK(target_lir != NULL);
+          DCHECK(target_lir != nullptr);
           CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
           CodeOffset target = target_lir->offset;
           int delta = target - pc;
@@ -1748,7 +1748,7 @@
         default:
           if (lir->flags.fixup == kFixupLoad) {
             LIR *target_lir = lir->target;
-            DCHECK(target_lir != NULL);
+            DCHECK(target_lir != nullptr);
             CodeOffset target = target_lir->offset;
             // Handle 64 bit RIP addressing.
             if (lir->operands[1] == kRIPReg) {
@@ -1950,7 +1950,7 @@
   LIR* lir;
   int offset = 0;
 
-  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+  for (lir = first_lir_insn_; lir != nullptr; lir = NEXT_LIR(lir)) {
     lir->offset = offset;
     if (LIKELY(!IsPseudoLirOp(lir->opcode))) {
       if (!lir->flags.is_nop) {
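
Every fixup case above computes its displacement the same way: relative to the end of the branch instruction, whose length comes from the encoding (2 bytes for the rel8 forms, 5 or 6 for the rel32 forms, per the inline comments). A condensed sketch of that arithmetic, with hypothetical helper names:

    #include <cstdint>

    int32_t BranchDelta(uint32_t branch_offset, uint32_t instr_length,
                        uint32_t target_offset) {
      uint32_t pc = branch_offset + instr_length;  // address after the branch
      return static_cast<int32_t>(target_offset - pc);
    }

    bool FitsRel8(int32_t delta) {
      // When this fails, the assembler retries with the 32-bit encoding.
      return delta >= -128 && delta <= 127;
    }
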
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d7a5eb0..e2364d8 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -80,7 +80,7 @@
 
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size - 1);
-  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+  LIR* branch_over = OpCondBranch(kCondHi, nullptr);
 
   RegStorage addr_for_jump;
   if (cu_->target64) {
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 10af31a..8e81746 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -484,13 +484,13 @@
   } else {
     NewLIR2(kX86UcomisdRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
-  LIR* branch = NULL;
+  LIR* branch = nullptr;
   if (unordered_gt) {
     branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
   if (!IsByteRegister(rl_result.reg)) {
-    LIR* branch2 = NULL;
+    LIR* branch2 = nullptr;
     if (unordered_gt) {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
       NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
@@ -513,7 +513,7 @@
                                      bool is_double) {
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
-  LIR* branch = NULL;
+  LIR* branch = nullptr;
   RegLocation rl_src1;
   RegLocation rl_src2;
   if (is_double) {
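
The kX86CondPE branch above exists because ucomisd/ucomiss set the parity flag when either operand is NaN; the unordered_gt bias then decides what such compares produce. In portable terms, the emitted sequence computes roughly:

    #include <cmath>

    // Portable statement of the compare above: the unordered (NaN) case
    // collapses to +1 or -1 depending on the gt/lt bias.
    int FloatCompareResult(double a, double b, bool unordered_gt) {
      if (std::isnan(a) || std::isnan(b)) {  // PF set by ucomisd
        return unordered_gt ? 1 : -1;
      }
      if (a == b) return 0;
      return (a > b) ? 1 : -1;
    }
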
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 2c13b61..943bfc0 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1569,7 +1569,7 @@
   } else {
     OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
   }
-  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+  return OpCondBranch((target == nullptr) ? kCondNe : kCondEq, target);
 }
 
 // Decrement register and branch on condition
@@ -3005,7 +3005,7 @@
 
   // Assume that there is no match.
   LoadConstant(result_reg, 0);
-  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, NULL);
+  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg, 0, nullptr);
 
   // We will use this register to compare to memory below.
   // References are 32 bit in memory, and 64 bit in registers (in 64 bit mode).
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index a16e242..b460379 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1281,7 +1281,7 @@
   RegLocation rl_return = GetReturn(kCoreReg);
   RegLocation rl_dest = InlineTarget(info);
 
-  // Is the string non-NULL?
+  // Is the string non-null?
   LoadValueDirectFixed(rl_obj, rs_rDX);
   GenNullCheck(rs_rDX, info->opt_flags);
   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index efcb9ee..61a1bec 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -578,7 +578,7 @@
       } else if (pc_rel_base_reg_.Valid() || cu_->target64) {
         // We will load the value from the literal area.
         LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
-        if (data_target == NULL) {
+        if (data_target == nullptr) {
           data_target = AddWideData(&literal_list_, val_lo, val_hi);
         }
 
@@ -642,8 +642,8 @@
 
 LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_dest, OpSize size) {
-  LIR *load = NULL;
-  LIR *load2 = NULL;
+  LIR *load = nullptr;
+  LIR *load2 = nullptr;
   bool is_array = r_index.Valid();
   bool pair = r_dest.IsPair();
   bool is64bit = ((size == k64) || (size == kDouble));
@@ -763,7 +763,7 @@
     }
   }
 
-  // Always return first load generated as this might cause a fault if base is nullptr.
+  // Always return first load generated as this might cause a fault if base is null.
   return load;
 }
 
@@ -791,8 +791,8 @@
 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_src, OpSize size,
                                       int opt_flags) {
-  LIR *store = NULL;
-  LIR *store2 = NULL;
+  LIR *store = nullptr;
+  LIR *store2 = nullptr;
   bool is_array = r_index.Valid();
   bool pair = r_src.IsPair();
   bool is64bit = (size == k64) || (size == kDouble);
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 197f66d..939bf40 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -26,15 +26,15 @@
 
 void MIRGraph::ClearAllVisitedFlags() {
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     bb->visited = false;
   }
 }
 
 BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
-  if (bb != NULL) {
+  if (bb != nullptr) {
     if (bb->visited || bb->hidden) {
-      bb = NULL;
+      bb = nullptr;
     }
   }
   return bb;
@@ -42,13 +42,13 @@
 
 BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
   BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
-  if (res == NULL) {
+  if (res == nullptr) {
     res = NeedsVisit(GetBasicBlock(bb->taken));
-    if (res == NULL) {
+    if (res == nullptr) {
       if (bb->successor_block_list_type != kNotUsed) {
         for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
           res = NeedsVisit(GetBasicBlock(sbi->block));
-          if (res != NULL) {
+          if (res != nullptr) {
             break;
           }
         }
@@ -75,7 +75,7 @@
   while (!succ.empty()) {
     BasicBlock* curr = succ.back();
     BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
-    if (next_successor != NULL) {
+    if (next_successor != nullptr) {
       MarkPreOrder(next_successor);
       succ.push_back(next_successor);
       continue;
@@ -107,7 +107,7 @@
   if (num_reachable_blocks_ != GetNumBlocks()) {
     // Kill all unreachable blocks.
     AllNodesIterator iter(this);
-    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
       if (!bb->visited) {
         bb->Kill(this);
       }
@@ -121,7 +121,7 @@
  * register idx is defined in BasicBlock bb.
  */
 bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
-  if (bb->data_flow_info == NULL) {
+  if (bb->data_flow_info == nullptr) {
     return false;
   }
 
@@ -149,11 +149,11 @@
   }
 
   AllNodesIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     FindLocalLiveIn(bb);
   }
   AllNodesIterator iter2(this);
-  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
+  for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
     FillDefBlockMatrix(bb);
   }
 
@@ -247,7 +247,7 @@
 void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
   int num_total_blocks = GetBasicBlockListCount();
 
-  if (bb->dominators == NULL) {
+  if (bb->dominators == nullptr) {
     bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                  true /* expandable */, kBitMapDominators);
     bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
@@ -357,7 +357,7 @@
 
   /* Initialize domination-related data structures */
   PreOrderDfsIterator iter(this);
-  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
+  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
     InitializeDominationInfo(bb);
   }
 
@@ -376,7 +376,7 @@
   /* Compute the immediate dominators */
   RepeatingReversePostOrderDfsIterator iter2(this);
   bool change = false;
-  for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
+  for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
     change = ComputeblockIDom(bb);
   }
 
@@ -387,19 +387,19 @@
   GetEntryBlock()->i_dom = 0;
 
   PreOrderDfsIterator iter3(this);
-  for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
+  for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
     SetDominators(bb);
   }
 
   ReversePostOrderDfsIterator iter4(this);
-  for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
+  for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
     ComputeBlockDominators(bb);
   }
 
   // Compute the dominance frontier for each block.
   ComputeDomPostOrderTraversal(GetEntryBlock());
   PostOrderDOMIterator iter5(this);
-  for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
+  for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
     ComputeDominanceFrontier(bb);
   }
 
@@ -434,7 +434,7 @@
   DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
   ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
 
-  if (bb->data_flow_info == NULL) {
+  if (bb->data_flow_info == nullptr) {
     return false;
   }
   temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
@@ -466,7 +466,7 @@
 void MIRGraph::FindPhiNodeBlocks() {
   RepeatingPostOrderDfsIterator iter(this);
   bool change = false;
-  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
+  for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
     change = ComputeBlockLiveIns(bb);
   }
 
@@ -505,7 +505,7 @@
  */
 bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
   /* Phi nodes are at the beginning of each block */
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
     if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
       return true;
     int ssa_reg = mir->ssa_rep->defs[0];
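
The dominator computation above repeats reverse post-order passes until ComputeblockIDom stops reporting changes. A generic sketch of that fixpoint pattern, in the style of the classic iterative intersection algorithm rather than ART's exact code:

    #include <vector>

    // idom holds the current immediate-dominator guess per block (-1 =
    // unprocessed); rpo_num maps a block id to its reverse post-order rank.
    static int Intersect(const std::vector<int>& idom,
                         const std::vector<int>& rpo_num, int b1, int b2) {
      while (b1 != b2) {
        while (rpo_num[b1] > rpo_num[b2]) b1 = idom[b1];
        while (rpo_num[b2] > rpo_num[b1]) b2 = idom[b2];
      }
      return b1;
    }

    void ComputeIdoms(const std::vector<std::vector<int>>& preds,
                      const std::vector<int>& rpo,  // blocks in RPO
                      const std::vector<int>& rpo_num,
                      int entry, std::vector<int>* idom) {
      (*idom)[entry] = entry;
      bool changed = true;
      while (changed) {  // one full pass per iteration, as with iter2 above
        changed = false;
        for (int b : rpo) {
          if (b == entry) continue;
          int new_idom = -1;
          for (int p : preds[b]) {
            if ((*idom)[p] == -1) continue;  // predecessor not processed yet
            new_idom = (new_idom == -1)
                ? p : Intersect(*idom, rpo_num, new_idom, p);
          }
          if (new_idom != -1 && (*idom)[b] != new_idom) {
            (*idom)[b] = new_idom;
            changed = true;
          }
        }
      }
    }
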
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a4df00e..c1d5cb7 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -46,7 +46,7 @@
 }
 
 bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) {
-  DCHECK(method_verifier != NULL);
+  DCHECK(method_verifier != nullptr);
   MethodReference ref = method_verifier->GetMethodReference();
   bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags());
   const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile);
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 977757f..7eba515 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -166,7 +166,7 @@
         }
       }
     } else {
-      DCHECK(i >= 65536 || reg_bitmap == NULL);
+      DCHECK(i >= 65536 || reg_bitmap == nullptr);
     }
   }
 }
@@ -283,7 +283,7 @@
     }
     mirror::ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod(
         is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
-    if (abstract_method == NULL) {
+    if (abstract_method == nullptr) {
       // If the method is not found in the cache this means that it was never found
       // by ResolveMethodAndCheckAccess() called when verifying invoke_*.
       continue;
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 437ae52..ad07639 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -59,7 +59,7 @@
     return safe_cast_set_;
   }
 
-  // Returns the devirtualization target method, or nullptr if none.
+  // Returns the devirtualization target method, or null if none.
   const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
 
   // Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index b4d4695..bad8335 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -79,7 +79,7 @@
   }
   if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
     // ClassLinker can return a field of the wrong kind directly from the DexCache.
-    // Silently return nullptr on such incompatible class change.
+    // Silently return null on such incompatible class change.
     return nullptr;
   }
   return resolved_field;
@@ -206,7 +206,7 @@
   }
   if (check_incompatible_class_change &&
       UNLIKELY(resolved_method->CheckIncompatibleClassChange(invoke_type))) {
-    // Silently return nullptr on incompatible class change.
+    // Silently return null on incompatible class change.
     return nullptr;
   }
   return resolved_method;
@@ -302,7 +302,7 @@
                                                   target_dex_cache, class_loader,
                                                   NullHandle<mirror::ArtMethod>(), kVirtual);
     }
-    CHECK(called_method != NULL);
+    CHECK(called_method != nullptr);
     CHECK(!called_method->IsAbstract());
     int stats_flags = kFlagMethodResolved;
     GetCodeAndMethodForDirectCall(/*out*/invoke_type,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e665e1d..c858326 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -495,7 +495,8 @@
                                 const std::vector<const DexFile*>& dex_files,
                                 TimingLogger* timings) {
   DCHECK(!Runtime::Current()->IsStarted());
-  std::unique_ptr<ThreadPool> thread_pool(new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
+  std::unique_ptr<ThreadPool> thread_pool(
+      new ThreadPool("Compiler driver thread pool", thread_count_ - 1));
   VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
   PreCompile(class_loader, dex_files, thread_pool.get(), timings);
   Compile(class_loader, dex_files, thread_pool.get(), timings);
@@ -2101,7 +2102,8 @@
   VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
 }
 
-void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, size_t class_def_index) {
+void CompilerDriver::CompileClass(const ParallelCompilationManager* manager,
+                                  size_t class_def_index) {
   ATRACE_CALL();
   const DexFile& dex_file = *manager->GetDexFile();
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
@@ -2251,7 +2253,7 @@
                    // Is eligible for compilation by methods-to-compile filter.
                    IsMethodToCompile(method_ref);
     if (compile) {
-      // NOTE: if compiler declines to compile this method, it will return nullptr.
+      // NOTE: if compiler declines to compile this method, it will return null.
       compiled_method = compiler_->Compile(code_item, access_flags, invoke_type, class_def_idx,
                                            method_idx, class_loader, dex_file);
     }
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 50e1fb1..03c5c5c 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -94,7 +94,7 @@
   // Create a compiler targeting the requested "instruction_set".
   // "image" should be true if image specific optimizations should be
   // enabled.  "image_classes" lets the compiler know what classes it
-  // can assume will be in the image, with nullptr implying all available
+  // can assume will be in the image, with null implying all available
   // classes.
   explicit CompilerDriver(const CompilerOptions* compiler_options,
                           VerificationResults* verification_results,
@@ -228,7 +228,7 @@
   mirror::ClassLoader* GetClassLoader(ScopedObjectAccess& soa, const DexCompilationUnit* mUnit)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Resolve compiling method's class. Returns nullptr on failure.
+  // Resolve compiling method's class. Returns null on failure.
   mirror::Class* ResolveCompilingMethodsClass(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
@@ -240,7 +240,7 @@
       const DexCompilationUnit* mUnit)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Resolve a field. Returns nullptr on failure, including incompatible class change.
+  // Resolve a field. Returns null on failure, including incompatible class change.
   // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
   ArtField* ResolveField(
       const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
@@ -290,7 +290,7 @@
                                       ArtField* resolved_field)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Resolve a method. Returns nullptr on failure, including incompatible class change.
+  // Resolve a method. Returns null on failure, including incompatible class change.
   mirror::ArtMethod* ResolveMethod(
       ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
       Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
@@ -592,16 +592,16 @@
   const bool image_;
 
   // If image_ is true, specifies the classes that will be included in
-  // the image. Note if image_classes_ is nullptr, all classes are
+  // the image. Note if image_classes_ is null, all classes are
   // included in the image.
   std::unique_ptr<std::unordered_set<std::string>> image_classes_;
 
-  // Specifies the classes that will be compiled. Note that if classes_to_compile_ is nullptr,
+  // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
   // all classes are eligible for compilation (duplication filters etc. will still apply).
   // This option may be restricted to the boot image, depending on a flag in the implementation.
   std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
 
-  // Specifies the methods that will be compiled. Note that if methods_to_compile_ is nullptr,
+  // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
   // all methods are eligible for compilation (compilation filters etc. will still apply).
   // This option may be restricted to the boot image, depending on a flag in the implementation.
   std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index ded50ca..5085f32 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -56,20 +56,20 @@
     CHECK(started);
     env_ = Thread::Current()->GetJniEnv();
     class_ = env_->FindClass(class_name);
-    CHECK(class_ != NULL) << "Class not found: " << class_name;
+    CHECK(class_ != nullptr) << "Class not found: " << class_name;
     if (is_virtual) {
       mid_ = env_->GetMethodID(class_, method, signature);
     } else {
       mid_ = env_->GetStaticMethodID(class_, method, signature);
     }
-    CHECK(mid_ != NULL) << "Method not found: " << class_name << "." << method << signature;
+    CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature;
   }
 
   void MakeAllExecutable(jobject class_loader) {
     const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
     for (size_t i = 0; i != class_path.size(); ++i) {
       const DexFile* dex_file = class_path[i];
-      CHECK(dex_file != NULL);
+      CHECK(dex_file != nullptr);
       MakeDexFileExecutable(class_loader, *dex_file);
     }
   }
@@ -84,7 +84,7 @@
       Handle<mirror::ClassLoader> loader(
           hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
       mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader);
-      CHECK(c != NULL);
+      CHECK(c != nullptr);
       for (size_t j = 0; j < c->NumDirectMethods(); j++) {
         MakeExecutable(c->GetDirectMethod(j));
       }
@@ -101,39 +101,38 @@
 
 // Disabled due to 10 second runtime on host
 TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
-  CompileAll(NULL);
+  CompileAll(nullptr);
 
   // All libcore references should resolve
   ScopedObjectAccess soa(Thread::Current());
-  ASSERT_TRUE(java_lang_dex_file_ != NULL);
+  ASSERT_TRUE(java_lang_dex_file_ != nullptr);
   const DexFile& dex = *java_lang_dex_file_;
   mirror::DexCache* dex_cache = class_linker_->FindDexCache(dex);
   EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings());
   for (size_t i = 0; i < dex_cache->NumStrings(); i++) {
     const mirror::String* string = dex_cache->GetResolvedString(i);
-    EXPECT_TRUE(string != NULL) << "string_idx=" << i;
+    EXPECT_TRUE(string != nullptr) << "string_idx=" << i;
   }
   EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes());
   for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
     mirror::Class* type = dex_cache->GetResolvedType(i);
-    EXPECT_TRUE(type != NULL) << "type_idx=" << i
+    EXPECT_TRUE(type != nullptr) << "type_idx=" << i
                               << " " << dex.GetTypeDescriptor(dex.GetTypeId(i));
   }
   EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods());
   for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
     mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i);
-    EXPECT_TRUE(method != NULL) << "method_idx=" << i
+    EXPECT_TRUE(method != nullptr) << "method_idx=" << i
                                 << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
                                 << " " << dex.GetMethodName(dex.GetMethodId(i));
-    EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != NULL) << "method_idx=" << i
-                                           << " "
-                                           << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i))
-                                           << " " << dex.GetMethodName(dex.GetMethodId(i));
+    EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i
+        << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " "
+        << dex.GetMethodName(dex.GetMethodId(i));
   }
   EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields());
   for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
     ArtField* field = Runtime::Current()->GetClassLinker()->GetResolvedField(i, dex_cache);
-    EXPECT_TRUE(field != NULL) << "field_idx=" << i
+    EXPECT_TRUE(field != nullptr) << "field_idx=" << i
                                << " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i))
                                << " " << dex.GetFieldName(dex.GetFieldId(i));
   }
@@ -153,14 +152,14 @@
     CompileDirectMethod(NullHandle<mirror::ClassLoader>(), "java.lang.Object", "<init>", "()V");
     class_loader = LoadDex("AbstractMethod");
   }
-  ASSERT_TRUE(class_loader != NULL);
+  ASSERT_TRUE(class_loader != nullptr);
   EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true);
 
   // Create a jobj_ of ConcreteClass, NOT AbstractClass.
   jclass c_class = env_->FindClass("ConcreteClass");
   jmethodID constructor = env_->GetMethodID(c_class, "<init>", "()V");
   jobject jobj_ = env_->NewObject(c_class, constructor);
-  ASSERT_TRUE(jobj_ != NULL);
+  ASSERT_TRUE(jobj_ != nullptr);
 
   // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception.
   env_->CallNonvirtualVoidMethod(jobj_, class_, mid_);
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index b67dd26..32c8cce 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -374,7 +374,7 @@
   }
 
   Elf_Word GetSize() const {
-    // 1 is for the implicit NULL symbol.
+    // 1 is for the implicit null symbol.
     return symbols_.size() + 1;
   }
 
@@ -578,7 +578,7 @@
       hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0, sizeof(Elf_Word),
                     sizeof(Elf_Word)),
       dynamic_builder_(".dynamic", &dynsym_builder_),
-      shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
+      shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, nullptr, 0, 1, 1) {
     SetupEhdr();
     SetupDynamic();
     SetupRequiredSymbols();
@@ -689,7 +689,7 @@
     // +-------------------------+  (Optional)
     // | .debug_line             |  (Optional)
     // +-------------------------+  (Optional)
-    // | Elf_Shdr NULL           |
+    // | Elf_Shdr null           |
     // | Elf_Shdr .dynsym        |
     // | Elf_Shdr .dynstr        |
     // | Elf_Shdr .hash          |
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 949fcab..3b2ca94 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -148,7 +148,7 @@
   RawSection debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
   RawSection debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
   RawSection debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-  RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0, 1, 0);
+  RawSection oat_patches(".oat_patches", SHT_OAT_PATCH, 0, nullptr, 0, 1, 0);
 
   // Do not add to .oat_patches since we will make the addresses relative.
   std::vector<uintptr_t> eh_frame_patches;
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 3e5ad7b..08523d8 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -55,12 +55,12 @@
   LOG(INFO) << "elf_filename=" << elf_filename;
 
   UnreserveImageSpace();
-  void* dl_oatdata = NULL;
-  void* dl_oatexec = NULL;
-  void* dl_oatlastword = NULL;
+  void* dl_oatdata = nullptr;
+  void* dl_oatexec = nullptr;
+  void* dl_oatlastword = nullptr;
 
   std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
-  ASSERT_TRUE(file.get() != NULL);
+  ASSERT_TRUE(file.get() != nullptr);
   {
     std::string error_msg;
     std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(), false, false, &error_msg));
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index 8016831..eaf3489 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -68,7 +68,7 @@
   // TODO: compile_pic should be a test argument.
   {
     {
-      jobject class_loader = NULL;
+      jobject class_loader = nullptr;
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       TimingLogger timings("ImageTest::WriteRead", false, false);
       TimingLogger::ScopedTiming t("CompileAll", &timings);
@@ -92,7 +92,7 @@
   }
   // Work around a bug: mcld::Linker::emit closes oat_file by reopening as dup_oat.
   std::unique_ptr<File> dup_oat(OS::OpenFileReadWrite(oat_file.GetFilename().c_str()));
-  ASSERT_TRUE(dup_oat.get() != NULL);
+  ASSERT_TRUE(dup_oat.get() != nullptr);
 
   {
     bool success_image =
@@ -107,7 +107,7 @@
 
   {
     std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
-    ASSERT_TRUE(file.get() != NULL);
+    ASSERT_TRUE(file.get() != nullptr);
     ImageHeader image_header;
     ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
     ASSERT_TRUE(image_header.IsValid());
@@ -118,12 +118,12 @@
     ASSERT_TRUE(!heap->GetContinuousSpaces().empty());
     gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
     ASSERT_FALSE(space->IsImageSpace());
-    ASSERT_TRUE(space != NULL);
+    ASSERT_TRUE(space != nullptr);
     ASSERT_TRUE(space->IsMallocSpace());
     ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
   }
 
-  ASSERT_TRUE(compiler_driver_->GetImageClasses() != NULL);
+  ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
   std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
 
   // Need to delete the compiler since it has worker threads which are attached to runtime.
@@ -137,7 +137,7 @@
   writer.reset(nullptr);
 
   runtime_.reset();
-  java_lang_dex_file_ = NULL;
+  java_lang_dex_file_ = nullptr;
 
   MemMap::Init();
   std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
@@ -145,7 +145,7 @@
   RuntimeOptions options;
   std::string image("-Ximage:");
   image.append(image_location.GetFilename());
-  options.push_back(std::make_pair(image.c_str(), reinterpret_cast<void*>(NULL)));
+  options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
   // By default the compiler this creates will not include patch information.
   options.push_back(std::make_pair("-Xnorelocate", nullptr));
 
@@ -158,7 +158,7 @@
   // give it away now and then switch to a more manageable ScopedObjectAccess.
   Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   ScopedObjectAccess soa(Thread::Current());
-  ASSERT_TRUE(runtime_.get() != NULL);
+  ASSERT_TRUE(runtime_.get() != nullptr);
   class_linker_ = runtime_->GetClassLinker();
 
   gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index a99ef34..fc70d8f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -129,7 +129,7 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
   std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
-  if (oat_file.get() == NULL) {
+  if (oat_file.get() == nullptr) {
     PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
     return false;
   }
@@ -180,7 +180,7 @@
 
   std::unique_ptr<File> image_file(OS::CreateEmptyFile(image_filename.c_str()));
   ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
-  if (image_file.get() == NULL) {
+  if (image_file.get() == nullptr) {
     LOG(ERROR) << "Failed to open image file " << image_filename;
     return false;
   }
@@ -519,7 +519,7 @@
 
 void ImageWriter::ComputeLazyFieldsForImageClasses() {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, NULL);
+  class_linker->VisitClassesWithoutClassesLock(ComputeLazyFieldsForClassesVisitor, nullptr);
 }
 
 bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
@@ -675,7 +675,7 @@
     if (string_id != nullptr) {
       // This string occurs in this dex file, assign the dex cache entry.
       uint32_t string_idx = dex_file.GetIndexForStringId(*string_id);
-      if (dex_cache->GetResolvedString(string_idx) == NULL) {
+      if (dex_cache->GetResolvedString(string_idx) == nullptr) {
         dex_cache->SetResolvedString(string_idx, string);
       }
     }
@@ -697,7 +697,7 @@
 };
 
 void ImageWriter::PruneNonImageClasses() {
-  if (compiler_driver_.GetImageClasses() == NULL) {
+  if (compiler_driver_.GetImageClasses() == nullptr) {
     return;
   }
   Runtime* runtime = Runtime::Current();
@@ -712,7 +712,7 @@
 
   // Remove the undesired classes from the class roots.
   for (const std::string& it : non_image_classes) {
-    bool result = class_linker->RemoveClass(it.c_str(), NULL);
+    bool result = class_linker->RemoveClass(it.c_str(), nullptr);
     DCHECK(result);
   }
 
@@ -724,13 +724,13 @@
     DexCache* dex_cache = class_linker->GetDexCache(idx);
     for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
       Class* klass = dex_cache->GetResolvedType(i);
-      if (klass != NULL && !IsImageClass(klass)) {
-        dex_cache->SetResolvedType(i, NULL);
+      if (klass != nullptr && !IsImageClass(klass)) {
+        dex_cache->SetResolvedType(i, nullptr);
       }
     }
     for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) {
       ArtMethod* method = dex_cache->GetResolvedMethod(i);
-      if (method != NULL && !IsImageClass(method->GetDeclaringClass())) {
+      if (method != nullptr && !IsImageClass(method->GetDeclaringClass())) {
         dex_cache->SetResolvedMethod(i, resolution_method);
       }
     }
@@ -777,14 +777,14 @@
 
 void ImageWriter::DumpImageClasses() {
   auto image_classes = compiler_driver_.GetImageClasses();
-  CHECK(image_classes != NULL);
+  CHECK(image_classes != nullptr);
   for (const std::string& image_class : *image_classes) {
     LOG(INFO) << " " << image_class;
   }
 }
 
 void ImageWriter::CalculateObjectBinSlots(Object* obj) {
-  DCHECK(obj != NULL);
+  DCHECK(obj != nullptr);
   // if it is a string, we want to intern it if it's not interned.
   if (obj->GetClass()->IsStringClass()) {
     // we must be an interned string that was forward referenced and already assigned
@@ -856,7 +856,7 @@
   image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
   image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
   for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
-    CHECK(image_roots->Get(i) != NULL);
+    CHECK(image_roots->Get(i) != nullptr);
   }
   return image_roots.Get();
 }
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index d25acc7..436fc0c 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -47,7 +47,7 @@
       return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
@@ -122,7 +122,7 @@
       return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 2402ea5..6f2cb25 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -152,9 +152,9 @@
     // References need placing in handle scope and the entry value passing
     if (ref_param) {
       // Compute handle scope entry, note null is placed in the handle scope but its boxed value
-      // must be NULL
+      // must be null.
       FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
-      // Check handle scope offset is within frame and doesn't run into the saved segment state
+      // Check handle scope offset is within frame and doesn't run into the saved segment state.
       CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
       CHECK_NE(handle_scope_offset.Uint32Value(),
                main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
@@ -243,9 +243,9 @@
   // 7. Iterate over arguments placing values from managed calling convention in
   //    to the convention required for a native call (shuffling). For references
   //    place an index/pointer to the reference after checking whether it is
-  //    NULL (which must be encoded as NULL).
+  //    null (which must be encoded as null).
   //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
-  //    give as many free registers for the shuffle as possible
+  //    give as many free registers for the shuffle as possible.
   mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
   uint32_t args_count = 0;
   while (mr_conv->HasNext()) {
@@ -451,7 +451,7 @@
                                                  ArrayRef<const LinkerPatch>());
 }
 
-// Copy a single parameter from the managed to the JNI calling convention
+// Copy a single parameter from the managed to the JNI calling convention.
 static void CopyParameter(Assembler* jni_asm,
                           ManagedRuntimeCallingConvention* mr_conv,
                           JniCallingConvention* jni_conv,
@@ -469,7 +469,7 @@
   } else {
     CHECK(jni_conv->IsCurrentParamOnStack());
   }
-  // References need placing in handle scope and the entry address passing
+  // References need placing in handle scope and the entry address passing.
   if (ref_param) {
     null_allowed = mr_conv->IsCurrentArgPossiblyNull();
     // Compute handle scope offset. Note null is placed in the handle scope but the jobject
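
The rule these comments spell out is that a reference argument reaches native code as the address of its handle scope slot, except that a null reference must stay a plain null jobject. Modeled in isolation, with jobject_t and the helper as illustrative stand-ins:

    using jobject_t = void*;  // stand-in for jobject

    jobject_t BoxReference(void** handle_scope_slot) {
      // The slot always holds the reference, possibly null; only a non-null
      // reference is boxed as the slot's address.
      return (*handle_scope_slot != nullptr)
          ? static_cast<jobject_t>(handle_scope_slot)
          : nullptr;
    }
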
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5abd204..d2d38da 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -73,7 +73,7 @@
     image_file_location_oat_begin_(image_file_location_oat_begin),
     image_patch_delta_(image_patch_delta),
     key_value_store_(key_value_store),
-    oat_header_(NULL),
+    oat_header_(nullptr),
     size_dex_file_alignment_(0),
     size_executable_offset_alignment_(0),
     size_oat_header_(0),
@@ -326,7 +326,7 @@
     ClassReference class_ref(dex_file_, class_def_index_);
     CompiledClass* compiled_class = writer_->compiler_driver_->GetCompiledClass(class_ref);
     mirror::Class::Status status;
-    if (compiled_class != NULL) {
+    if (compiled_class != nullptr) {
       status = compiled_class->GetStatus();
     } else if (writer_->compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) {
       status = mirror::Class::kStatusError;
@@ -473,7 +473,7 @@
         ClassReference class_ref(dex_file_, class_def_index_);
         CompiledClass* compiled_class = compiler_driver->GetCompiledClass(class_ref);
         mirror::Class::Status status;
-        if (compiled_class != NULL) {
+        if (compiled_class != nullptr) {
           status = compiled_class->GetStatus();
         } else if (compiler_driver->GetVerificationResults()->IsClassRejected(class_ref)) {
           status = mirror::Class::kStatusError;
@@ -690,7 +690,7 @@
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
-    if (compiled_method != NULL) {  // ie. not an abstract method
+    if (compiled_method != nullptr) {  // i.e. not an abstract method
       size_t file_offset = file_offset_;
       OutputStream* out = out_;
 
@@ -893,7 +893,7 @@
     OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
     const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
 
-    if (compiled_method != NULL) {  // ie. not an abstract method
+    if (compiled_method != nullptr) {  // i.e. not an abstract method
       size_t file_offset = file_offset_;
       OutputStream* out = out_;
 
@@ -940,7 +940,7 @@
       }
       const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
       const uint8_t* class_data = dex_file->GetClassData(class_def);
-      if (class_data != NULL) {  // ie not an empty class, such as a marker interface
+      if (class_data != nullptr) {  // i.e. not an empty class, such as a marker interface
         ClassDataItemIterator it(*dex_file, class_data);
         while (it.HasNextStaticField()) {
           it.Next();
@@ -987,7 +987,7 @@
   // create the OatDexFiles
   for (size_t i = 0; i != dex_files_->size(); ++i) {
     const DexFile* dex_file = (*dex_files_)[i];
-    CHECK(dex_file != NULL);
+    CHECK(dex_file != nullptr);
     OatDexFile* oat_dex_file = new OatDexFile(offset, *dex_file);
     oat_dex_files_.push_back(oat_dex_file);
     offset += oat_dex_file->SizeOf();
@@ -1471,13 +1471,13 @@
     oat_method_offsets_offset_from_oat_class += sizeof(method_bitmap_size_);
     oat_method_offsets_offset_from_oat_class += method_bitmap_size_;
   } else {
-    method_bitmap_ = NULL;
+    method_bitmap_ = nullptr;
     method_bitmap_size_ = 0;
   }
 
   for (size_t i = 0; i < num_methods; i++) {
     CompiledMethod* compiled_method = compiled_methods_[i];
-    if (compiled_method == NULL) {
+    if (compiled_method == nullptr) {
       oat_method_offsets_offsets_from_oat_class_[i] = 0;
     } else {
       oat_method_offsets_offsets_from_oat_class_[i] = oat_method_offsets_offset_from_oat_class;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index cc2b39a..8c79b44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -235,13 +235,13 @@
     // used to validate file position when writing.
     size_t offset_;
 
-    // CompiledMethods for each class_def_method_index, or NULL if no method is available.
+    // CompiledMethods for each class_def_method_index, or null if no method is available.
     std::vector<CompiledMethod*> compiled_methods_;
 
     // Offset from OatClass::offset_ to the OatMethodOffsets for the
     // class_def_method_index. If 0, it means the corresponding
     // CompiledMethod entry in OatClass::compiled_methods_ should be
-    // NULL and that the OatClass::type_ should be kOatClassBitmap.
+    // null and that the OatClass::type_ should be kOatClassBitmap.
     std::vector<uint32_t> oat_method_offsets_offsets_from_oat_class_;
 
     // data to write
@@ -258,12 +258,12 @@
     // OatClassType::type_ is kOatClassBitmap, a set bit indicates the
     // method has an OatMethodOffsets in methods_offsets_, otherwise
     // the entry was omitted to save space. If OatClassType::type_ is
-    // not is kOatClassBitmap, the bitmap will be NULL.
+    // not kOatClassBitmap, the bitmap will be null.
     BitVector* method_bitmap_;
 
     // OatMethodOffsets and OatMethodHeaders for each CompiledMethod
     // present in the OatClass. Note that some may be missing if
-    // OatClass::compiled_methods_ contains NULL values (and
+    // OatClass::compiled_methods_ contains null values (and
     // oat_method_offsets_offsets_from_oat_class_ should contain 0
     // values in this case).
     std::vector<OatMethodOffsets> method_offsets_;
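
These comments imply the reader-side lookup: under kOatClassBitmap, a method's
slot in method_offsets_ is the number of set bits strictly before its
class_def_method_index. A sketch with a hypothetical helper, assuming BitVector
provides IsBitSet() and a prefix popcount via NumSetBits():

    bool FindMethodOffsetsSlot(const BitVector& bitmap, uint32_t method_index,
                               uint32_t* slot) {
      if (!bitmap.IsBitSet(method_index)) {
        return false;  // Method was omitted; it has no OatMethodOffsets entry.
      }
      *slot = bitmap.NumSetBits(method_index);  // set bits in [0, method_index)
      return true;
    }
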
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 74848d5..708733e 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -55,7 +55,7 @@
         buckets_owned_(allocator, num_buckets_, false),
         num_entries_(to_copy.num_entries_) {
     // ArenaAllocator returns zeroed memory, so entries of buckets_ and
-    // buckets_owned_ are initialized to nullptr and false, respectively.
+    // buckets_owned_ are initialized to null and false, respectively.
     DCHECK(IsPowerOfTwo(num_buckets_));
     if (num_buckets_ == to_copy.num_buckets_) {
       // Hash table remains the same size. We copy the bucket pointers and leave
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index bef5896..6ab57b8 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -714,7 +714,7 @@
     // TODO: Implement static evaluation of long unary operations.
     //
     // Do not exit with a fatal condition here.  Instead, simply
-    // return `nullptr' to notify the caller that this instruction
+    // return `null' to notify the caller that this instruction
     // cannot (yet) be statically evaluated.
     return nullptr;
   }
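
The null result is the "cannot fold" signal for callers. A sketch of the
expected caller pattern (TryFold is a hypothetical pass; ReplaceWith() and
RemoveInstruction() are the existing HInstruction/HBasicBlock APIs):

    void TryFold(HBinaryOperation* op) {
      HConstant* constant = op->TryStaticEvaluation();
      if (constant != nullptr) {
        op->ReplaceWith(constant);              // Evaluation succeeded: substitute uses.
        op->GetBlock()->RemoveInstruction(op);  // The operation is now dead.
      }
      // A null result means "not statically evaluable (yet)"; keep the node.
    }
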
@@ -750,7 +750,7 @@
 }
 
 // If `GetConstantRight()` returns one of the inputs, this returns the other
-// one. Otherwise it returns nullptr.
+// one. Otherwise it returns null.
 HInstruction* HBinaryOperation::GetLeastConstantLeft() const {
   HInstruction* most_constant_right = GetConstantRight();
   if (most_constant_right == nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 1a24cb5..0993a18 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1634,7 +1634,7 @@
 
   // Try to statically evaluate `operation` and return a HConstant
   // containing the result of this evaluation.  If `operation` cannot
-  // be evaluated as a constant, return nullptr.
+  // be evaluated as a constant, return null.
   HConstant* TryStaticEvaluation() const;
 
   // Apply this operation to `x`.
@@ -1702,7 +1702,7 @@
 
   // Try to statically evaluate `operation` and return a HConstant
   // containing the result of this evaluation.  If `operation` cannot
-  // be evaluated as a constant, return nullptr.
+  // be evaluated as a constant, return null.
   HConstant* TryStaticEvaluation() const;
 
   // Apply this operation to `x` and `y`.
@@ -1710,11 +1710,11 @@
   virtual int64_t Evaluate(int64_t x, int64_t y) const = 0;
 
   // Returns an input that can legally be used as the right input and is
-  // constant, or nullptr.
+  // constant, or null.
   HConstant* GetConstantRight() const;
 
   // If `GetConstantRight()` returns one of the inputs, this returns the other
-  // one. Otherwise it returns nullptr.
+  // one. Otherwise it returns null.
   HInstruction* GetLeastConstantLeft() const;
 
   DECLARE_INSTRUCTION(BinaryOperation);
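
Together, these two accessors let a simplification handle both operand orders of
a commutative operation through one code path. A sketch (SimplifyAddWithZero is
a hypothetical rule, and the IsZero() predicate on HConstant is an assumption):

    void SimplifyAddWithZero(HAdd* add) {
      HConstant* cst = add->GetConstantRight();  // null if neither input is constant
      if (cst != nullptr && cst->IsZero()) {
        HInstruction* other = add->GetLeastConstantLeft();  // the non-constant input
        add->ReplaceWith(other);                 // x + 0 == x, whichever side held the 0
        add->GetBlock()->RemoveInstruction(add);
      }
    }
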
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 03f5545..fe70d3a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -333,7 +333,8 @@
     }
     if (after_loop == nullptr) {
       // Uses are only in the loop.
-      first_range_ = last_range_ = range_search_start_ = new (allocator_) LiveRange(start, end, nullptr);
+      first_range_ = last_range_ = range_search_start_ =
+          new (allocator_) LiveRange(start, end, nullptr);
     } else if (after_loop->GetStart() <= end) {
       first_range_ = range_search_start_ = after_loop;
       // There are uses after the loop.
@@ -596,7 +597,7 @@
         previous->next_ = nullptr;
         new_interval->first_range_ = current;
         if (range_search_start_ != nullptr && range_search_start_->GetEnd() >= current->GetEnd()) {
-          // Search start point is inside `new_interval`. Change it to nullptr
+          // Search start point is inside `new_interval`. Change it to null
           // (i.e. the end of the interval) in the original interval.
           range_search_start_ = nullptr;
         }
@@ -863,7 +864,7 @@
         defined_by_(defined_by) {}
 
   // Searches for a LiveRange that either covers the given position or is the
-  // first next LiveRange. Returns nullptr if no such LiveRange exists. Ranges
+  // first next LiveRange. Returns null if no such LiveRange exists. Ranges
   // known to end before `position` can be skipped with `search_start`.
   LiveRange* FindRangeAtOrAfter(size_t position, LiveRange* search_start) const {
     if (kIsDebugBuild) {
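
The core of this search is a forward walk of the singly linked range list. A
simplified free-function sketch of the loop (half-open ranges assumed; the
debug-build validation of `search_start` is elided):

    LiveRange* FindAtOrAfter(size_t position, LiveRange* search_start) {
      LiveRange* range = search_start;
      while (range != nullptr && range->GetEnd() <= position) {
        range = range->GetNext();  // skip ranges known to end before `position`
      }
      return range;  // covers `position`, starts after it, or null if none left
    }
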
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index bba9892..fbc9d0d 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -66,7 +66,7 @@
   SetOutputStream(output_stream);
   GenerateTestOutput();
   std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
-  EXPECT_TRUE(in.get() != NULL);
+  EXPECT_TRUE(in.get() != nullptr);
   std::vector<uint8_t> actual(in->GetLength());
   bool readSuccess = in->ReadFully(&actual[0], actual.size());
   EXPECT_TRUE(readSuccess);
@@ -76,12 +76,12 @@
 TEST_F(OutputStreamTest, Buffered) {
   ScratchFile tmp;
   std::unique_ptr<FileOutputStream> file_output_stream(new FileOutputStream(tmp.GetFile()));
-  CHECK(file_output_stream.get() != NULL);
+  CHECK(file_output_stream.get() != nullptr);
   BufferedOutputStream buffered_output_stream(file_output_stream.release());
   SetOutputStream(buffered_output_stream);
   GenerateTestOutput();
   std::unique_ptr<File> in(OS::OpenFileForReading(tmp.GetFilename().c_str()));
-  EXPECT_TRUE(in.get() != NULL);
+  EXPECT_TRUE(in.get() != nullptr);
   std::vector<uint8_t> actual(in->GetLength());
   bool readSuccess = in->ReadFully(&actual[0], actual.size());
   EXPECT_TRUE(readSuccess);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index dd0dba2..313f365 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -739,17 +739,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
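
The contract is the same in every backend that declares this pair: when
null_allowed, a null reference must stay null instead of becoming a pointer into
the handle scope. A backend-neutral C++ sketch of that contract
(ToHandleScopeEntry is hypothetical; the assemblers emit the equivalent
instructions rather than calling anything like it):

    jobject ToHandleScopeEntry(mirror::Object** entry, bool null_allowed) {
      if (null_allowed && *entry == nullptr) {
        return nullptr;  // A null reference passes through unchanged.
      }
      return reinterpret_cast<jobject>(entry);  // Object** into the handle scope.
    }
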
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b7715af..e47b531 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -149,14 +149,14 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
+  // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                        ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                        ManagedRegister scratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 36342c6..b016e74 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -41,8 +41,8 @@
   contents_ = NewContents(kInitialBufferCapacity);
   cursor_ = contents_;
   limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
-  fixup_ = NULL;
-  slow_path_ = NULL;
+  fixup_ = nullptr;
+  slow_path_ = nullptr;
 #ifndef NDEBUG
   has_ensured_capacity_ = false;
   fixups_processed_ = false;
@@ -61,7 +61,7 @@
 
 void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
   AssemblerFixup* fixup = fixup_;
-  while (fixup != NULL) {
+  while (fixup != nullptr) {
     fixup->Process(region, fixup->position());
     fixup = fixup->previous();
   }
@@ -127,7 +127,7 @@
       return new x86_64::X86_64Assembler();
     default:
       LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
-      return NULL;
+      return nullptr;
   }
 }
 
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index ebafd3d..2e3a47b 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -156,7 +156,7 @@
 // Parent of all queued slow paths, emitted during finalization
 class SlowPath {
  public:
-  SlowPath() : next_(NULL) {}
+  SlowPath() : next_(nullptr) {}
   virtual ~SlowPath() {}
 
   Label* Continuation() { return &continuation_; }
@@ -216,20 +216,20 @@
   }
 
   void EnqueueSlowPath(SlowPath* slowpath) {
-    if (slow_path_ == NULL) {
+    if (slow_path_ == nullptr) {
       slow_path_ = slowpath;
     } else {
       SlowPath* cur = slow_path_;
-      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+      for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
       cur->next_ = slowpath;
     }
   }
 
   void EmitSlowPaths(Assembler* sp_asm) {
     SlowPath* cur = slow_path_;
-    SlowPath* next = NULL;
-    slow_path_ = NULL;
-    for ( ; cur != NULL ; cur = next) {
+    SlowPath* next = nullptr;
+    slow_path_ = nullptr;
+    for ( ; cur != nullptr ; cur = next) {
       cur->Emit(sp_asm);
       next = cur->next_;
       delete cur;
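
Here nullptr plays two roles: list terminator in the enqueue walk and "queue
empty" in the head pointer. A minimal usage sketch (CheckFailureSlowPath is a
hypothetical subclass, and `buffer` stands for whichever object owns the
slow_path_ list declared above):

    class CheckFailureSlowPath : public SlowPath {
     public:
      void Emit(Assembler* sp_asm) OVERRIDE {
        // ... emit the out-of-line failure code, then branch to Continuation() ...
      }
    };

    buffer.EnqueueSlowPath(new CheckFailureSlowPath());  // The list takes ownership.
    // ... emit the remaining fast-path code ...
    buffer.EmitSlowPaths(&assembler);  // Walks the chain, emits and deletes each entry.
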
@@ -489,14 +489,14 @@
   virtual void GetCurrentThread(FrameOffset dest_offset,
                                 ManagedRegister scratch) = 0;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
+  // null.
   virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                                ManagedRegister in_reg, bool null_allowed) = 0;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                                ManagedRegister scratch, bool null_allowed) = 0;
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index b062a2a..a9a5781 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -40,8 +40,8 @@
   struct HashedKey {
     StoreKey* store_ptr;
     union {
-      HashType store_hash;        // Valid if store_ptr != nullptr.
-      const HashedInKey* in_key;  // Valid if store_ptr == nullptr.
+      HashType store_hash;        // Valid if store_ptr != null.
+      const HashedInKey* in_key;  // Valid if store_ptr == null.
     };
   };
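
Here store_ptr doubles as the union's discriminant, so exactly one member is
meaningful at a time. A sketch of the tag check (IsStoreKey is a hypothetical
helper):

    bool IsStoreKey(const HashedKey& key) {
      return key.store_ptr != nullptr;
    }
    // IsStoreKey(key):  key.store_hash caches the hash of *store_ptr.
    // !IsStoreKey(key): key.in_key points at the hashed lookup key instead.
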
 
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 216cb41..d4acf03 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,17 +238,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister mscratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 36e74d7..b7f6a9e 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -235,14 +235,14 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
+  // null.
   void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                               ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
   void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
                               mscratch, bool null_allowed) OVERRIDE;
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index a933474..7fc8ef0 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -576,17 +576,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst
   void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 0344f52..c0ca7ef 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2751,7 +2751,7 @@
   X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
   X86_64ManagedRegister in_reg = min_reg.AsX86_64();
   if (in_reg.IsNoRegister()) {  // TODO(64): && null_allowed
-    // Use out_reg as indicator of NULL
+    // Use out_reg as indicator of null.
     in_reg = out_reg;
     // TODO: movzwl
     movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 79ad8f5..f5327a8 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -711,17 +711,17 @@
   void GetCurrentThread(ManagedRegister tr) OVERRIDE;
   void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
 
-  // Set up out_reg to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
-  // NULL.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
-                       bool null_allowed) OVERRIDE;
+  // null.
+  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;
 
-  // Set up out_off to hold a Object** into the handle scope, or to be NULL if the
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
-                       bool null_allowed) OVERRIDE;
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister scratch, bool null_allowed) OVERRIDE;
 
   // src holds a handle scope entry (Object**) load this into dst
   virtual void LoadReferenceFromHandleScope(ManagedRegister dst,