Fix cpplint whitespace/comments issues

Change-Id: Iae286862c85fb8fd8901eae1204cd6d271d69496
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
index 3bd733e..e8e2c02 100644
--- a/compiler/dex/arena_allocator.h
+++ b/compiler/dex/arena_allocator.h
@@ -86,7 +86,7 @@
      explicit MemStats(const ArenaAllocator &arena) : arena_(arena) {}
   private:
     const ArenaAllocator &arena_;
-}; // MemStats
+};  // MemStats
 
 }  // namespace art
 
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 88240e8..97a682f 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -48,7 +48,7 @@
 };
 
 enum RegLocationType {
-  kLocDalvikFrame = 0, // Normal Dalvik register
+  kLocDalvikFrame = 0,  // Normal Dalvik register
   kLocPhysReg,
   kLocCompilerTemp,
   kLocInvalid
@@ -249,20 +249,20 @@
   kX86CondC   = kX86CondB,  // carry
 
   kX86CondNb  = 0x3,    // not-below
-  kX86CondAe  = kX86CondNb, // above-equal
-  kX86CondNc  = kX86CondNb, // not-carry
+  kX86CondAe  = kX86CondNb,  // above-equal
+  kX86CondNc  = kX86CondNb,  // not-carry
 
   kX86CondZ   = 0x4,    // zero
   kX86CondEq  = kX86CondZ,  // equal
 
   kX86CondNz  = 0x5,    // not-zero
-  kX86CondNe  = kX86CondNz, // not-equal
+  kX86CondNe  = kX86CondNz,  // not-equal
 
   kX86CondBe  = 0x6,    // below-equal
-  kX86CondNa  = kX86CondBe, // not-above
+  kX86CondNa  = kX86CondBe,  // not-above
 
   kX86CondNbe = 0x7,    // not-below-equal
-  kX86CondA   = kX86CondNbe,// above
+  kX86CondA   = kX86CondNbe,  // above
 
   kX86CondS   = 0x8,    // sign
   kX86CondNs  = 0x9,    // not-sign
@@ -277,13 +277,13 @@
   kX86CondNge = kX86CondL,  // not-greater-equal
 
   kX86CondNl  = 0xd,    // not-less-than
-  kX86CondGe  = kX86CondNl, // not-greater-equal
+  kX86CondGe  = kX86CondNl,  // not-greater-equal
 
   kX86CondLe  = 0xe,    // less-than-equal
-  kX86CondNg  = kX86CondLe, // not-greater
+  kX86CondNg  = kX86CondLe,  // not-greater
 
   kX86CondNle = 0xf,    // not-less-than
-  kX86CondG   = kX86CondNle,// greater
+  kX86CondG   = kX86CondNle,  // greater
 };
 
 std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
@@ -349,7 +349,7 @@
   kIsIT,
   kMemLoad,
   kMemStore,
-  kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
+  kPCRelFixup,  // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
   kRegDef0,
   kRegDef1,
   kRegDefA,
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 847a614..da44ffd 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -80,7 +80,7 @@
       GrowableArray<int>* block_id_list_;
       int idx_;
       bool changed_;
-  }; // DataflowIterator
+  };  // DataflowIterator
 
   class ReachableNodesIterator : public DataflowIterator {
     public:
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 28c3257..3c491ce 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -240,12 +240,12 @@
   // We are modifying 4 consecutive bytes.
   ScopedDexWriteAccess sdwa(GetModifiableDexFile(), inst, 4u);
   inst->SetOpcode(Instruction::NOP);
-  inst->SetVRegA_10x(0u); // keep compliant with verifier.
+  inst->SetVRegA_10x(0u);  // keep compliant with verifier.
   // Get to next instruction which is the second half of check-cast and replace
   // it by a NOP.
   inst = const_cast<Instruction*>(inst->Next());
   inst->SetOpcode(Instruction::NOP);
-  inst->SetVRegA_10x(0u); // keep compliant with verifier.
+  inst->SetVRegA_10x(0u);  // keep compliant with verifier.
   return inst;
 }
 
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 113a80a..9cc4d18 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -72,37 +72,37 @@
 }
 
 /* Default optimizer/debug setting for the compiler. */
-static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimizations
+static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations
   (1 << kLoadStoreElimination) |
-  //(1 << kLoadHoisting) |
-  //(1 << kSuppressLoads) |
-  //(1 << kNullCheckElimination) |
-  //(1 << kPromoteRegs) |
-  //(1 << kTrackLiveTemps) |
-  //(1 << kSafeOptimizations) |
-  //(1 << kBBOpt) |
-  //(1 << kMatch) |
-  //(1 << kPromoteCompilerTemps) |
+  // (1 << kLoadHoisting) |
+  // (1 << kSuppressLoads) |
+  // (1 << kNullCheckElimination) |
+  // (1 << kPromoteRegs) |
+  // (1 << kTrackLiveTemps) |
+  // (1 << kSafeOptimizations) |
+  // (1 << kBBOpt) |
+  // (1 << kMatch) |
+  // (1 << kPromoteCompilerTemps) |
   0;
 
 static uint32_t kCompilerDebugFlags = 0 |     // Enable debug/testing modes
-  //(1 << kDebugDisplayMissingTargets) |
-  //(1 << kDebugVerbose) |
-  //(1 << kDebugDumpCFG) |
-  //(1 << kDebugSlowFieldPath) |
-  //(1 << kDebugSlowInvokePath) |
-  //(1 << kDebugSlowStringPath) |
-  //(1 << kDebugSlowestFieldPath) |
-  //(1 << kDebugSlowestStringPath) |
-  //(1 << kDebugExerciseResolveMethod) |
-  //(1 << kDebugVerifyDataflow) |
-  //(1 << kDebugShowMemoryUsage) |
-  //(1 << kDebugShowNops) |
-  //(1 << kDebugCountOpcodes) |
-  //(1 << kDebugDumpCheckStats) |
-  //(1 << kDebugDumpBitcodeFile) |
-  //(1 << kDebugVerifyBitcode) |
-  //(1 << kDebugShowSummaryMemoryUsage) |
+  // (1 << kDebugDisplayMissingTargets) |
+  // (1 << kDebugVerbose) |
+  // (1 << kDebugDumpCFG) |
+  // (1 << kDebugSlowFieldPath) |
+  // (1 << kDebugSlowInvokePath) |
+  // (1 << kDebugSlowStringPath) |
+  // (1 << kDebugSlowestFieldPath) |
+  // (1 << kDebugSlowestStringPath) |
+  // (1 << kDebugExerciseResolveMethod) |
+  // (1 << kDebugVerifyDataflow) |
+  // (1 << kDebugShowMemoryUsage) |
+  // (1 << kDebugShowNops) |
+  // (1 << kDebugCountOpcodes) |
+  // (1 << kDebugDumpCheckStats) |
+  // (1 << kDebugDumpBitcodeFile) |
+  // (1 << kDebugVerifyBitcode) |
+  // (1 << kDebugShowSummaryMemoryUsage) |
   0;
 
 static CompiledMethod* CompileMethod(CompilerDriver& compiler,
@@ -277,7 +277,7 @@
 #if defined(ART_USE_PORTABLE_COMPILER)
                        , llvm_compilation_unit
 #endif
-                       ); // NOLINT(whitespace/parens)
+                       );  // NOLINT(whitespace/parens)
 }
 
 }  // namespace art
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index a863389..5c68ab4 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -102,7 +102,7 @@
 
   private:
     UniquePtr< ::llvm::LLVMContext> llvm_context_;
-    ::llvm::Module* llvm_module_; // Managed by context_.
+    ::llvm::Module* llvm_module_;  // Managed by context_.
     UniquePtr<art::llvm::IntrinsicHelper> intrinsic_helper_;
     UniquePtr<art::llvm::IRBuilder> ir_builder_;
 };
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index e3fd7ad..33ca8f1 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -137,6 +137,6 @@
   std::set<uint16_t> null_checked_;
 };
 
-} // namespace art
+}  // namespace art
 
 #endif  // ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 264604c..6b010ed 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -972,23 +972,23 @@
       }
     }
     switch (dalvik_format) {
-      case Instruction::k11n: // Add one immediate from vB
+      case Instruction::k11n:  // Add one immediate from vB
       case Instruction::k21s:
       case Instruction::k31i:
       case Instruction::k21h:
         str.append(StringPrintf(", #%d", insn.vB));
         break;
-      case Instruction::k51l: // Add one wide immediate
+      case Instruction::k51l:  // Add one wide immediate
         str.append(StringPrintf(", #%lld", insn.vB_wide));
         break;
-      case Instruction::k21c: // One register, one string/type/method index
+      case Instruction::k21c:  // One register, one string/type/method index
       case Instruction::k31c:
         str.append(StringPrintf(", index #%d", insn.vB));
         break;
-      case Instruction::k22c: // Two registers, one string/type/method index
+      case Instruction::k22c:  // Two registers, one string/type/method index
         str.append(StringPrintf(", index #%d", insn.vC));
         break;
-      case Instruction::k22s: // Add one immediate from vC
+      case Instruction::k22s:  // Add one immediate from vC
       case Instruction::k22b:
         str.append(StringPrintf(", #%d", insn.vC));
         break;
@@ -1154,4 +1154,4 @@
   return bb;
 }
 
-} // namespace art
+}  // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 342d2a2..e9ec949 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -273,7 +273,7 @@
   unsigned fp:1;        // Floating point?
   unsigned core:1;      // Non-floating point?
   unsigned ref:1;       // Something GC cares about.
-  unsigned high_word:1; // High word of pair?
+  unsigned high_word:1;  // High word of pair?
   unsigned home:1;      // Does this represent the home location?
   uint8_t low_reg;      // First physical register.
   uint8_t high_reg;     // 2nd physical register (if wide).
@@ -650,7 +650,7 @@
   BasicBlock* cur_block_;
   int num_blocks_;
   const DexFile::CodeItem* current_code_item_;
-  SafeMap<unsigned int, BasicBlock*> block_map_; // FindBlock lookup cache.
+  SafeMap<unsigned int, BasicBlock*> block_map_;  // FindBlock lookup cache.
   std::vector<DexCompilationUnit*> m_units_;     // List of methods included in this graph
   typedef std::pair<int, int> MIRLocation;       // Insert point, (m_unit_ index, offset)
   std::vector<MIRLocation> method_stack_;        // Include stack
@@ -659,7 +659,7 @@
   int def_count_;                                // Used to estimate size of ssa name storage.
   int* opcode_count_;                            // Dex opcode coverage stats.
   int num_ssa_regs_;                             // Number of names following SSA transformation.
-  std::vector<BasicBlock*> extended_basic_blocks_; // Heads of block "traces".
+  std::vector<BasicBlock*> extended_basic_blocks_;  // Heads of block "traces".
   int method_sreg_;
   unsigned int attributes_;
   Checkstats* checkstats_;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index d79b26e..a6314f4 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -845,7 +845,7 @@
       bb = NextDominatedBlock(bb);
     }
   }
-  return false; // Not iterative - return value will be ignored
+  return false;  // Not iterative - return value will be ignored
 }
 
 
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 6fc01bd..7831cf6 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -1648,7 +1648,7 @@
   if (bb->block_type == kEntryBlock) {
     SetMethodInfo();
 
-    { // Allocate shadowframe.
+    {  // Allocate shadowframe.
       art::llvm::IntrinsicHelper::IntrinsicId id =
               art::llvm::IntrinsicHelper::AllocaShadowFrame;
       ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
@@ -1656,7 +1656,7 @@
       irb_->CreateCall(func, entries);
     }
 
-    { // Store arguments to vregs.
+    {  // Store arguments to vregs.
       uint16_t arg_reg = cu_->num_regs;
 
       ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
@@ -1666,7 +1666,7 @@
       uint32_t shorty_size = strlen(shorty);
       CHECK_GE(shorty_size, 1u);
 
-      ++arg_iter; // skip method object
+      ++arg_iter;  // skip method object
 
       if ((cu_->access_flags & kAccStatic) == 0) {
         SetVregOnValue(arg_iter, arg_reg);
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 93fee05..2f54190 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -239,7 +239,7 @@
  */
 enum ArmOpcode {
   kArmFirst = 0,
-  kArm16BitData = kArmFirst, // DATA   [0] rd[15..0].
+  kArm16BitData = kArmFirst,  // DATA   [0] rd[15..0].
   kThumbAdcRR,       // adc   [0100000101] rm[5..3] rd[2..0].
   kThumbAddRRI3,     // add(1)  [0001110] imm_3[8..6] rn[5..3] rd[2..0]*/
   kThumbAddRI8,      // add(2)  [00110] rd[10..8] imm_8[7..0].
@@ -332,12 +332,12 @@
   kThumb2VcvtDF,     // vcvt.F32.F64 vd, vm [1110111010110111] vd[15..12] [10111100] vm[3..0].
   kThumb2Vsqrts,     // vsqrt.f32 vd, vm [1110111010110001] vd[15..12] [10101100] vm[3..0].
   kThumb2Vsqrtd,     // vsqrt.f64 vd, vm [1110111010110001] vd[15..12] [10111100] vm[3..0].
-  kThumb2MovImmShift,// mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
+  kThumb2MovImmShift,  // mov(T2) rd, #<const> [11110] i [00001001111] imm3 rd[11..8] imm8.
   kThumb2MovImm16,   // mov(T3) rd, #<const> [11110] i [0010100] imm4 [0] imm3 rd[11..8] imm8.
   kThumb2StrRRI12,   // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
   kThumb2LdrRRI12,   // str(Imm,T3) rd,[rn,#imm12] [111110001100] rn[19..16] rt[15..12] imm12[11..0].
-  kThumb2StrRRI8Predec, // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0]*/
-  kThumb2LdrRRI8Predec, // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0]*/
+  kThumb2StrRRI8Predec,  // str(Imm,T4) rd,[rn,#-imm8] [111110000100] rn[19..16] rt[15..12] [1100] imm[7..0]*/
+  kThumb2LdrRRI8Predec,  // ldr(Imm,T4) rd,[rn,#-imm8] [111110000101] rn[19..16] rt[15..12] [1100] imm[7..0]*/
   kThumb2Cbnz,       // cbnz rd,<label> [101110] i [1] imm5[7..3] rn[2..0].
   kThumb2Cbz,        // cbn rd,<label> [101100] i [1] imm5[7..3] rn[2..0].
   kThumb2AddRRI12,   // add rd, rn, #imm12 [11110] i [100000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
@@ -364,9 +364,9 @@
   kThumb2StrhRRR,    // str rt,[rn,rm,LSL #imm] [111110000010] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
   kThumb2StrbRRR,    // str rt,[rn,rm,LSL #imm] [111110000000] rn[19-16] rt[15-12] [000000] imm[5-4] rm[3-0].
   kThumb2LdrhRRI12,  // ldrh rt,[rn,#imm12] [111110001011] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2LdrshRRI12, // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrshRRI12,  // ldrsh rt,[rn,#imm12] [111110011011] rt[15..12] rn[19..16] imm12[11..0].
   kThumb2LdrbRRI12,  // ldrb rt,[rn,#imm12] [111110001001] rt[15..12] rn[19..16] imm12[11..0].
-  kThumb2LdrsbRRI12, // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
+  kThumb2LdrsbRRI12,  // ldrsb rt,[rn,#imm12] [111110011001] rt[15..12] rn[19..16] imm12[11..0].
   kThumb2StrhRRI12,  // strh rt,[rn,#imm12] [111110001010] rt[15..12] rn[19..16] imm12[11..0].
   kThumb2StrbRRI12,  // strb rt,[rn,#imm12] [111110001000] rt[15..12] rn[19..16] imm12[11..0].
   kThumb2Pop,        // pop   [1110100010111101] list[15-0]*/
@@ -403,7 +403,7 @@
   kThumb2Fmstat,     // fmstat [11101110111100011111101000010000].
   kThumb2Vcmpd,      // vcmp [111011101] D [11011] rd[15-12] [1011] E [1] M [0] rm[3-0].
   kThumb2Vcmps,      // vcmp [111011101] D [11010] rd[15-12] [1011] E [1] M [0] rm[3-0].
-  kThumb2LdrPcRel12, // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+  kThumb2LdrPcRel12,  // ldr rd,[pc,#imm12] [1111100011011111] rt[15-12] imm12[11-0].
   kThumb2BCond,      // b<c> [1110] S cond[25-22] imm6[21-16] [10] J1 [0] J2 imm11[10..0].
   kThumb2Vmovd_RR,   // vmov [111011101] D [110000] vd[15-12 [101101] M [0] vm[3-0].
   kThumb2Vmovs_RR,   // vmov [111011101] D [110000] vd[15-12 [101001] M [0] vm[3-0].
@@ -415,8 +415,8 @@
   kThumb2Vabss,      // vabs.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
   kThumb2Vnegd,      // vneg.f64 [111011101] D [110000] rd[15-12] [1011110] M [0] vm[3-0].
   kThumb2Vnegs,      // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
-  kThumb2Vmovs_IMM8, // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
-  kThumb2Vmovd_IMM8, // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
+  kThumb2Vmovs_IMM8,  // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
+  kThumb2Vmovd_IMM8,  // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
   kThumb2Mla,        // mla [111110110000] rn[19-16] ra[15-12] rd[7-4] [0000] rm[3-0].
   kThumb2Umull,      // umull [111110111010] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
   kThumb2Ldrex,      // ldrex [111010000101] rn[19-16] rt[11-8] [1111] imm8[7-0].
@@ -425,7 +425,7 @@
   kThumb2Bfi,        // bfi [111100110110] rn[19-16] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
   kThumb2Bfc,        // bfc [11110011011011110] [0] imm3[14-12] rd[11-8] imm2[7-6] [0] msb[4-0].
   kThumb2Dmb,        // dmb [1111001110111111100011110101] option[3-0].
-  kThumb2LdrPcReln12,// ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
+  kThumb2LdrPcReln12,  // ldr rd,[pc,-#imm12] [1111100011011111] rt[15-12] imm12[11-0].
   kThumb2Stm,        // stm <list> [111010010000] rn[19-16] 000 rl[12-0].
   kThumbUndefined,   // undefined [11011110xxxxxxxx].
   kThumb2VPopCS,     // vpop <list of callee save fp singles (s16+).
@@ -436,8 +436,8 @@
   kThumb2MovImm16H,  // similar to kThumb2MovImm16, but target high hw.
   kThumb2AddPCR,     // Thumb2 2-operand add with hard-coded PC target.
   kThumb2Adr,        // Special purpose encoding of ADR for switch tables.
-  kThumb2MovImm16LST,// Special purpose version for switch table use.
-  kThumb2MovImm16HST,// Special purpose version for switch table use.
+  kThumb2MovImm16LST,  // Special purpose version for switch table use.
+  kThumb2MovImm16HST,  // Special purpose version for switch table use.
   kThumb2LdmiaWB,    // ldmia  [111010011001[ rn[19..16] mask[15..0].
   kThumb2SubsRRI12,  // setflags encoding.
   kThumb2OrrRRRs,    // orrx [111010100101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
@@ -445,7 +445,7 @@
   kThumb2Pop1,       // t3 encoding of pop.
   kThumb2RsubRRR,    // rsb [111010111101] rn[19..16] [0000] rd[11..8] [0000] rm[3..0].
   kThumb2Smull,      // smull [111110111000] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
-  kThumb2LdrdPcRel8, // ldrd rt, rt2, pc +-/1024.
+  kThumb2LdrdPcRel8,  // ldrd rt, rt2, pc +-/1024.
   kThumb2LdrdI8,     // ldrd rt, rt2, [rn +-/1024].
   kThumb2StrdI8,     // strd rt, rt2, [rn +-/1024].
   kArmLast,
@@ -485,7 +485,7 @@
   struct {
     ArmEncodingKind kind;
     int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
-    int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+    int start;  // start for kFmtBitBlt, 4-bit slice end for FP regs.
   } field_loc[4];
   ArmOpcode opcode;
   uint64_t flags;
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 1416c61..64ebb6a 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -117,7 +117,7 @@
 }
 
 /* Used for the "verbose" listing */
-//TODO:  move to common code
+// TODO:  move to common code
 void ArmMir2Lir::GenPrintLabel(MIR* mir) {
   /* Mark the beginning of a Dalvik instruction for line tracking */
   char* inst_str = cu_->verbose ?
@@ -475,7 +475,7 @@
   GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
   LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
   NewLIR3(kThumb2Ldrex, r1, r0,
-          mirror::Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
+          mirror::Object::MonitorOffset().Int32Value() >> 2);  // Get object->lock
   // Align owner
   OpRegImm(kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
   // Is lock unheld on lock or held by us (==thread_id) on unlock?
@@ -507,7 +507,7 @@
   LoadValueDirectFixed(rl_src, r0);  // Get obj
   LockCallTemps();  // Prepare for explicit register usage
   GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
-  LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1); // Get lock
+  LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);  // Get lock
   LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
   // Is lock unheld on lock or held by us (==thread_id) on unlock?
   OpRegRegImm(kOpAnd, r3, r1,
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 8f73f0c..1bb08c4 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -273,7 +273,7 @@
 
   OpIT((default_result == -1) ? kCondGt : kCondMi, "");
   NewLIR2(kThumb2MovImmShift, rl_result.low_reg,
-          ModifiedImmediate(-default_result)); // Must not alter ccodes
+          ModifiedImmediate(-default_result));  // Must not alter ccodes
   GenBarrier();
 
   OpIT(kCondEq, "");
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 15d361e..e1a77da 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -49,7 +49,7 @@
   int cond_bit = code & 1;
   int alt_bit = cond_bit ^ 1;
 
-  //Note: case fallthroughs intentional
+  // Note: case fallthroughs intentional
   switch (strlen(guide)) {
     case 3:
       mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
@@ -107,7 +107,7 @@
 
   target1 = NewLIR0(kPseudoTargetLabel);
 
-  RegLocation rl_temp = LocCReturn(); // Just using as template, will change
+  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
   rl_temp.low_reg = t_reg;
   StoreValue(rl_dest, rl_temp);
   FreeTemp(t_reg);
@@ -207,13 +207,13 @@
       OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
       OpIT(kCondCc, "");
       LoadConstant(rl_result.low_reg, 0);
-      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
       OpRegImm(kOpCmp, rl_src.low_reg, 0);
       OpIT(kCondEq, "E");
       LoadConstant(rl_result.low_reg, true_val);
       LoadConstant(rl_result.low_reg, false_val);
-      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     } else {
       // Unlikely case - could be tuned.
       int t_reg1 = AllocTemp();
@@ -224,7 +224,7 @@
       OpIT(kCondEq, "E");
       OpRegCopy(rl_result.low_reg, t_reg1);
       OpRegCopy(rl_result.low_reg, t_reg2);
-      GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
     }
   } else {
     // MOVE case
@@ -239,7 +239,7 @@
     l1->flags.is_nop = false;  // Make sure this instruction isn't optimized away
     LIR* l2 = OpRegCopy(rl_result.low_reg, rl_false.low_reg);
     l2->flags.is_nop = false;  // Make sure this instruction isn't optimized away
-    GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
+    GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
   }
   StoreValue(rl_dest, rl_result);
 }
@@ -716,7 +716,7 @@
       }
     }
     FreeTemp(tmp1);
-    rl_result = GetReturnWide(false); // Just using as a template.
+    rl_result = GetReturnWide(false);  // Just using as a template.
     rl_result.low_reg = res_lo;
     rl_result.high_reg = res_hi;
     StoreValueWide(rl_dest, rl_result);
@@ -883,7 +883,7 @@
   int reg_len = INVALID_REG;
   if (needs_range_check) {
     reg_len = AllocTemp();
-    //NOTE: max live temps(4) here.
+    // NOTE: max live temps(4) here.
     /* Get len */
     LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
   }
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 5296f30..6f37798 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -553,7 +553,7 @@
   // Keep special registers from being allocated
   for (int i = 0; i < num_reserved; i++) {
     if (NO_SUSPEND && (ReservedRegs[i] == rARM_SUSPEND)) {
-      //To measure cost of suspend check
+      // To measure cost of suspend check
       continue;
     }
     MarkInUse(ReservedRegs[i]);
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 1ea0a64..afc8a66 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -507,7 +507,7 @@
       alt_opcode = kThumb2EorRRR;
       break;
     case kOpMul:
-      //TUNING: power of 2, shift & add
+      // TUNING: power of 2, shift & add
       mod_imm = -1;
       alt_opcode = kThumb2MulRRR;
       break;
@@ -662,7 +662,7 @@
   }
 
   switch (size) {
-    case kDouble: // fall-through
+    case kDouble:  // fall-through
     case kSingle:
       reg_ptr = AllocTemp();
       if (scale) {
@@ -726,7 +726,7 @@
   }
 
   switch (size) {
-    case kDouble: // fall-through
+    case kDouble:  // fall-through
     case kSingle:
       reg_ptr = AllocTemp();
       if (scale) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9e9b39e..c9780fa 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1063,4 +1063,4 @@
 }
 
 
-} // namespace art
+}  // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index bc49b70..40db2c6 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -547,7 +547,7 @@
       case kThrowNullPointer:
         func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
         break;
-      case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
+      case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
         // v1 holds the constant array index.  Mips/Arm uses v2 for length, x86 reloads.
         if (target_x86) {
           OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
@@ -792,7 +792,7 @@
       *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
     // slow path, resolve string if not in dex cache
     FlushAllRegs();
-    LockCallTemps(); // Using explicit registers
+    LockCallTemps();  // Using explicit registers
     LoadCurrMethodDirect(TargetReg(kArg2));
     LoadWordDisp(TargetReg(kArg2),
                  mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
@@ -951,7 +951,7 @@
       // Not resolved
       // Call out to helper, which will return resolved type in kRet0
       CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
-      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
+      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
       LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
       // Rejoin code paths
       LIR* hop_target = NewLIR0(kPseudoTargetLabel);
@@ -1090,7 +1090,7 @@
       // InitializeTypeFromCode(idx, method)
       CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
                               true);
-      OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
+      OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
       // Rejoin code paths
       LIR* hop_target = NewLIR0(kPseudoTargetLabel);
       hop_branch->target = hop_target;
@@ -1167,7 +1167,7 @@
 
 void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift) {
-  int func_offset = -1; // Make gcc happy
+  int func_offset = -1;  // Make gcc happy
 
   switch (opcode) {
     case Instruction::SHL_LONG:
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 13a59bf..cae1319 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -428,22 +428,22 @@
       cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
       break;
     }
-    case 1: // Is "this" null? [use kArg1]
+    case 1:  // Is "this" null? [use kArg1]
       cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
       // get this->klass_ [use kArg1, set kInvokeTgt]
       cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                        cg->TargetReg(kInvokeTgt));
       break;
-    case 2: // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
+    case 2:  // Get this->klass_->vtable [usr kInvokeTgt, set kInvokeTgt]
       cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                        cg->TargetReg(kInvokeTgt));
       break;
-    case 3: // Get target method [use kInvokeTgt, set kArg0]
+    case 3:  // Get target method [use kInvokeTgt, set kArg0]
       cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                        mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                        cg->TargetReg(kArg0));
       break;
-    case 4: // Get the compiled code address [uses kArg0, sets kInvokeTgt]
+    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
       if (cu->instruction_set != kX86) {
         cg->LoadWordDisp(cg->TargetReg(kArg0),
                          mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
@@ -649,7 +649,7 @@
   DCHECK_LE(info->num_arg_words, 5);
   if (info->num_arg_words > 3) {
     int32_t next_use = 3;
-    //Detect special case of wide arg spanning arg3/arg4
+    // Detect special case of wide arg spanning arg3/arg4
     RegLocation rl_use0 = info->args[0];
     RegLocation rl_use1 = info->args[1];
     RegLocation rl_use2 = info->args[2];
@@ -789,7 +789,7 @@
                                direct_code, direct_method, type);
       OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
       LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
-      //TUNING: loosen barrier
+      // TUNING: loosen barrier
       ld->def_mask = ENCODE_ALL;
       SetMemRefType(ld, true /* is_load */, kDalvikReg);
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
@@ -1086,7 +1086,7 @@
   int r_tgt = (cu_->instruction_set != kX86) ?
       LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
   GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
-  //TUNING: check if rl_cmp.s_reg_low is already null checked
+  // TUNING: check if rl_cmp.s_reg_low is already null checked
   LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
   intrinsic_launchpads_.Insert(launch_pad);
   OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index db57643..8b375ea 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -259,7 +259,7 @@
 
   // And go...
   ClobberCalleeSave();
-  LIR* call_inst = OpReg(kOpBlx, r_tgt); // ( array*, fill_data* )
+  LIR* call_inst = OpReg(kOpBlx, r_tgt);  // ( array*, fill_data* )
   MarkSafepointPC(call_inst);
 }
 
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 2e744a2..6cd9acc 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -174,7 +174,7 @@
 void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
   bool wide = true;
-  int offset = -1; // Make gcc happy.
+  int offset = -1;  // Make gcc happy.
 
   switch (opcode) {
     case Instruction::CMPL_FLOAT:
@@ -237,4 +237,4 @@
   return false;
 }
 
-} //  namespace art
+}  // namespace art
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 03a58cc..ea7da60 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -145,7 +145,7 @@
     case kCondGe: opc = kMipsBgez; break;
     case kCondGt: opc = kMipsBgtz; break;
     case kCondLe: opc = kMipsBlez; break;
-    //case KCondMi:
+    // case KCondMi:
     case kCondLt: opc = kMipsBltz; break;
     case kCondNe: opc = kMipsBnez; break;
     default:
@@ -513,7 +513,7 @@
   int reg_len = INVALID_REG;
   if (needs_range_check) {
     reg_len = AllocTemp();
-    //NOTE: max live temps(4) here.
+    // NOTE: max live temps(4) here.
     /* Get len */
     LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
   }
@@ -521,7 +521,7 @@
   OpRegImm(kOpAdd, reg_ptr, data_offset);
   /* at this point, reg_ptr points to array, 2 live temps */
   if ((size == kLong) || (size == kDouble)) {
-    //TUNING: specific wide routine that can handle fp regs
+    // TUNING: specific wide routine that can handle fp regs
     if (scale) {
       int r_new_index = AllocTemp();
       OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index c3709b7..278fcef 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -155,7 +155,7 @@
   kMipsGPReg0   = 0,
   kMipsRegSP    = 29,
   kMipsRegLR    = 31,
-  kMipsFPReg0   = 32, // only 16 fp regs supported currently.
+  kMipsFPReg0   = 32,  // only 16 fp regs supported currently.
   kMipsFPRegEnd   = 48,
   kMipsRegHI    = kMipsFPRegEnd,
   kMipsRegLO,
@@ -248,7 +248,7 @@
   r_DF5 = r_F10 + MIPS_FP_DOUBLE,
   r_DF6 = r_F12 + MIPS_FP_DOUBLE,
   r_DF7 = r_F14 + MIPS_FP_DOUBLE,
-#if 0 // TODO: expand resource mask to enable use of all MIPS fp registers.
+#if 0  // TODO: expand resource mask to enable use of all MIPS fp registers.
   r_DF8 = r_F16 + MIPS_FP_DOUBLE,
   r_DF9 = r_F18 + MIPS_FP_DOUBLE,
   r_DF10 = r_F20 + MIPS_FP_DOUBLE,
@@ -305,8 +305,8 @@
  */
 enum MipsOpCode {
   kMipsFirst = 0,
-  kMips32BitData = kMipsFirst, // data [31..0].
-  kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMips32BitData = kMipsFirst,  // data [31..0].
+  kMipsAddiu,  // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
   kMipsAddu,  // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
   kMipsAnd,   // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
   kMipsAndi,  // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
@@ -366,31 +366,31 @@
   kMipsSw,    // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
   kMipsXor,   // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
   kMipsXori,  // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
-  kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
-  kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
-  kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
-  kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
-  kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
-  kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
-  kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
-  kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
-  kMipsFcvtsd,// cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
-  kMipsFcvtsw,// cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
-  kMipsFcvtds,// cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
-  kMipsFcvtdw,// cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
-  kMipsFcvtws,// cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
-  kMipsFcvtwd,// cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
-  kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
-  kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
-  kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
-  kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
-  kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
-  kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+  kMipsFadds,  // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubs,  // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFmuls,  // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFdivs,  // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFaddd,  // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+  kMipsFsubd,  // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+  kMipsFmuld,  // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFdivd,  // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFcvtsd,  // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtsw,  // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+  kMipsFcvtds,  // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtdw,  // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+  kMipsFcvtws,  // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+  kMipsFcvtwd,  // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+  kMipsFmovs,  // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+  kMipsFmovd,  // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+  kMipsFlwc1,  // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+  kMipsFldc1,  // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+  kMipsFswc1,  // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+  kMipsFsdc1,  // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
   kMipsMfc1,  // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
   kMipsMtc1,  // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
-  kMipsDelta, // Psuedo for ori t, s, <label>-<label>.
-  kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
-  kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+  kMipsDelta,  // Pseudo for ori t, s, <label>-<label>.
+  kMipsDeltaHi,  // Pseudo for lui t, high16(<label>-<label>).
+  kMipsDeltaLo,  // Pseudo for ori t, s, low16(<label>-<label>).
   kMipsCurrPC,  // jal to .+8 to materialize pc.
   kMipsSync,    // sync kind [000000] [0000000000000000] s[10..6] [001111].
   kMipsUndefined,  // undefined [011001xxxxxxxxxxxxxxxx].
@@ -412,7 +412,7 @@
   struct {
     MipsEncodingKind kind;
     int end;   // end for kFmtBitBlt, 1-bit slice end for FP regs.
-    int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+    int start;  // start for kFmtBitBlt, 4-bit slice end for FP regs.
   } field_loc[4];
   MipsOpCode opcode;
   uint64_t flags;
@@ -425,7 +425,7 @@
 
 #define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
 #define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
-#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763))  // 2 offsets must fit.
 
 }  // namespace art
 
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 21ba69c..0a17fb1 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -477,7 +477,7 @@
   // Keep special registers from being allocated
   for (int i = 0; i < num_reserved; i++) {
     if (NO_SUSPEND && (ReservedRegs[i] == rMIPS_SUSPEND)) {
-      //To measure cost of suspend check
+      // To measure cost of suspend check
       continue;
     }
     MarkInUse(ReservedRegs[i]);
@@ -572,4 +572,4 @@
   return MipsMir2Lir::EncodingMap[opcode].fmt;
 }
 
-} // namespace art
+}  // namespace art
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 79f6f16..68b26f1 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -63,7 +63,7 @@
 }
 
 bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
-  return false; // TUNING
+  return false;  // TUNING
 }
 
 /*
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4774456..a34e929 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -157,7 +157,7 @@
 #define ENCODE_ALL              (~0ULL)
 #define ENCODE_MEM              (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
                                  ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
-//TODO: replace these macros
+// TODO: replace these macros
 #define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
 #define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
 #define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
@@ -726,7 +726,7 @@
     GrowableArray<LIR*> throw_launchpads_;
     GrowableArray<LIR*> suspend_launchpads_;
     GrowableArray<LIR*> intrinsic_launchpads_;
-    SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
+    SafeMap<unsigned int, LIR*> boundary_map_;  // boundary lookup cache.
     /*
      * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
      * Native PC is on the return address of the safepointed operation.  Dex PC is for
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 4542f8f..d59c986 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -198,7 +198,7 @@
  * s_reg<=sX[even] & (s_reg+1)<= sX+1.
  */
 int Mir2Lir::AllocPreservedDouble(int s_reg) {
-  int res = -1; // Assume failure
+  int res = -1;  // Assume failure
   int v_reg = mir_graph_->SRegToVReg(s_reg);
   int p_map_idx = SRegToPMap(s_reg);
   if (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg) {
@@ -303,7 +303,7 @@
   return -1;  // No register available
 }
 
-//REDO: too many assumptions.
+// REDO: too many assumptions.
 int Mir2Lir::AllocTempDouble() {
   RegisterInfo* p = reg_pool_->FPRegs;
   int num_regs = reg_pool_->num_fp_regs;
@@ -629,7 +629,7 @@
 }
 
 
-//TUNING: rewrite all of this reg stuff.  Probably use an attribute table
+// TUNING: rewrite all of this reg stuff.  Probably use an attribute table
 bool Mir2Lir::RegClassMatches(int reg_class, int reg) {
   if (reg_class == kAnyReg) {
     return true;
@@ -917,7 +917,7 @@
     RegLocation loc = mir_graph_->reg_location_[i];
     RefCounts* counts = loc.fp ? fp_counts : core_counts;
     int p_map_idx = SRegToPMap(loc.s_reg_low);
-    //Don't count easily regenerated immediates
+    // Don't count easily regenerated immediates
     if (loc.fp || !IsInexpensiveConstant(loc)) {
       counts[p_map_idx].count += mir_graph_->GetUseCount(i);
     }
@@ -1152,7 +1152,7 @@
 }
 
 bool Mir2Lir::oat_live_out(int s_reg) {
-  //For now.
+  // For now.
   return true;
 }
 
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index d864115..e883432 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -474,7 +474,7 @@
         return ComputeSize(entry, lir->operands[1], 0x12345678, true);
       } else {
         DCHECK(entry->opcode == kX86PcRelAdr);
-        return 5; // opcode with reg + 4 byte immediate
+        return 5;  // opcode with reg + 4 byte immediate
       }
     case kMacro:
       DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
@@ -1342,7 +1342,7 @@
       case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
         EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
         break;
-      case kShiftRegCl: // lir operands - 0: reg, 1: cl
+      case kShiftRegCl:  // lir operands - 0: reg, 1: cl
         EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
         break;
       case kRegCond:  // lir operands - 0: reg, 1: condition
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 1aeb39a..d530a1c 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -87,7 +87,7 @@
   rl_src = LoadValue(rl_src, kCoreReg);
   int start_of_method_reg = AllocTemp();
   // Materialize a pointer to the switch table
-  //NewLIR0(kX86Bkpt);
+  // NewLIR0(kX86Bkpt);
   NewLIR1(kX86StartOfMethod, start_of_method_reg);
   int low_key = s4FromSwitchData(&table[2]);
   int keyReg;
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f2ecf6c..cc6f374 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -373,4 +373,4 @@
 
 
 
-} //  namespace art
+}  // namespace art
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 5b64a6b..2c9b3c8 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -23,7 +23,7 @@
 
 namespace art {
 
-//FIXME: restore "static" when usage uncovered
+// FIXME: restore "static" when usage uncovered
 /*static*/ int core_regs[] = {
   rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
 #ifdef TARGET_REX_SUPPORT
@@ -541,4 +541,4 @@
   return X86Mir2Lir::EncodingMap[opcode].fmt;
 }
 
-} // namespace art
+}  // namespace art
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 75367a3..e15995f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -61,7 +61,7 @@
 }
 
 bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
-  return false; // TUNING
+  return false;  // TUNING
 }
 
 /*
@@ -135,7 +135,7 @@
     case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
     case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
     case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
-    //case kOpSbb: opcode = kX86Sbb32RI; break;
+    // case kOpSbb: opcode = kX86Sbb32RI; break;
     case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
     case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
     case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
@@ -221,7 +221,7 @@
 LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
                  int r_src2) {
   if (r_dest != r_src1 && r_dest != r_src2) {
-    if (op == kOpAdd) { // lea special case, except can't encode rbp as base
+    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
       if (r_src1 == r_src2) {
         OpRegCopy(r_dest, r_src1);
         return OpRegImm(kOpLsl, r_dest, 1);
@@ -279,11 +279,11 @@
     }
   }
   if (r_dest != r_src) {
-    if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
+    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
       // TODO: fix bug in LEA encoding when disp == 0
       return NewLIR5(kX86Lea32RA, r_dest,  r5sib_no_base /* base */,
                      r_src /* index */, value /* scale */, 0 /* disp */);
-    } else if (op == kOpAdd) { // lea add special case
+    } else if (op == kOpAdd) {  // lea add special case
       return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                      r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
     }
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 73e562e..643a3d5 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -219,7 +219,7 @@
  */
 enum X86OpCode {
   kX86First = 0,
-  kX8632BitData = kX86First, // data [31..0].
+  kX8632BitData = kX86First,  // data [31..0].
   kX86Bkpt,
   kX86Nop,
   // Define groups of binary operations
@@ -322,12 +322,12 @@
   Binary0fOpCode(kX86Movss),
   kX86MovssMR,
   kX86MovssAR,
-  Binary0fOpCode(kX86Cvtsi2sd), // int to double
-  Binary0fOpCode(kX86Cvtsi2ss), // int to float
-  Binary0fOpCode(kX86Cvttsd2si),// truncating double to int
-  Binary0fOpCode(kX86Cvttss2si),// truncating float to int
-  Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
-  Binary0fOpCode(kX86Cvtss2si), // rounding float to int
+  Binary0fOpCode(kX86Cvtsi2sd),  // int to double
+  Binary0fOpCode(kX86Cvtsi2ss),  // int to float
+  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
+  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
+  Binary0fOpCode(kX86Cvtsd2si),  // rounding double to int
+  Binary0fOpCode(kX86Cvtss2si),  // rounding float to int
   Binary0fOpCode(kX86Ucomisd),  // unordered double compare
   Binary0fOpCode(kX86Ucomiss),  // unordered float compare
   Binary0fOpCode(kX86Comisd),   // double compare
@@ -338,8 +338,8 @@
   Binary0fOpCode(kX86Addss),    // float add
   Binary0fOpCode(kX86Mulsd),    // double multiply
   Binary0fOpCode(kX86Mulss),    // float multiply
-  Binary0fOpCode(kX86Cvtsd2ss), // double to float
-  Binary0fOpCode(kX86Cvtss2sd), // float to double
+  Binary0fOpCode(kX86Cvtsd2ss),  // double to float
+  Binary0fOpCode(kX86Cvtss2sd),  // float to double
   Binary0fOpCode(kX86Subsd),    // double subtract
   Binary0fOpCode(kX86Subss),    // float subtract
   Binary0fOpCode(kX86Divsd),    // double divide
@@ -347,13 +347,13 @@
   kX86PsrlqRI,                  // right shift of floating point registers
   kX86PsllqRI,                  // left shift of floating point registers
   Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
-  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,// move into reg from xmm
-  kX86Set8R, kX86Set8M, kX86Set8A,// set byte depending on condition operand
+  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
+  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
   kX86Mfence,                   // memory barrier
   Binary0fOpCode(kX86Imul16),   // 16bit multiply
   Binary0fOpCode(kX86Imul32),   // 32bit multiply
-  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,// compare and exchange
-  kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR,// locked compare and exchange
+  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
+  kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR,  // locked compare and exchange
   Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
   Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
   Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
@@ -383,9 +383,9 @@
   kNullary,                                // Opcode that takes no arguments.
   kReg, kMem, kArray,                      // R, M and A instruction kinds.
   kMemReg, kArrayReg, kThreadReg,          // MR, AR and TR instruction kinds.
-  kRegReg, kRegMem, kRegArray, kRegThread, // RR, RM, RA and RT instruction kinds.
+  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
   kRegRegStore,                            // RR following the store modrm reg-reg encoding rather than the load.
-  kRegImm, kMemImm, kArrayImm, kThreadImm, // RI, MI, AI and TI instruction kinds.
+  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
   kRegRegImm, kRegMemImm, kRegArrayImm,    // RRI, RMI and RAI instruction kinds.
   kMovRegImm,                              // Shorter form move RI.
   kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
@@ -407,13 +407,13 @@
   uint8_t prefix1;       // non-zero => a prefix byte
   uint8_t prefix2;       // non-zero => a second prefix byte
   uint8_t opcode;        // 1 byte opcode
-  uint8_t extra_opcode1; // possible extra opcode byte
-  uint8_t extra_opcode2; // possible second extra opcode byte
+  uint8_t extra_opcode1;  // possible extra opcode byte
+  uint8_t extra_opcode2;  // possible second extra opcode byte
   // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
   // encoding kind
   uint8_t modrm_opcode;
   uint8_t ax_opcode;  // non-zero => shorter encoding for AX as a destination
-  uint8_t immediate_bytes; // number of bytes of immediate
+  uint8_t immediate_bytes;  // number of bytes of immediate
   } skeleton;
   const char *name;
   const char* fmt;
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 7739e29..18d8e93 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -241,7 +241,7 @@
   /* Calculate DF_up */
   ArenaBitVector::Iterator bv_iterator(bb->i_dominated);
   while (true) {
-    //TUNING: hot call to BitVectorIteratorNext
+    // TUNING: hot call to BitVectorIteratorNext
     int dominated_idx = bv_iterator.Next();
     if (dominated_idx == -1) {
       break;
@@ -249,7 +249,7 @@
     BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
     ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier);
     while (true) {
-      //TUNING: hot call to BitVectorIteratorNext
+      // TUNING: hot call to BitVectorIteratorNext
       int df_up_idx = df_iterator.Next();
       if (df_up_idx == -1) {
         break;
@@ -530,7 +530,7 @@
         BasicBlock* def_bb = GetBasicBlock(idx);
 
         /* Merge the dominance frontier to tmp_blocks */
-        //TUNING: hot call to Union().
+        // TUNING: hot call to Union().
         if (def_bb->dom_frontier != NULL) {
           tmp_blocks->Union(def_bb->dom_frontier);
         }
@@ -568,7 +568,7 @@
       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
       phi->dalvikInsn.vA = dalvik_reg;
       phi->offset = phi_bb->start_offset;
-      phi->m_unit_index = 0; // Arbitrarily assign all Phi nodes to outermost method.
+      phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
       PrependMIR(phi_bb, phi);
     }
   }
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index b50fe71..5ee6753 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -333,7 +333,7 @@
 static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
 
 void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
-  //FIXME: Quick-specific.  Move to Quick (and make a generic version for MIRGraph?
+  // FIXME: Quick-specific.  Move to Quick (and make a generic version for MIRGraph?
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
   if (cg != NULL) {
     for (int i = 0; i < count; i++) {