ART: More warnings

Enable -Wno-conversion-null, -Wredundant-decls and -Wshadow in general,
and -Wunused-but-set-parameter for GCC builds.
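
Most of the hunks below address -Wshadow: locals and constructor
parameters that reuse a name already bound in an enclosing scope are
renamed (it -> value, branch -> branch_in, encoder -> encoder2, and so
on). A minimal, self-contained sketch of that pattern, with hypothetical
names rather than code taken from the patched files:

    // A nested slow-path class declared inside a code-generation method.
    // Naming its constructor parameter "branch" would shadow the enclosing
    // function's "branch" parameter and trip -Wshadow, hence the "_in"
    // suffix used throughout the hunks below.
    void GenCheck(int* branch) {
      class SlowPath {
       public:
        explicit SlowPath(int* branch_in) : branch_(branch_in) {}
        int* Get() const { return branch_; }
       private:
        int* branch_;
      };
      SlowPath slow_path(branch);  // no warning: the names no longer collide
      static_cast<void>(slow_path.Get());
    }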

Change-Id: I81bbdd762213444673c65d85edae594a523836e5
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a7d9353..c1ce2ac 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -787,9 +787,9 @@
   if (same_version) {
     // Find the first non-null values.
     for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto it = (lvn->*map_ptr).find(key);
-      if (it != (lvn->*map_ptr).end()) {
-        cmp_values = &it->second;
+      auto value = (lvn->*map_ptr).find(key);
+      if (value != (lvn->*map_ptr).end()) {
+        cmp_values = &value->second;
         break;
       }
     }
@@ -799,21 +799,21 @@
     // field version and the values' memory_version_before_stores, last_stored_value
     // and store_loc_set are identical.
     for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto it = (lvn->*map_ptr).find(key);
-      if (it == (lvn->*map_ptr).end()) {
+      auto value = (lvn->*map_ptr).find(key);
+      if (value == (lvn->*map_ptr).end()) {
         if (cmp_values->memory_version_before_stores != kNoValue) {
           same_version = false;
           break;
         }
-      } else if (cmp_values->last_stored_value != it->second.last_stored_value ||
-          cmp_values->memory_version_before_stores != it->second.memory_version_before_stores ||
-          cmp_values->store_loc_set != it->second.store_loc_set) {
+      } else if (cmp_values->last_stored_value != value->second.last_stored_value ||
+          cmp_values->memory_version_before_stores != value->second.memory_version_before_stores ||
+          cmp_values->store_loc_set != value->second.store_loc_set) {
         same_version = false;
         break;
-      } else if (it->second.last_load_memory_version != kNoValue) {
+      } else if (value->second.last_load_memory_version != kNoValue) {
         DCHECK(load_memory_version_for_same_version == kNoValue ||
-               load_memory_version_for_same_version == it->second.last_load_memory_version);
-        load_memory_version_for_same_version = it->second.last_load_memory_version;
+               load_memory_version_for_same_version == value->second.last_load_memory_version);
+        load_memory_version_for_same_version = value->second.last_load_memory_version;
       }
     }
   }
@@ -828,12 +828,12 @@
     if (!cmp_values->load_value_map.empty()) {
       my_values->load_value_map = cmp_values->load_value_map;
       for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-        auto it = (lvn->*map_ptr).find(key);
-        if (it == (lvn->*map_ptr).end() || it->second.load_value_map.empty()) {
+        auto value = (lvn->*map_ptr).find(key);
+        if (value == (lvn->*map_ptr).end() || value->second.load_value_map.empty()) {
           my_values->load_value_map.clear();
           break;
         }
-        InPlaceIntersectMaps(&my_values->load_value_map, it->second.load_value_map);
+        InPlaceIntersectMaps(&my_values->load_value_map, value->second.load_value_map);
         if (my_values->load_value_map.empty()) {
           break;
         }
@@ -847,20 +847,20 @@
     // Calculate the locations that have been either read from or written to in each incoming LVN.
     bool first_lvn = true;
     for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto it = (lvn->*map_ptr).find(key);
-      if (it == (lvn->*map_ptr).end()) {
+      auto value = (lvn->*map_ptr).find(key);
+      if (value == (lvn->*map_ptr).end()) {
         my_values->load_value_map.clear();
         break;
       }
       if (first_lvn) {
         first_lvn = false;
         // Copy the first LVN's locations. Values will be overwritten later.
-        my_values->load_value_map = it->second.load_value_map;
-        for (uint16_t location : it->second.store_loc_set) {
+        my_values->load_value_map = value->second.load_value_map;
+        for (uint16_t location : value->second.store_loc_set) {
           my_values->load_value_map.Put(location, 0u);
         }
       } else {
-        IntersectAliasingValueLocations(my_values, &it->second);
+        IntersectAliasingValueLocations(my_values, &value->second);
       }
     }
     // Calculate merged values for the intersection.
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 448e80f..a1a5ad1 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -464,7 +464,6 @@
 }
 
 LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
-  LIR* res;
   bool neg = (value < 0);
   int32_t abs_value = (neg) ? -value : value;
   ArmOpcode opcode = kThumbBkpt;
@@ -590,6 +589,7 @@
   } else {
     RegStorage r_scratch = AllocTemp();
     LoadConstant(r_scratch, value);
+    LIR* res;
     if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
       res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
     else
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 85f502c..da7ac87 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -928,14 +928,13 @@
           // Check if branch offset can be encoded in tbz/tbnz.
           if (!IS_SIGNED_IMM14(delta >> 2)) {
             DexOffset dalvik_offset = lir->dalvik_offset;
-            int16_t opcode = lir->opcode;
-            LIR* target = lir->target;
+            LIR* targetLIR = lir->target;
             // "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
             offset_adjustment -= lir->flags.size;
-            int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
-            DCHECK_NE(imm, -1);
+            int32_t encodedImm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
+            DCHECK_NE(encodedImm, -1);
             lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
-            lir->operands[1] = imm;
+            lir->operands[1] = encodedImm;
             lir->target = nullptr;
             lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
             lir->flags.size = EncodingMap[kA64Tst2rl].size;
@@ -944,7 +943,7 @@
             opcode = UNWIDE(opcode);
             DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
             LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
-                opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target);
+                opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, targetLIR);
             InsertLIRAfter(lir, new_lir);
             new_lir->offset = lir->offset + lir->flags.size;
             new_lir->flags.generation = generation;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 9410f7e..c5aa27c 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -66,8 +66,8 @@
 void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
   class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath {
    public:
-    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
-        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) {
+    DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in)
+        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in) {
     }
 
     void Compile() OVERRIDE {
@@ -84,9 +84,10 @@
 void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
   class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
    public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
-        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
-          index_(index), length_(length) {
+    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in,
+                             RegStorage length_in)
+        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+          index_(index_in), length_(length_in) {
     }
 
     void Compile() OVERRIDE {
@@ -108,9 +109,9 @@
 void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
   class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
    public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
-        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
-          index_(index), length_(length) {
+    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in)
+        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+          index_(index_in), length_(length_in) {
     }
 
     void Compile() OVERRIDE {
@@ -461,7 +462,7 @@
     // Set up the loop counter (known to be > 0)
     LoadConstant(r_idx, elems - 1);
     // Generate the copy loop.  Going backwards for convenience
-    LIR* target = NewLIR0(kPseudoTargetLabel);
+    LIR* loop_head_target = NewLIR0(kPseudoTargetLabel);
     // Copy next element
     {
       ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -471,7 +472,7 @@
     }
     StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32);
     FreeTemp(r_val);
-    OpDecAndBranch(kCondGe, r_idx, target);
+    OpDecAndBranch(kCondGe, r_idx, loop_head_target);
     if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
       // Restore the target pointer
       OpRegRegImm(kOpAdd, ref_reg, r_dst,
@@ -955,7 +956,6 @@
   RegLocation rl_method = LoadCurrMethod();
   CheckRegLocation(rl_method);
   RegStorage res_reg = AllocTempRef();
-  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
   if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                         *cu_->dex_file,
                                                         type_idx)) {
@@ -965,6 +965,7 @@
     RegLocation rl_result = GetReturn(kRefReg);
     StoreValue(rl_dest, rl_result);
   } else {
+    RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
     // We're don't need access checks, load type from dex cache
     int32_t dex_cache_offset =
         mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
@@ -981,10 +982,10 @@
       // Object to generate the slow path for class resolution.
       class SlowPath : public LIRSlowPath {
        public:
-        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
-                 const RegLocation& rl_method, const RegLocation& rl_result) :
-                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
-                   rl_method_(rl_method), rl_result_(rl_result) {
+        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
+                 const RegLocation& rl_method_in, const RegLocation& rl_result_in) :
+                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
+                   type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) {
         }
 
         void Compile() {
@@ -1045,9 +1046,10 @@
       // Object to generate the slow path for string resolution.
       class SlowPath : public LIRSlowPath {
        public:
-        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) :
-          LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont),
-          r_method_(r_method), string_idx_(string_idx) {
+        SlowPath(Mir2Lir* m2l, LIR* fromfast_in, LIR* cont_in, RegStorage r_method_in,
+                 int32_t string_idx_in) :
+            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast_in, cont_in),
+            r_method_(r_method_in), string_idx_(string_idx_in) {
         }
 
         void Compile() {
@@ -1225,10 +1227,10 @@
 
       class InitTypeSlowPath : public Mir2Lir::LIRSlowPath {
        public:
-        InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx,
-                         RegLocation rl_src)
-            : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx),
-              rl_src_(rl_src) {
+        InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx_in,
+                         RegLocation rl_src_in)
+            : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx_in),
+              rl_src_(rl_src_in) {
         }
 
         void Compile() OVERRIDE {
@@ -1370,10 +1372,10 @@
       // Slow path to initialize the type.  Executed if the type is NULL.
       class SlowPath : public LIRSlowPath {
        public:
-        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx,
-                 const RegStorage class_reg) :
-                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx),
-                   class_reg_(class_reg) {
+        SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in,
+                 const RegStorage class_reg_in) :
+                   LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in),
+                   type_idx_(type_idx_in), class_reg_(class_reg_in) {
         }
 
         void Compile() {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index c7449c8..4cb12f1 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -44,8 +44,8 @@
 void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
   class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
    public:
-    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr)
-        : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) {
+    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
+        : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) {
     }
 
     void Compile() {
@@ -790,13 +790,13 @@
         if (rl_arg.reg.IsPair()) {
           reg = rl_arg.reg.GetHigh();
         } else {
-          RegisterInfo* info = GetRegInfo(rl_arg.reg);
-          info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
-          if (info == nullptr) {
+          RegisterInfo* reg_info = GetRegInfo(rl_arg.reg);
+          reg_info = reg_info->FindMatchingView(RegisterInfo::kHighSingleStorageMask);
+          if (reg_info == nullptr) {
             // NOTE: For hard float convention we won't split arguments across reg/mem.
             UNIMPLEMENTED(FATAL) << "Needs hard float api.";
           }
-          reg = info->GetReg();
+          reg = reg_info->GetReg();
         }
       } else {
         // kArg2 & rArg3 can safely be used here
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 310e1e9..ca71c30 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -696,12 +696,12 @@
     // TUNING: replace with proper delay slot handling
     if (encoder->size == 8) {
       DCHECK(!IsPseudoLirOp(lir->opcode));
-      const MipsEncodingMap *encoder = &EncodingMap[kMipsNop];
-      uint32_t bits = encoder->skeleton;
-      code_buffer_.push_back(bits & 0xff);
-      code_buffer_.push_back((bits >> 8) & 0xff);
-      code_buffer_.push_back((bits >> 16) & 0xff);
-      code_buffer_.push_back((bits >> 24) & 0xff);
+      const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
+      uint32_t bits2 = encoder2->skeleton;
+      code_buffer_.push_back(bits2 & 0xff);
+      code_buffer_.push_back((bits2 >> 8) & 0xff);
+      code_buffer_.push_back((bits2 >> 16) & 0xff);
+      code_buffer_.push_back((bits2 >> 24) & 0xff);
     }
   }
   return res;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 7229318..26465a5 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1362,10 +1362,10 @@
                                      int len_offset) {
   class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
    public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
-                             RegStorage index, RegStorage array_base, int32_t len_offset)
-        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
-          index_(index), array_base_(array_base), len_offset_(len_offset) {
+    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
+                             RegStorage index_in, RegStorage array_base_in, int32_t len_offset_in)
+        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+          index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
     }
 
     void Compile() OVERRIDE {
@@ -1410,10 +1410,10 @@
                                      int32_t len_offset) {
   class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath {
    public:
-    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch,
-                             int32_t index, RegStorage array_base, int32_t len_offset)
-        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch),
-          index_(index), array_base_(array_base), len_offset_(len_offset) {
+    ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in,
+                             int32_t index_in, RegStorage array_base_in, int32_t len_offset_in)
+        : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in),
+          index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) {
     }
 
     void Compile() OVERRIDE {
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 9616d8f..270a4e5 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2703,7 +2703,7 @@
         bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
         bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;
 
-        ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+        ScopedMemRefType mem_ref_type2(this, ResourceMask::kDalvikReg);
         if (src_is_16b_aligned) {
           ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
         } else if (src_is_8b_aligned) {
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 412f85d..d3d76ba 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -539,8 +539,7 @@
     for (BasicBlockId pred_id : bb->predecessors) {
       BasicBlock* pred_bb = GetBasicBlock(pred_id);
       DCHECK(pred_bb != nullptr);
-      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
-      uses[idx] = ssa_reg;
+      uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
       incoming[idx] = pred_id;
       idx++;
     }