Merge "More profiler driven tweaks." into dalvik-dev
diff --git a/src/compiler/dex/mir_optimization.cc b/src/compiler/dex/mir_optimization.cc
index 5345501..74b13fe 100644
--- a/src/compiler/dex/mir_optimization.cc
+++ b/src/compiler/dex/mir_optimization.cc
@@ -418,6 +418,13 @@
                   static_cast<bool*>(arena_->NewMem(sizeof(bool) * 1, false,
                                                     ArenaAllocator::kAllocDFInfo));
               mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
+              // Match type of uses to def.
+              mir->ssa_rep->fp_use =
+                  static_cast<bool*>(arena_->NewMem(sizeof(bool) * mir->ssa_rep->num_uses, false,
+                                                    ArenaAllocator::kAllocDFInfo));
+              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
+              }
               /*
                * There is usually a Phi node in the join block for our two cases.  If the
                * Phi node only contains our two cases as input, we will use the result
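Note on the hunk above: the synthesized select MIR now gets an fp_use array sized to num_uses, with every entry copied from fp_def[0], so later type analysis sees uses that agree with the def. A minimal standalone sketch of that idea, using simplified stand-in types rather than the real MIR/SSARepresentation/ArenaAllocator API:

    #include <vector>

    // Simplified stand-in for the SSA bookkeeping on a MIR node (an assumption,
    // not ART's real SSARepresentation).
    struct SsaRep {
      int num_uses = 0;
      std::vector<bool> fp_use;  // per-use "is floating point" flag
      std::vector<bool> fp_def;  // per-def "is floating point" flag
    };

    // Propagate the def's fp-ness to every use so the synthesized select
    // instruction presents a consistent type picture downstream.
    void MatchUseTypesToDef(SsaRep* ssa_rep) {
      ssa_rep->fp_use.assign(ssa_rep->num_uses, ssa_rep->fp_def[0]);
    }
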
diff --git a/src/compiler/dex/vreg_analysis.cc b/src/compiler/dex/vreg_analysis.cc
index d4223f1..c260933 100644
--- a/src/compiler/dex/vreg_analysis.cc
+++ b/src/compiler/dex/vreg_analysis.cc
@@ -292,18 +292,10 @@
           is_high |= is_phi && rl_temp.wide && rl_temp.high_word;
         }
         /*
-         * TODO: cleaner fix
-         * We don't normally expect to see a Dalvik register
-         * definition used both as a floating point and core
-         * value.  However, the instruction rewriting that occurs
-         * during verification can eliminate some type information,
-         * leaving us confused.  The real fix here is either to
-         * add explicit type information to Dalvik byte codes,
-         * or to recognize THROW_VERIFICATION_ERROR as
-         * an unconditional branch and support dead code elimination.
-         * As a workaround we can detect this situation and
-         * disable register promotion (which is the only thing that
-         * relies on distinctions between core and fp usages.
+         * We don't normally expect to see a Dalvik register definition used both as a
+         * floating point and core value, though technically it could happen with constants.
+         * Until we have proper typing, detect this situation and disable register promotion
+         * (which relies on the distinction between core and fp usages).
          */
         if ((defined_fp && (defined_core | defined_ref)) &&
             ((cu_->disable_opt & (1 << kPromoteRegs)) == 0)) {
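The reworded comment matches the check that follows it: when a Dalvik register definition is seen both as floating point and as core/reference, register promotion is disabled, since promotion is the only pass that depends on that distinction. A rough standalone illustration under those assumptions (the flag constant below is a placeholder, not ART's real (1 << kPromoteRegs) bookkeeping):

    #include <cstdint>

    // Placeholder disable bit; ART keeps this in cu_->disable_opt.
    constexpr uint32_t kDisablePromoteRegs = 1u << 0;

    // Conservatively turn off register promotion when a definition carries
    // conflicting fp and core/ref typing, rather than guessing which is right.
    uint32_t MaybeDisablePromotion(bool defined_fp, bool defined_core,
                                   bool defined_ref, uint32_t disable_opt) {
      if (defined_fp && (defined_core || defined_ref)) {
        disable_opt |= kDisablePromoteRegs;
      }
      return disable_opt;
    }
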
diff --git a/src/dex_file_verifier.cc b/src/dex_file_verifier.cc
index b1efcaa..6df4411 100644
--- a/src/dex_file_verifier.cc
+++ b/src/dex_file_verifier.cc
@@ -369,10 +369,12 @@
   }
 
   if (expect_code && code_offset == 0) {
-    LOG(ERROR) << StringPrintf("Unexpected zero value for class_data_item method code_off with access flags %x", access_flags);
+    LOG(ERROR) << StringPrintf("Unexpected zero value for class_data_item method code_off"
+        " with access flags %x", access_flags);
     return false;
   } else if (!expect_code && code_offset != 0) {
-    LOG(ERROR) << StringPrintf("Unexpected non-zero value %x for class_data_item method code_off with access flags %x", code_offset, access_flags);
+    LOG(ERROR) << StringPrintf("Unexpected non-zero value %x for class_data_item method code_off"
+        " with access flags %x", code_offset, access_flags);
     return false;
   }
 
@@ -544,7 +546,8 @@
     }
 
     if (last_idx >= idx && i != 0) {
-      LOG(ERROR) << StringPrintf("Out-of-order annotation_element name_idx: %x then %x", last_idx, idx);
+      LOG(ERROR) << StringPrintf("Out-of-order annotation_element name_idx: %x then %x",
+          last_idx, idx);
       return false;
     }
 
@@ -651,7 +654,8 @@
   uint32_t last_addr = 0;
   while (try_items_size--) {
     if (try_items->start_addr_ < last_addr) {
-      LOG(ERROR) << StringPrintf("Out-of_order try_item with start_addr: %x", try_items->start_addr_);
+      LOG(ERROR) << StringPrintf("Out-of_order try_item with start_addr: %x",
+          try_items->start_addr_);
       return false;
     }
 
@@ -933,7 +937,8 @@
   last_idx = 0;
   for (uint32_t i = 0; i < method_count; i++) {
     if (last_idx >= method_item->method_idx_ && i != 0) {
-      LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x", last_idx, method_item->method_idx_);
+      LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x",
+          last_idx, method_item->method_idx_);
       return false;
     }
     last_idx = method_item->method_idx_;
@@ -944,14 +949,16 @@
   const DexFile::ParameterAnnotationsItem* parameter_item =
       reinterpret_cast<const DexFile::ParameterAnnotationsItem*>(method_item);
   uint32_t parameter_count = item->parameters_size_;
-  if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem), "parameter_annotations list")) {
+  if (!CheckListSize(parameter_item, parameter_count, sizeof(DexFile::ParameterAnnotationsItem),
+      "parameter_annotations list")) {
     return false;
   }
 
   last_idx = 0;
   for (uint32_t i = 0; i < parameter_count; i++) {
     if (last_idx >= parameter_item->method_idx_ && i != 0) {
-      LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x", last_idx, parameter_item->method_idx_);
+      LOG(ERROR) << StringPrintf("Out-of-order method_idx for annotation: %x then %x",
+          last_idx, parameter_item->method_idx_);
       return false;
     }
     last_idx = parameter_item->method_idx_;
@@ -1051,7 +1058,8 @@
         uint32_t count = list->size_;
 
         if (!CheckPointerRange(list, list + 1, "annotation_set_ref_list") ||
-            !CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem), "annotation_set_ref_list size")) {
+            !CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem),
+                "annotation_set_ref_list size")) {
           return false;
         }
         ptr_ = reinterpret_cast<const byte*>(item + count);
@@ -1257,7 +1265,8 @@
           return false;
         }
         if (section_offset != header_->map_off_) {
-          LOG(ERROR) << StringPrintf("Map not at header-defined offset: %x, expected %x", section_offset, header_->map_off_);
+          LOG(ERROR) << StringPrintf("Map not at header-defined offset: %x, expected %x",
+              section_offset, header_->map_off_);
           return false;
         }
         ptr_ += sizeof(uint32_t) + (map->size_ * sizeof(DexFile::MapItem));
@@ -1297,7 +1306,8 @@
     return false;
   }
   if (it->second != type) {
-    LOG(ERROR) << StringPrintf("Unexpected data map entry @ %x; expected %x, found %x", offset, type, it->second);
+    LOG(ERROR) << StringPrintf("Unexpected data map entry @ %x; expected %x, found %x",
+        offset, type, it->second);
     return false;
   }
   return true;
@@ -1380,7 +1390,8 @@
   if (previous_item_ != NULL) {
     const DexFile::TypeId* prev_item = reinterpret_cast<const DexFile::TypeId*>(previous_item_);
     if (prev_item->descriptor_idx_ >= item->descriptor_idx_) {
-      LOG(ERROR) << StringPrintf("Out-of-order type_ids: %x then %x", prev_item->descriptor_idx_, item->descriptor_idx_);
+      LOG(ERROR) << StringPrintf("Out-of-order type_ids: %x then %x",
+          prev_item->descriptor_idx_, item->descriptor_idx_);
       return false;
     }
   }
@@ -1757,7 +1768,8 @@
       LOG(ERROR) << "Mismatched defining class for parameter_annotation";
       return false;
     }
-    if (!CheckOffsetToTypeMap(parameter_item->annotations_off_, DexFile::kDexTypeAnnotationSetRefList)) {
+    if (!CheckOffsetToTypeMap(parameter_item->annotations_off_,
+        DexFile::kDexTypeAnnotationSetRefList)) {
       return false;
     }
     parameter_item++;
diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index 674ca12..ce65829 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -1307,6 +1307,12 @@
 #endif
   }
 
+  // We need to ensure the work line is consistent while performing validation. When we spot a
+  // peephole pattern, we compute a new line for either the fallthrough instruction or the
+  // branch target.
+  UniquePtr<RegisterLine> branch_line;
+  UniquePtr<RegisterLine> fallthrough_line;
+
   switch (dec_insn.opcode) {
     case Instruction::NOP:
       /*
@@ -1718,6 +1724,53 @@
       if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type << " unexpected as arg to if-eqz/if-nez";
       }
+
+      // Find previous instruction - its existence is a precondition to peephole optimization.
+      uint32_t prev_idx = 0;
+      if (0 != work_insn_idx_) {
+        prev_idx = work_insn_idx_ - 1;
+        while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+          prev_idx--;
+        }
+        CHECK(insn_flags_[prev_idx].IsOpcode());
+      } else {
+        break;
+      }
+
+      const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
+
+      /* Check for the peephole pattern:
+       *    ...;
+       *    instance-of vX, vO, T;
+       *    ifXXX vX, b;
+       *    ...;
+       * b: INST;
+       *    ...;
+       * and sharpen the type of vO for either the fall-through or the branch case.
+       */
+      if (!CurrentInsnFlags()->IsBranchTarget()) {
+        DecodedInstruction prev_dec_insn(prev_inst);
+        if ((Instruction::INSTANCE_OF == prev_inst->Opcode()) &&
+            (dec_insn.vA == prev_dec_insn.vA)) {
+          // Check that we are not attempting a conversion to interface types, which is not
+          // done because of the multiple inheritance implications.
+          const RegType& cast_type =
+              ResolveClassAndCheckAccess(prev_dec_insn.vC);
+
+          if (!cast_type.IsUnresolvedTypes() && !cast_type.GetClass()->IsInterface()) {
+            if (dec_insn.opcode == Instruction::IF_EQZ) {
+              fallthrough_line.reset(new RegisterLine(code_item_->registers_size_, this));
+              fallthrough_line->CopyFromLine(work_line_.get());
+              fallthrough_line->SetRegisterType(prev_dec_insn.vB, cast_type);
+            } else {
+              branch_line.reset(new RegisterLine(code_item_->registers_size_, this));
+              branch_line->CopyFromLine(work_line_.get());
+              branch_line->SetRegisterType(prev_dec_insn.vB, cast_type);
+            }
+          }
+        }
+      }
+
       break;
     }
     case Instruction::IF_LTZ:
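The new verifier block above recognizes an instance-of feeding an if-eqz/if-nez on the same register and precomputes a sharpened register line for the successor on which the test is known to have succeeded: the fall-through of if-eqz, or the branch target of if-nez. A compressed standalone sketch of that decision, with simplified stand-ins (Insn, RegLine) rather than the verifier's real DecodedInstruction/RegisterLine/RegType classes:

    #include <map>
    #include <memory>
    #include <string>

    enum class Op { kInstanceOf, kIfEqz, kIfNez, kOther };

    // Simplified decoded instruction: va is the test register, vb the object
    // register, and type the class named by an instance-of (assumptions).
    struct Insn { Op op; int va; int vb; std::string type; };
    struct RegLine { std::map<int, std::string> reg_type; };

    // If prev is "instance-of vA, vB, T" and cur tests the same vA, return a copy
    // of work with vB sharpened to T for the path where the test succeeded; set
    // *applies_to_branch so the caller knows which successor receives it.
    std::unique_ptr<RegLine> SharpenForInstanceOf(const Insn& prev, const Insn& cur,
                                                  const RegLine& work,
                                                  bool* applies_to_branch) {
      if (cur.op != Op::kIfEqz && cur.op != Op::kIfNez) return nullptr;
      if (prev.op != Op::kInstanceOf || prev.va != cur.va) return nullptr;
      auto line = std::make_unique<RegLine>(work);
      line->reg_type[prev.vb] = prev.type;
      // if-nez takes the branch when the instance-of result is non-zero, so the
      // branch side is sharpened; if-eqz sharpens the fall-through instead.
      *applies_to_branch = (cur.op == Op::kIfNez);
      return line;
    }
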
@@ -2306,33 +2359,7 @@
     work_line_->SetResultTypeToUnknown();
   }
 
-  /* Handle "continue". Tag the next consecutive instruction. */
-  if ((opcode_flags & Instruction::kContinue) != 0) {
-    uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
-    if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
-      return false;
-    }
-    // The only way to get to a move-exception instruction is to get thrown there. Make sure the
-    // next instruction isn't one.
-    if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
-      return false;
-    }
-    RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
-    if (next_line != NULL) {
-      // Merge registers into what we have for the next instruction, and set the "changed" flag if
-      // needed.
-      if (!UpdateRegisters(next_insn_idx, work_line_.get())) {
-        return false;
-      }
-    } else {
-      /*
-       * We're not recording register data for the next instruction, so we don't know what the prior
-       * state was. We have to assume that something has changed and re-evaluate it.
-       */
-      insn_flags_[next_insn_idx].SetChanged();
-    }
-  }
+
 
   /*
    * Handle "branch". Tag the branch target.
@@ -2358,8 +2385,14 @@
       return false;
     }
     /* update branch target, set "changed" if appropriate */
-    if (!UpdateRegisters(work_insn_idx_ + branch_target, work_line_.get())) {
-      return false;
+    if (NULL != branch_line.get()) {
+      if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get())) {
+        return false;
+      }
+    } else {
+      if (!UpdateRegisters(work_insn_idx_ + branch_target, work_line_.get())) {
+        return false;
+      }
     }
   }
 
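With the peephole in place, the branch edge is merged from the sharpened branch line when one was computed, and from the ordinary work line otherwise. A small sketch of that selection (RegLine and MergeIntoTarget are placeholders, not the verifier's RegisterLine and UpdateRegisters):

    #include <memory>

    struct RegLine {};  // stand-in for the verifier's RegisterLine

    // Placeholder merge: fold `line` into the saved state of the target index.
    bool MergeIntoTarget(int /*target_idx*/, const RegLine& /*line*/) { return true; }

    // Exactly one line flows into the branch target: the peephole-sharpened one
    // if it exists, otherwise the current work line.
    bool HandleBranch(int target_idx, const std::unique_ptr<RegLine>& branch_line,
                      const RegLine& work_line) {
      const RegLine& line = (branch_line != nullptr) ? *branch_line : work_line;
      return MergeIntoTarget(target_idx, line);
    }
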
@@ -2442,6 +2475,42 @@
     }
   }
 
+  /* Handle "continue". Tag the next consecutive instruction.
+   *  Note: Keep the code handling "continue" case below the "branch" and "switch" cases,
+   *        because it changes work_line_ when performing peephole optimization
+   *        and this change should not be used in those cases.
+   */
+  if ((opcode_flags & Instruction::kContinue) != 0) {
+    uint32_t next_insn_idx = work_insn_idx_ + CurrentInsnFlags()->GetLengthInCodeUnits();
+    if (next_insn_idx >= code_item_->insns_size_in_code_units_) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Execution can walk off end of code area";
+      return false;
+    }
+    // The only way to get to a move-exception instruction is to get thrown there. Make sure the
+    // next instruction isn't one.
+    if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
+      return false;
+    }
+    RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
+    if (next_line != NULL) {
+      if (NULL != fallthrough_line.get()) {
+        // Make work line consistent with fallthrough computed from peephole optimization.
+        work_line_->CopyFromLine(fallthrough_line.get());
+      }
+      // Merge registers into what we have for the next instruction,
+      // and set the "changed" flag if needed.
+      if (!UpdateRegisters(next_insn_idx, work_line_.get())) {
+        return false;
+      }
+    } else {
+      /*
+       * We're not recording register data for the next instruction, so we don't know what the
+       * prior state was. We have to assume that something has changed and re-evaluate it.
+       */
+      insn_flags_[next_insn_idx].SetChanged();
+    }
+  }
+
   /* If we're returning from the method, make sure monitor stack is empty. */
   if ((opcode_flags & Instruction::kReturn) != 0) {
     if (!work_line_->VerifyMonitorStackEmpty()) {
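The relocated "continue" handling only copies the fall-through line into work_line_ after the branch and switch edges have been merged from the untouched (or branch-sharpened) line, which is why the block has to sit below those cases. A compact sketch of that ordering (the Line type and Merge helper are placeholders, not the verifier's API):

    #include <memory>

    struct Line { int tag = 0; };  // stand-in for RegisterLine

    bool Merge(int /*insn_idx*/, const Line& /*line*/) { return true; }  // placeholder for UpdateRegisters

    // Branch/switch successors are merged first from the untouched work line;
    // only afterwards may the sharpened fall-through line overwrite work_line_
    // before the continue successor is merged.
    bool FlowEdges(Line* work_line, const std::unique_ptr<Line>& fallthrough_line,
                   int branch_idx, int next_idx, bool has_branch, bool has_continue) {
      if (has_branch && !Merge(branch_idx, *work_line)) {
        return false;
      }
      if (has_continue) {
        if (fallthrough_line != nullptr) {
          *work_line = *fallthrough_line;  // adopt the peephole fall-through state
        }
        if (!Merge(next_idx, *work_line)) {
          return false;
        }
      }
      return true;
    }
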
diff --git a/src/verifier/register_line.cc b/src/verifier/register_line.cc
index 31201da..7ae3fb5 100644
--- a/src/verifier/register_line.cc
+++ b/src/verifier/register_line.cc
@@ -422,6 +422,8 @@
 
 bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
   bool changed = false;
+  CHECK(NULL != incoming_line);
+  CHECK(NULL != line_.get());
   for (size_t idx = 0; idx < num_regs_; idx++) {
     if (line_[idx] != incoming_line->line_[idx]) {
       const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);