Implement try/catch/throw in optimizing.

- We currently don't run optimizations in the presence of a try/catch;
  such methods are still compiled with the baseline compiler instead of
  bailing out in the graph builder.
- We therefore implement Quick's mapping table, emitting dex-pc to
  native-pc entries for catch handler entry points so the runtime can
  locate them (see the sketch below).
- Also fix a missing null check on array-length.
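
As a minimal standalone sketch (illustrative only, not part of the patch),
the snippet below mirrors the per-entry layout that BuildMappingTable emits
for each dex2pc record of a catch block entry: a ULEB128 delta of the native
pc followed by a SLEB128 delta of the dex pc, written after the table header.
The EncodeUnsignedLeb128/EncodeSignedLeb128 helpers here are local stand-ins
for the art LEB128 utilities (their real signatures differ), and the pc
values are made up.

    // Standalone sketch of the dex2pc entry encoding; all values are hypothetical.
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    // Local stand-in for art's ULEB128 encoder (the real one writes to a raw buffer).
    static void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
      do {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        out->push_back(byte | (value != 0 ? 0x80 : 0));
      } while (value != 0);
    }

    // Local stand-in for art's SLEB128 encoder.
    static void EncodeSignedLeb128(std::vector<uint8_t>* out, int32_t value) {
      bool more = true;
      while (more) {
        uint8_t byte = value & 0x7f;
        value >>= 7;  // Arithmetic shift keeps the sign on common compilers.
        more = !((value == 0 && (byte & 0x40) == 0) ||
                 (value == -1 && (byte & 0x40) != 0));
        out->push_back(byte | (more ? 0x80 : 0));
      }
    }

    int main() {
      // Hypothetical (native pc, dex pc) pairs for two catch block entries.
      const std::pair<uint32_t, uint32_t> entries[] = {{0x40, 0x18}, {0x6c, 0x27}};
      std::vector<uint8_t> data;
      uint32_t last_native_pc = 0;
      int32_t last_dex_pc = 0;
      for (const auto& entry : entries) {
        // ULEB128 native-pc delta, then SLEB128 dex-pc delta, as in the dex2pc loop.
        EncodeUnsignedLeb128(&data, entry.first - last_native_pc);
        EncodeSignedLeb128(&data, static_cast<int32_t>(entry.second) - last_dex_pc);
        last_native_pc = entry.first;
        last_dex_pc = static_cast<int32_t>(entry.second);
      }
      for (uint8_t byte : data) {
        printf("%02x ", byte);  // Prints: 40 18 2c 0f
      }
      printf("\n");
      return 0;
    }

On the runtime side, ArtMethod::ToNativeQuickPc (also touched by this
change) walks the decoded dex2pc entries to find the native pc of a catch
handler when delivering an exception.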

Change-Id: I6917dfcb868e75c1cf6eff32b7cbb60b6cfbd68f
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index e43841a..6eff23a 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -119,13 +119,6 @@
   return true;
 }
 
-static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
-  if (code_item.tries_size_ > 0) {
-    return false;
-  }
-  return true;
-}
-
 template<typename T>
 void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
   int32_t target_offset = instruction.GetTargetOffset();
@@ -164,10 +157,6 @@
 }
 
 HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
-  if (!CanHandleCodeItem(code_item)) {
-    return nullptr;
-  }
-
   const uint16_t* code_ptr = code_item.insns_;
   const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
   code_start_ = code_ptr;
@@ -187,6 +176,25 @@
   // start a new block, and create these blocks.
   ComputeBranchTargets(code_ptr, code_end);
 
+  // Also create blocks for catch handlers.
+  if (code_item.tries_size_ != 0) {
+    const uint8_t* handlers_ptr = DexFile::GetCatchHandlerData(code_item, 0);
+    uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+    for (uint32_t idx = 0; idx < handlers_size; ++idx) {
+      CatchHandlerIterator iterator(handlers_ptr);
+      for (; iterator.HasNext(); iterator.Next()) {
+        uint32_t address = iterator.GetHandlerAddress();
+        HBasicBlock* block = FindBlockStartingAt(address);
+        if (block == nullptr) {
+          block = new (arena_) HBasicBlock(graph_, address);
+          branch_targets_.Put(address, block);
+        }
+        block->SetIsCatchBlock();
+      }
+      handlers_ptr = iterator.EndDataPointer();
+    }
+  }
+
   if (!InitializeParameters(code_item.ins_size_)) {
     return nullptr;
   }
@@ -1217,6 +1225,10 @@
 
     case Instruction::ARRAY_LENGTH: {
       HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
+      // No need for a temporary for the null check: it is the only input of the
+      // following instruction.
+      object = new (arena_) HNullCheck(object, dex_offset);
+      current_block_->AddInstruction(object);
       current_block_->AddInstruction(new (arena_) HArrayLength(object));
       UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
       break;
@@ -1251,6 +1263,23 @@
       break;
     }
 
+    case Instruction::MOVE_EXCEPTION: {
+      current_block_->AddInstruction(new (arena_) HLoadException());
+      UpdateLocal(instruction.VRegA_11x(), current_block_->GetLastInstruction());
+      break;
+    }
+
+    case Instruction::THROW: {
+      HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
+      current_block_->AddInstruction(new (arena_) HThrow(exception, dex_offset));
+      // A throw instruction must branch to the exit block.
+      current_block_->AddSuccessor(exit_block_);
+      // We finished building this block. Set the current block to null to avoid
+      // adding dead instructions to it.
+      current_block_ = nullptr;
+      break;
+    }
+
     default:
       return false;
   }
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ac72a33..c75980d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -356,12 +356,13 @@
   int32_t pc2dex_dalvik_offset = 0;
   uint32_t dex2pc_data_size = 0u;
   uint32_t dex2pc_entries = 0u;
+  uint32_t dex2pc_offset = 0u;
+  int32_t dex2pc_dalvik_offset = 0;
 
   if (src_map != nullptr) {
     src_map->reserve(pc2dex_entries);
   }
 
-  // We currently only have pc2dex entries.
   for (size_t i = 0; i < pc2dex_entries; i++) {
     struct PcInfo pc_info = pc_infos_.Get(i);
     pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
@@ -373,6 +374,19 @@
     }
   }
 
+  // Walk over the blocks and find which ones correspond to catch block entries.
+  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+    HBasicBlock* block = graph_->GetBlocks().Get(i);
+    if (block->IsCatchBlock()) {
+      uintptr_t native_pc = GetAddressOf(block);
+      ++dex2pc_entries;
+      dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
+      dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
+      dex2pc_offset = native_pc;
+      dex2pc_dalvik_offset = block->GetDexPc();
+    }
+  }
+
   uint32_t total_entries = pc2dex_entries + dex2pc_entries;
   uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
   uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
@@ -380,6 +394,7 @@
 
   uint8_t* data_ptr = &(*data)[0];
   uint8_t* write_pos = data_ptr;
+
   write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
   write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
   DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
@@ -387,6 +402,9 @@
 
   pc2dex_offset = 0u;
   pc2dex_dalvik_offset = 0u;
+  dex2pc_offset = 0u;
+  dex2pc_dalvik_offset = 0u;
+
   for (size_t i = 0; i < pc2dex_entries; i++) {
     struct PcInfo pc_info = pc_infos_.Get(i);
     DCHECK(pc2dex_offset <= pc_info.native_pc);
@@ -395,6 +413,19 @@
     pc2dex_offset = pc_info.native_pc;
     pc2dex_dalvik_offset = pc_info.dex_pc;
   }
+
+  for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+    HBasicBlock* block = graph_->GetBlocks().Get(i);
+    if (block->IsCatchBlock()) {
+      uintptr_t native_pc = GetAddressOf(block);
+      write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
+      write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
+      dex2pc_offset = native_pc;
+      dex2pc_dalvik_offset = block->GetDexPc();
+    }
+  }
+
+
   DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
   DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
 
@@ -411,6 +442,14 @@
       CHECK_EQ(pc_info.dex_pc, it.DexPc());
       ++it;
     }
+    for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
+      HBasicBlock* block = graph_->GetBlocks().Get(i);
+      if (block->IsCatchBlock()) {
+        CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
+        CHECK_EQ(block->GetDexPc(), it2.DexPc());
+        ++it2;
+      }
+    }
     CHECK(it == table.PcToDexEnd());
     CHECK(it2 == table.DexToPcEnd());
   }
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 01c5cc9..fc4ea4b 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -92,6 +92,7 @@
   virtual HGraphVisitor* GetInstructionVisitor() = 0;
   virtual Assembler* GetAssembler() = 0;
   virtual size_t GetWordSize() const = 0;
+  virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
   void ComputeFrameSize(size_t number_of_spill_slots,
                         size_t maximum_number_of_live_registers,
                         size_t number_of_out_slots);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index dd595d9..adedf78 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2509,5 +2509,31 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
+void LocationsBuilderARM::VisitLoadException(HLoadException* load) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadException(HLoadException* load) {
+  Register out = load->GetLocations()->Out().As<Register>();
+  int32_t offset = Thread::ExceptionOffset<kArmWordSize>().Int32Value();
+  __ LoadFromOffset(kLoadWord, out, TR, offset);
+  __ LoadImmediate(IP, 0);
+  __ StoreToOffset(kStoreWord, IP, TR, offset);
+}
+
+void LocationsBuilderARM::VisitThrow(HThrow* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
+  codegen_->InvokeRuntime(
+      QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
+}
+
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5076a4b..5d51993 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -77,10 +77,10 @@
   ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
       : ParallelMoveResolver(allocator), codegen_(codegen) {}
 
-  virtual void EmitMove(size_t index) OVERRIDE;
-  virtual void EmitSwap(size_t index) OVERRIDE;
-  virtual void SpillScratch(int reg) OVERRIDE;
-  virtual void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) OVERRIDE;
+  void EmitSwap(size_t index) OVERRIDE;
+  void SpillScratch(int reg) OVERRIDE;
+  void RestoreScratch(int reg) OVERRIDE;
 
   ArmAssembler* GetAssembler() const;
 
@@ -99,7 +99,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr);
 
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -119,7 +119,7 @@
   InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr);
 
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -145,39 +145,43 @@
   explicit CodeGeneratorARM(HGraph* graph);
   virtual ~CodeGeneratorARM() {}
 
-  virtual void GenerateFrameEntry() OVERRIDE;
-  virtual void GenerateFrameExit() OVERRIDE;
-  virtual void Bind(HBasicBlock* block) OVERRIDE;
-  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
-  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  void GenerateFrameEntry() OVERRIDE;
+  void GenerateFrameExit() OVERRIDE;
+  void Bind(HBasicBlock* block) OVERRIDE;
+  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
 
-  virtual size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const OVERRIDE {
     return kArmWordSize;
   }
 
-  virtual size_t FrameEntrySpillSize() const OVERRIDE;
+  size_t FrameEntrySpillSize() const OVERRIDE;
 
-  virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() OVERRIDE {
     return &location_builder_;
   }
 
-  virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
     return &instruction_visitor_;
   }
 
-  virtual ArmAssembler* GetAssembler() OVERRIDE {
+  ArmAssembler* GetAssembler() OVERRIDE {
     return &assembler_;
   }
 
-  virtual void SetupBlockedRegisters() const OVERRIDE;
+  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+    return GetLabelOf(block)->Position();
+  }
 
-  virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+  void SetupBlockedRegisters() const OVERRIDE;
 
-  virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
 
-  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
 
   // Blocks all register pairs made out of blocked core registers.
   void UpdateBlockedPairRegisters() const;
@@ -186,7 +190,7 @@
     return &move_resolver_;
   }
 
-  virtual InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const OVERRIDE {
     return InstructionSet::kThumb2;
   }
 
@@ -208,7 +212,7 @@
     return block_labels_.GetRawStorage() + block->GetBlockId();
   }
 
-  virtual void Initialize() OVERRIDE {
+  void Initialize() OVERRIDE {
     block_labels_.SetSize(GetGraph()->GetBlocks().Size());
   }
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c26b0ab..31526e2 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -540,12 +540,14 @@
   M(DivZeroCheck)                                          \
   M(FloatConstant)                                         \
   M(LoadClass)                                             \
+  M(LoadException)                                         \
   M(LoadString)                                            \
   M(Neg)                                                   \
   M(NewArray)                                              \
   M(ParallelMove)                                          \
   M(StaticFieldGet)                                        \
   M(StaticFieldSet)                                        \
+  M(Throw)                                                 \
   M(TypeConversion)                                        \
 
 #define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 5530f46..4a41000 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -95,7 +95,7 @@
   InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr) OVERRIDE;
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -118,7 +118,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super) \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr) OVERRIDE;
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 #undef DECLARE_VISIT_INSTRUCTION
 
@@ -135,10 +135,10 @@
 class CodeGeneratorARM64 : public CodeGenerator {
  public:
   explicit CodeGeneratorARM64(HGraph* graph);
-  virtual ~CodeGeneratorARM64() { }
+  virtual ~CodeGeneratorARM64() {}
 
-  virtual void GenerateFrameEntry() OVERRIDE;
-  virtual void GenerateFrameExit() OVERRIDE;
+  void GenerateFrameEntry() OVERRIDE;
+  void GenerateFrameExit() OVERRIDE;
 
   static const vixl::CPURegList& GetFramePreservedRegisters() {
     static const vixl::CPURegList frame_preserved_regs =
@@ -149,44 +149,49 @@
     return GetFramePreservedRegisters().TotalSizeInBytes();
   }
 
-  virtual void Bind(HBasicBlock* block) OVERRIDE;
+  void Bind(HBasicBlock* block) OVERRIDE;
 
   vixl::Label* GetLabelOf(HBasicBlock* block) const {
     return block_labels_ + block->GetBlockId();
   }
 
-  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
 
-  virtual size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const OVERRIDE {
     return kArm64WordSize;
   }
 
-  virtual size_t FrameEntrySpillSize() const OVERRIDE;
+  uintptr_t GetAddressOf(HBasicBlock* block ATTRIBUTE_UNUSED) const OVERRIDE {
+    UNIMPLEMENTED(INFO) << "TODO: GetAddressOf";
+    return 0u;
+  }
 
-  virtual HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
-  virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
-  virtual Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+  size_t FrameEntrySpillSize() const OVERRIDE;
+
+  HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+  HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+  Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
 
   // Emit a write barrier.
   void MarkGCCard(vixl::Register object, vixl::Register value);
 
   // Register allocation.
 
-  virtual void SetupBlockedRegisters() const OVERRIDE;
+  void SetupBlockedRegisters() const OVERRIDE;
   // AllocateFreeRegister() is only used when allocating registers locally
   // during CompileBaseline().
-  virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
 
-  virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
 
-  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
     UNUSED(stack_index);
     UNUSED(reg_id);
     UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
     return 0;
   }
 
-  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
     UNUSED(stack_index);
     UNUSED(reg_id);
     UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
@@ -205,16 +210,16 @@
       kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
-  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
 
-  virtual InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const OVERRIDE {
     return InstructionSet::kArm64;
   }
 
   void MoveHelper(Location destination, Location source, Primitive::Type type);
 
-  virtual void Initialize() OVERRIDE {
+  void Initialize() OVERRIDE {
     HGraph* graph = GetGraph();
     int length = graph->GetBlocks().Size();
     block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index b2d9187..2a8eded 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2596,5 +2596,29 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
+void LocationsBuilderX86::VisitLoadException(HLoadException* load) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadException(HLoadException* load) {
+  Address address = Address::Absolute(Thread::ExceptionOffset<kX86WordSize>().Int32Value());
+  __ fs()->movl(load->GetLocations()->Out().As<Register>(), address);
+  __ fs()->movl(address, Immediate(0));
+}
+
+void LocationsBuilderX86::VisitThrow(HThrow* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
+  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pDeliverException)));
+  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
 }  // namespace x86
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 176a269..85fe21c 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -71,10 +71,10 @@
   ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
       : ParallelMoveResolver(allocator), codegen_(codegen) {}
 
-  virtual void EmitMove(size_t index) OVERRIDE;
-  virtual void EmitSwap(size_t index) OVERRIDE;
-  virtual void SpillScratch(int reg) OVERRIDE;
-  virtual void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) OVERRIDE;
+  void EmitSwap(size_t index) OVERRIDE;
+  void SpillScratch(int reg) OVERRIDE;
+  void RestoreScratch(int reg) OVERRIDE;
 
   X86Assembler* GetAssembler() const;
 
@@ -94,7 +94,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr) OVERRIDE;
 
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -114,7 +114,7 @@
   InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr) OVERRIDE;
 
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -140,39 +140,43 @@
   explicit CodeGeneratorX86(HGraph* graph);
   virtual ~CodeGeneratorX86() {}
 
-  virtual void GenerateFrameEntry() OVERRIDE;
-  virtual void GenerateFrameExit() OVERRIDE;
-  virtual void Bind(HBasicBlock* block) OVERRIDE;
-  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
-  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  void GenerateFrameEntry() OVERRIDE;
+  void GenerateFrameExit() OVERRIDE;
+  void Bind(HBasicBlock* block) OVERRIDE;
+  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
 
-  virtual size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const OVERRIDE {
     return kX86WordSize;
   }
 
-  virtual size_t FrameEntrySpillSize() const OVERRIDE;
+  size_t FrameEntrySpillSize() const OVERRIDE;
 
-  virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() OVERRIDE {
     return &location_builder_;
   }
 
-  virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
     return &instruction_visitor_;
   }
 
-  virtual X86Assembler* GetAssembler() OVERRIDE {
+  X86Assembler* GetAssembler() OVERRIDE {
     return &assembler_;
   }
 
-  virtual void SetupBlockedRegisters() const OVERRIDE;
+  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+    return GetLabelOf(block)->Position();
+  }
 
-  virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+  void SetupBlockedRegisters() const OVERRIDE;
 
-  virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
 
-  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
 
   // Blocks all register pairs made out of blocked core registers.
   void UpdateBlockedPairRegisters() const;
@@ -181,7 +185,7 @@
     return &move_resolver_;
   }
 
-  virtual InstructionSet GetInstructionSet() const OVERRIDE {
+  InstructionSet GetInstructionSet() const OVERRIDE {
     return InstructionSet::kX86;
   }
 
@@ -199,7 +203,7 @@
     return block_labels_.GetRawStorage() + block->GetBlockId();
   }
 
-  virtual void Initialize() OVERRIDE {
+  void Initialize() OVERRIDE {
     block_labels_.SetSize(GetGraph()->GetBlocks().Size());
   }
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2bd76c1..41f3c59 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2549,5 +2549,31 @@
   __ Bind(slow_path->GetExitLabel());
 }
 
+void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadException(HLoadException* load) {
+  Address address = Address::Absolute(
+      Thread::ExceptionOffset<kX86_64WordSize>().Int32Value(), true);
+  __ gs()->movl(load->GetLocations()->Out().As<CpuRegister>(), address);
+  __ gs()->movl(address, Immediate(0));
+}
+
+void LocationsBuilderX86_64::VisitThrow(HThrow* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
+  __ gs()->call(
+        Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pDeliverException), true));
+  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
 }  // namespace x86_64
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 0de3045..9565b6f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -72,10 +72,10 @@
   ParallelMoveResolverX86_64(ArenaAllocator* allocator, CodeGeneratorX86_64* codegen)
       : ParallelMoveResolver(allocator), codegen_(codegen) {}
 
-  virtual void EmitMove(size_t index) OVERRIDE;
-  virtual void EmitSwap(size_t index) OVERRIDE;
-  virtual void SpillScratch(int reg) OVERRIDE;
-  virtual void RestoreScratch(int reg) OVERRIDE;
+  void EmitMove(size_t index) OVERRIDE;
+  void EmitSwap(size_t index) OVERRIDE;
+  void SpillScratch(int reg) OVERRIDE;
+  void RestoreScratch(int reg) OVERRIDE;
 
   X86_64Assembler* GetAssembler() const;
 
@@ -98,7 +98,7 @@
       : HGraphVisitor(graph), codegen_(codegen) {}
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr) OVERRIDE;
 
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -118,7 +118,7 @@
   InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
 
 #define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  virtual void Visit##name(H##name* instr);
+  void Visit##name(H##name* instr) OVERRIDE;
 
   FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
@@ -144,30 +144,30 @@
   explicit CodeGeneratorX86_64(HGraph* graph);
   virtual ~CodeGeneratorX86_64() {}
 
-  virtual void GenerateFrameEntry() OVERRIDE;
-  virtual void GenerateFrameExit() OVERRIDE;
-  virtual void Bind(HBasicBlock* block) OVERRIDE;
-  virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
-  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
-  virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  void GenerateFrameEntry() OVERRIDE;
+  void GenerateFrameExit() OVERRIDE;
+  void Bind(HBasicBlock* block) OVERRIDE;
+  void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
 
-  virtual size_t GetWordSize() const OVERRIDE {
+  size_t GetWordSize() const OVERRIDE {
     return kX86_64WordSize;
   }
 
-  virtual size_t FrameEntrySpillSize() const OVERRIDE;
+  size_t FrameEntrySpillSize() const OVERRIDE;
 
-  virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+  HGraphVisitor* GetLocationBuilder() OVERRIDE {
     return &location_builder_;
   }
 
-  virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE {
+  HGraphVisitor* GetInstructionVisitor() OVERRIDE {
     return &instruction_visitor_;
   }
 
-  virtual X86_64Assembler* GetAssembler() OVERRIDE {
+  X86_64Assembler* GetAssembler() OVERRIDE {
     return &assembler_;
   }
 
@@ -175,14 +175,18 @@
     return &move_resolver_;
   }
 
-  virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+    return GetLabelOf(block)->Position();
+  }
 
-  virtual void SetupBlockedRegisters() const OVERRIDE;
-  virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
-  virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
-  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+  Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
 
-  virtual InstructionSet GetInstructionSet() const OVERRIDE {
+  void SetupBlockedRegisters() const OVERRIDE;
+  Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+  void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+  void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+  InstructionSet GetInstructionSet() const OVERRIDE {
     return InstructionSet::kX86_64;
   }
 
@@ -198,7 +202,7 @@
     return block_labels_.GetRawStorage() + block->GetBlockId();
   }
 
-  virtual void Initialize() OVERRIDE {
+  void Initialize() OVERRIDE {
     block_labels_.SetSize(GetGraph()->GetBlocks().Size());
   }
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a57bbdb..37e5e6b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -292,7 +292,8 @@
         block_id_(-1),
         dex_pc_(dex_pc),
         lifetime_start_(kNoLifetime),
-        lifetime_end_(kNoLifetime) {}
+        lifetime_end_(kNoLifetime),
+        is_catch_block_(false) {}
 
   const GrowableArray<HBasicBlock*>& GetPredecessors() const {
     return predecessors_;
@@ -450,6 +451,9 @@
 
   uint32_t GetDexPc() const { return dex_pc_; }
 
+  bool IsCatchBlock() const { return is_catch_block_; }
+  void SetIsCatchBlock() { is_catch_block_ = true; }
+
  private:
   HGraph* const graph_;
   GrowableArray<HBasicBlock*> predecessors_;
@@ -464,6 +468,7 @@
   const uint32_t dex_pc_;
   size_t lifetime_start_;
   size_t lifetime_end_;
+  bool is_catch_block_;
 
   DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
 };
@@ -495,6 +500,7 @@
   M(LessThan, Condition)                                                \
   M(LessThanOrEqual, Condition)                                         \
   M(LoadClass, Instruction)                                             \
+  M(LoadException, Instruction)                                         \
   M(LoadLocal, Instruction)                                             \
   M(LoadString, Instruction)                                            \
   M(Local, Instruction)                                                 \
@@ -517,6 +523,7 @@
   M(Sub, BinaryOperation)                                               \
   M(SuspendCheck, Instruction)                                          \
   M(Temporary, Instruction)                                             \
+  M(Throw, Instruction)                                                 \
   M(TypeConversion, Instruction)                                        \
 
 #define FOR_EACH_INSTRUCTION(M)                                         \
@@ -1052,7 +1059,7 @@
 };
 
 // The exit instruction is the only instruction of the exit block.
-// Instructions aborting the method (HTrow and HReturn) must branch to the
+// Instructions aborting the method (HThrow and HReturn) must branch to the
 // exit block.
 class HExit : public HTemplateInstruction<0> {
  public:
@@ -1071,7 +1078,7 @@
  public:
   HGoto() : HTemplateInstruction(SideEffects::None()) {}
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   HBasicBlock* GetSuccessor() const {
     return GetBlock()->GetSuccessors().Get(0);
@@ -1092,7 +1099,7 @@
     SetRawInputAt(0, input);
   }
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   HBasicBlock* IfTrueSuccessor() const {
     return GetBlock()->GetSuccessors().Get(0);
@@ -2284,6 +2291,38 @@
   DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
 };
 
+// Implement the move-exception DEX instruction.
+class HLoadException : public HExpression<0> {
+ public:
+  HLoadException() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+
+  DECLARE_INSTRUCTION(LoadException);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HLoadException);
+};
+
+class HThrow : public HTemplateInstruction<1> {
+ public:
+  HThrow(HInstruction* exception, uint32_t dex_pc)
+      : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {
+    SetRawInputAt(0, exception);
+  }
+
+  bool IsControlFlow() const OVERRIDE { return true; }
+
+  bool NeedsEnvironment() const OVERRIDE { return true; }
+
+  uint32_t GetDexPc() const { return dex_pc_; }
+
+  DECLARE_INSTRUCTION(Throw);
+
+ private:
+  uint32_t dex_pc_;
+
+  DISALLOW_COPY_AND_ASSIGN(HThrow);
+};
+
 class MoveOperands : public ArenaObject<kArenaAllocMisc> {
  public:
   MoveOperands(Location source, Location destination, HInstruction* instruction)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 08b74c7..6e3653a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -206,6 +206,11 @@
       || instruction_set == kX86_64;
 }
 
+static bool CanOptimize(const DexFile::CodeItem& code_item) {
+  // TODO: We currently cannot optimize methods with try/catch.
+  return code_item.tries_size_ == 0;
+}
+
 CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
                                                uint32_t access_flags,
                                                InvokeType invoke_type,
@@ -264,7 +269,9 @@
 
   CodeVectorAllocator allocator;
 
-  if (run_optimizations_ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+  if (run_optimizations_
+      && CanOptimize(*code_item)
+      && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
     optimized_compiled_methods_++;
     graph->BuildDominatorTree();
     graph->TransformToSSA();
@@ -315,17 +322,19 @@
     unoptimized_compiled_methods_++;
     codegen->CompileBaseline(&allocator);
 
-    // Run these phases to get some test coverage.
-    graph->BuildDominatorTree();
-    graph->TransformToSSA();
-    visualizer.DumpGraph("ssa");
-    graph->FindNaturalLoops();
-    SsaRedundantPhiElimination(graph).Run();
-    SsaDeadPhiElimination(graph).Run();
-    GlobalValueNumberer(graph->GetArena(), graph).Run();
-    SsaLivenessAnalysis liveness(*graph, codegen);
-    liveness.Analyze();
-    visualizer.DumpGraph(kLivenessPassName);
+    if (CanOptimize(*code_item)) {
+      // Run these phases to get some test coverage.
+      graph->BuildDominatorTree();
+      graph->TransformToSSA();
+      visualizer.DumpGraph("ssa");
+      graph->FindNaturalLoops();
+      SsaRedundantPhiElimination(graph).Run();
+      SsaDeadPhiElimination(graph).Run();
+      GlobalValueNumberer(graph->GetArena(), graph).Run();
+      SsaLivenessAnalysis liveness(*graph, codegen);
+      liveness.Analyze();
+      visualizer.DumpGraph(kLivenessPassName);
+    }
 
     std::vector<uint8_t> mapping_table;
     SrcMap src_mapping_table;
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index acd1043..a742aaa 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -197,7 +197,7 @@
   return DexFile::kDexNoIndex;
 }
 
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc) {
+uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure) {
   const void* entry_point = GetQuickOatEntryPoint();
   MappingTable table(
       entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
@@ -219,9 +219,11 @@
       return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
     }
   }
-  LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
-             << " in " << PrettyMethod(this);
-  return 0;
+  if (abort_on_failure) {
+    LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+               << " in " << PrettyMethod(this);
+  }
+  return UINTPTR_MAX;
 }
 
 uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> exception_type,
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 08c0996..d92d00a 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -453,7 +453,8 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Converts a dex PC to a native PC.
-  uintptr_t ToNativeQuickPc(const uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Find the catch block for the given exception type and dex_pc. When a catch block is found,
   // indicates whether the found catch block is responsible for clearing the exception or whether
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 291b45f..631c4be 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -19,10 +19,13 @@
 
 namespace art {
 
-#define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
+#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
   int t[] = {__VA_ARGS__}; \
   int t_size = sizeof(t) / sizeof(*t); \
-  CheckReferences(t, t_size, m->NativeQuickPcOffset(m->ToNativeQuickPc(native_pc_offset))); \
+  uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, abort_if_not_found); \
+  if (native_quick_pc != UINTPTR_MAX) { \
+    CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
+  } \
 } while (false);
 
 struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
@@ -40,31 +43,33 @@
     // we know the Dex registers with live reference values. Assert that what we
     // find is what is expected.
     if (m_name.compare("f") == 0) {
-      CHECK_REGS_CONTAIN_REFS(0x03U, 8);  // v8: this
-      CHECK_REGS_CONTAIN_REFS(0x06U, 8, 1);  // v8: this, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x08U, 8, 3, 1);  // v8: this, v3: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x0cU, 8, 3, 1);  // v8: this, v3: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x0eU, 8, 3, 1);  // v8: this, v3: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x10U, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x03U, true, 8);  // v8: this
+      CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1);  // v8: this, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1);  // v8: this, v3: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1);  // v8: this, v3: y, v1: x
       // v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
       //   0024: move-object v3, v2
       //   0025: goto 0013
       // Detaled dex instructions for ReferenceMap.java are at the end of this function.
       // CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1);  // v8: this, v3: y, v2: y, v1: x
-      // We eliminate the non-live registers at a return, so only v3 is live:
-      CHECK_REGS_CONTAIN_REFS(0x13U);  // v3: y
-      CHECK_REGS_CONTAIN_REFS(0x18U, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
-      CHECK_REGS_CONTAIN_REFS(0x1aU, 8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
-      CHECK_REGS_CONTAIN_REFS(0x1dU, 8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+      // We eliminate the non-live registers at a return, so only v3 is live.
+      // Note that it is OK for a compiler to not have a dex map at this dex PC because
+      // a return is not a safepoint.
+      CHECK_REGS_CONTAIN_REFS(0x13U, false);  // v3: y
+      CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1, 0);  // v8: this, v5: x[1], v2: y, v1: x, v0: ex
       // v5 is removed from the root set because there is a "merge" operation.
       // See 0015: if-nez v2, 001f.
-      CHECK_REGS_CONTAIN_REFS(0x1fU, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
-      CHECK_REGS_CONTAIN_REFS(0x21U, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
-      CHECK_REGS_CONTAIN_REFS(0x27U, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x29U, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x2cU, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x2fU, 8, 4, 3, 2, 1);  // v8: this, v4: ex, v3: y, v2: y, v1: x
-      CHECK_REGS_CONTAIN_REFS(0x32U, 8, 3, 2, 1, 0);  // v8: this, v3: y, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x1fU, true, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1, 0);  // v8: this, v2: y, v1: x, v0: ex
+      CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1);  // v8: this, v4: ex, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x2fU, true, 8, 4, 3, 2, 1);  // v8: this, v4: ex, v3: y, v2: y, v1: x
+      CHECK_REGS_CONTAIN_REFS(0x32U, true, 8, 3, 2, 1, 0);  // v8: this, v3: y, v2: y, v1: x, v0: ex
     }
 
     return true;
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
index 07c407b..7c3fd25 100644
--- a/test/401-optimizing-compiler/src/Main.java
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -94,6 +94,14 @@
       exception = e;
     }
 
+    // Test that we do NPE checks on array length.
+    exception = null;
+    try {
+      $opt$ArrayLengthOfNull(null);
+    } catch (NullPointerException e) {
+      exception = e;
+    }
+
     if (exception == null) {
       throw new Error("Missing NullPointerException");
     }
@@ -218,5 +226,9 @@
     return 42;
   }
 
+  public static int $opt$ArrayLengthOfNull(int[] array) {
+    return array.length;
+  }
+
   Object o;
 }
diff --git a/test/421-exceptions/expected.txt b/test/421-exceptions/expected.txt
new file mode 100644
index 0000000..94db350
--- /dev/null
+++ b/test/421-exceptions/expected.txt
@@ -0,0 +1,20 @@
+1
+3
+4
+1
+4
+1
+4
+1
+4
+Caught class java.lang.RuntimeException
+1
+2
+4
+1
+4
+1
+4
+1
+4
+Caught class java.lang.NullPointerException
diff --git a/test/421-exceptions/info.txt b/test/421-exceptions/info.txt
new file mode 100644
index 0000000..bdec67e
--- /dev/null
+++ b/test/421-exceptions/info.txt
@@ -0,0 +1 @@
+Simple test for try/catch/throw.
diff --git a/test/421-exceptions/src/Main.java b/test/421-exceptions/src/Main.java
new file mode 100644
index 0000000..6bf2377
--- /dev/null
+++ b/test/421-exceptions/src/Main.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void $opt$bar() {
+    try {
+      $opt$foo(1);
+    } catch (NullPointerException e) {
+      $opt$foo(2);
+    } catch (RuntimeException e) {
+      $opt$foo(3);
+    } finally {
+      $opt$foo(4);
+    }
+  }
+
+  static int barState;
+  static int fooState;
+
+  public static void main(String[] args) {
+    fooState = 0;
+    $opt$runTest();
+    fooState = 1;
+    $opt$runTest();
+  }
+
+  public static void $opt$runTest() {
+    barState = 1;
+    $opt$bar();
+    barState = 2;
+    $opt$bar();
+    barState = 3;
+    $opt$bar();
+    barState = 4;
+    try {
+      $opt$bar();
+    } catch (RuntimeException e) {
+      System.out.println("Caught " + e.getClass());
+    }
+  }
+
+  public static void $opt$foo(int value) {
+    System.out.println(value);
+    if (value == barState) {
+      if (fooState == 0) {
+        throw new RuntimeException();
+      } else {
+        throw new NullPointerException();
+      }
+    }
+  }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 7d56271..269f2fb 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -312,26 +312,39 @@
   003-omnibus-opcodes \
   004-InterfaceTest \
   004-JniTest \
+  004-ReferenceMap \
+  004-SignalTest \
   004-StackWalk \
   004-UnsafeTest \
+  005-annotations \
   006-args \
   007-count10 \
+  008-exceptions \
   011-array-copy \
   013-math2 \
   016-intern \
   017-float \
   018-stack-overflow \
+  019-wrong-array-type \
   020-string \
+  021-string2 \
   022-interface \
   023-many-interfaces \
+  024-illegal-access \
   026-access \
   028-array-write \
+  029-assert \
   030-bad-finalizer \
   031-class-attributes \
   032-concrete-sub \
+  033-class-init-deadlock \
+  035-enum \
   036-finalizer \
   037-inherit \
   038-inner-null \
+  039-join-main \
+  040-miranda \
+  042-new-instance \
   043-privates \
   044-proxy \
   045-reflect-array \
@@ -342,9 +355,13 @@
   051-thread \
   052-verifier-fun \
   054-uncaught \
+  055-enum-performance \
   056-const-string-jumbo \
   061-out-of-memory \
   063-process-manager \
+  064-field-access \
+  065-mismatched-implements \
+  066-mismatched-super \
   067-preemptive-unpark \
   068-classloader \
   069-field-type \
@@ -352,16 +369,20 @@
   071-dexfile \
   072-precise-gc \
   074-gc-thrash \
+  075-verification-error \
   076-boolean-put \
   077-method-override \
+  078-polymorphic-virtual \
   079-phantom \
   080-oom-throw \
+  081-hot-exceptions \
   082-inline-execute \
   083-compiler-regressions \
   084-class-init \
   085-old-style-inner-class \
   086-null-super \
   087-gc-after-link \
+  088-monitor-verification \
   090-loop-formation \
   092-locale \
   093-serialization \
@@ -370,6 +391,7 @@
   097-duplicate-method \
   098-ddmc \
   100-reflect2 \
+  101-fibonacci \
   102-concurrent-gc \
   103-string-append \
   105-invoke \
@@ -377,20 +399,28 @@
   107-int-math2 \
   109-suspend-check \
   110-field-access \
+  111-unresolvable-exception \
   112-double-math \
   113-multidex \
   117-nopatchoat \
+  118-noimage-dex2oat \
+  119-noimage-patchoat \
   121-modifiers \
+  121-simple-suspend-check \
   122-npe \
   123-compiler-regressions-mt \
   124-missing-classes \
   125-gc-and-classloading \
   126-miranda-multidex \
+  201-built-in-exception-detail-messages \
+  202-thread-oome \
   300-package-override \
   301-abstract-protected \
   303-verification-stress \
   401-optimizing-compiler \
+  402-optimizing-control-flow \
   403-optimizing-long \
+  404-optimizing-allocator \
   405-optimizing-long-allocator \
   406-fields \
   407-arrays \
@@ -407,6 +437,7 @@
   418-const-string \
   419-long-parameter \
   420-const-class \
+  421-exceptions \
   422-type-conversion \
   700-LoadArgRegs \
   701-easy-div-rem \