Merge "ART: More warnings"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 63200b7..00fbd69 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -111,6 +111,7 @@
   runtime/indirect_reference_table_test.cc \
   runtime/instruction_set_test.cc \
   runtime/intern_table_test.cc \
+  runtime/interpreter/safe_math_test.cc \
   runtime/leb128_test.cc \
   runtime/mem_map_test.cc \
   runtime/mirror/dex_cache_test.cc \
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 434d9ef..d168fc8 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -505,11 +505,11 @@
   }
 
   HLoadClass* constant = new (arena_) HLoadClass(
-      storage_index, is_referrers_class, is_initialized, dex_offset);
+      storage_index, is_referrers_class, dex_offset);
   current_block_->AddInstruction(constant);
 
   HInstruction* cls = constant;
-  if (constant->NeedsInitialization()) {
+  if (!is_initialized) {
     cls = new (arena_) HClinitCheck(constant, dex_offset);
     current_block_->AddInstruction(cls);
   }
@@ -1185,6 +1185,23 @@
       break;
     }
 
+    case Instruction::CONST_CLASS: {
+      uint16_t type_index = instruction.VRegB_21c();
+      bool type_known_final;
+      bool type_known_abstract;
+      bool is_referrers_class;
+      bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+          dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+          &type_known_final, &type_known_abstract, &is_referrers_class);
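+      // Bail out if the type needs access checks: returning false makes the
+      // whole method fall back to the other compiler.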
+      if (!can_access) {
+        return false;
+      }
+      current_block_->AddInstruction(
+          new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset));
+      UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+      break;
+    }
+
     default:
       return false;
   }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 5513c62..6e6d64c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -170,30 +170,55 @@
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
 };
 
-class ClinitCheckSlowPathARM : public SlowPathCodeARM {
+class LoadClassSlowPathARM : public SlowPathCodeARM {
  public:
-  explicit ClinitCheckSlowPathARM(HClinitCheck* instruction) : instruction_(instruction) {}
+  LoadClassSlowPathARM(HLoadClass* cls,
+                       HInstruction* at,
+                       uint32_t dex_pc,
+                       bool do_clinit)
+      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+  }
 
   virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = at_->GetLocations();
+
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
+    codegen->SaveLiveRegisters(locations);
 
-    HLoadClass* cls = instruction_->GetLoadClass();
     InvokeRuntimeCallingConvention calling_convention;
-    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls->GetTypeIndex());
+    __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
     arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
-    arm_codegen->InvokeRuntime(
-        QUICK_ENTRY_POINT(pInitializeStaticStorage), instruction_, instruction_->GetDexPc());
-    arm_codegen->Move32(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(R0));
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
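+    // pInitializeStaticStorage resolves and initializes the class;
+    // pInitializeType only resolves it.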
+    int32_t entry_point_offset = do_clinit_
+        ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+        : QUICK_ENTRY_POINT(pInitializeType);
+    arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+
+    // Move the class to the desired location.
+    if (locations->Out().IsValid()) {
+      DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+      arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+    }
+    codegen->RestoreLiveRegisters(locations);
     __ b(GetExitLabel());
   }
 
  private:
-  HClinitCheck* const instruction_;
+  // The class this slow path will load.
+  HLoadClass* const cls_;
 
-  DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathARM);
+  // The instruction at which this slow path is invoked
+  // (either the load class itself or an initialization check).
+  HInstruction* const at_;
+
+  // The dex PC of `at_`.
+  const uint32_t dex_pc_;
+
+  // Whether to initialize the class.
+  const bool do_clinit_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
 };
 
 class LoadStringSlowPathARM : public SlowPathCodeARM {
@@ -2142,21 +2167,38 @@
 }
 
 void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
+  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+      ? LocationSummary::kCallOnSlowPath
+      : LocationSummary::kNoCall;
   LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
   locations->SetOut(Location::RequiresRegister());
 }
 
 void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
   Register out = cls->GetLocations()->Out().As<Register>();
   if (cls->IsReferrersClass()) {
+    DCHECK(!cls->CanCallRuntime());
+    DCHECK(!cls->MustGenerateClinitCheck());
     codegen_->LoadCurrentMethod(out);
     __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
   } else {
+    DCHECK(cls->CanCallRuntime());
     codegen_->LoadCurrentMethod(out);
     __ LoadFromOffset(
         kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
     __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+
+    SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    codegen_->AddSlowPath(slow_path);
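+    // A null entry in the dex cache means the type is not resolved yet;
+    // resolve (and possibly initialize) it on the slow path.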
+    __ cmp(out, ShifterOperand(0));
+    __ b(slow_path->GetEntryLabel(), EQ);
+    if (cls->MustGenerateClinitCheck()) {
+      GenerateClassInitializationCheck(slow_path, out);
+    } else {
+      __ Bind(slow_path->GetExitLabel());
+    }
   }
 }
 
@@ -2170,17 +2212,15 @@
 }
 
 void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
-  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathARM(check);
+  // We assume the class is not null.
+  SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+      check->GetLoadClass(), check, check->GetDexPc(), true);
   codegen_->AddSlowPath(slow_path);
+  GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
 
-  LocationSummary* locations = check->GetLocations();
-  // We remove the class as a live register, we know it's null or unused in the slow path.
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  register_set->Remove(locations->InAt(0));
-
-  Register class_reg = locations->InAt(0).As<Register>();
-  __ cmp(class_reg, ShifterOperand(0));
-  __ b(slow_path->GetEntryLabel(), EQ);
+void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
+    SlowPathCodeARM* slow_path, Register class_reg) {
   __ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
   __ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
   __ b(slow_path->GetEntryLabel(), LT);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index c65b426..5076a4b 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -26,6 +26,7 @@
 namespace arm {
 
 class CodeGeneratorARM;
+class SlowPathCodeARM;
 
 static constexpr size_t kArmWordSize = 4;
 
@@ -131,6 +132,7 @@
   // is the block to branch to if the suspend check is not needed, and after
   // the suspend call.
   void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+  void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
 
   ArmAssembler* const assembler_;
   CodeGeneratorARM* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ff85251..1e37909 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -157,32 +157,6 @@
   DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
 };
 
-class ClinitCheckSlowPathX86 : public SlowPathCodeX86 {
- public:
-  explicit ClinitCheckSlowPathX86(HClinitCheck* instruction) : instruction_(instruction) {}
-
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
-    CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
-    __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
-
-    HLoadClass* cls = instruction_->GetLoadClass();
-    InvokeRuntimeCallingConvention calling_convention;
-    __ movl(calling_convention.GetRegisterAt(0), Immediate(cls->GetTypeIndex()));
-    x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
-    __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)));
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    x86_codegen->Move32(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(EAX));
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
-    __ jmp(GetExitLabel());
-  }
-
- private:
-  HClinitCheck* const instruction_;
-
-  DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathX86);
-};
-
 class LoadStringSlowPathX86 : public SlowPathCodeX86 {
  public:
   explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
@@ -212,6 +186,56 @@
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
 };
 
+class LoadClassSlowPathX86 : public SlowPathCodeX86 {
+ public:
+  LoadClassSlowPathX86(HLoadClass* cls,
+                       HInstruction* at,
+                       uint32_t dex_pc,
+                       bool do_clinit)
+      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+  }
+
+  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = at_->GetLocations();
+    CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+    __ Bind(GetEntryLabel());
+    codegen->SaveLiveRegisters(locations);
+
+    InvokeRuntimeCallingConvention calling_convention;
+    __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
+    x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
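+    // pInitializeStaticStorage resolves and initializes the class;
+    // pInitializeType only resolves it.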
+    __ fs()->call(Address::Absolute(do_clinit_
+        ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
+        : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
+    codegen->RecordPcInfo(at_, dex_pc_);
+
+    // Move the class to the desired location.
+    if (locations->Out().IsValid()) {
+      DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+      x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+    }
+    codegen->RestoreLiveRegisters(locations);
+    __ jmp(GetExitLabel());
+  }
+
+ private:
+  // The class this slow path will load.
+  HLoadClass* const cls_;
+
+  // The instruction at which this slow path is invoked
+  // (either the load class itself or an initialization check).
+  HInstruction* const at_;
+
+  // The dex PC of `at_`.
+  const uint32_t dex_pc_;
+
+  // Whether to initialize the class.
+  const bool do_clinit_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
+};
+
 #undef __
 #define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
 
@@ -2180,20 +2204,37 @@
 }
 
 void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
+  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+      ? LocationSummary::kCallOnSlowPath
+      : LocationSummary::kNoCall;
   LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
   locations->SetOut(Location::RequiresRegister());
 }
 
 void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
   Register out = cls->GetLocations()->Out().As<Register>();
   if (cls->IsReferrersClass()) {
+    DCHECK(!cls->CanCallRuntime());
+    DCHECK(!cls->MustGenerateClinitCheck());
     codegen_->LoadCurrentMethod(out);
     __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
   } else {
+    DCHECK(cls->CanCallRuntime());
     codegen_->LoadCurrentMethod(out);
     __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
     __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+
+    SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    codegen_->AddSlowPath(slow_path);
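+    // A null entry in the dex cache means the type is not resolved yet;
+    // resolve (and possibly initialize) it on the slow path.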
+    __ testl(out, out);
+    __ j(kEqual, slow_path->GetEntryLabel());
+    if (cls->MustGenerateClinitCheck()) {
+      GenerateClassInitializationCheck(slow_path, out);
+    } else {
+      __ Bind(slow_path->GetExitLabel());
+    }
   }
 }
 
@@ -2207,17 +2248,15 @@
 }
 
 void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
-  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathX86(check);
+  // We assume the class is not null.
+  SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+      check->GetLoadClass(), check, check->GetDexPc(), true);
   codegen_->AddSlowPath(slow_path);
+  GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
 
-  LocationSummary* locations = check->GetLocations();
-  // We remove the class as a live register, we know it's null or unused in the slow path.
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  register_set->Remove(locations->InAt(0));
-
-  Register class_reg = locations->InAt(0).As<Register>();
-  __ testl(class_reg, class_reg);
-  __ j(kEqual, slow_path->GetEntryLabel());
+void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
+    SlowPathCodeX86* slow_path, Register class_reg) {
   __ cmpl(Address(class_reg,  mirror::Class::StatusOffset().Int32Value()),
           Immediate(mirror::Class::kStatusInitialized));
   __ j(kLess, slow_path->GetEntryLabel());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index bcceaad..176a269 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -28,6 +28,7 @@
 static constexpr size_t kX86WordSize = 4;
 
 class CodeGeneratorX86;
+class SlowPathCodeX86;
 
 static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
 static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
@@ -126,6 +127,7 @@
   // is the block to branch to if the suspend check is not needed, and after
   // the suspend call.
   void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+  void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
 
   X86Assembler* const assembler_;
   CodeGeneratorX86* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0bc2bad..40eec9b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -168,32 +168,56 @@
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
 };
 
-class ClinitCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
  public:
-  explicit ClinitCheckSlowPathX86_64(HClinitCheck* instruction) : instruction_(instruction) {}
+  LoadClassSlowPathX86_64(HLoadClass* cls,
+                          HInstruction* at,
+                          uint32_t dex_pc,
+                          bool do_clinit)
+      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+  }
 
   virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+    LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
-    codegen->SaveLiveRegisters(instruction_->GetLocations());
 
-    HLoadClass* cls = instruction_->GetLoadClass();
+    codegen->SaveLiveRegisters(locations);
+
     InvokeRuntimeCallingConvention calling_convention;
-    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls->GetTypeIndex()));
+    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
     x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
-    __ gs()->call(Address::Absolute(
-        QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage), true));
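+    // pInitializeStaticStorage resolves and initializes the class;
+    // pInitializeType only resolves it.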
+    __ gs()->call(Address::Absolute((do_clinit_
+          ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
+          : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
+    codegen->RecordPcInfo(at_, dex_pc_);
 
-    codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
-    x64_codegen->Move(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(RAX));
-    codegen->RestoreLiveRegisters(instruction_->GetLocations());
+    // Move the class to the desired location.
+    if (locations->Out().IsValid()) {
+      DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+      x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+    }
+
+    codegen->RestoreLiveRegisters(locations);
     __ jmp(GetExitLabel());
   }
 
  private:
-  HClinitCheck* const instruction_;
+  // The class this slow path will load.
+  HLoadClass* const cls_;
 
-  DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathX86_64);
+  // The instruction at which this slow path is invoked
+  // (either the load class itself or an initialization check).
+  HInstruction* const at_;
+
+  // The dex PC of `at_`.
+  const uint32_t dex_pc_;
+
+  // Whether to initialize the class.
+  const bool do_clinit_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
 };
 
 class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
@@ -2151,21 +2175,46 @@
   __ popq(CpuRegister(reg));
 }
 
+void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
+    SlowPathCodeX86_64* slow_path, CpuRegister class_reg) {
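+  // A status below kStatusInitialized means the class has not finished
+  // initialization; branch to the slow path to run the initializer.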
+  __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+          Immediate(mirror::Class::kStatusInitialized));
+  __ j(kLess, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+  // No need for memory fence, thanks to the X86_64 memory model.
+}
+
 void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
+  LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+      ? LocationSummary::kCallOnSlowPath
+      : LocationSummary::kNoCall;
   LocationSummary* locations =
-      new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+      new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
   locations->SetOut(Location::RequiresRegister());
 }
 
 void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
   CpuRegister out = cls->GetLocations()->Out().As<CpuRegister>();
   if (cls->IsReferrersClass()) {
+    DCHECK(!cls->CanCallRuntime());
+    DCHECK(!cls->MustGenerateClinitCheck());
     codegen_->LoadCurrentMethod(out);
     __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
   } else {
+    DCHECK(cls->CanCallRuntime());
     codegen_->LoadCurrentMethod(out);
     __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
     __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+    SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+        cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+    codegen_->AddSlowPath(slow_path);
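+    // A null entry in the dex cache means the type is not resolved yet;
+    // resolve (and possibly initialize) it on the slow path.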
+    __ testl(out, out);
+    __ j(kEqual, slow_path->GetEntryLabel());
+    if (cls->MustGenerateClinitCheck()) {
+      GenerateClassInitializationCheck(slow_path, out);
+    } else {
+      __ Bind(slow_path->GetExitLabel());
+    }
   }
 }
 
@@ -2179,22 +2228,11 @@
 }
 
 void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
-  SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathX86_64(check);
+  // We assume the class is not null.
+  SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+      check->GetLoadClass(), check, check->GetDexPc(), true);
   codegen_->AddSlowPath(slow_path);
-
-  LocationSummary* locations = check->GetLocations();
-  // We remove the class as a live register, we know it's null or unused in the slow path.
-  RegisterSet* register_set = locations->GetLiveRegisters();
-  register_set->Remove(locations->InAt(0));
-
-  CpuRegister class_reg = locations->InAt(0).As<CpuRegister>();
-  __ testl(class_reg, class_reg);
-  __ j(kEqual, slow_path->GetEntryLabel());
-  __ cmpl(Address(class_reg,  mirror::Class::StatusOffset().Int32Value()),
-          Immediate(mirror::Class::kStatusInitialized));
-  __ j(kLess, slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-  // No need for memory fence, thanks to the X86_64 memory model.
+  GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<CpuRegister>());
 }
 
 void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 32d2702..0de3045 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -65,6 +65,7 @@
 };
 
 class CodeGeneratorX86_64;
+class SlowPathCodeX86_64;
 
 class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
  public:
@@ -130,6 +131,7 @@
   // is the block to branch to if the suspend check is not needed, and after
   // the suspend call.
   void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
+  void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
 
   X86_64Assembler* const assembler_;
   CodeGeneratorX86_64* const codegen_;
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index d96131a..bed688b 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -27,6 +27,9 @@
 
 class HConstant;
 class HInstruction;
+class Location;
+
+std::ostream& operator<<(std::ostream& os, const Location& location);
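+// Declared early so that DCHECK failures below can print a Location.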
 
 /**
  * A Location is an abstraction over the potential location
@@ -371,7 +374,7 @@
     if (loc.IsRegister()) {
       core_registers_ &= ~(1 << loc.reg());
     } else {
-      DCHECK(loc.IsFpuRegister());
+      DCHECK(loc.IsFpuRegister()) << loc;
       floating_point_registers_ &= ~(1 << loc.reg());
     }
   }
@@ -528,8 +531,6 @@
   DISALLOW_COPY_AND_ASSIGN(LocationSummary);
 };
 
-std::ostream& operator<<(std::ostream& os, const Location& location);
-
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7549ebf..79638b3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2052,9 +2052,6 @@
   DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
 };
 
-// TODO: Make this class handle the case the load is null (dex cache
-// is null). This will be required when using it for other things than
-// initialization check.
 /**
  * Instruction to load a Class object.
  */
@@ -2062,13 +2059,14 @@
  public:
   HLoadClass(uint16_t type_index,
              bool is_referrers_class,
-             bool is_initialized,
              uint32_t dex_pc)
       : HExpression(Primitive::kPrimNot, SideEffects::None()),
         type_index_(type_index),
         is_referrers_class_(is_referrers_class),
-        is_initialized_(is_initialized),
-        dex_pc_(dex_pc) {}
+        dex_pc_(dex_pc),
+        generate_clinit_check_(false) {}
+
+  bool CanBeMoved() const OVERRIDE { return true; }
 
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return other->AsLoadClass()->type_index_ == type_index_;
@@ -2078,20 +2076,35 @@
 
   uint32_t GetDexPc() const { return dex_pc_; }
   uint16_t GetTypeIndex() const { return type_index_; }
+  bool IsReferrersClass() const { return is_referrers_class_; }
 
-  bool NeedsInitialization() const {
-    return !is_initialized_ && !is_referrers_class_;
+  bool NeedsEnvironment() const OVERRIDE {
+    // May call the runtime to load the class if it is not loaded yet.
+    // TODO: make this decision finer-grained.
+    return !is_referrers_class_;
   }
 
-  bool IsReferrersClass() const { return is_referrers_class_; }
+  bool MustGenerateClinitCheck() const {
+    return generate_clinit_check_;
+  }
+
+  void SetMustGenerateClinitCheck() {
+    generate_clinit_check_ = true;
+  }
+
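+  // Loading the referrer's own class never calls the runtime; any other
+  // class may need to be resolved, and a required clinit check may need
+  // to initialize it.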
+  bool CanCallRuntime() const {
+    return MustGenerateClinitCheck() || !is_referrers_class_;
+  }
 
   DECLARE_INSTRUCTION(LoadClass);
 
  private:
   const uint16_t type_index_;
   const bool is_referrers_class_;
-  const bool is_initialized_;
   const uint32_t dex_pc_;
+  // Whether this instruction must generate the initialization check.
+  // Used for code generation.
+  bool generate_clinit_check_;
 
   DISALLOW_COPY_AND_ASSIGN(HLoadClass);
 };
@@ -2103,6 +2116,8 @@
         string_index_(string_index),
         dex_pc_(dex_pc) {}
 
+  bool CanBeMoved() const OVERRIDE { return true; }
+
   bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return other->AsLoadString()->string_index_ == string_index_;
   }
@@ -2136,6 +2151,12 @@
     SetRawInputAt(0, constant);
   }
 
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+    UNUSED(other);
+    return true;
+  }
+
   bool NeedsEnvironment() const OVERRIDE {
     // May call runtime to initialize the class.
     return true;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 35d56f3..c4db840 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -39,7 +39,14 @@
 }
 
 void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
-  check->ReplaceWith(check->InputAt(0));
+  HLoadClass* cls = check->GetLoadClass();
+  check->ReplaceWith(cls);
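+  // Merge the check into the load only when they are adjacent; otherwise
+  // the check stays in the graph with its own slow path.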
+  if (check->GetPrevious() == cls) {
+    // Pass the initialization duty to the `HLoadClass` instruction,
+    // and remove the `HClinitCheck` from the graph.
+    cls->SetMustGenerateClinitCheck();
+    check->GetBlock()->RemoveInstruction(check);
+  }
 }
 
 void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) {
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index be73594..1661554 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -119,6 +119,7 @@
 void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                      PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
 #if defined(__APPLE__)
+  UNUSED(ipoints, jpoints, ppoints, qpoints);
   UNIMPLEMENTED(FATAL);
 #else
   // Interpreter
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 88b99a1..66d6fab 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -48,9 +48,11 @@
 
 // DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
 // declarations in a class.
+#if !defined(DISALLOW_COPY_AND_ASSIGN)
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
   TypeName(const TypeName&) = delete;  \
   void operator=(const TypeName&) = delete
+#endif
 
 // A macro to disallow all the implicit constructors, namely the default constructor, copy
 // constructor and operator= functions.
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index cccf8f3..c0b79b2 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -227,32 +227,34 @@
 }
 
 void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints) {
-  switch (entry_points_allocator) {
 #if !defined(__APPLE__) || !defined(__LP64__)
+  switch (entry_points_allocator) {
     case gc::kAllocatorTypeDlMalloc: {
       SetQuickAllocEntryPoints_dlmalloc(qpoints, entry_points_instrumented);
-      break;
+      return;
     }
     case gc::kAllocatorTypeRosAlloc: {
       SetQuickAllocEntryPoints_rosalloc(qpoints, entry_points_instrumented);
-      break;
+      return;
     }
     case gc::kAllocatorTypeBumpPointer: {
       CHECK(kMovingCollector);
       SetQuickAllocEntryPoints_bump_pointer(qpoints, entry_points_instrumented);
-      break;
+      return;
     }
     case gc::kAllocatorTypeTLAB: {
       CHECK(kMovingCollector);
       SetQuickAllocEntryPoints_tlab(qpoints, entry_points_instrumented);
+      return;
+    }
+    default:
       break;
-    }
-#endif
-    default: {
-      UNIMPLEMENTED(FATAL);
-      UNREACHABLE();
-    }
   }
+#else
+  UNUSED(qpoints);
+#endif
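+  // Reaching here means no allocator entry points could be installed.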
+  UNIMPLEMENTED(FATAL);
+  UNREACHABLE();
 }
 
 }  // namespace art
diff --git a/runtime/interpreter/safe_math_test.cc b/runtime/interpreter/safe_math_test.cc
new file mode 100644
index 0000000..28087a3
--- /dev/null
+++ b/runtime/interpreter/safe_math_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "safe_math.h"
+
+#include <limits>
+
+#include "gtest/gtest.h"
+
+namespace art {
+namespace interpreter {
+
+TEST(SafeMath, Add) {
+  // Adding 1 overflows 0x7ff... to 0x800... aka max and min.
+  EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::max(), 1),
+            std::numeric_limits<int32_t>::min());
+  EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::max(), 1),
+            std::numeric_limits<int64_t>::min());
+
+  // Vanilla arithmetic should work too.
+  EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::max() - 1, 1),
+            std::numeric_limits<int32_t>::max());
+  EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::max() - 1, 1),
+            std::numeric_limits<int64_t>::max());
+
+  EXPECT_EQ(SafeAdd(std::numeric_limits<int32_t>::min() + 1, -1),
+            std::numeric_limits<int32_t>::min());
+  EXPECT_EQ(SafeAdd(std::numeric_limits<int64_t>::min() + 1, -1),
+            std::numeric_limits<int64_t>::min());
+
+  EXPECT_EQ(SafeAdd(int32_t(-1), -1), -2);
+  EXPECT_EQ(SafeAdd(int64_t(-1), -1), -2);
+
+  EXPECT_EQ(SafeAdd(int32_t(1), 1), 2);
+  EXPECT_EQ(SafeAdd(int64_t(1), 1), 2);
+
+  EXPECT_EQ(SafeAdd(int32_t(-1), 1), 0);
+  EXPECT_EQ(SafeAdd(int64_t(-1), 1), 0);
+
+  EXPECT_EQ(SafeAdd(int32_t(1), -1), 0);
+  EXPECT_EQ(SafeAdd(int64_t(1), -1), 0);
+
+  // Test sign extension of smaller operand sizes.
+  EXPECT_EQ(SafeAdd(int32_t(1), int8_t(-1)), 0);
+  EXPECT_EQ(SafeAdd(int64_t(1), int8_t(-1)), 0);
+}
+
+TEST(SafeMath, Sub) {
+  // Subtracting 1 underflows 0x800... to 0x7ff... aka min and max.
+  EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::min(), 1),
+            std::numeric_limits<int32_t>::max());
+  EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::min(), 1),
+            std::numeric_limits<int64_t>::max());
+
+  // Vanilla arithmetic should work too.
+  EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::max() - 1, -1),
+            std::numeric_limits<int32_t>::max());
+  EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::max() - 1, -1),
+            std::numeric_limits<int64_t>::max());
+
+  EXPECT_EQ(SafeSub(std::numeric_limits<int32_t>::min() + 1, 1),
+            std::numeric_limits<int32_t>::min());
+  EXPECT_EQ(SafeSub(std::numeric_limits<int64_t>::min() + 1, 1),
+            std::numeric_limits<int64_t>::min());
+
+  EXPECT_EQ(SafeSub(int32_t(-1), -1), 0);
+  EXPECT_EQ(SafeSub(int64_t(-1), -1), 0);
+
+  EXPECT_EQ(SafeSub(int32_t(1), 1), 0);
+  EXPECT_EQ(SafeSub(int64_t(1), 1), 0);
+
+  EXPECT_EQ(SafeSub(int32_t(-1), 1), -2);
+  EXPECT_EQ(SafeSub(int64_t(-1), 1), -2);
+
+  EXPECT_EQ(SafeSub(int32_t(1), -1), 2);
+  EXPECT_EQ(SafeSub(int64_t(1), -1), 2);
+
+  // Test sign extension of smaller operand sizes.
+  EXPECT_EQ(SafeSub(int32_t(1), int8_t(-1)), 2);
+  EXPECT_EQ(SafeSub(int64_t(1), int8_t(-1)), 2);
+}
+
+TEST(SafeMath, Mul) {
+  // Multiplying by 2 overflows 0x7ff...f to 0xfff...e aka max and -2.
+  EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::max(), 2),
+            -2);
+  EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::max(), 2),
+            -2);
+
+  // Vanilla arithmetic should work too.
+  EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::max() / 2, 2),
+            std::numeric_limits<int32_t>::max() - 1);  // -1 as LSB is lost by division.
+  EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::max() / 2, 2),
+            std::numeric_limits<int64_t>::max() - 1);  // -1 as LSB is lost by division.
+
+  EXPECT_EQ(SafeMul(std::numeric_limits<int32_t>::min() / 2, 2),
+            std::numeric_limits<int32_t>::min());
+  EXPECT_EQ(SafeMul(std::numeric_limits<int64_t>::min() / 2, 2),
+            std::numeric_limits<int64_t>::min());
+
+  EXPECT_EQ(SafeMul(int32_t(-1), -1), 1);
+  EXPECT_EQ(SafeMul(int64_t(-1), -1), 1);
+
+  EXPECT_EQ(SafeMul(int32_t(1), 1), 1);
+  EXPECT_EQ(SafeMul(int64_t(1), 1), 1);
+
+  EXPECT_EQ(SafeMul(int32_t(-1), 1), -1);
+  EXPECT_EQ(SafeMul(int64_t(-1), 1), -1);
+
+  EXPECT_EQ(SafeMul(int32_t(1), -1), -1);
+  EXPECT_EQ(SafeMul(int64_t(1), -1), -1);
+
+  // Test sign extension of smaller operand sizes.
+  EXPECT_EQ(SafeMul(int32_t(1), int8_t(-1)), -1);
+  EXPECT_EQ(SafeMul(int64_t(1), int8_t(-1)), -1);
+}
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 59922b8..c2c6b12 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -19,6 +19,7 @@
 #include "nativebridge/native_bridge.h"
 
 #include "base/logging.h"
+#include "base/macros.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "scoped_thread_state_change.h"
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 11c610b..0373708 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1164,6 +1164,8 @@
     }
     os << "\n";
   }
+#else
+  UNUSED(os, tid, prefix, current_method);
 #endif
 }
 
diff --git a/test/420-const-class/expected.txt b/test/420-const-class/expected.txt
new file mode 100644
index 0000000..3213026
--- /dev/null
+++ b/test/420-const-class/expected.txt
@@ -0,0 +1,16 @@
+class Main
+class Main
+class Main$Other
+class Main$Other
+class java.lang.System
+class java.lang.System
+Hello from OtherWithClinit
+42
+class Main$OtherWithClinit
+42
+class Main$OtherWithClinit
+class Main$OtherWithClinit2
+Hello from OtherWithClinit2
+43
+class Main$OtherWithClinit2
+43
diff --git a/test/420-const-class/info.txt b/test/420-const-class/info.txt
new file mode 100644
index 0000000..81cbac7
--- /dev/null
+++ b/test/420-const-class/info.txt
@@ -0,0 +1 @@
+Test for the CONST_CLASS opcode.
diff --git a/test/420-const-class/src/Main.java b/test/420-const-class/src/Main.java
new file mode 100644
index 0000000..44a7436
--- /dev/null
+++ b/test/420-const-class/src/Main.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  static class Other {
+  }
+
+  static class OtherWithClinit {
+    static int a;
+    static {
+      System.out.println("Hello from OtherWithClinit");
+      a = 42;
+    }
+  }
+
+  static class OtherWithClinit2 {
+    static int a;
+    static {
+      System.out.println("Hello from OtherWithClinit2");
+      a = 43;
+    }
+  }
+
+  public static void main(String[] args) {
+    // Call each method twice so both the slow path (first call) and the
+    // fast path (second call) are exercised.
+
+    System.out.println($opt$LoadThisClass());
+    System.out.println($opt$LoadThisClass());
+
+    System.out.println($opt$LoadOtherClass());
+    System.out.println($opt$LoadOtherClass());
+
+    System.out.println($opt$LoadSystemClass());
+    System.out.println($opt$LoadSystemClass());
+
+    $opt$ClinitCheckAndLoad();
+    $opt$ClinitCheckAndLoad();
+
+    $opt$LoadAndClinitCheck();
+    $opt$LoadAndClinitCheck();
+  }
+
+  public static Class $opt$LoadThisClass() {
+    return Main.class;
+  }
+
+  public static Class $opt$LoadOtherClass() {
+    return Other.class;
+  }
+
+  public static Class $opt$LoadSystemClass() {
+    return System.class;
+  }
+
+  public static void $opt$ClinitCheckAndLoad() {
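+    // The static field access runs the class initializer before the
+    // const-class executes.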
+    System.out.println(OtherWithClinit.a);
+    System.out.println(OtherWithClinit.class);
+  }
+
+  public static void $opt$LoadAndClinitCheck() {
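+    // The const-class executes first and must not trigger initialization;
+    // the static field access afterwards does.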
+    System.out.println(OtherWithClinit2.class);
+    System.out.println(OtherWithClinit2.a);
+  }
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index bd426a7..0a1e3e1 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -312,6 +312,7 @@
   004-InterfaceTest \
   004-JniTest \
   004-StackWalk \
+  004-UnsafeTest \
   006-args \
   007-count10 \
   011-array-copy \
@@ -383,6 +384,7 @@
   123-compiler-regressions-mt \
   124-missing-classes \
   125-gc-and-classloading \
+  126-miranda-multidex \
   300-package-override \
   301-abstract-protected \
   303-verification-stress \
@@ -403,6 +405,7 @@
   417-optimizing-arith-div \
   418-const-string \
   419-long-parameter \
+  420-const-class \
   700-LoadArgRegs \
   701-easy-div-rem \
   702-LargeBranchOffset \