Move other compiler bits into the compiler shared libraries.

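The JNI bridge compiler (src/jni_compiler.cc) and the per-architecture
invoke stub generators (src/jni_internal_arm.cc, src/jni_internal_x86.cc)
move out of libart and into the libart-compiler libraries. The
JniCompiler class is replaced by a free function exported as the C
symbol ArtJniCompileMethod, each invoke stub generator is exported as
ArtCreateInvokeStub, and Compiler now binds both at runtime via the new
FindFunction<> dlsym helper, next to the existing oatCompileMethod
lookup.

Purely as an illustration (not part of this change), a minimal sketch
of binding the exported entry points by hand; the library name below is
assumed, and the real lookup goes through Compiler's FindFunction<>:

    #include <dlfcn.h>
    #include <stdint.h>
    #include <stdio.h>

    // Opaque forward declarations so the sketch stands alone; the real
    // definitions live in the art sources.
    namespace art {
      class ClassLoader;
      class CompiledInvokeStub;
      class CompiledMethod;
      class Compiler;
      class DexFile;
    }

    // These mirror the JniCompilerFn/CreateInvokeStubFn typedefs added
    // to src/compiler.h.
    typedef art::CompiledMethod* (*JniCompilerFn)(art::Compiler&, uint32_t,
        uint32_t, const art::ClassLoader*, const art::DexFile&);
    typedef art::CompiledInvokeStub* (*CreateInvokeStubFn)(bool, const char*,
        uint32_t);

    int main() {
      // Library name is illustrative; the driver computes the real name
      // per target architecture.
      void* lib = dlopen("libart-compiler-arm.so", RTLD_LAZY);
      if (lib == NULL) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
      }
      JniCompilerFn jni_compiler = reinterpret_cast<JniCompilerFn>(
          dlsym(lib, "ArtJniCompileMethod"));
      CreateInvokeStubFn create_invoke_stub = reinterpret_cast<CreateInvokeStubFn>(
          dlsym(lib, "ArtCreateInvokeStub"));
      if (jni_compiler == NULL || create_invoke_stub == NULL) {
        fprintf(stderr, "dlsym failed: %s\n", dlerror());
        return 1;
      }
      // ...invoke through the pointers as Compiler::CompileMethod and
      // Compiler::CreateInvokeStub now do.
      dlclose(lib);
      return 0;
    }
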
Change-Id: I288337af4c70716709217ff2d21050ba5f858807
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 0ab796a..7d9ef81 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -218,12 +218,6 @@
 	src/compiler_llvm/runtime_support_llvm.cc \
 	src/compiler_llvm/upcall_compiler.cc \
 	src/compiler_llvm/utils_llvm.cc
-else
-# TODO: should these be in libart-compiler.so instead?
-LIBART_COMMON_SRC_FILES += \
-	src/jni_compiler.cc \
-	src/jni_internal_arm.cc \
-	src/jni_internal_x86.cc
 endif
 
 LIBART_TARGET_SRC_FILES := \
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index a9108e07..01d9c12 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -21,10 +21,12 @@
 	src/compiler/Ralloc.cc \
 	src/compiler/SSATransformation.cc \
 	src/compiler/Utility.cc \
-	src/compiler/codegen/RallocUtil.cc
+	src/compiler/codegen/RallocUtil.cc \
+	src/jni_compiler.cc
 
 LIBART_COMPILER_ARM_SRC_FILES += \
 	$(LIBART_COMPILER_COMMON_SRC_FILES) \
+	src/jni_internal_arm.cc \
 	src/compiler/codegen/arm/ArchUtility.cc \
 	src/compiler/codegen/arm/ArmRallocUtil.cc \
 	src/compiler/codegen/arm/Assemble.cc \
@@ -39,6 +41,7 @@
 
 LIBART_COMPILER_X86_SRC_FILES += \
 	$(LIBART_COMPILER_COMMON_SRC_FILES) \
+	src/jni_internal_x86.cc \
 	src/compiler/codegen/x86/ArchUtility.cc \
 	src/compiler/codegen/x86/X86RallocUtil.cc \
 	src/compiler/codegen/x86/Assemble.cc \
diff --git a/src/compiler.cc b/src/compiler.cc
index 9539972..5ca7023 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -36,10 +36,6 @@
 #include "stl_util.h"
 #include "timing_logger.h"
 
-#if !defined(ART_USE_LLVM_COMPILER)
-#include "jni_compiler.h"
-#endif
-
 #if defined(ART_USE_LLVM_COMPILER)
 #include "compiler_llvm/compiler_llvm.h"
 #endif
@@ -48,13 +44,11 @@
 
 namespace arm {
   ByteArray* CreateAbstractMethodErrorStub();
-  CompiledInvokeStub* ArmCreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len);
   ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type);
   ByteArray* CreateJniDlsymLookupStub();
 }
 namespace x86 {
   ByteArray* CreateAbstractMethodErrorStub();
-  CompiledInvokeStub* X86CreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len);
   ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType type);
   ByteArray* CreateJniDlsymLookupStub();
 }
@@ -230,12 +224,19 @@
   return StringPrintf(OS_SHARED_LIB_FORMAT_STR, name.c_str());
 }
 
+template<typename Fn>
+static Fn FindFunction(const std::string& compiler_so_name, void* library, const char* name) {
+  Fn fn = reinterpret_cast<Fn>(dlsym(library, name));
+  if (fn == NULL) {
+    LOG(FATAL) << "Couldn't find \"" << name << "\" in compiler library " << compiler_so_name << ": " << dlerror();
+  }
+  VLOG(compiler) << "Found \"" << name << "\" at " << reinterpret_cast<void*>(fn);
+  return fn;
+}
+
 Compiler::Compiler(InstructionSet instruction_set, bool image, size_t thread_count,
                    bool support_debugging, const std::set<std::string>* image_classes)
     : instruction_set_(instruction_set),
-#if !defined(ART_USE_LLVM_COMPILER)
-      jni_compiler_(instruction_set),
-#endif
       compiled_classes_lock_("compiled classes lock"),
       compiled_methods_lock_("compiled method lock"),
       compiled_invoke_stubs_lock_("compiled invoke stubs lock"),
@@ -246,7 +247,9 @@
       image_classes_(image_classes),
 #if !defined(ART_USE_LLVM_COMPILER)
       compiler_library_(NULL),
-      compiler_(NULL)
+      compiler_(NULL),
+      jni_compiler_(NULL),
+      create_invoke_stub_(NULL)
 #else
       compiler_llvm_(new compiler_llvm::CompilerLLVM(this, instruction_set))
 #endif
@@ -258,12 +261,9 @@
   }
   VLOG(compiler) << "dlopen(\"" << compiler_so_name << "\", RTLD_LAZY) returned " << compiler_library_;
 
-  compiler_ = reinterpret_cast<CompilerFn>(dlsym(compiler_library_, "oatCompileMethod"));
-  if (compiler_ == NULL) {
-    LOG(FATAL) << "Couldn't find \"oatCompileMethod\" in compiler library " << compiler_so_name << ": " << dlerror();
-  }
-
-  VLOG(compiler) << "dlsym(compiler_library, \"oatCompileMethod\") returned " << reinterpret_cast<void*>(compiler_);
+  compiler_ = FindFunction<CompilerFn>(compiler_so_name, compiler_library_, "oatCompileMethod");
+  jni_compiler_ = FindFunction<JniCompilerFn>(compiler_so_name, compiler_library_, "ArtJniCompileMethod");
+  create_invoke_stub_ = FindFunction<CreateInvokeStubFn>(compiler_so_name, compiler_library_, "ArtCreateInvokeStub");
 
   CHECK(!Runtime::Current()->IsStarted());
   if (!image_) {
@@ -1093,7 +1093,7 @@
 #if defined(ART_USE_LLVM_COMPILER)
     compiled_method = compiler_llvm_->CompileNativeMethod(&oat_compilation_unit);
 #else
-    compiled_method = jni_compiler_.Compile(access_flags, method_idx, class_loader, dex_file);
+    compiled_method = (*jni_compiler_)(*this, access_flags, method_idx, class_loader, dex_file);
 #endif
     CHECK(compiled_method != NULL);
   } else if ((access_flags & kAccAbstract) != 0) {
@@ -1128,13 +1128,7 @@
 #if defined(ART_USE_LLVM_COMPILER)
     compiled_invoke_stub = compiler_llvm_->CreateInvokeStub(is_static, shorty);
 #else
-    if (instruction_set_ == kX86) {
-      compiled_invoke_stub = ::art::x86::X86CreateInvokeStub(is_static, shorty, shorty_len);
-    } else {
-      CHECK(instruction_set_ == kArm || instruction_set_ == kThumb2);
-      // Generates invocation stub using ARM instruction set
-      compiled_invoke_stub = ::art::arm::ArmCreateInvokeStub(is_static, shorty, shorty_len);
-    }
+    compiled_invoke_stub = (*create_invoke_stub_)(is_static, shorty, shorty_len);
 #endif
 
     CHECK(compiled_invoke_stub != NULL);
diff --git a/src/compiler.h b/src/compiler.h
index 31ee613..e53cc46 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -30,10 +30,6 @@
 #include "object.h"
 #include "runtime.h"
 
-#if !defined(ART_USE_LLVM_COMPILER)
-#include "jni_compiler.h"
-#endif
-
 #if defined(ART_USE_LLVM_COMPILER)
 #include "compiler_llvm/compiler_llvm.h"
 #endif
@@ -173,9 +169,6 @@
                         const CompiledInvokeStub* compiled_invoke_stub);
 
   InstructionSet instruction_set_;
-#if !defined(ART_USE_LLVM_COMPILER)
-  JniCompiler jni_compiler_;
-#endif
 
   typedef std::map<const ClassReference, CompiledClass*> ClassTable;
   // All class references that this compiler has compiled
@@ -205,12 +198,23 @@
   UniquePtr<compiler_llvm::CompilerLLVM> compiler_llvm_;
 #else
   void* compiler_library_;
+
   typedef CompiledMethod* (*CompilerFn)(Compiler& compiler,
                                         const DexFile::CodeItem* code_item,
                                         uint32_t access_flags, uint32_t method_idx,
                                         const ClassLoader* class_loader,
                                         const DexFile& dex_file);
   CompilerFn compiler_;
+
+  typedef CompiledMethod* (*JniCompilerFn)(Compiler& compiler,
+                                           uint32_t access_flags, uint32_t method_idx,
+                                           const ClassLoader* class_loader,
+                                           const DexFile& dex_file);
+  JniCompilerFn jni_compiler_;
+
+  typedef CompiledInvokeStub* (*CreateInvokeStubFn)(bool is_static,
+                                                    const char* shorty, uint32_t shorty_len);
+  CreateInvokeStubFn create_invoke_stub_;
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(Compiler);
diff --git a/src/exception_test.cc b/src/exception_test.cc
index 22ae682..eebba9d 100644
--- a/src/exception_test.cc
+++ b/src/exception_test.cc
@@ -22,7 +22,6 @@
 #include "common_test.h"
 #include "dex_file.h"
 #include "gtest/gtest.h"
-#include "jni_compiler.h"
 #include "runtime.h"
 #include "thread.h"
 
diff --git a/src/jni_compiler.cc b/src/jni_compiler.cc
index a7a7d12..bb64c73 100644
--- a/src/jni_compiler.cc
+++ b/src/jni_compiler.cc
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#include "jni_compiler.h"
-
 #include <sys/mman.h>
 #include <vector>
 
@@ -23,6 +21,7 @@
 #include "calling_convention.h"
 #include "class_linker.h"
 #include "compiled_method.h"
+#include "compiler.h"
 #include "constants.h"
 #include "jni_internal.h"
 #include "logging.h"
@@ -31,39 +30,165 @@
 #include "thread.h"
 #include "UniquePtr.h"
 
+#define __ jni_asm->
+
 namespace art {
 
-JniCompiler::JniCompiler(InstructionSet instruction_set) {
-  if (instruction_set == kThumb2) {
-    // currently only ARM code generation is supported
-    instruction_set_ = kArm;
+static void ChangeThreadState(Assembler* jni_asm, Thread::State new_state,
+                              ManagedRegister scratch, ManagedRegister return_reg,
+                              FrameOffset return_save_location,
+                              size_t return_size) {
+  /*
+   * This code mirrors that of Thread::SetState where detail is given on why
+   * barriers occur when they do.
+   */
+  if (new_state == Thread::kRunnable) {
+    /*
+     * Change our status to Thread::kRunnable.  The transition requires
+     * that we check for pending suspension, because the VM considers
+     * us to be "asleep" in all other states, and another thread could
+     * be performing a GC now.
+     */
+    __ StoreImmediateToThread(Thread::StateOffset(), Thread::kRunnable, scratch);
+    __ MemoryBarrier(scratch);
+    __ SuspendPoll(scratch, return_reg, return_save_location, return_size);
   } else {
-    instruction_set_ = instruction_set;
+    /*
+     * Not changing to Thread::kRunnable. No additional work required.
+     */
+    __ MemoryBarrier(scratch);
+    __ StoreImmediateToThread(Thread::StateOffset(), new_state, scratch);
   }
 }
 
-JniCompiler::~JniCompiler() {}
+// Copy a single parameter from the managed to the JNI calling convention
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size) {
+  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
+  FrameOffset sirt_offset(0);
+  bool null_allowed = false;
+  bool ref_param = jni_conv->IsCurrentParamAReference();
+  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+  // input may be in register, on stack or both - but not none!
+  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
+  if (output_in_reg) {  // output shouldn't straddle registers and stack
+    CHECK(!jni_conv->IsCurrentParamOnStack());
+  } else {
+    CHECK(jni_conv->IsCurrentParamOnStack());
+  }
+  // References need placing in SIRT and the entry address passing
+  if (ref_param) {
+    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
+    // Compute SIRT offset. Note null is placed in the SIRT but the jobject
+    // passed to the native code must be null (not a pointer into the SIRT
+    // as with regular references).
+    sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    // Check SIRT offset is within frame.
+    CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+  }
+  if (input_in_reg && output_in_reg) {
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+    } else {
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling move
+        __ Move(out_reg, in_reg);
+      } else {
+        UNIMPLEMENTED(FATAL);  // we currently don't expect to see this case
+      }
+    }
+  } else if (!input_in_reg && !output_in_reg) {
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    if (ref_param) {
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
+    }
+  } else if (!input_in_reg && output_in_reg) {
+    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    // Check that incoming stack arguments are above the current stack frame.
+    CHECK_GT(in_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Load(out_reg, in_off, param_size);
+    }
+  } else {
+    CHECK(input_in_reg && !output_in_reg);
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    // Check outgoing argument is within frame
+    CHECK_LT(out_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      // TODO: recycle value in in_reg rather than reload from SIRT
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling store
+        __ Store(out_off, in_reg, param_size);
+      } else {
+        // store where input straddles registers and stack
+        CHECK_EQ(param_size, 8u);
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
+      }
+    }
+  }
+}
+
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg) {
+  if (jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset dest = jni_conv->CurrentParamStackOffset();
+    __ StoreRawPtr(dest, in_reg);
+  } else {
+    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
+      __ Move(jni_conv->CurrentParamRegister(), in_reg);
+    }
+  }
+}
 
 // Generate the JNI bridge for the given method, general contract:
 // - Arguments are in the managed runtime format, either on stack or in
 //   registers, a reference to the method object is supplied as part of this
 //   convention.
 //
-CompiledMethod* JniCompiler::Compile(uint32_t access_flags, uint32_t method_idx,
-                                     const ClassLoader* class_loader, const DexFile& dex_file) {
+CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler,
+                                            uint32_t access_flags, uint32_t method_idx,
+                                            const ClassLoader* class_loader,
+                                            const DexFile& dex_file) {
   CHECK((access_flags & kAccNative) != 0);
   const bool is_static = (access_flags & kAccStatic) != 0;
   const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
   const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+  InstructionSet instruction_set = compiler.GetInstructionSet();
+  if (instruction_set == kThumb2) {
+    instruction_set = kArm;
+  }
   // Calling conventions used to iterate over parameters to method
   UniquePtr<JniCallingConvention> jni_conv(
-      JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set_));
+      JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
   UniquePtr<ManagedRuntimeCallingConvention> mr_conv(
-      ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set_));
+      ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
 
   // Assembler that holds generated instructions
-  UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set_));
-#define __ jni_asm->
+  UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
 
   // Offsets into data structures
   // TODO: if cross compiling these offsets are for the host not the target
@@ -426,148 +551,18 @@
   std::vector<uint8_t> managed_code(cs);
   MemoryRegion code(&managed_code[0], managed_code.size());
   __ FinalizeInstructions(code);
-  return new CompiledMethod(instruction_set_,
+  return new CompiledMethod(instruction_set,
                             managed_code,
                             frame_size,
                             jni_conv->CoreSpillMask(),
                             jni_conv->FpSpillMask());
-#undef __
-}
-
-void JniCompiler::SetNativeParameter(Assembler* jni_asm,
-                                     JniCallingConvention* jni_conv,
-                                     ManagedRegister in_reg) {
-#define __ jni_asm->
-  if (jni_conv->IsCurrentParamOnStack()) {
-    FrameOffset dest = jni_conv->CurrentParamStackOffset();
-    __ StoreRawPtr(dest, in_reg);
-  } else {
-    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
-      __ Move(jni_conv->CurrentParamRegister(), in_reg);
-    }
-  }
-#undef __
-}
-
-// Copy a single parameter from the managed to the JNI calling convention
-void JniCompiler::CopyParameter(Assembler* jni_asm,
-                                ManagedRuntimeCallingConvention* mr_conv,
-                                JniCallingConvention* jni_conv,
-                                size_t frame_size, size_t out_arg_size) {
-  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
-  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
-  FrameOffset sirt_offset(0);
-  bool null_allowed = false;
-  bool ref_param = jni_conv->IsCurrentParamAReference();
-  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
-  // input may be in register, on stack or both - but not none!
-  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
-  if (output_in_reg) {  // output shouldn't straddle registers and stack
-    CHECK(!jni_conv->IsCurrentParamOnStack());
-  } else {
-    CHECK(jni_conv->IsCurrentParamOnStack());
-  }
-  // References need placing in SIRT and the entry address passing
-  if (ref_param) {
-    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
-    // Compute SIRT offset. Note null is placed in the SIRT but the jobject
-    // passed to the native code must be null (not a pointer into the SIRT
-    // as with regular references).
-    sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
-    // Check SIRT offset is within frame.
-    CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
-  }
-#define __ jni_asm->
-  if (input_in_reg && output_in_reg) {
-    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
-    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
-    if (ref_param) {
-      __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
-    } else {
-      if (!mr_conv->IsCurrentParamOnStack()) {
-        // regular non-straddling move
-        __ Move(out_reg, in_reg);
-      } else {
-        UNIMPLEMENTED(FATAL);  // we currently don't expect to see this case
-      }
-    }
-  } else if (!input_in_reg && !output_in_reg) {
-    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
-    if (ref_param) {
-      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
-                         null_allowed);
-    } else {
-      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
-      size_t param_size = mr_conv->CurrentParamSize();
-      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
-      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
-    }
-  } else if (!input_in_reg && output_in_reg) {
-    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
-    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
-    // Check that incoming stack arguments are above the current stack frame.
-    CHECK_GT(in_off.Uint32Value(), frame_size);
-    if (ref_param) {
-      __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
-    } else {
-      size_t param_size = mr_conv->CurrentParamSize();
-      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
-      __ Load(out_reg, in_off, param_size);
-    }
-  } else {
-    CHECK(input_in_reg && !output_in_reg);
-    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
-    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
-    // Check outgoing argument is within frame
-    CHECK_LT(out_off.Uint32Value(), frame_size);
-    if (ref_param) {
-      // TODO: recycle value in in_reg rather than reload from SIRT
-      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
-                         null_allowed);
-    } else {
-      size_t param_size = mr_conv->CurrentParamSize();
-      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
-      if (!mr_conv->IsCurrentParamOnStack()) {
-        // regular non-straddling store
-        __ Store(out_off, in_reg, param_size);
-      } else {
-        // store where input straddles registers and stack
-        CHECK_EQ(param_size, 8u);
-        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
-        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
-      }
-    }
-  }
-#undef __
-}
-
-void JniCompiler::ChangeThreadState(Assembler* jni_asm, Thread::State new_state,
-                                    ManagedRegister scratch, ManagedRegister return_reg,
-                                    FrameOffset return_save_location,
-                                    size_t return_size) {
-  /*
-   * This code mirrors that of Thread::SetState where detail is given on why
-   * barriers occur when they do.
-   */
-#define __ jni_asm->
-  if (new_state == Thread::kRunnable) {
-    /*
-     * Change our status to Thread::kRunnable.  The transition requires
-     * that we check for pending suspension, because the VM considers
-     * us to be "asleep" in all other states, and another thread could
-     * be performing a GC now.
-     */
-    __ StoreImmediateToThread(Thread::StateOffset(), Thread::kRunnable, scratch);
-    __ MemoryBarrier(scratch);
-    __ SuspendPoll(scratch, return_reg, return_save_location, return_size);
-  } else {
-    /*
-     * Not changing to Thread::kRunnable. No additional work required.
-     */
-    __ MemoryBarrier(scratch);
-    __ StoreImmediateToThread(Thread::StateOffset(), new_state, scratch);
-  }
-  #undef __
 }
 
 }  // namespace art
+
+extern "C" art::CompiledMethod* ArtJniCompileMethod(art::Compiler& compiler,
+                                                    uint32_t access_flags, uint32_t method_idx,
+                                                    const art::ClassLoader* class_loader,
+                                                    const art::DexFile& dex_file) {
+  return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, class_loader, dex_file);
+}
diff --git a/src/jni_compiler.h b/src/jni_compiler.h
deleted file mode 100644
index 53563b3..0000000
--- a/src/jni_compiler.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_JNI_COMPILER_H_
-#define ART_SRC_JNI_COMPILER_H_
-
-#include "compiled_method.h"
-#include "constants.h"
-#include "macros.h"
-#include "thread.h"
-
-namespace art {
-
-class Assembler;
-class ClassLoader;
-class Compiler;
-class DexFile;
-class JniCallingConvention;
-class ManagedRegister;
-class ManagedRuntimeCallingConvention;
-class Method;
-
-// A JNI compiler generates code that acts as the bridge between managed code
-// and native code.
-// TODO: move the responsibility of managing memory to somewhere else
-class JniCompiler {
- public:
-  explicit JniCompiler(InstructionSet instruction_set);
-  ~JniCompiler();
-
-  CompiledMethod* Compile(uint32_t access_flags, uint32_t method_idx,
-                          const ClassLoader* class_loader, const DexFile& dex_file);
-
- private:
-  // Copy a single parameter from the managed to the JNI calling convention
-  void CopyParameter(Assembler* jni_asm,
-                     ManagedRuntimeCallingConvention* mr_conv,
-                     JniCallingConvention* jni_conv,
-                     size_t frame_size, size_t out_arg_size);
-
-  void SetNativeParameter(Assembler* jni_asm,
-                          JniCallingConvention* jni_conv,
-                          ManagedRegister in_reg);
-
-  void ChangeThreadState(Assembler* jni_asm, Thread::State new_state,
-                         ManagedRegister scratch, ManagedRegister return_reg,
-                         FrameOffset return_save_location,
-                         size_t return_size);
-
-  // Architecture to generate code for
-  InstructionSet instruction_set_;
-
-  DISALLOW_COPY_AND_ASSIGN(JniCompiler);
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_JNI_COMPILER_H_
diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc
index fd7574d..3bf3bb4 100644
--- a/src/jni_compiler_test.cc
+++ b/src/jni_compiler_test.cc
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#include "jni_compiler.h"
-
 #include <sys/mman.h>
 
 #include "UniquePtr.h"
diff --git a/src/jni_internal_arm.cc b/src/jni_internal_arm.cc
index d5c2a25..05deff7 100644
--- a/src/jni_internal_arm.cc
+++ b/src/jni_internal_arm.cc
@@ -43,9 +43,8 @@
 // register and transfer arguments from the array into register and on
 // the stack, if needed.  On return, the thread register must be
 // shuffled and the return value must be stored into the result JValue.
-CompiledInvokeStub* ArmCreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
-  UniquePtr<ArmAssembler> assembler(
-      down_cast<ArmAssembler*>(Assembler::Create(kArm)));
+CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
+  UniquePtr<ArmAssembler> assembler(down_cast<ArmAssembler*>(Assembler::Create(kArm)));
 #define __ assembler->
   size_t num_arg_array_bytes = NumArgArrayBytes(shorty, shorty_len);
   // Size of frame - spill of R4,R9/LR + Method* + possible receiver + arg array
@@ -143,3 +142,7 @@
 
 }  // namespace arm
 }  // namespace art
+
+extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
+  return art::arm::CreateInvokeStub(is_static, shorty, shorty_len);
+}
diff --git a/src/jni_internal_x86.cc b/src/jni_internal_x86.cc
index 403b37a..67e8d30 100644
--- a/src/jni_internal_x86.cc
+++ b/src/jni_internal_x86.cc
@@ -39,7 +39,7 @@
 // "running" state the remaining responsibilities of this routine are
 // to save the native registers and set up the managed registers. On
 // return, the return value must be stored into the result JValue.
-CompiledInvokeStub* X86CreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
+CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
   UniquePtr<X86Assembler> assembler(
       down_cast<X86Assembler*>(Assembler::Create(kX86)));
 #define __ assembler->
@@ -104,3 +104,7 @@
 
 }  // namespace x86
 }  // namespace art
+
+extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
+  return art::x86::CreateInvokeStub(is_static, shorty, shorty_len);
+}