ART: Extract JNI macro assembler for arm64

Extract the JNI-specific parts (frame setup/teardown, store/load/copy
routines, handle scope helpers, and exception polling) from the regular
arm64 assembler into a dedicated JNI macro assembler.
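
For reference, a minimal sketch of the receiving side, assuming the
extracted pieces land in a new jni_macro_assembler_arm64.h. The class
name, base class, and delegate member below are illustrative assumptions;
the moved entry points are the ones removed from assembler_arm64.h in
the diff below:

  // Hypothetical sketch, not part of this change.
  class Arm64JNIMacroAssembler FINAL : public JNIMacroAssembler<PointerSize::k64> {
   public:
    explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
        : asm_(arena),
          exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}

    // JNI frame handling, store/load/copy routines, handle scope
    // helpers, and exception polling keep their OVERRIDE signatures,
    // e.g.:
    void BuildFrame(size_t frame_size,
                    ManagedRegister method_reg,
                    ArrayRef<const ManagedRegister> callee_save_regs,
                    const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
    void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;

   private:
    // Plain arm64 code generation stays in Arm64Assembler, whose
    // register helpers become public below so this class can use them.
    Arm64Assembler asm_;
    // Arm64Exception and the pending exception blocks move along.
    ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
  };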

Change-Id: I0b0ad32e18f585b75e9da0237afe082c25a1d291
Test: m test-art-target (N9)
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index d7084da..4e88e64 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -22,11 +22,9 @@
 #include <vector>
 
 #include "base/arena_containers.h"
-#include "base/enums.h"
 #include "base/logging.h"
 #include "utils/arm64/managed_register_arm64.h"
 #include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
 #include "offsets.h"
 
 // TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
@@ -63,38 +61,14 @@
   kStoreDWord
 };
 
-class Arm64Exception {
- private:
-  Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
-      : scratch_(scratch), stack_adjust_(stack_adjust) {
-    }
-
-  vixl::aarch64::Label* Entry() { return &exception_entry_; }
-
-  // Register used for passing Thread::Current()->exception_ .
-  const Arm64ManagedRegister scratch_;
-
-  // Stack adjust for ExceptionPool.
-  const size_t stack_adjust_;
-
-  vixl::aarch64::Label exception_entry_;
-
-  friend class Arm64Assembler;
-  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
-class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class Arm64Assembler FINAL : public Assembler {
  public:
-  explicit Arm64Assembler(ArenaAllocator* arena)
-      : Assembler(arena),
-        exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+  explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
 
   virtual ~Arm64Assembler() {}
 
   vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
 
-  DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
-
   // Finalize the code.
   void FinalizeCode() OVERRIDE;
 
@@ -105,110 +79,14 @@
   // Copy instructions out of assembly buffer into the given region of memory.
   void FinalizeInstructions(const MemoryRegion& region);
 
+  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
+
   void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
   void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
 
-  // Emit code that will create an activation on the stack.
-  void BuildFrame(size_t frame_size,
-                  ManagedRegister method_reg,
-                  ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
-  // Emit code that will remove an activation from the stack.
-  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
-      OVERRIDE;
-
-  void IncreaseFrameSize(size_t adjust) OVERRIDE;
-  void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
-  // Store routines.
-  void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
-  void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
-                                FrameOffset fr_offs,
-                                ManagedRegister scratch) OVERRIDE;
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
-  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
-                     ManagedRegister scratch) OVERRIDE;
-
-  // Load routines.
-  void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-  void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) OVERRIDE;
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-  void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
-  // Copying routines.
-  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-  void CopyRawPtrFromThread(FrameOffset fr_offs,
-                            ThreadOffset64 thr_offs,
-                            ManagedRegister scratch) OVERRIDE;
-  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      OVERRIDE;
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
-            size_t size) OVERRIDE;
-  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
-  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister scratch, size_t size) OVERRIDE;
-  void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
-  // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
-  // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
-  // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) OVERRIDE;
-
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister scratch,
-                              bool null_allowed) OVERRIDE;
-
-  // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
-  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
-  // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
-  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
-  // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
   // Jump to address (not setting link register)
   void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
 
-  // Generate code to check if Thread::Current()->exception_ is non-null
-  // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
   //
   // Heap poisoning.
   //
@@ -227,7 +105,6 @@
     UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
   }
 
- private:
   static vixl::aarch64::Register reg_x(int code) {
     CHECK(code < kNumberOfXRegisters) << code;
     if (code == SP) {
@@ -256,37 +133,7 @@
     return vixl::aarch64::FPRegister::GetSRegFromCode(code);
   }
 
-  // Emits Exception block.
-  void EmitExceptionPoll(Arm64Exception *exception);
-
-  void StoreWToOffset(StoreOperandType type, WRegister source,
-                      XRegister base, int32_t offset);
-  void StoreToOffset(XRegister source, XRegister base, int32_t offset);
-  void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
-  void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
-
-  void LoadImmediate(XRegister dest,
-                     int32_t value,
-                     vixl::aarch64::Condition cond = vixl::aarch64::al);
-  void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
-  void LoadWFromOffset(LoadOperandType type,
-                       WRegister dest,
-                       XRegister base,
-                       int32_t offset);
-  void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
-  void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
-  void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
-  void AddConstant(XRegister rd,
-                   int32_t value,
-                   vixl::aarch64::Condition cond = vixl::aarch64::al);
-  void AddConstant(XRegister rd,
-                   XRegister rn,
-                   int32_t value,
-                   vixl::aarch64::Condition cond = vixl::aarch64::al);
-
-  // List of exception blocks to generate at the end of the code cache.
-  ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
-
+ private:
   // VIXL assembler.
   vixl::aarch64::MacroAssembler vixl_masm_;
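
With the split in place, JNI stub generation would reach the arm64
implementation through the pointer-size-templated factory instead of
instantiating Arm64Assembler directly. A hedged example of a call site,
assuming JNIMacroAssembler<PointerSize::k64>::Create() from
utils/jni_macro_assembler.h dispatches to the new class for kArm64:

  ArenaPool pool;
  ArenaAllocator arena(&pool);
  // Factory picks the arm64 JNI macro assembler for kArm64.
  std::unique_ptr<JNIMacroAssembler<PointerSize::k64>> jni_asm =
      JNIMacroAssembler<PointerSize::k64>::Create(&arena, kArm64);
  jni_asm->BuildFrame(frame_size, method_register, callee_save_regs, entry_spills);
  // ... emit the stub body ...
  jni_asm->FinalizeCode();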