| -rw-r--r-- | compiler/Android.mk | 1 |
| -rw-r--r-- | compiler/jni/jni_cfi_test.cc | 21 |
| -rw-r--r-- | compiler/jni/quick/jni_compiler.cc | 176 |
| -rw-r--r-- | compiler/utils/arm/assembler_arm.cc | 37 |
| -rw-r--r-- | compiler/utils/arm/assembler_arm.h | 36 |
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.cc | 35 |
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.h | 28 |
| -rw-r--r-- | compiler/utils/assembler.cc | 133 |
| -rw-r--r-- | compiler/utils/assembler.h | 153 |
| -rw-r--r-- | compiler/utils/jni_macro_assembler.cc | 109 |
| -rw-r--r-- | compiler/utils/jni_macro_assembler.h | 235 |
| -rw-r--r-- | compiler/utils/jni_macro_assembler_test.h | 151 |
| -rw-r--r-- | compiler/utils/mips/assembler_mips.cc | 36 |
| -rw-r--r-- | compiler/utils/mips/assembler_mips.h | 35 |
| -rw-r--r-- | compiler/utils/mips64/assembler_mips64.cc | 28 |
| -rw-r--r-- | compiler/utils/mips64/assembler_mips64.h | 33 |
| -rw-r--r-- | compiler/utils/x86/assembler_x86.cc | 31 |
| -rw-r--r-- | compiler/utils/x86/assembler_x86.h | 34 |
| -rw-r--r-- | compiler/utils/x86_64/assembler_x86_64.cc | 18 |
| -rw-r--r-- | compiler/utils/x86_64/assembler_x86_64.h | 35 |
20 files changed, 783 insertions, 582 deletions
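The pattern this change establishes: the width-suffixed Thread helpers on Assembler (LoadFromThread32/LoadFromThread64, StoreStackPointerToThread32/64, CallFromThread32/64, and friends) are retired in favor of a single templated JNIMacroAssembler<PointerSize> interface, so a caller branches on pointer width once, up front, and every call after that is unsuffixed. A minimal sketch of that dispatch, condensed from the jni_cfi_test.cc and jni_compiler.cc hunks below (the CompileWith helper name is illustrative; includes and the remaining emission calls are elided):

template <PointerSize kPointerSize>
static void CompileWith(ArenaAllocator* arena, InstructionSet isa) {
  // One instantiation per width; ThreadOffset<kPointerSize> does the rest.
  std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm(
      JNIMacroAssembler<kPointerSize>::Create(arena, isa));
  jni_asm->StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
  // ... BuildFrame / Store / Call / RemoveFrame exactly as in the hunks below ...
}

static void Compile(ArenaAllocator* arena, InstructionSet isa) {
  // The only is-64-bit branch left is this top-level dispatch.
  if (Is64BitInstructionSet(isa)) {
    CompileWith<PointerSize::k64>(arena, isa);
  } else {
    CompileWith<PointerSize::k32>(arena, isa);
  }
}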
diff --git a/compiler/Android.mk b/compiler/Android.mk index 7ada749821..3b7b1e638c 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -81,6 +81,7 @@ LIBART_COMPILER_SRC_FILES := \ optimizing/x86_memory_gen.cc \ trampolines/trampoline_compiler.cc \ utils/assembler.cc \ + utils/jni_macro_assembler.cc \ utils/swap_space.cc \ compiler.cc \ elf_writer.cc \ diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc index 3526802d6c..524ce4d34e 100644 --- a/compiler/jni/jni_cfi_test.cc +++ b/compiler/jni/jni_cfi_test.cc @@ -19,10 +19,12 @@ #include "arch/instruction_set.h" #include "base/arena_allocator.h" +#include "base/enums.h" #include "cfi_test.h" #include "gtest/gtest.h" #include "jni/quick/calling_convention.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" #include "jni/jni_cfi_test_expected.inc" @@ -36,9 +38,23 @@ class JNICFITest : public CFITest { // Enable this flag to generate the expected outputs. static constexpr bool kGenerateExpected = false; - void TestImpl(InstructionSet isa, const char* isa_str, + void TestImpl(InstructionSet isa, + const char* isa_str, const std::vector<uint8_t>& expected_asm, const std::vector<uint8_t>& expected_cfi) { + if (Is64BitInstructionSet(isa)) { + TestImplSized<PointerSize::k64>(isa, isa_str, expected_asm, expected_cfi); + } else { + TestImplSized<PointerSize::k32>(isa, isa_str, expected_asm, expected_cfi); + } + } + + private: + template <PointerSize kPointerSize> + void TestImplSized(InstructionSet isa, + const char* isa_str, + const std::vector<uint8_t>& expected_asm, + const std::vector<uint8_t>& expected_cfi) { // Description of simple method. const bool is_static = true; const bool is_synchronized = false; @@ -55,7 +71,8 @@ class JNICFITest : public CFITest { ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters(); // Assemble the method. 
- std::unique_ptr<Assembler> jni_asm(Assembler::Create(&arena, isa)); + std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm( + JNIMacroAssembler<kPointerSize>::Create(&arena, isa)); jni_asm->cfi().SetEnabled(true); jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills()); diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index 277b794157..f99f6a8792 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -26,6 +26,7 @@ #include "base/enums.h" #include "base/logging.h" #include "base/macros.h" +#include "memory_region.h" #include "calling_convention.h" #include "class_linker.h" #include "compiled_method.h" @@ -34,7 +35,9 @@ #include "driver/compiler_options.h" #include "entrypoints/quick/quick_entrypoints.h" #include "jni_env_ext.h" +#include "debug/dwarf/debug_frame_opcode_writer.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" #include "utils/managed_register.h" #include "utils/arm/managed_register_arm.h" #include "utils/arm64/managed_register_arm64.h" @@ -47,22 +50,32 @@ namespace art { -static void CopyParameter(Assembler* jni_asm, +template <PointerSize kPointerSize> +static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm, ManagedRuntimeCallingConvention* mr_conv, JniCallingConvention* jni_conv, size_t frame_size, size_t out_arg_size); -static void SetNativeParameter(Assembler* jni_asm, +template <PointerSize kPointerSize> +static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm, JniCallingConvention* jni_conv, ManagedRegister in_reg); +template <PointerSize kPointerSize> +static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler( + ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) { + return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features); +} + // Generate the JNI bridge for the given method, general contract: // - Arguments are in the managed runtime format, either on stack or in // registers, a reference to the method object is supplied as part of this // convention. 
// -CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, - uint32_t access_flags, uint32_t method_idx, - const DexFile& dex_file) { +template <PointerSize kPointerSize> +static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, + uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) { const bool is_native = (access_flags & kAccNative) != 0; CHECK(is_native); const bool is_static = (access_flags & kAccStatic) != 0; @@ -70,7 +83,6 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx)); InstructionSet instruction_set = driver->GetInstructionSet(); const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures(); - const bool is_64_bit_target = Is64BitInstructionSet(instruction_set); ArenaPool pool; ArenaAllocator arena(&pool); @@ -101,8 +113,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, &arena, is_static, is_synchronized, jni_end_shorty, instruction_set)); // Assembler that holds generated instructions - std::unique_ptr<Assembler> jni_asm( - Assembler::Create(&arena, instruction_set, instruction_set_features)); + std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm = + GetMacroAssembler<kPointerSize>(&arena, instruction_set, instruction_set_features); jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo()); // Offsets into data structures @@ -124,21 +136,12 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, main_jni_conv->ReferenceCount(), mr_conv->InterproceduralScratchRegister()); - if (is_64_bit_target) { - __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(), - Thread::TopHandleScopeOffset<PointerSize::k64>(), - mr_conv->InterproceduralScratchRegister()); - __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<PointerSize::k64>(), - main_jni_conv->HandleScopeOffset(), - mr_conv->InterproceduralScratchRegister()); - } else { - __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(), - Thread::TopHandleScopeOffset<PointerSize::k32>(), + __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(), + Thread::TopHandleScopeOffset<kPointerSize>(), + mr_conv->InterproceduralScratchRegister()); + __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset<kPointerSize>(), + main_jni_conv->HandleScopeOffset(), mr_conv->InterproceduralScratchRegister()); - __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<PointerSize::k32>(), - main_jni_conv->HandleScopeOffset(), - mr_conv->InterproceduralScratchRegister()); - } // 3. Place incoming reference arguments into handle scope main_jni_conv->Next(); // Skip JNIEnv* @@ -188,11 +191,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, } // 4. Write out the end of the quick frames. - if (is_64_bit_target) { - __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<PointerSize::k64>()); - } else { - __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<PointerSize::k32>()); - } + __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>()); // 5. Move frame down to allow space for out going args. const size_t main_out_arg_size = main_jni_conv->OutArgSize(); @@ -202,10 +201,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, // Call the read barrier for the declaring class loaded from the method for a static call. // Note that we always have outgoing param space available for at least two params. 
if (kUseReadBarrier && is_static) { - ThreadOffset32 read_barrier32 = - QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pReadBarrierJni); - ThreadOffset64 read_barrier64 = - QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pReadBarrierJni); + ThreadOffset<kPointerSize> read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize, + pReadBarrierJni); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); main_jni_conv->Next(); // Skip JNIEnv. FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); @@ -225,21 +222,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, // Pass the current thread as the second argument and call. if (main_jni_conv->IsCurrentParamInRegister()) { __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); - if (is_64_bit_target) { - __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier64), - main_jni_conv->InterproceduralScratchRegister()); - } else { - __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier32), - main_jni_conv->InterproceduralScratchRegister()); - } + __ Call(main_jni_conv->CurrentParamRegister(), + Offset(read_barrier), + main_jni_conv->InterproceduralScratchRegister()); } else { __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), main_jni_conv->InterproceduralScratchRegister()); - if (is_64_bit_target) { - __ CallFromThread64(read_barrier64, main_jni_conv->InterproceduralScratchRegister()); - } else { - __ CallFromThread32(read_barrier32, main_jni_conv->InterproceduralScratchRegister()); - } + __ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister()); } main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset. } @@ -248,14 +237,10 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, // can occur. The result is the saved JNI local state that is restored by the exit call. We // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer // arguments. - ThreadOffset32 jni_start32 = + ThreadOffset<kPointerSize> jni_start = is_synchronized - ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStartSynchronized) - : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodStart); - ThreadOffset64 jni_start64 = - is_synchronized - ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStartSynchronized) - : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodStart); + ? 
QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStartSynchronized) + : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStart); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); FrameOffset locked_object_handle_scope_offset(0); if (is_synchronized) { @@ -276,21 +261,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, } if (main_jni_conv->IsCurrentParamInRegister()) { __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); - if (is_64_bit_target) { - __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64), - main_jni_conv->InterproceduralScratchRegister()); - } else { - __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32), - main_jni_conv->InterproceduralScratchRegister()); - } + __ Call(main_jni_conv->CurrentParamRegister(), + Offset(jni_start), + main_jni_conv->InterproceduralScratchRegister()); } else { __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), main_jni_conv->InterproceduralScratchRegister()); - if (is_64_bit_target) { - __ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister()); - } else { - __ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister()); - } + __ CallFromThread(jni_start, main_jni_conv->InterproceduralScratchRegister()); } if (is_synchronized) { // Check for exceptions from monitor enter. __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size); @@ -352,20 +329,12 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, if (main_jni_conv->IsCurrentParamInRegister()) { ManagedRegister jni_env = main_jni_conv->CurrentParamRegister(); DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister())); - if (is_64_bit_target) { - __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>()); - } else { - __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>()); - } + __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset<kPointerSize>()); } else { FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset(); - if (is_64_bit_target) { - __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<PointerSize::k64>(), - main_jni_conv->InterproceduralScratchRegister()); - } else { - __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<PointerSize::k32>(), - main_jni_conv->InterproceduralScratchRegister()); - } + __ CopyRawPtrFromThread(jni_env, + Thread::JniEnvOffset<kPointerSize>(), + main_jni_conv->InterproceduralScratchRegister()); } // 9. Plant call to native code associated with method. @@ -398,7 +367,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, + static_cast<size_t>(kMipsPointerSize)); } CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size); - __ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue()); + __ Store(return_save_location, + main_jni_conv->ReturnRegister(), + main_jni_conv->SizeOfReturnValue()); } // Increase frame size for out args if needed by the end_jni_conv. @@ -414,27 +385,18 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, } // thread. end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size)); - ThreadOffset32 jni_end32(-1); - ThreadOffset64 jni_end64(-1); + ThreadOffset<kPointerSize> jni_end(-1); if (reference_return) { // Pass result. - jni_end32 = is_synchronized - ? 
QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, - pJniMethodEndWithReferenceSynchronized) - : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndWithReference); - jni_end64 = is_synchronized - ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, - pJniMethodEndWithReferenceSynchronized) - : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndWithReference); + jni_end = is_synchronized + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReferenceSynchronized) + : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReference); SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister()); end_jni_conv->Next(); } else { - jni_end32 = is_synchronized - ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEndSynchronized) - : QUICK_ENTRYPOINT_OFFSET(PointerSize::k32, pJniMethodEnd); - jni_end64 = is_synchronized - ? QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEndSynchronized) - : QUICK_ENTRYPOINT_OFFSET(PointerSize::k64, pJniMethodEnd); + jni_end = is_synchronized + ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndSynchronized) + : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEnd); } // Pass saved local reference state. if (end_jni_conv->IsCurrentParamOnStack()) { @@ -461,23 +423,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, } if (end_jni_conv->IsCurrentParamInRegister()) { __ GetCurrentThread(end_jni_conv->CurrentParamRegister()); - if (is_64_bit_target) { - __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64), - end_jni_conv->InterproceduralScratchRegister()); - } else { - __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end32), - end_jni_conv->InterproceduralScratchRegister()); - } + __ Call(end_jni_conv->CurrentParamRegister(), + Offset(jni_end), + end_jni_conv->InterproceduralScratchRegister()); } else { __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(), end_jni_conv->InterproceduralScratchRegister()); - if (is_64_bit_target) { - __ CallFromThread64(ThreadOffset64(jni_end64), - end_jni_conv->InterproceduralScratchRegister()); - } else { - __ CallFromThread32(ThreadOffset32(jni_end32), - end_jni_conv->InterproceduralScratchRegister()); - } + __ CallFromThread(jni_end, end_jni_conv->InterproceduralScratchRegister()); } // 13. Reload return value @@ -517,7 +469,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, } // Copy a single parameter from the managed to the JNI calling convention. 
-static void CopyParameter(Assembler* jni_asm, +template <PointerSize kPointerSize> +static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm, ManagedRuntimeCallingConvention* mr_conv, JniCallingConvention* jni_conv, size_t frame_size, size_t out_arg_size) { @@ -606,7 +559,8 @@ static void CopyParameter(Assembler* jni_asm, } } -static void SetNativeParameter(Assembler* jni_asm, +template <PointerSize kPointerSize> +static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm, JniCallingConvention* jni_conv, ManagedRegister in_reg) { if (jni_conv->IsCurrentParamOnStack()) { @@ -621,7 +575,13 @@ static void SetNativeParameter(Assembler* jni_asm, CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags, uint32_t method_idx, const DexFile& dex_file) { - return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file); + if (Is64BitInstructionSet(compiler->GetInstructionSet())) { + return ArtJniCompileMethodInternal<PointerSize::k64>( + compiler, access_flags, method_idx, dex_file); + } else { + return ArtJniCompileMethodInternal<PointerSize::k32>( + compiler, access_flags, method_idx, dex_file); + } } } // namespace art diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 1796b3940c..aadc43f921 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -568,15 +568,6 @@ void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); } -void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest, - uint32_t imm, - ManagedRegister mscratch) { - ArmManagedRegister scratch = mscratch.AsArm(); - CHECK(scratch.IsCoreRegister()) << scratch; - LoadImmediate(scratch.AsCoreRegister(), imm); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value()); -} - static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size) { ArmManagedRegister dst = m_dst.AsArm(); @@ -601,19 +592,19 @@ void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) { return EmitLoad(this, m_dst, SP, src.Int32Value(), size); } -void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) { +void ArmAssembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset32 src, size_t size) { return EmitLoad(this, m_dst, TR, src.Int32Value(), size); } -void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) { +void ArmAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) { ArmManagedRegister dst = m_dst.AsArm(); CHECK(dst.IsCoreRegister()) << dst; LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value()); } -void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister mscratch) { +void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister mscratch) { ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), @@ -622,9 +613,9 @@ void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs, SP, fr_offs.Int32Value()); } -void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void ArmAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { 
ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), @@ -633,9 +624,9 @@ void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs, TR, thr_offs.Int32Value()); } -void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void ArmAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { ArmManagedRegister scratch = mscratch.AsArm(); CHECK(scratch.IsCoreRegister()) << scratch; AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL); @@ -643,7 +634,7 @@ void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs, TR, thr_offs.Int32Value()); } -void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) { +void ArmAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) { StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value()); } @@ -832,8 +823,8 @@ void ArmAssembler::Call(FrameOffset base, Offset offset, // TODO: place reference map on call } -void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { +void ArmAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED, + ManagedRegister scratch ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL); } diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h index 2b7414d892..bb88e6fdf4 100644 --- a/compiler/utils/arm/assembler_arm.h +++ b/compiler/utils/arm/assembler_arm.h @@ -23,12 +23,14 @@ #include "base/arena_allocator.h" #include "base/arena_containers.h" #include "base/bit_utils.h" +#include "base/enums.h" #include "base/logging.h" #include "base/stl_util.h" #include "base/value_object.h" #include "constants_arm.h" #include "utils/arm/managed_register_arm.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" #include "offsets.h" namespace art { @@ -433,10 +435,19 @@ extern const char* kConditionNames[]; // This is an abstract ARM assembler. Subclasses provide assemblers for the individual // instruction sets (ARM32, Thumb2, etc.) // -class ArmAssembler : public Assembler { +class ArmAssembler : public Assembler, public JNIMacroAssembler<PointerSize::k32> { public: virtual ~ArmAssembler() {} + size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); } + DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } + void FinalizeCode() OVERRIDE { + Assembler::FinalizeCode(); + } + void FinalizeInstructions(const MemoryRegion& region) { + Assembler::FinalizeInstructions(region); + } + // Is this assembler for the thumb instruction set? 
virtual bool IsThumb() const = 0; @@ -904,13 +915,11 @@ class ArmAssembler : public Assembler { void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; - void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch) - OVERRIDE; - - void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, - ManagedRegister scratch) OVERRIDE; + void StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) OVERRIDE; - void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE; void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, ManagedRegister scratch) OVERRIDE; @@ -918,7 +927,7 @@ class ArmAssembler : public Assembler { // Load routines void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE; void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; @@ -927,15 +936,16 @@ class ArmAssembler : public Assembler { void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE; // Copying routines void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; - void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs, - ManagedRegister scratch) OVERRIDE; + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister scratch) OVERRIDE; - void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) + void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) OVERRIDE; void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; @@ -990,7 +1000,7 @@ class ArmAssembler : public Assembler { // Call to address held at [base+offset] void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; - void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE; + void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE; // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. 
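The assembler_arm.h hunk above also shows how a concrete 32-bit back-end satisfies the new interface: it inherits from both Assembler and JNIMacroAssembler<PointerSize::k32>, pins the members that both bases declare to the Assembler implementations, and keeps the ThreadOffset32 parameter types while dropping the "32" suffix from the method names. Trimmed to just that shape (JNI entry-point bodies elided):

class ArmAssembler : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
 public:
  // Explicit forwarders disambiguate the members declared by both bases.
  size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
  DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
  void FinalizeCode() OVERRIDE { Assembler::FinalizeCode(); }
  void FinalizeInstructions(const MemoryRegion& region) {
    Assembler::FinalizeInstructions(region);
  }

  // Former *Thread32 methods, now unsuffixed but still ThreadOffset32-typed.
  void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
  void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
  void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
  // ...
};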
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc index dc1f24a152..53685bfa53 100644 --- a/compiler/utils/arm64/assembler_arm64.cc +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -164,25 +164,16 @@ void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm, offs.Int32Value()); } -void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs, - uint32_t imm, +void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs, + FrameOffset fr_offs, ManagedRegister m_scratch) { Arm64ManagedRegister scratch = m_scratch.AsArm64(); CHECK(scratch.IsXRegister()) << scratch; - LoadImmediate(scratch.AsXRegister(), imm); - StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value()); -} - -void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs, - FrameOffset fr_offs, - ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value()); StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); } -void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) { +void Arm64Assembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) { UseScratchRegisterScope temps(&vixl_masm_); Register temp = temps.AcquireX(); ___ Mov(temp, reg_x(SP)); @@ -286,7 +277,7 @@ void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) { return Load(m_dst.AsArm64(), SP, src.Int32Value(), size); } -void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) { +void Arm64Assembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset64 src, size_t size) { return Load(m_dst.AsArm64(), TR, src.Int32Value(), size); } @@ -319,7 +310,7 @@ void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, O ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value())); } -void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) { +void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) { Arm64ManagedRegister dst = m_dst.AsArm64(); CHECK(dst.IsXRegister()) << dst; LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value()); @@ -355,18 +346,18 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s } } -void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, - ThreadOffset64 tr_offs, - ManagedRegister m_scratch) { +void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 tr_offs, + ManagedRegister m_scratch) { Arm64ManagedRegister scratch = m_scratch.AsArm64(); CHECK(scratch.IsXRegister()) << scratch; LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value()); } -void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs, - FrameOffset fr_offs, - ManagedRegister m_scratch) { +void Arm64Assembler::CopyRawPtrToThread(ThreadOffset64 tr_offs, + FrameOffset fr_offs, + ManagedRegister m_scratch) { Arm64ManagedRegister scratch = m_scratch.AsArm64(); CHECK(scratch.IsXRegister()) << scratch; LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value()); @@ -543,8 +534,8 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat ___ Blr(reg_x(scratch.AsXRegister())); } -void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { +void 
Arm64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED, + ManagedRegister scratch ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant"; } diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index b8434b9263..d7084dad1c 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -22,9 +22,11 @@ #include <vector> #include "base/arena_containers.h" +#include "base/enums.h" #include "base/logging.h" #include "utils/arm64/managed_register_arm64.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" #include "offsets.h" // TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn @@ -81,7 +83,7 @@ class Arm64Exception { DISALLOW_COPY_AND_ASSIGN(Arm64Exception); }; -class Arm64Assembler FINAL : public Assembler { +class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> { public: explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena), @@ -91,6 +93,8 @@ class Arm64Assembler FINAL : public Assembler { vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; } + DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } + // Finalize the code. void FinalizeCode() OVERRIDE; @@ -122,28 +126,28 @@ class Arm64Assembler FINAL : public Assembler { void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE; void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE; void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; - void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch) - OVERRIDE; - void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, - ManagedRegister scratch) OVERRIDE; - void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE; + void StoreStackOffsetToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE; void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, ManagedRegister scratch) OVERRIDE; // Load routines. void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE; void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, bool unpoison_reference) OVERRIDE; void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE; // Copying routines. 
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; - void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs, - ManagedRegister scratch) OVERRIDE; - void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 thr_offs, + ManagedRegister scratch) OVERRIDE; + void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) OVERRIDE; void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE; @@ -196,7 +200,7 @@ class Arm64Assembler FINAL : public Assembler { // Call to address held at [base+offset]. void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; - void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE; + void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE; // Jump to address (not setting link register) void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch); diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc index 0a1b7334b8..81159e69a0 100644 --- a/compiler/utils/assembler.cc +++ b/compiler/utils/assembler.cc @@ -121,137 +121,4 @@ void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() { } } -std::unique_ptr<Assembler> Assembler::Create( - ArenaAllocator* arena, - InstructionSet instruction_set, - const InstructionSetFeatures* instruction_set_features) { - switch (instruction_set) { -#ifdef ART_ENABLE_CODEGEN_arm - case kArm: - return std::unique_ptr<Assembler>(new (arena) arm::Arm32Assembler(arena)); - case kThumb2: - return std::unique_ptr<Assembler>(new (arena) arm::Thumb2Assembler(arena)); -#endif -#ifdef ART_ENABLE_CODEGEN_arm64 - case kArm64: - return std::unique_ptr<Assembler>(new (arena) arm64::Arm64Assembler(arena)); -#endif -#ifdef ART_ENABLE_CODEGEN_mips - case kMips: - return std::unique_ptr<Assembler>(new (arena) mips::MipsAssembler( - arena, - instruction_set_features != nullptr - ? 
instruction_set_features->AsMipsInstructionSetFeatures() - : nullptr)); -#endif -#ifdef ART_ENABLE_CODEGEN_mips64 - case kMips64: - return std::unique_ptr<Assembler>(new (arena) mips64::Mips64Assembler(arena)); -#endif -#ifdef ART_ENABLE_CODEGEN_x86 - case kX86: - return std::unique_ptr<Assembler>(new (arena) x86::X86Assembler(arena)); -#endif -#ifdef ART_ENABLE_CODEGEN_x86_64 - case kX86_64: - return std::unique_ptr<Assembler>(new (arena) x86_64::X86_64Assembler(arena)); -#endif - default: - LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; - return nullptr; - } -} - -void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED, - uint32_t imm ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED, - uint32_t imm ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::StoreStackOffsetToThread32( - ThreadOffset32 thr_offs ATTRIBUTE_UNUSED, - FrameOffset fr_offs ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::StoreStackOffsetToThread64( - ThreadOffset64 thr_offs ATTRIBUTE_UNUSED, - FrameOffset fr_offs ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::StoreStackPointerToThread32( - ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::StoreStackPointerToThread64( - ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED, - ThreadOffset32 src ATTRIBUTE_UNUSED, - size_t size ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED, - ThreadOffset64 src ATTRIBUTE_UNUSED, - size_t size ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED, - ThreadOffset32 offs ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED, - ThreadOffset64 offs ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED, - ThreadOffset32 thr_offs ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED, - ThreadOffset64 thr_offs ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED, - FrameOffset fr_offs ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED, - FrameOffset fr_offs ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - -void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL); -} - } // namespace art diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index 89f7947cd5..8981776314 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -356,11 +356,6 @@ 
class DebugFrameOpCodeWriterForAssembler FINAL class Assembler : public DeletableArenaObject<kArenaAllocAssembler> { public: - static std::unique_ptr<Assembler> Create( - ArenaAllocator* arena, - InstructionSet instruction_set, - const InstructionSetFeatures* instruction_set_features = nullptr); - // Finalize the code; emit slow paths, fixup branches, add literal pool, etc. virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); } @@ -376,144 +371,6 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> { // TODO: Implement with disassembler. virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {} - // Emit code that will create an activation on the stack - virtual void BuildFrame(size_t frame_size, - ManagedRegister method_reg, - ArrayRef<const ManagedRegister> callee_save_regs, - const ManagedRegisterEntrySpills& entry_spills) = 0; - - // Emit code that will remove an activation from the stack - virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0; - - virtual void IncreaseFrameSize(size_t adjust) = 0; - virtual void DecreaseFrameSize(size_t adjust) = 0; - - // Store routines - virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0; - virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0; - virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0; - - virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0; - - virtual void StoreImmediateToThread32(ThreadOffset32 dest, - uint32_t imm, - ManagedRegister scratch); - virtual void StoreImmediateToThread64(ThreadOffset64 dest, - uint32_t imm, - ManagedRegister scratch); - - virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch); - virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch); - - virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs); - virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs); - - virtual void StoreSpanning(FrameOffset dest, ManagedRegister src, - FrameOffset in_off, ManagedRegister scratch) = 0; - - // Load routines - virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0; - - virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size); - virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size); - - virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; - // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference. 
- virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, - bool unpoison_reference) = 0; - - virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0; - - virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs); - virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs); - - // Copying routines - virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0; - - virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister scratch); - virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, - ThreadOffset64 thr_offs, - ManagedRegister scratch); - - virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch); - virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch); - - virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0; - - virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size) = 0; - - virtual void MemoryBarrier(ManagedRegister scratch) = 0; - - // Sign extension - virtual void SignExtend(ManagedRegister mreg, size_t size) = 0; - - // Zero extension - virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0; - - // Exploit fast access in managed code to Thread::Current() - virtual void GetCurrentThread(ManagedRegister tr) = 0; - virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0; - - // Set up out_reg to hold a Object** into the handle scope, or to be null if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the handle scope entry to see if the value is - // null. - virtual void CreateHandleScopeEntry(ManagedRegister out_reg, - FrameOffset handlescope_offset, - ManagedRegister in_reg, - bool null_allowed) = 0; - - // Set up out_off to hold a Object** into the handle scope, or to be null if the - // value is null and null_allowed. - virtual void CreateHandleScopeEntry(FrameOffset out_off, - FrameOffset handlescope_offset, - ManagedRegister scratch, - bool null_allowed) = 0; - - // src holds a handle scope entry (Object**) load this into dst - virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0; - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. 
- virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0; - virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0; - - // Call to address held at [base+offset] - virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0; - virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0; - virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch); - virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch); - - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. - virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0; - virtual void Bind(Label* label) = 0; virtual void Jump(Label* label) = 0; @@ -525,13 +382,17 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> { */ DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; } - protected: - explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {} - ArenaAllocator* GetArena() { return buffer_.GetArena(); } + AssemblerBuffer* GetBuffer() { + return &buffer_; + } + + protected: + explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {} + AssemblerBuffer buffer_; DebugFrameOpCodeWriterForAssembler cfi_; diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc new file mode 100644 index 0000000000..6c14888293 --- /dev/null +++ b/compiler/utils/jni_macro_assembler.cc @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "jni_macro_assembler.h" + +#include <algorithm> +#include <vector> + +#ifdef ART_ENABLE_CODEGEN_arm +#include "arm/assembler_arm32.h" +#include "arm/assembler_thumb2.h" +#endif +#ifdef ART_ENABLE_CODEGEN_arm64 +#include "arm64/assembler_arm64.h" +#endif +#ifdef ART_ENABLE_CODEGEN_mips +#include "mips/assembler_mips.h" +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 +#include "mips64/assembler_mips64.h" +#endif +#ifdef ART_ENABLE_CODEGEN_x86 +#include "x86/assembler_x86.h" +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 +#include "x86_64/assembler_x86_64.h" +#endif +#include "base/casts.h" +#include "globals.h" +#include "memory_region.h" + +namespace art { + +using MacroAsm32UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k32>>; + +template <> +MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( + ArenaAllocator* arena, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features) { +#ifndef ART_ENABLE_CODEGEN_mips + UNUSED(instruction_set_features); +#endif + + switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm + case kArm: + return MacroAsm32UniquePtr(new (arena) arm::Arm32Assembler(arena)); + case kThumb2: + return MacroAsm32UniquePtr(new (arena) arm::Thumb2Assembler(arena)); +#endif +#ifdef ART_ENABLE_CODEGEN_mips + case kMips: + return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler( + arena, + instruction_set_features != nullptr + ? instruction_set_features->AsMipsInstructionSetFeatures() + : nullptr)); +#endif +#ifdef ART_ENABLE_CODEGEN_x86 + case kX86: + return MacroAsm32UniquePtr(new (arena) x86::X86Assembler(arena)); +#endif + default: + LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set; + UNREACHABLE(); + } +} + +using MacroAsm64UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k64>>; + +template <> +MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create( + ArenaAllocator* arena, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features ATTRIBUTE_UNUSED) { + switch (instruction_set) { +#ifdef ART_ENABLE_CODEGEN_arm64 + case kArm64: + return MacroAsm64UniquePtr(new (arena) arm64::Arm64Assembler(arena)); +#endif +#ifdef ART_ENABLE_CODEGEN_mips64 + case kMips64: + return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(arena)); +#endif +#ifdef ART_ENABLE_CODEGEN_x86_64 + case kX86_64: + return MacroAsm64UniquePtr(new (arena) x86_64::X86_64Assembler(arena)); +#endif + default: + LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set; + UNREACHABLE(); + } +} + +} // namespace art diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h new file mode 100644 index 0000000000..6f45bd62db --- /dev/null +++ b/compiler/utils/jni_macro_assembler.h @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_ +#define ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_ + +#include <vector> + +#include "arch/instruction_set.h" +#include "base/arena_allocator.h" +#include "base/arena_object.h" +#include "base/enums.h" +#include "base/logging.h" +#include "base/macros.h" +#include "managed_register.h" +#include "offsets.h" +#include "utils/array_ref.h" + +namespace art { + +class ArenaAllocator; +class DebugFrameOpCodeWriterForAssembler; +class InstructionSetFeatures; +class MemoryRegion; + +template <PointerSize kPointerSize> +class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { + public: + static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create( + ArenaAllocator* arena, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features = nullptr); + + // Finalize the code; emit slow paths, fixup branches, add literal pool, etc. + virtual void FinalizeCode() = 0; + + // Size of generated code + virtual size_t CodeSize() const = 0; + + // Copy instructions out of assembly buffer into the given region of memory + virtual void FinalizeInstructions(const MemoryRegion& region) = 0; + + // Emit code that will create an activation on the stack + virtual void BuildFrame(size_t frame_size, + ManagedRegister method_reg, + ArrayRef<const ManagedRegister> callee_save_regs, + const ManagedRegisterEntrySpills& entry_spills) = 0; + + // Emit code that will remove an activation from the stack + virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0; + + virtual void IncreaseFrameSize(size_t adjust) = 0; + virtual void DecreaseFrameSize(size_t adjust) = 0; + + // Store routines + virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0; + virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0; + virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0; + + virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0; + + virtual void StoreStackOffsetToThread(ThreadOffset<kPointerSize> thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) = 0; + + virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0; + + virtual void StoreSpanning(FrameOffset dest, + ManagedRegister src, + FrameOffset in_off, + ManagedRegister scratch) = 0; + + // Load routines + virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0; + + virtual void LoadFromThread(ManagedRegister dest, + ThreadOffset<kPointerSize> src, + size_t size) = 0; + + virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; + // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference. 
+ virtual void LoadRef(ManagedRegister dest, + ManagedRegister base, + MemberOffset offs, + bool unpoison_reference) = 0; + + virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0; + + virtual void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset<kPointerSize> offs) = 0; + + // Copying routines + virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0; + + virtual void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset<kPointerSize> thr_offs, + ManagedRegister scratch) = 0; + + virtual void CopyRawPtrToThread(ThreadOffset<kPointerSize> thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) = 0; + + virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0; + + virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0; + + virtual void Copy(FrameOffset dest, + ManagedRegister src_base, + Offset src_offset, + ManagedRegister scratch, + size_t size) = 0; + + virtual void Copy(ManagedRegister dest_base, + Offset dest_offset, + FrameOffset src, + ManagedRegister scratch, + size_t size) = 0; + + virtual void Copy(FrameOffset dest, + FrameOffset src_base, + Offset src_offset, + ManagedRegister scratch, + size_t size) = 0; + + virtual void Copy(ManagedRegister dest, + Offset dest_offset, + ManagedRegister src, + Offset src_offset, + ManagedRegister scratch, + size_t size) = 0; + + virtual void Copy(FrameOffset dest, + Offset dest_offset, + FrameOffset src, + Offset src_offset, + ManagedRegister scratch, + size_t size) = 0; + + virtual void MemoryBarrier(ManagedRegister scratch) = 0; + + // Sign extension + virtual void SignExtend(ManagedRegister mreg, size_t size) = 0; + + // Zero extension + virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0; + + // Exploit fast access in managed code to Thread::Current() + virtual void GetCurrentThread(ManagedRegister tr) = 0; + virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0; + + // Set up out_reg to hold a Object** into the handle scope, or to be null if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the handle scope entry to see if the value is + // null. + virtual void CreateHandleScopeEntry(ManagedRegister out_reg, + FrameOffset handlescope_offset, + ManagedRegister in_reg, + bool null_allowed) = 0; + + // Set up out_off to hold a Object** into the handle scope, or to be null if the + // value is null and null_allowed. + virtual void CreateHandleScopeEntry(FrameOffset out_off, + FrameOffset handlescope_offset, + ManagedRegister scratch, + bool null_allowed) = 0; + + // src holds a handle scope entry (Object**) load this into dst + virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0; + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0; + virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0; + + // Call to address held at [base+offset] + virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0; + virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0; + virtual void CallFromThread(ThreadOffset<kPointerSize> offset, ManagedRegister scratch) = 0; + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. 
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0; + + virtual ~JNIMacroAssembler() {} + + /** + * @brief Buffer of DWARF's Call Frame Information opcodes. + * @details It is used by debuggers and other tools to unwind the call stack. + */ + virtual DebugFrameOpCodeWriterForAssembler& cfi() = 0; + + protected: + explicit JNIMacroAssembler() {} +}; + +template <typename T, PointerSize kPointerSize> +class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> { + public: + void FinalizeCode() OVERRIDE { + asm_.FinalizeCode(); + } + + size_t CodeSize() const OVERRIDE { + return asm_.CodeSize(); + } + + void FinalizeInstructions(const MemoryRegion& region) OVERRIDE { + asm_.FinalizeInstructions(region); + } + + DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE { + return asm_.cfi(); + } + + protected: + explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {} + + T asm_; +}; + +} // namespace art + +#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_ diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h new file mode 100644 index 0000000000..829f34b4b7 --- /dev/null +++ b/compiler/utils/jni_macro_assembler_test.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_ +#define ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_ + +#include "jni_macro_assembler.h" + +#include "assembler_test_base.h" +#include "common_runtime_test.h" // For ScratchFile + +#include <cstdio> +#include <cstdlib> +#include <fstream> +#include <iterator> +#include <sys/stat.h> + +namespace art { + +template<typename Ass> +class JNIMacroAssemblerTest : public testing::Test { + public: + Ass* GetAssembler() { + return assembler_.get(); + } + + typedef std::string (*TestFn)(JNIMacroAssemblerTest* assembler_test, Ass* assembler); + + void DriverFn(TestFn f, std::string test_name) { + DriverWrapper(f(this, assembler_.get()), test_name); + } + + // This driver assumes the assembler has already been called. + void DriverStr(std::string assembly_string, std::string test_name) { + DriverWrapper(assembly_string, test_name); + } + + // This is intended to be run as a test. + bool CheckTools() { + return test_helper_->CheckTools(); + } + + protected: + explicit JNIMacroAssemblerTest() {} + + void SetUp() OVERRIDE { + arena_.reset(new ArenaAllocator(&pool_)); + assembler_.reset(CreateAssembler(arena_.get())); + test_helper_.reset( + new AssemblerTestInfrastructure(GetArchitectureString(), + GetAssemblerCmdName(), + GetAssemblerParameters(), + GetObjdumpCmdName(), + GetObjdumpParameters(), + GetDisassembleCmdName(), + GetDisassembleParameters(), + GetAssemblyHeader())); + + SetUpHelpers(); + } + + void TearDown() OVERRIDE { + test_helper_.reset(); // Clean up the helper. + assembler_.reset(); + arena_.reset(); + } + + // Override this to set up any architecture-specific things, e.g., CPU revision. 
+ virtual Ass* CreateAssembler(ArenaAllocator* arena) { + return new (arena) Ass(arena); + } + + // Override this to set up any architecture-specific things, e.g., register vectors. + virtual void SetUpHelpers() {} + + // Get the typically used name for this architecture, e.g., aarch64, x86_64, ... + virtual std::string GetArchitectureString() = 0; + + // Get the name of the assembler, e.g., "as" by default. + virtual std::string GetAssemblerCmdName() { + return "as"; + } + + // Switches to the assembler command. Default none. + virtual std::string GetAssemblerParameters() { + return ""; + } + + // Get the name of the objdump, e.g., "objdump" by default. + virtual std::string GetObjdumpCmdName() { + return "objdump"; + } + + // Switches to the objdump command. Default is " -h". + virtual std::string GetObjdumpParameters() { + return " -h"; + } + + // Get the name of the objdump, e.g., "objdump" by default. + virtual std::string GetDisassembleCmdName() { + return "objdump"; + } + + // Switches to the objdump command. As it's a binary, one needs to push the architecture and + // such to objdump, so it's architecture-specific and there is no default. + virtual std::string GetDisassembleParameters() = 0; + + // If the assembly file needs a header, return it in a sub-class. + virtual const char* GetAssemblyHeader() { + return nullptr; + } + + private: + // Override this to pad the code with NOPs to a certain size if needed. + virtual void Pad(std::vector<uint8_t>& data ATTRIBUTE_UNUSED) { + } + + void DriverWrapper(std::string assembly_text, std::string test_name) { + assembler_->FinalizeCode(); + size_t cs = assembler_->CodeSize(); + std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(cs)); + MemoryRegion code(&(*data)[0], data->size()); + assembler_->FinalizeInstructions(code); + Pad(*data); + test_helper_->Driver(*data, assembly_text, test_name); + } + + ArenaPool pool_; + std::unique_ptr<ArenaAllocator> arena_; + std::unique_ptr<Ass> assembler_; + std::unique_ptr<AssemblerTestInfrastructure> test_helper_; + + DISALLOW_COPY_AND_ASSIGN(JNIMacroAssemblerTest); +}; + +} // namespace art + +#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_ diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index e6b32def55..8b7da3fa77 100644 --- a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -2799,27 +2799,17 @@ void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); } -void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest, - uint32_t imm, +void MipsAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, ManagedRegister mscratch) { MipsManagedRegister scratch = mscratch.AsMips(); CHECK(scratch.IsCoreRegister()) << scratch; - // Is this function even referenced anywhere else in the code? 
- LoadConst32(scratch.AsCoreRegister(), imm); - StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value()); -} - -void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - MipsManagedRegister scratch = mscratch.AsMips(); - CHECK(scratch.IsCoreRegister()) << scratch; Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, thr_offs.Int32Value()); } -void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) { +void MipsAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) { StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value()); } @@ -2836,7 +2826,7 @@ void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { return EmitLoad(mdest, SP, src.Int32Value(), size); } -void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) { +void MipsAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) { return EmitLoad(mdest, S1, src.Int32Value(), size); } @@ -2864,7 +2854,7 @@ void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offs base.AsMips().AsCoreRegister(), offs.Int32Value()); } -void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) { +void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) { MipsManagedRegister dest = mdest.AsMips(); CHECK(dest.IsCoreRegister()); LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value()); @@ -2918,9 +2908,9 @@ void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); } -void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister mscratch) { +void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister mscratch) { MipsManagedRegister scratch = mscratch.AsMips(); CHECK(scratch.IsCoreRegister()) << scratch; LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), @@ -2929,9 +2919,9 @@ void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs, SP, fr_offs.Int32Value()); } -void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void MipsAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { MipsManagedRegister scratch = mscratch.AsMips(); CHECK(scratch.IsCoreRegister()) << scratch; LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), @@ -3103,8 +3093,8 @@ void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscrat // TODO: place reference map on call. 
} -void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED, - ManagedRegister mscratch ATTRIBUTE_UNUSED) { +void MipsAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED, + ManagedRegister mscratch ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "no mips implementation"; } diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index 852ced6e25..41b6c6bd32 100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -23,12 +23,14 @@ #include "arch/mips/instruction_set_features_mips.h" #include "base/arena_containers.h" +#include "base/enums.h" #include "base/macros.h" #include "constants_mips.h" #include "globals.h" #include "managed_register_mips.h" #include "offsets.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" #include "utils/label.h" namespace art { @@ -145,7 +147,7 @@ class MipsExceptionSlowPath { DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath); }; -class MipsAssembler FINAL : public Assembler { +class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> { public: explicit MipsAssembler(ArenaAllocator* arena, const MipsInstructionSetFeatures* instruction_set_features = nullptr) @@ -160,6 +162,9 @@ class MipsAssembler FINAL : public Assembler { cfi().DelayEmittingAdvancePCs(); } + size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); } + DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } + virtual ~MipsAssembler() { for (auto& branch : branches_) { CHECK(branch.IsResolved()); @@ -500,15 +505,11 @@ class MipsAssembler FINAL : public Assembler { void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE; - void StoreImmediateToThread32(ThreadOffset32 dest, - uint32_t imm, + void StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, ManagedRegister mscratch) OVERRIDE; - void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) OVERRIDE; - - void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE; void StoreSpanning(FrameOffset dest, ManagedRegister msrc, @@ -518,7 +519,7 @@ class MipsAssembler FINAL : public Assembler { // Load routines. void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE; void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; @@ -529,19 +530,19 @@ class MipsAssembler FINAL : public Assembler { void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE; // Copying routines. 
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE; - void CopyRawPtrFromThread32(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister mscratch) OVERRIDE; - - void CopyRawPtrToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, ManagedRegister mscratch) OVERRIDE; + void CopyRawPtrToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) OVERRIDE; + void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE; void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE; @@ -617,7 +618,7 @@ class MipsAssembler FINAL : public Assembler { // Call to address held at [base+offset]. void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE; void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE; - void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE; + void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE; // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc index 3fd77a06b1..a2621cbb30 100644 --- a/compiler/utils/mips64/assembler_mips64.cc +++ b/compiler/utils/mips64/assembler_mips64.cc @@ -2115,16 +2115,16 @@ void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value()); } -void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void Mips64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { Mips64ManagedRegister scratch = mscratch.AsMips64(); CHECK(scratch.IsGpuRegister()) << scratch; Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value()); StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value()); } -void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) { +void Mips64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) { StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value()); } @@ -2141,7 +2141,7 @@ void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) return EmitLoad(mdest, SP, src.Int32Value(), size); } -void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) { +void Mips64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) { return EmitLoad(mdest, S1, src.Int32Value(), size); } @@ -2174,7 +2174,7 @@ void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, base.AsMips64().AsGpuRegister(), offs.Int32Value()); } -void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) { +void Mips64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) { Mips64ManagedRegister dest = mdest.AsMips64(); CHECK(dest.IsGpuRegister()); LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value()); @@ -2218,18 +2218,18 @@ void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src, StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value()); } -void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, - ThreadOffset64 thr_offs, - ManagedRegister mscratch) { +void 
Mips64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 thr_offs, + ManagedRegister mscratch) { Mips64ManagedRegister scratch = mscratch.AsMips64(); CHECK(scratch.IsGpuRegister()) << scratch; LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value()); StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value()); } -void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void Mips64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { Mips64ManagedRegister scratch = mscratch.AsMips64(); CHECK(scratch.IsGpuRegister()) << scratch; LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), @@ -2431,8 +2431,8 @@ void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr // TODO: place reference map on call } -void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED, - ManagedRegister mscratch ATTRIBUTE_UNUSED) { +void Mips64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED, + ManagedRegister mscratch ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "No MIPS64 implementation"; } diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index 1ad05b038b..a7d350c010 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -20,12 +20,14 @@ #include <utility> #include <vector> +#include "base/enums.h" #include "base/macros.h" #include "constants_mips64.h" #include "globals.h" #include "managed_register_mips64.h" #include "offsets.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" #include "utils/label.h" namespace art { @@ -100,7 +102,7 @@ class Mips64ExceptionSlowPath { DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath); }; -class Mips64Assembler FINAL : public Assembler { +class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> { public: explicit Mips64Assembler(ArenaAllocator* arena) : Assembler(arena), @@ -118,6 +120,9 @@ class Mips64Assembler FINAL : public Assembler { } } + size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); } + DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } + // Emit Machine Instructions. void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt); void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16); @@ -383,11 +388,11 @@ class Mips64Assembler FINAL : public Assembler { void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE; - void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) OVERRIDE; + void StoreStackOffsetToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) OVERRIDE; - void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE; void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off, ManagedRegister mscratch) OVERRIDE; @@ -395,7 +400,7 @@ class Mips64Assembler FINAL : public Assembler { // Load routines. 
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE; void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; @@ -404,19 +409,19 @@ class Mips64Assembler FINAL : public Assembler { void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE; // Copying routines. void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE; - void CopyRawPtrFromThread64(FrameOffset fr_offs, - ThreadOffset64 thr_offs, - ManagedRegister mscratch) OVERRIDE; - - void CopyRawPtrToThread64(ThreadOffset64 thr_offs, - FrameOffset fr_offs, + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 thr_offs, ManagedRegister mscratch) OVERRIDE; + void CopyRawPtrToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) OVERRIDE; + void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE; void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE; @@ -471,7 +476,7 @@ class Mips64Assembler FINAL : public Assembler { // Call to address held at [base+offset]. void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE; void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE; - void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE; + void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE; // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. 
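Editorial note: the MIPS and MIPS64 hunks above show the renaming pattern that the x86 and x86-64 diffs below repeat. Each width-suffixed entry point (LoadFromThread32/LoadFromThread64, CopyRawPtrFromThread32/64, CallFromThread32/64, and so on) collapses into a width-neutral virtual whose ThreadOffset width is fixed by the JNIMacroAssembler template argument, so generic JNI-stub code can now be written once against the template. A minimal sketch of the resulting call-site shape; the helper name SpillStackPointer and its offset parameter are illustrative and not part of this change:

template <PointerSize kPointerSize>
void SpillStackPointer(JNIMacroAssembler<kPointerSize>* jni_asm,
                       ThreadOffset<kPointerSize> top_quick_frame_offs) {
  // Dispatches to the k32 or k64 override (MipsAssembler vs. Mips64Assembler,
  // X86Assembler vs. X86_64Assembler) without the caller spelling the width.
  jni_asm->StoreStackPointerToThread(top_quick_frame_offs);
}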
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index bd5fc4031a..89b3c3f4f1 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -2068,20 +2068,16 @@ void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, movl(Address(ESP, dest), Immediate(imm)); } -void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) { - fs()->movl(Address::Absolute(dest), Immediate(imm)); -} - -void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void X86Assembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { X86ManagedRegister scratch = mscratch.AsX86(); CHECK(scratch.IsCpuRegister()); leal(scratch.AsCpuRegister(), Address(ESP, fr_offs)); fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); } -void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) { +void X86Assembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) { fs()->movl(Address::Absolute(thr_offs), ESP); } @@ -2117,7 +2113,7 @@ void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { } } -void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) { +void X86Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) { X86ManagedRegister dest = mdest.AsX86(); if (dest.IsNoRegister()) { CHECK_EQ(0u, size); @@ -2167,8 +2163,7 @@ void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); } -void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest, - ThreadOffset32 offs) { +void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) { X86ManagedRegister dest = mdest.AsX86(); CHECK(dest.IsCpuRegister()); fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs)); @@ -2230,18 +2225,18 @@ void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src, movl(Address(ESP, dest), scratch.AsCpuRegister()); } -void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister mscratch) { +void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister mscratch) { X86ManagedRegister scratch = mscratch.AsX86(); CHECK(scratch.IsCpuRegister()); fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs)); Store(fr_offs, scratch, 4); } -void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { +void X86Assembler::CopyRawPtrToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { X86ManagedRegister scratch = mscratch.AsX86(); CHECK(scratch.IsCpuRegister()); Load(scratch, fr_offs, 4); @@ -2387,7 +2382,7 @@ void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratc call(Address(scratch, offset)); } -void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) { +void X86Assembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) { fs()->call(Address::Absolute(offset)); } diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index 6d519e425f..b6442feb69 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -21,6 +21,7 @@ #include "base/arena_containers.h" #include "base/bit_utils.h" 
+#include "base/enums.h" #include "base/macros.h" #include "constants_x86.h" #include "globals.h" @@ -28,6 +29,7 @@ #include "offsets.h" #include "utils/array_ref.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" namespace art { namespace x86 { @@ -302,11 +304,18 @@ class ConstantArea { ArenaVector<int32_t> buffer_; }; -class X86Assembler FINAL : public Assembler { +class X86Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> { public: explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} virtual ~X86Assembler() {} + size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); } + DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } + void FinalizeCode() { Assembler::FinalizeCode(); } + void FinalizeInstructions(const MemoryRegion& region) { + Assembler::FinalizeInstructions(region); + } + /* * Emit Machine Instructions. */ @@ -654,13 +663,11 @@ class X86Assembler FINAL : public Assembler { void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; - void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch) - OVERRIDE; - - void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, - ManagedRegister scratch) OVERRIDE; + void StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) OVERRIDE; - void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE; void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, ManagedRegister scratch) OVERRIDE; @@ -668,7 +675,7 @@ class X86Assembler FINAL : public Assembler { // Load routines void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE; void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; @@ -677,15 +684,16 @@ class X86Assembler FINAL : public Assembler { void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE; // Copying routines void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; - void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs, - ManagedRegister scratch) OVERRIDE; + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister scratch) OVERRIDE; - void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) + void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) OVERRIDE; void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; @@ -742,7 +750,7 @@ class X86Assembler FINAL : public Assembler { // Call to address held at [base+offset] void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; - void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE; + void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE; // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if 
it is. diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index 977ce9dc0b..ce4ea1d8fd 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -2804,11 +2804,7 @@ void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq? } -void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) { - gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq? -} - -void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs, +void X86_64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister mscratch) { X86_64ManagedRegister scratch = mscratch.AsX86_64(); @@ -2817,7 +2813,7 @@ void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs, gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister()); } -void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) { +void X86_64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) { gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP)); } @@ -2858,7 +2854,7 @@ void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) } } -void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) { +void X86_64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) { X86_64ManagedRegister dest = mdest.AsX86_64(); if (dest.IsNoRegister()) { CHECK_EQ(0u, size); @@ -2907,7 +2903,7 @@ void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs)); } -void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) { +void X86_64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) { X86_64ManagedRegister dest = mdest.AsX86_64(); CHECK(dest.IsCpuRegister()); gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true)); @@ -2968,7 +2964,7 @@ void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister()); } -void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, +void X86_64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 thr_offs, ManagedRegister mscratch) { X86_64ManagedRegister scratch = mscratch.AsX86_64(); @@ -2977,7 +2973,7 @@ void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, Store(fr_offs, scratch, 8); } -void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs, +void X86_64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister mscratch) { X86_64ManagedRegister scratch = mscratch.AsX86_64(); @@ -3130,7 +3126,7 @@ void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr call(Address(scratch, offset)); } -void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) { +void X86_64Assembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) { gs()->call(Address::Absolute(offset, true)); } diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 52e39cf7e6..d298da2e6c 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -28,6 +28,7 @@ #include "offsets.h" #include 
"utils/array_ref.h" #include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" namespace art { namespace x86_64 { @@ -332,11 +333,20 @@ class NearLabel : private Label { }; -class X86_64Assembler FINAL : public Assembler { +class X86_64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> { public: explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} virtual ~X86_64Assembler() {} + size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); } + DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } + void FinalizeCode() OVERRIDE { + Assembler::FinalizeCode(); + } + void FinalizeInstructions(const MemoryRegion& region) { + Assembler::FinalizeInstructions(region); + } + /* * Emit Machine Instructions. */ @@ -723,13 +733,11 @@ class X86_64Assembler FINAL : public Assembler { void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; - void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch) - OVERRIDE; - - void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, - ManagedRegister scratch) OVERRIDE; + void StoreStackOffsetToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) OVERRIDE; - void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE; void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, ManagedRegister scratch) OVERRIDE; @@ -737,7 +745,7 @@ class X86_64Assembler FINAL : public Assembler { // Load routines void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE; void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; @@ -746,15 +754,16 @@ class X86_64Assembler FINAL : public Assembler { void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE; // Copying routines void Move(ManagedRegister dest, ManagedRegister src, size_t size); - void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs, - ManagedRegister scratch) OVERRIDE; + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 thr_offs, + ManagedRegister scratch) OVERRIDE; - void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) + void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) OVERRIDE; void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; @@ -812,7 +821,7 @@ class X86_64Assembler FINAL : public Assembler { // Call to address held at [base+offset] void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; - void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE; + void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE; // Generate code to check if Thread::Current()->exception_ is non-null // and branch to a ExceptionSlowPath if it is. |