Diffstat (limited to 'compiler/utils')
59 files changed, 1067 insertions, 2023 deletions
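Nearly every file in this diff changes `namespace art {` to `namespace art HIDDEN {`, giving the compiler's symbols hidden ELF visibility by default. A minimal sketch of the idea, assuming `HIDDEN` is a GCC/Clang visibility-attribute macro from base/macros.h (its definition is outside this diff):

// Assumed definition, for illustration only; the real one lives in base/macros.h.
#if defined(__GNUC__) || defined(__clang__)
#define HIDDEN __attribute__((visibility("hidden")))
#else
#define HIDDEN
#endif

// Every symbol declared inside the namespace now defaults to hidden
// visibility, so it stays out of the shared library's dynamic symbol
// table unless explicitly marked for export.
namespace art HIDDEN {
namespace arm {
void InternalHelper();  // hidden by default
}  // namespace arm
}  // namespace art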
diff --git a/compiler/utils/arm/assembler_arm_shared.h b/compiler/utils/arm/assembler_arm_shared.h
deleted file mode 100644
index 7464052d93..0000000000
--- a/compiler/utils/arm/assembler_arm_shared.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
-#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
-
-namespace art {
-namespace arm {
-
-enum LoadOperandType {
-  kLoadSignedByte,
-  kLoadUnsignedByte,
-  kLoadSignedHalfword,
-  kLoadUnsignedHalfword,
-  kLoadWord,
-  kLoadWordPair,
-  kLoadSWord,
-  kLoadDWord
-};
-
-enum StoreOperandType {
-  kStoreByte,
-  kStoreHalfword,
-  kStoreWord,
-  kStoreWordPair,
-  kStoreSWord,
-  kStoreDWord
-};
-
-}  // namespace arm
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_SHARED_H_
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 77f5d7081a..c7ca003530 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -26,7 +26,7 @@
 
 using namespace vixl::aarch32;  // NOLINT(build/namespaces)
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 #ifdef ___
@@ -81,9 +81,7 @@ void ArmVIXLAssembler::MaybeUnpoisonHeapReference(vixl32::Register reg) {
 }
 
 void ArmVIXLAssembler::GenerateMarkingRegisterCheck(vixl32::Register temp, int code) {
-  // The Marking Register is only used in the Baker read barrier configuration.
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
+  DCHECK(kReserveMarkingRegister);
 
   vixl32::Label mr_is_ok;
 
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 5bc8a70280..741119d7f7 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -19,15 +19,12 @@
 
 #include <android-base/logging.h>
 
-#include "base/arena_containers.h"
 #include "base/macros.h"
 #include "constants_arm.h"
 #include "dwarf/register.h"
 #include "offsets.h"
-#include "utils/arm/assembler_arm_shared.h"
 #include "utils/arm/managed_register_arm.h"
 #include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
 
 // TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
 #pragma GCC diagnostic push
@@ -37,7 +34,7 @@
 
 namespace vixl32 = vixl::aarch32;
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 inline dwarf::Reg DWARFReg(vixl32::Register reg) {
@@ -48,6 +45,26 @@ inline dwarf::Reg DWARFReg(vixl32::SRegister reg) {
   return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
 }
 
+enum LoadOperandType {
+  kLoadSignedByte,
+  kLoadUnsignedByte,
+  kLoadSignedHalfword,
+  kLoadUnsignedHalfword,
+  kLoadWord,
+  kLoadWordPair,
+  kLoadSWord,
+  kLoadDWord
+};
+
+enum StoreOperandType {
+  kStoreByte,
+  kStoreHalfword,
+  kStoreWord,
+  kStoreWordPair,
+  kStoreSWord,
+  kStoreDWord
+};
+
 class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
  public:
   // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
diff --git a/compiler/utils/arm/constants_arm.cc b/compiler/utils/arm/constants_arm.cc
index b02b343b26..a927fc201a 100644
--- a/compiler/utils/arm/constants_arm.cc
+++ b/compiler/utils/arm/constants_arm.cc
@@ -16,7 +16,7 @@
 
 #include "constants_arm.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index f42fd9777b..ef6d48dd3b 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -26,8 +26,9 @@
 #include "arch/arm/registers_arm.h"
 #include "base/casts.h"
 #include "base/globals.h"
+#include "base/macros.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 // Defines constants and accessor classes to assemble, disassemble and
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 6e6d40dc92..54873454eb 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -20,6 +20,7 @@
 #include <type_traits>
 
 #include "entrypoints/quick/quick_entrypoints.h"
+#include "indirect_reference_table.h"
 #include "lock_word.h"
 #include "thread.h"
 
@@ -27,9 +28,8 @@
 using namespace vixl::aarch32;  // NOLINT(build/namespaces)
 namespace vixl32 = vixl::aarch32;
 
 using vixl::ExactAssemblyScope;
-using vixl::CodeBufferCheckScope;
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 #ifdef ___
@@ -155,7 +155,7 @@ void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
 
   // Pop LR to PC unless we need to emit some read barrier code just before returning.
   bool emit_code_before_return =
-      (kEmitCompilerReadBarrier && kUseBakerReadBarrier) &&
+      (gUseReadBarrier && kUseBakerReadBarrier) &&
       (may_suspend || (kIsDebugBuild && emit_run_time_checks_in_debug_mode_));
   if ((core_spill_mask & (1u << lr.GetCode())) != 0u && !emit_code_before_return) {
     DCHECK_EQ(core_spill_mask & (1u << pc.GetCode()), 0u);
@@ -215,7 +215,9 @@ void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
     }
   }
 
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  // Emit marking register refresh even with all GCs as we are still using the
+  // register due to nterp's dependency.
+  if (kReserveMarkingRegister) {
     if (may_suspend) {
       // The method may be suspended; refresh the Marking Register.
       ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
@@ -305,13 +307,6 @@ void ArmVIXLJNIMacroAssembler::Store(ManagedRegister m_base,
   }
 }
 
-void ArmVIXLJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
-  vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  temps.Exclude(src);
-  asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
-}
-
 void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
   vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
@@ -319,70 +314,6 @@ void ArmVIXLJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msr
   asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
 }
 
-void ArmVIXLJNIMacroAssembler::StoreSpanning(FrameOffset dest,
-                                             ManagedRegister msrc,
-                                             FrameOffset in_off) {
-  vixl::aarch32::Register src = AsVIXLRegister(msrc.AsArm());
-  asm_.StoreToOffset(kStoreWord, src, sp, dest.Int32Value());
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  asm_.LoadFromOffset(kLoadWord, scratch, sp, in_off.Int32Value());
-  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value() + 4);
-}
-
-void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  asm_.LoadFromOffset(kLoadWord, scratch, sp, src.Int32Value());
-  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
-}
-
-void ArmVIXLJNIMacroAssembler::CopyRef(FrameOffset dest,
-                                       ManagedRegister base,
-                                       MemberOffset offs,
-                                       bool unpoison_reference) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  asm_.LoadFromOffset(kLoadWord, scratch, AsVIXLRegister(base.AsArm()), offs.Int32Value());
-  if (unpoison_reference) {
-    asm_.MaybeUnpoisonHeapReference(scratch);
-  }
-  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
-}
-
-void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister mdest,
-                                       ManagedRegister mbase,
-                                       MemberOffset offs,
-                                       bool unpoison_reference) {
-  vixl::aarch32::Register dest = AsVIXLRegister(mdest.AsArm());
-  vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  temps.Exclude(dest, base);
-  asm_.LoadFromOffset(kLoadWord, dest, base, offs.Int32Value());
-
-  if (unpoison_reference) {
-    asm_.MaybeUnpoisonHeapReference(dest);
-  }
-}
-
-void ArmVIXLJNIMacroAssembler::LoadRef(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                       FrameOffset src ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::LoadRawPtr(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                          ManagedRegister base ATTRIBUTE_UNUSED,
-                                          Offset offs ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  asm_.LoadImmediate(scratch, imm);
-  asm_.StoreToOffset(kStoreWord, scratch, sp, dest.Int32Value());
-}
-
 void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
   return Load(m_dst.AsArm(), sp, src.Int32Value(), size);
 }
@@ -394,11 +325,6 @@ void ArmVIXLJNIMacroAssembler::Load(ManagedRegister m_dst,
   return Load(m_dst.AsArm(), AsVIXLRegister(m_base.AsArm()), offs.Int32Value(), size);
 }
 
-void ArmVIXLJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
-                                              ThreadOffset32 src,
-                                              size_t size) {
-  return Load(m_dst.AsArm(), tr, src.Int32Value(), size);
-}
 
 void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
   vixl::aarch32::Register dest = AsVIXLRegister(mdest.AsArm());
@@ -407,29 +333,15 @@ void ArmVIXLJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, Threa
   asm_.LoadFromOffset(kLoadWord, dest, tr, offs.Int32Value());
 }
 
-void ArmVIXLJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset32 thr_offs) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  asm_.LoadFromOffset(kLoadWord, scratch, tr, thr_offs.Int32Value());
-  asm_.StoreToOffset(kStoreWord, scratch, sp, fr_offs.Int32Value());
-}
-
-void ArmVIXLJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
-                                                  FrameOffset fr_offs ATTRIBUTE_UNUSED,
-                                                  ManagedRegister mscratch ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
-                                                        FrameOffset fr_offs) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  asm_.AddConstant(scratch, sp, fr_offs.Int32Value());
-  asm_.StoreToOffset(kStoreWord, scratch, tr, thr_offs.Int32Value());
-}
-
-void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
-  asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
+void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) {
+  if (tag_sp) {
+    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+    vixl32::Register reg = temps.Acquire();
+    ___ Orr(reg, sp, 0x2);
+    asm_.StoreToOffset(kStoreWord, reg, tr, thr_offs.Int32Value());
+  } else {
+    asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
+  }
 }
 
 void ArmVIXLJNIMacroAssembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
@@ -869,6 +781,11 @@ void ArmVIXLJNIMacroAssembler::Move(ManagedRegister mdst,
   }
 }
 
+void ArmVIXLJNIMacroAssembler::Move(ManagedRegister mdst, size_t value) {
+  ArmManagedRegister dst = mdst.AsArm();
+  ___ Mov(AsVIXLRegister(dst), static_cast<uint32_t>(value));
+}
+
 void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
   DCHECK(size == 4 || size == 8) << size;
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
@@ -884,48 +801,6 @@ void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t si
   }
 }
 
-void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
-                                    ManagedRegister src_base ATTRIBUTE_UNUSED,
-                                    Offset src_offset ATTRIBUTE_UNUSED,
-                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                                    size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest_base ATTRIBUTE_UNUSED,
-                                    Offset dest_offset ATTRIBUTE_UNUSED,
-                                    FrameOffset src ATTRIBUTE_UNUSED,
-                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                                    size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
-                                    FrameOffset src_base ATTRIBUTE_UNUSED,
-                                    Offset src_offset ATTRIBUTE_UNUSED,
-                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                                    size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::Copy(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                    Offset dest_offset ATTRIBUTE_UNUSED,
-                                    ManagedRegister src ATTRIBUTE_UNUSED,
-                                    Offset src_offset ATTRIBUTE_UNUSED,
-                                    ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                                    size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
-void ArmVIXLJNIMacroAssembler::Copy(FrameOffset dst ATTRIBUTE_UNUSED,
-                                    Offset dest_offset ATTRIBUTE_UNUSED,
-                                    FrameOffset src ATTRIBUTE_UNUSED,
-                                    Offset src_offset ATTRIBUTE_UNUSED,
-                                    ManagedRegister scratch ATTRIBUTE_UNUSED,
-                                    size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
 void ArmVIXLJNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
                                              FrameOffset spilled_reference_offset,
                                              ManagedRegister min_reg,
@@ -971,33 +846,19 @@ void ArmVIXLJNIMacroAssembler::CreateJObject(ManagedRegister mout_reg,
   }
 }
 
-void ArmVIXLJNIMacroAssembler::CreateJObject(FrameOffset out_off,
-                                             FrameOffset spilled_reference_offset,
-                                             bool null_allowed) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  vixl32::Register scratch = temps.Acquire();
-  if (null_allowed) {
-    asm_.LoadFromOffset(kLoadWord, scratch, sp, spilled_reference_offset.Int32Value());
-    // Null values get a jobject value null. Otherwise, the jobject is
-    // the address of the spilled reference.
-    // e.g. scratch = (scratch == 0) ? 0 : (SP+spilled_reference_offset)
-    ___ Cmp(scratch, 0);
-
-    // FIXME: Using 32-bit T32 instruction in IT-block is deprecated.
-    if (asm_.ShifterOperandCanHold(ADD, spilled_reference_offset.Int32Value())) {
-      ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
-                               2 * vixl32::kMaxInstructionSizeInBytes,
-                               CodeBufferCheckScope::kMaximumSize);
-      ___ it(ne, 0x8);
-      asm_.AddConstantInIt(scratch, sp, spilled_reference_offset.Int32Value(), ne);
-    } else {
-      // TODO: Implement this (old arm assembler would have crashed here).
-      UNIMPLEMENTED(FATAL);
-    }
-  } else {
-    asm_.AddConstant(scratch, sp, spilled_reference_offset.Int32Value());
-  }
-  asm_.StoreToOffset(kStoreWord, scratch, sp, out_off.Int32Value());
+void ArmVIXLJNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister mreg,
+                                                                 JNIMacroLabel* slow_path,
+                                                                 JNIMacroLabel* resume) {
+  constexpr uint32_t kGlobalOrWeakGlobalMask =
+      dchecked_integral_cast<uint32_t>(IndirectReferenceTable::GetGlobalOrWeakGlobalMask());
+  constexpr uint32_t kIndirectRefKindMask =
+      dchecked_integral_cast<uint32_t>(IndirectReferenceTable::GetIndirectRefKindMask());
+  vixl32::Register reg = AsVIXLRegister(mreg.AsArm());
+  ___ Tst(reg, kGlobalOrWeakGlobalMask);
+  ___ B(ne, ArmVIXLJNIMacroLabel::Cast(slow_path)->AsArm());
+  ___ Bics(reg, reg, kIndirectRefKindMask);
+  ___ B(eq, ArmVIXLJNIMacroLabel::Cast(resume)->AsArm());  // Skip load for null.
+  ___ Ldr(reg, MemOperand(reg));
 }
 
 void ArmVIXLJNIMacroAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
@@ -1165,7 +1026,7 @@ void ArmVIXLJNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnary
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   vixl32::Register test_reg;
   DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
-  DCHECK(kUseReadBarrier);
+  DCHECK(gUseReadBarrier);
   if (kUseBakerReadBarrier) {
     // TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
     if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
@@ -1213,15 +1074,19 @@ void ArmVIXLJNIMacroAssembler::TestMarkBit(ManagedRegister mref,
   }
 }
 
+void ArmVIXLJNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  vixl32::Register scratch = temps.Acquire();
+  ___ Mov(scratch, static_cast<uint32_t>(address));
+  ___ Ldrb(scratch, MemOperand(scratch, 0));
+  ___ CompareAndBranchIfNonZero(scratch, ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
+}
+
 void ArmVIXLJNIMacroAssembler::Bind(JNIMacroLabel* label) {
   CHECK(label != nullptr);
   ___ Bind(ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
 }
 
-void ArmVIXLJNIMacroAssembler::MemoryBarrier(ManagedRegister scratch ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);
-}
-
 void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister dest,
                                     vixl32::Register base,
                                     int32_t offset,
@@ -1243,6 +1108,8 @@ void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister dest,
     }
   } else if (dest.IsRegisterPair()) {
     CHECK_EQ(8u, size) << dest;
+    // TODO: Use LDRD to improve stubs for @CriticalNative methods with parameters
+    // (long, long, ...). A single 32-bit LDRD is presumably faster than two 16-bit LDRs.
     ___ Ldr(AsVIXLRegisterPairLow(dest), MemOperand(base, offset));
     ___ Ldr(AsVIXLRegisterPairHigh(dest), MemOperand(base, offset + 4));
   } else if (dest.IsSRegister()) {
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index ed453ae8ff..f6df7f2c53 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -23,13 +23,12 @@
 #include "base/macros.h"
 #include "constants_arm.h"
 #include "offsets.h"
-#include "utils/arm/assembler_arm_shared.h"
 #include "utils/arm/assembler_arm_vixl.h"
 #include "utils/arm/managed_register_arm.h"
 #include "utils/assembler.h"
 #include "utils/jni_macro_assembler.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 class ArmVIXLJNIMacroAssembler final
@@ -63,34 +62,14 @@ class ArmVIXLJNIMacroAssembler final
   // Store routines.
   void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
   void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override;
-  void StoreRef(FrameOffset dest, ManagedRegister src) override;
   void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
-
-  void StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) override;
-
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
-
-  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) override;
 
   // Load routines.
   void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
   void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override;
-  void LoadFromThread(ManagedRegister dest,
-                      ThreadOffset32 src,
-                      size_t size) override;
-
-  void LoadRef(ManagedRegister dest, FrameOffset src) override;
-
-  void LoadRef(ManagedRegister dest,
-               ManagedRegister base,
-               MemberOffset offs,
-               bool unpoison_reference) override;
-
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
-
   void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override;
 
   // Copying routines.
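The new `DecodeJNITransitionOrLocalJObject` above replaces the frame-based `CreateJObject` overload: the stub now decodes a returned reference inline and only falls back to a slow path for (weak) globals. A plain-C++ sketch of the control flow the emitted Tst/Bics/Ldr sequence implements, with illustrative mask values (the generated code takes the real ones from `IndirectReferenceTable::GetGlobalOrWeakGlobalMask()` and `GetIndirectRefKindMask()`):

#include <cstdint>

// Illustrative layout assumption: the reference kind lives in the low two
// bits and bit 1 is set for (weak) global references.
constexpr uintptr_t kGlobalOrWeakGlobalMask = 0x2;
constexpr uintptr_t kIndirectRefKindMask = 0x3;

uintptr_t DecodeJniTransitionOrLocal(uintptr_t ref, uintptr_t (*slow_path)(uintptr_t)) {
  if ((ref & kGlobalOrWeakGlobalMask) != 0u) {
    return slow_path(ref);         // (Weak) global: decoded out of line.
  }
  ref &= ~kIndirectRefKindMask;    // Strip the kind bits (the Bics above).
  if (ref == 0u) {
    return 0u;                     // Null: skip the load (branch to resume).
  }
  return *reinterpret_cast<const uint32_t*>(ref);  // Load the 32-bit StackReference<Object>.
}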
@@ -100,51 +79,7 @@ class ArmVIXLJNIMacroAssembler final
 
   void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
 
-  void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset32 thr_offs) override;
-
-  void CopyRawPtrToThread(ThreadOffset32 thr_offs,
-                          FrameOffset fr_offs,
-                          ManagedRegister scratch) override;
-
-  void CopyRef(FrameOffset dest, FrameOffset src) override;
-  void CopyRef(FrameOffset dest,
-               ManagedRegister base,
-               MemberOffset offs,
-               bool unpoison_reference) override;
-
-  void Copy(FrameOffset dest, FrameOffset src, size_t size) override;
-
-  void Copy(FrameOffset dest,
-            ManagedRegister src_base,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-
-  void Copy(ManagedRegister dest_base,
-            Offset dest_offset,
-            FrameOffset src,
-            ManagedRegister scratch,
-            size_t size) override;
-
-  void Copy(FrameOffset dest,
-            FrameOffset src_base,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-
-  void Copy(ManagedRegister dest,
-            Offset dest_offset,
-            ManagedRegister src,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-
-  void Copy(FrameOffset dest,
-            Offset dest_offset,
-            FrameOffset src,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
+  void Move(ManagedRegister dest, size_t value) override;
 
   // Sign extension.
   void SignExtend(ManagedRegister mreg, size_t size) override;
@@ -156,20 +91,10 @@ class ArmVIXLJNIMacroAssembler final
   void GetCurrentThread(ManagedRegister dest) override;
   void GetCurrentThread(FrameOffset dest_offset) override;
 
-  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
-  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
-  // stale reference that can be used to avoid loading the spilled value to
-  // see if the value is null.
-  void CreateJObject(ManagedRegister out_reg,
-                     FrameOffset spilled_reference_offset,
-                     ManagedRegister in_reg,
-                     bool null_allowed) override;
-
-  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
-  // or to be null if the value is null and `null_allowed`.
-  void CreateJObject(FrameOffset out_off,
-                     FrameOffset spilled_reference_offset,
-                     bool null_allowed) override;
+  // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
+  void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
+                                         JNIMacroLabel* slow_path,
+                                         JNIMacroLabel* resume) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
@@ -213,17 +138,28 @@ class ArmVIXLJNIMacroAssembler final
   void TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
   // Emit a conditional jump to the label by applying a unary condition test to object's mark bit.
   void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
+  // Emit a conditional jump to label if the loaded value from specified locations is not zero.
+  void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override;
 
   // Code at this offset will serve as the target for the Jump call.
   void Bind(JNIMacroLabel* label) override;
 
-  void MemoryBarrier(ManagedRegister scratch) override;
-
+ private:
+  void Copy(FrameOffset dest, FrameOffset src, size_t size);
   void Load(ArmManagedRegister dest, vixl32::Register base, int32_t offset, size_t size);
 
- private:
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  void CreateJObject(ManagedRegister out_reg,
+                     FrameOffset spilled_reference_offset,
+                     ManagedRegister in_reg,
+                     bool null_allowed);
+
   // Used for testing.
-  friend class ArmVIXLAssemblerTest_VixlLoadFromOffset_Test;
-  friend class ArmVIXLAssemblerTest_VixlStoreToOffset_Test;
+  ART_FRIEND_TEST(ArmVIXLAssemblerTest, VixlJniHelpers);
+  ART_FRIEND_TEST(ArmVIXLAssemblerTest, VixlLoadFromOffset);
+  ART_FRIEND_TEST(ArmVIXLAssemblerTest, VixlStoreToOffset);
 };
 
 class ArmVIXLJNIMacroLabel final
diff --git a/compiler/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc
index deff658b4f..07d50da910 100644
--- a/compiler/utils/arm/managed_register_arm.cc
+++ b/compiler/utils/arm/managed_register_arm.cc
@@ -18,7 +18,7 @@
 
 #include "base/globals.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 // Returns true if this managed-register overlaps the other managed-register.
diff --git a/compiler/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
index 6d942fa774..b3d436c10f 100644
--- a/compiler/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -19,10 +19,11 @@
 
 #include <android-base/logging.h>
 
+#include "base/macros.h"
 #include "constants_arm.h"
 #include "utils/managed_register.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 // Values for register pairs.
diff --git a/compiler/utils/arm/managed_register_arm_test.cc b/compiler/utils/arm/managed_register_arm_test.cc
index 6f440a7c81..60f6090edd 100644
--- a/compiler/utils/arm/managed_register_arm_test.cc
+++ b/compiler/utils/arm/managed_register_arm_test.cc
@@ -16,9 +16,10 @@
 #include "managed_register_arm.h"
 
 #include "base/globals.h"
+#include "base/macros.h"
 #include "gtest/gtest.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 TEST(ArmManagedRegister, NoRegister) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 6100ed9855..26dce7c502 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -16,7 +16,6 @@
 
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "assembler_arm64.h"
-#include "base/bit_utils_iterator.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "heap_poisoning.h"
 #include "offsets.h"
@@ -24,7 +23,7 @@
 
 using namespace vixl::aarch64;  // NOLINT(build/namespaces)
 
-namespace art {
+namespace art HIDDEN {
 namespace arm64 {
 
 #ifdef ___
@@ -187,9 +186,7 @@ void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
 }
 
 void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
-  // The Marking Register is only used in the Baker read barrier configuration.
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
+  DCHECK(kReserveMarkingRegister);
 
   vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
   vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
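Both `GenerateMarkingRegisterCheck` implementations now assert only `kReserveMarkingRegister`, since the register stays reserved for nterp even when the Baker read barrier is not in use. A sketch of the run-time invariant the emitted debug check verifies (type and accessor names here are assumptions; the real code compares MR against `Thread::is_gc_marking` loaded via TR and traps with `code` on mismatch):

#include <cstdint>
#include <cstdlib>

struct ThreadLike { int32_t is_gc_marking; };  // Stand-in for art::Thread.

void CheckMarkingRegister(int32_t mr, const ThreadLike* tr, int code) {
  if (mr != tr->is_gc_marking) {
    // The generated code emits a breakpoint (BKPT/BRK) carrying `code` so the
    // failing check site can be identified; abort() stands in for that here.
    (void)code;
    std::abort();
  }
}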
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b49a13a067..f8168903bd 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
 
 #include <android-base/logging.h>
 
-#include "base/arena_containers.h"
 #include "base/bit_utils_iterator.h"
 #include "base/macros.h"
 #include "dwarf/register.h"
@@ -38,7 +37,7 @@
 #include "aarch64/macro-assembler-aarch64.h"
 #pragma GCC diagnostic pop
 
-namespace art {
+namespace art HIDDEN {
 
 class Arm64InstructionSetFeatures;
 
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 50ca468499..9e9f122cf6 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -17,6 +17,7 @@
 #include "jni_macro_assembler_arm64.h"
 
 #include "entrypoints/quick/quick_entrypoints.h"
+#include "indirect_reference_table.h"
 #include "lock_word.h"
 #include "managed_register_arm64.h"
 #include "offsets.h"
@@ -24,7 +25,7 @@
 
 using namespace vixl::aarch64;  // NOLINT(build/namespaces)
 
-namespace art {
+namespace art HIDDEN {
 namespace arm64 {
 
 #ifdef ___
@@ -191,46 +192,22 @@ void Arm64JNIMacroAssembler::Store(ManagedRegister m_base,
   }
 }
 
-void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
-  Arm64ManagedRegister src = m_src.AsArm64();
-  CHECK(src.IsXRegister()) << src;
-  StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
-                 offs.Int32Value());
-}
-
 void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
   Arm64ManagedRegister src = m_src.AsArm64();
   CHECK(src.IsXRegister()) << src;
   StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
 }
 
-void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  Register scratch = temps.AcquireW();
-  ___ Mov(scratch, imm);
-  ___ Str(scratch, MEM_OP(reg_x(SP), offs.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs, FrameOffset fr_offs) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  Register scratch = temps.AcquireX();
-  ___ Add(scratch, reg_x(SP), fr_offs.Int32Value());
-  ___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs, bool tag_sp) {
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   Register scratch = temps.AcquireX();
   ___ Mov(scratch, reg_x(SP));
+  if (tag_sp) {
+    ___ Orr(scratch, scratch, 0x2);
+  }
   ___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
 }
 
-void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off ATTRIBUTE_UNUSED,
-                                           ManagedRegister m_source ATTRIBUTE_UNUSED,
-                                           FrameOffset in_off ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL);  // This case is not applicable to ARM64.
-}
-
 // Load routines.
 void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
   if ((cond == al) || (cond == nv)) {
@@ -329,45 +306,6 @@ void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst,
   return Load(m_dst.AsArm64(), m_base.AsArm64().AsXRegister(), offs.Int32Value(), size);
 }
 
-void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
-                                            ThreadOffset64 src,
-                                            size_t size) {
-  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
-}
-
-void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  CHECK(dst.IsXRegister()) << dst;
-  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
-}
-
-void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
-                                     ManagedRegister m_base,
-                                     MemberOffset offs,
-                                     bool unpoison_reference) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  Arm64ManagedRegister base = m_base.AsArm64();
-  CHECK(dst.IsXRegister() && base.IsXRegister());
-  LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
-                  offs.Int32Value());
-  if (unpoison_reference) {
-    WRegister ref_reg = dst.AsOverlappingWRegister();
-    asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
-  }
-}
-
-void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
-                                        ManagedRegister m_base,
-                                        Offset offs) {
-  Arm64ManagedRegister dst = m_dst.AsArm64();
-  Arm64ManagedRegister base = m_base.AsArm64();
-  CHECK(dst.IsXRegister() && base.IsXRegister());
-  // Remove dst and base form the temp list - higher level API uses IP1, IP0.
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
-  ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
-}
-
 void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   CHECK(dst.IsXRegister()) << dst;
@@ -640,40 +578,10 @@ void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src,
   }
 }
 
-void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 tr_offs) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  Register scratch = temps.AcquireX();
-  ___ Ldr(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-  ___ Str(scratch, MEM_OP(sp, fr_offs.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
-                                                FrameOffset fr_offs,
-                                                ManagedRegister m_scratch) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsXRegister()) << scratch;
-  LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  Register scratch = temps.AcquireW();
-  ___ Ldr(scratch, MEM_OP(reg_x(SP), src.Int32Value()));
-  ___ Str(scratch, MEM_OP(reg_x(SP), dest.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest,
-                                     ManagedRegister base,
-                                     MemberOffset offs,
-                                     bool unpoison_reference) {
-  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-  Register scratch = temps.AcquireW();
-  ___ Ldr(scratch, MEM_OP(reg_x(base.AsArm64().AsXRegister()), offs.Int32Value()));
-  if (unpoison_reference) {
-    asm_.MaybeUnpoisonHeapReference(scratch);
-  }
-  ___ Str(scratch, MEM_OP(reg_x(SP), dest.Int32Value()));
+void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, size_t value) {
+  Arm64ManagedRegister dst = m_dst.AsArm64();
+  DCHECK(dst.IsXRegister());
+  ___ Mov(reg_x(dst.AsXRegister()), value);
 }
 
 void Arm64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
@@ -684,105 +592,6 @@ void Arm64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size
   ___ Str(scratch, MEM_OP(reg_x(SP), dest.Int32Value()));
 }
 
-void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
-                                  ManagedRegister src_base,
-                                  Offset src_offset,
-                                  ManagedRegister m_scratch,
-                                  size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64ManagedRegister base = src_base.AsArm64();
-  CHECK(base.IsXRegister()) << base;
-  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
-                    src_offset.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
-                                  Offset dest_offs,
-                                  FrameOffset src,
-                                  ManagedRegister m_scratch,
-                                  size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64ManagedRegister base = m_dest_base.AsArm64();
-  CHECK(base.IsXRegister()) << base;
-  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
-                   dest_offs.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
-                                  FrameOffset /*src_base*/,
-                                  Offset /*src_offset*/,
-                                  ManagedRegister /*mscratch*/,
-                                  size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
-                                  Offset dest_offset,
-                                  ManagedRegister m_src,
-                                  Offset src_offset,
-                                  ManagedRegister m_scratch,
-                                  size_t size) {
-  Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64ManagedRegister src = m_src.AsArm64();
-  Arm64ManagedRegister dest = m_dest.AsArm64();
-  CHECK(dest.IsXRegister()) << dest;
-  CHECK(src.IsXRegister()) << src;
-  CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    if (scratch.IsWRegister()) {
-      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
-                      src_offset.Int32Value());
-      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
-                     dest_offset.Int32Value());
-    } else {
-      LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
-                      src_offset.Int32Value());
-      StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
-                     dest_offset.Int32Value());
-    }
-  } else if (size == 8) {
-    LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
-    StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
-                                  Offset /*dest_offset*/,
-                                  FrameOffset /*src*/,
-                                  Offset /*src_offset*/,
-                                  ManagedRegister /*scratch*/,
-                                  size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
-  // TODO: Should we check that m_scratch is IP? - see arm.
-  ___ Dmb(InnerShareable, BarrierAll);
-}
-
 void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
   Arm64ManagedRegister reg = mreg.AsArm64();
   CHECK(size == 1 || size == 2) << size;
@@ -882,6 +691,19 @@ void Arm64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
   ___ Str(scratch, MEM_OP(reg_x(SP), out_off.Int32Value()));
 }
 
+void Arm64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister m_reg,
+                                                               JNIMacroLabel* slow_path,
+                                                               JNIMacroLabel* resume) {
+  constexpr uint64_t kGlobalOrWeakGlobalMask = IndirectReferenceTable::GetGlobalOrWeakGlobalMask();
+  constexpr uint64_t kIndirectRefKindMask = IndirectReferenceTable::GetIndirectRefKindMask();
+  constexpr size_t kGlobalOrWeakGlobalBit = WhichPowerOf2(kGlobalOrWeakGlobalMask);
+  Register reg = reg_w(m_reg.AsArm64().AsWRegister());
+  ___ Tbnz(reg.X(), kGlobalOrWeakGlobalBit, Arm64JNIMacroLabel::Cast(slow_path)->AsArm64());
+  ___ And(reg.X(), reg.X(), ~kIndirectRefKindMask);
+  ___ Cbz(reg.X(), Arm64JNIMacroLabel::Cast(resume)->AsArm64());  // Skip load for null.
+  ___ Ldr(reg, MEM_OP(reg.X()));
+}
+
 void Arm64JNIMacroAssembler::TryToTransitionFromRunnableToNative(
     JNIMacroLabel* label, ArrayRef<const ManagedRegister> scratch_regs ATTRIBUTE_UNUSED) {
   constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
@@ -989,7 +811,7 @@ void Arm64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCo
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   Register test_reg;
   DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
-  DCHECK(kUseReadBarrier);
+  DCHECK(gUseReadBarrier);
   if (kUseBakerReadBarrier) {
     // TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
     if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
@@ -1037,6 +859,14 @@ void Arm64JNIMacroAssembler::TestMarkBit(ManagedRegister m_ref,
   }
 }
 
+void Arm64JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  Register scratch = temps.AcquireX();
+  ___ Mov(scratch, address);
+  ___ Ldrb(scratch.W(), MEM_OP(scratch, 0));
+  ___ Cbnz(scratch.W(), Arm64JNIMacroLabel::Cast(label)->AsArm64());
+}
+
 void Arm64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
   CHECK(label != nullptr);
   ___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
@@ -1107,7 +937,9 @@ void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
   asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
   asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
 
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  // Emit marking register refresh even with all GCs as we are still using the
+  // register due to nterp's dependency.
+  if (kReserveMarkingRegister) {
     vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
     vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
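`StoreStackPointerToThread` on both architectures now takes a `tag_sp` flag and ORs bit 1 into the saved stack pointer before publishing it to the thread's top-of-stack field. A small sketch of the tagging convention, with hypothetical helper names (the diff shows only the producer side; where the tag is consumed is outside this section):

#include <cstdint>

constexpr uintptr_t kSpTagBit = 0x2;  // Bit 1, as in the Orr(reg, sp, 0x2) above.

uintptr_t MaybeTagSp(uintptr_t sp, bool tag_sp) {
  return tag_sp ? (sp | kSpTagBit) : sp;  // Value stored to the Thread at thr_offs.
}

uintptr_t StrippedSp(uintptr_t stored_sp) {
  return stored_sp & ~kSpTagBit;  // Readers must mask the tag off before use.
}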
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 2c04184848..2836e0947d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -37,7 +37,7 @@
 #include "aarch64/macro-assembler-aarch64.h"
 #pragma GCC diagnostic pop
 
-namespace art {
+namespace art HIDDEN {
 namespace arm64 {
 
 class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
@@ -68,23 +68,12 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
   // Store routines.
   void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
   void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override;
-  void StoreRef(FrameOffset dest, ManagedRegister src) override;
   void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
-  void StoreStackOffsetToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs) override;
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
-  void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
 
   // Load routines.
   void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
   void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override;
-  void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
-  void LoadRef(ManagedRegister dest, FrameOffset src) override;
-  void LoadRef(ManagedRegister dest,
-               ManagedRegister base,
-               MemberOffset offs,
-               bool unpoison_reference) override;
-  void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
   void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
 
   // Copying routines.
@@ -92,43 +81,7 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
                      ArrayRef<ArgumentLocation> srcs,
                      ArrayRef<FrameOffset> refs) override;
   void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
-  void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 thr_offs) override;
-  void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
-      override;
-  void CopyRef(FrameOffset dest, FrameOffset src) override;
-  void CopyRef(FrameOffset dest,
-               ManagedRegister base,
-               MemberOffset offs,
-               bool unpoison_reference) override;
-  void Copy(FrameOffset dest, FrameOffset src, size_t size) override;
-  void Copy(FrameOffset dest,
-            ManagedRegister src_base,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-  void Copy(ManagedRegister dest_base,
-            Offset dest_offset,
-            FrameOffset src,
-            ManagedRegister scratch,
-            size_t size) override;
-  void Copy(FrameOffset dest,
-            FrameOffset src_base,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-  void Copy(ManagedRegister dest,
-            Offset dest_offset,
-            ManagedRegister src,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-  void Copy(FrameOffset dest,
-            Offset dest_offset,
-            FrameOffset src,
-            Offset src_offset,
-            ManagedRegister scratch,
-            size_t size) override;
-  void MemoryBarrier(ManagedRegister scratch) override;
+  void Move(ManagedRegister dest, size_t value) override;
 
   // Sign extension.
   void SignExtend(ManagedRegister mreg, size_t size) override;
@@ -140,20 +93,10 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
   void GetCurrentThread(ManagedRegister dest) override;
   void GetCurrentThread(FrameOffset dest_offset) override;
 
-  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
-  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
-  // stale reference that can be used to avoid loading the spilled value to
-  // see if the value is null.
-  void CreateJObject(ManagedRegister out_reg,
-                     FrameOffset spilled_reference_offset,
-                     ManagedRegister in_reg,
-                     bool null_allowed) override;
-
-  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
-  // or to be null if the value is null and `null_allowed`.
-  void CreateJObject(FrameOffset out_off,
-                     FrameOffset spilled_reference_offset,
-                     bool null_allowed) override;
+  // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
+  void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
+                                         JNIMacroLabel* slow_path,
+                                         JNIMacroLabel* resume) override;
 
   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
@@ -197,6 +140,8 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
   void TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
   // Emit a conditional jump to the label by applying a unary condition test to object's mark bit.
   void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
+  // Emit a conditional jump to label if the loaded value from specified locations is not zero.
+  void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override;
 
   // Code at this offset will serve as the target for the Jump call.
   void Bind(JNIMacroLabel* label) override;
@@ -220,6 +165,24 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
   void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
   void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
   void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+
+  void Copy(FrameOffset dest, FrameOffset src, size_t size);
+
+  // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+  // stale reference that can be used to avoid loading the spilled value to
+  // see if the value is null.
+  void CreateJObject(ManagedRegister out_reg,
+                     FrameOffset spilled_reference_offset,
+                     ManagedRegister in_reg,
+                     bool null_allowed);
+
+  // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+  // or to be null if the value is null and `null_allowed`.
+  void CreateJObject(FrameOffset out_off,
+                     FrameOffset spilled_reference_offset,
+                     bool null_allowed);
+
   void AddConstant(XRegister rd,
                    int32_t value,
                    vixl::aarch64::Condition cond = vixl::aarch64::al);
diff --git a/compiler/utils/arm64/managed_register_arm64.cc b/compiler/utils/arm64/managed_register_arm64.cc
index 5632265646..74a35452db 100644
--- a/compiler/utils/arm64/managed_register_arm64.cc
+++ b/compiler/utils/arm64/managed_register_arm64.cc
@@ -17,7 +17,7 @@
 #include "managed_register_arm64.h"
 #include "base/globals.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm64 {
 
 // TODO: Define convention
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index 8a06f631a1..7e8c976b23 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -20,9 +20,10 @@
 #include <android-base/logging.h>
 
 #include "arch/arm64/registers_arm64.h"
+#include "base/macros.h"
 #include "utils/managed_register.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm64 {
 
 const int kNumberOfXRegIds = kNumberOfXRegisters;
diff --git a/compiler/utils/arm64/managed_register_arm64_test.cc b/compiler/utils/arm64/managed_register_arm64_test.cc
index d151ac99e7..f250360639 100644
--- a/compiler/utils/arm64/managed_register_arm64_test.cc
+++ b/compiler/utils/arm64/managed_register_arm64_test.cc
@@ -18,9 +18,10 @@
 
 #include "assembler_arm64.h"
 #include "base/globals.h"
+#include "base/macros.h"
 #include "gtest/gtest.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm64 {
 
 TEST(Arm64ManagedRegister, NoRegister) {
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index d1d2a3d556..b82f0dc4b4 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -23,7 +23,7 @@
 #include "base/globals.h"
 #include "base/memory_region.h"
 
-namespace art {
+namespace art HIDDEN {
 
 AssemblerBuffer::AssemblerBuffer(ArenaAllocator* allocator)
     : allocator_(allocator) {
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 4b4fb14df6..13a5d9fd01 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -37,7 +37,7 @@
 #include "x86/constants_x86.h"
 #include "x86_64/constants_x86_64.h"
 
-namespace art {
+namespace art HIDDEN {
 
 class Assembler;
 class AssemblerBuffer;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index bb22fe5bde..d03e5a7abc 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -26,11 +26,12 @@
 #include <fstream>
 #include <iterator>
 
+#include "base/macros.h"
 #include "base/malloc_arena_pool.h"
 #include "assembler_test_base.h"
 #include "common_runtime_test.h"  // For ScratchFile
 
-namespace art {
+namespace art HIDDEN {
 
 // Helper for a constexpr string length.
 constexpr size_t ConstexprStrLen(char const* str, size_t count = 0) {
@@ -59,7 +60,7 @@ class AssemblerTest : public AssemblerTestBase {
     return assembler_.get();
   }
 
-  typedef std::string (*TestFn)(AssemblerTest* assembler_test, Ass* assembler);
+  using TestFn = std::string (*)(AssemblerTest *, Ass *);
 
   void DriverFn(TestFn f, const std::string& test_name) {
     DriverWrapper(f(this, assembler_.get()), test_name);
@@ -259,7 +260,7 @@ class AssemblerTest : public AssemblerTestBase {
                             std::string (AssemblerTest::*GetName1)(const Reg1&),
                             std::string (AssemblerTest::*GetName2)(const Reg2&),
                             std::string (AssemblerTest::*GetName3)(const Reg3&),
-                            std::string fmt,
+                            const std::string& fmt,
                             int bias) {
     std::string str;
     std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0));
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index bf73808603..73f3657413 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -26,6 +26,7 @@
 
 #include "android-base/strings.h"
 
+#include "base/macros.h"
 #include "base/os.h"
 #include "base/utils.h"
 #include "common_runtime_test.h"  // For ScratchDir.
@@ -34,7 +35,7 @@
 #include "exec_utils.h"
 #include "stream/file_output_stream.h"
 
-namespace art {
+namespace art HIDDEN {
 
 // If you want to take a look at the differences between the ART assembler and clang,
 // set this flag to true. The disassembled files will then remain in the tmp directory.
@@ -59,7 +60,7 @@ class AssemblerTestBase : public testing::Test {
 
   // This is intended to be run as a test.
   bool CheckTools() {
-    for (auto cmd : { GetAssemblerCommand()[0], GetDisassemblerCommand()[0] }) {
+    for (const std::string& cmd : { GetAssemblerCommand()[0], GetDisassemblerCommand()[0] }) {
      if (!OS::FileExists(cmd.c_str())) {
        LOG(ERROR) << "Could not find " << cmd;
        return false;
@@ -84,7 +85,7 @@ class AssemblerTestBase : public testing::Test {
 
     // Assemble reference object file.
     std::string ref_obj_file = test_path(".ref.o");
-    ASSERT_TRUE(Assemble(ref_asm_file.c_str(), ref_obj_file.c_str()));
+    ASSERT_TRUE(Assemble(ref_asm_file, ref_obj_file));
 
     // Read the code produced by assembler from the ELF file.
     std::vector<uint8_t> ref_code;
@@ -153,9 +154,14 @@ class AssemblerTestBase : public testing::Test {
   virtual std::vector<std::string> GetDisassemblerCommand() {
     switch (GetIsa()) {
       case InstructionSet::kThumb2:
-        return {FindTool("llvm-objdump"), "--disassemble", "--triple", "thumbv7a-linux-gnueabi"};
+        return {FindTool("llvm-objdump"),
+                "--disassemble",
+                "--no-print-imm-hex",
+                "--triple",
+                "thumbv7a-linux-gnueabi"};
       default:
-        return {FindTool("llvm-objdump"), "--disassemble", "--no-show-raw-insn"};
+        return {
+            FindTool("llvm-objdump"), "--disassemble", "--no-print-imm-hex", "--no-show-raw-insn"};
     }
   }
 
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index b2d4dcd9f6..672cd3d10f 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -30,10 +30,11 @@
 #include "utils/assembler_test_base.h"
 
 #include "base/hex_dump.h"
+#include "base/macros.h"
 #include "base/malloc_arena_pool.h"
 #include "common_runtime_test.h"
 
-namespace art {
+namespace art HIDDEN {
 namespace arm {
 
 // Include results file (generated manually)
@@ -143,7 +144,6 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
   __ Load(scratch_register, FrameOffset(4092), 4);
   __ Load(scratch_register, FrameOffset(4096), 4);
   __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
-  __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference= */ false);
 
   // Stores
   __ Store(FrameOffset(32), method_register, 4);
@@ -153,19 +153,67 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
   __ Store(FrameOffset(1024), method_register, 4);
   __ Store(FrameOffset(4092), scratch_register, 4);
   __ Store(FrameOffset(4096), scratch_register, 4);
-  __ StoreImmediateToFrame(FrameOffset(48), 0xFF);
-  __ StoreImmediateToFrame(FrameOffset(48), 0xFFFFFF);
   __ StoreRawPtr(FrameOffset(48), scratch_register);
-  __ StoreRef(FrameOffset(48), scratch_register);
-  __ StoreSpanning(FrameOffset(48), method_register, FrameOffset(48));
-  __ StoreStackOffsetToThread(ThreadOffset32(512), FrameOffset(4096));
-  __ StoreStackPointerToThread(ThreadOffset32(512));
+  __ StoreStackPointerToThread(ThreadOffset32(512), false);
+  __ StoreStackPointerToThread(ThreadOffset32(512), true);
+
+  // MoveArguments
+  static constexpr FrameOffset kInvalidReferenceOffset =
+      JNIMacroAssembler<kArmPointerSize>::kInvalidReferenceOffset;
+  static constexpr size_t kNativePointerSize = static_cast<size_t>(kArmPointerSize);
+  // Normal or @FastNative with parameters (Object, long, long, int, Object).
+  // Note: This shall not spill the reference R1 to [sp, #36]. The JNI compiler spills
+  // references in a separate initial pass before moving arguments and creating `jobject`s.
+  ArgumentLocation move_dests1[] = {
+      ArgumentLocation(ArmManagedRegister::FromCoreRegister(R2), kNativePointerSize),
+      ArgumentLocation(FrameOffset(0), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(8), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(16), kVRegSize),
+      ArgumentLocation(FrameOffset(20), kNativePointerSize),
+  };
+  ArgumentLocation move_srcs1[] = {
+      ArgumentLocation(ArmManagedRegister::FromCoreRegister(R1), kVRegSize),
+      ArgumentLocation(ArmManagedRegister::FromRegisterPair(R2_R3), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(48), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(56), kVRegSize),
+      ArgumentLocation(FrameOffset(60), kVRegSize),
+  };
+  FrameOffset move_refs1[] {
+      FrameOffset(36),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(60),
+  };
+  __ MoveArguments(ArrayRef<ArgumentLocation>(move_dests1),
+                   ArrayRef<ArgumentLocation>(move_srcs1),
+                   ArrayRef<FrameOffset>(move_refs1));
+  // @CriticalNative with parameters (long, long, long, int).
+  ArgumentLocation move_dests2[] = {
+      ArgumentLocation(ArmManagedRegister::FromRegisterPair(R0_R1), 2 * kVRegSize),
+      ArgumentLocation(ArmManagedRegister::FromRegisterPair(R2_R3), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(0), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(8), kVRegSize),
+  };
+  ArgumentLocation move_srcs2[] = {
+      ArgumentLocation(ArmManagedRegister::FromRegisterPair(R2_R3), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(28), kVRegSize),
+      ArgumentLocation(FrameOffset(32), 2 * kVRegSize),
+      ArgumentLocation(FrameOffset(40), kVRegSize),
+  };
+  FrameOffset move_refs2[] {
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+      FrameOffset(kInvalidReferenceOffset),
+  };
+  __ MoveArguments(ArrayRef<ArgumentLocation>(move_dests2),
+                   ArrayRef<ArgumentLocation>(move_srcs2),
+                   ArrayRef<FrameOffset>(move_refs2));
 
   // Other
   __ Call(method_register, FrameOffset(48));
   __ Copy(FrameOffset(48), FrameOffset(44), 4);
-  __ CopyRawPtrFromThread(FrameOffset(44), ThreadOffset32(512));
-  __ CopyRef(FrameOffset(48), FrameOffset(44));
   __ GetCurrentThread(method_register);
   __ GetCurrentThread(FrameOffset(48));
   __ Move(hidden_arg_register, method_register, 4);
@@ -176,7 +224,6 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
   __ CreateJObject(high_register, FrameOffset(48), high_register, true);
   __ CreateJObject(high_register, FrameOffset(48), high_register, false);
   __ CreateJObject(method_register, FrameOffset(48), high_register, true);
-  __ CreateJObject(FrameOffset(48), FrameOffset(64), true);
   __ CreateJObject(method_register, FrameOffset(0), high_register, true);
   __ CreateJObject(method_register, FrameOffset(1028), high_register, true);
   __ CreateJObject(high_register, FrameOffset(1028), high_register, true);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index b6c6025e41..aea7f14762 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -1,258 +1,259 @@
 const char* const VixlJniHelpersResults = {
-  " 0: 2d e9 e0 4d push.w {r5, r6, r7, r8, r10, r11, lr}\n"
-  " 4: 2d ed 10 8a vpush {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
-  " 8: 81 b0 sub sp, #4\n"
-  " a: 00 90 str r0, [sp]\n"
-  " c: 19 91 str r1, [sp, #100]\n"
-  " e: 8d ed 1a 0a vstr s0, [sp, #104]\n"
-  " 12: 1b 92 str r2, [sp, #108]\n"
-  " 14: 1c 93 str r3, [sp, #112]\n"
-  " 16: 88 b0 sub sp, #32\n"
-  " 18: ad f5 80 5d sub.w sp, sp, #4096\n"
-  " 1c: 08 98 ldr r0, [sp, #32]\n"
-  " 1e: 1f 98 ldr r0, [sp, #124]\n"
-  " 20: 21 98 ldr r0, [sp, #132]\n"
-  " 22: ff 98 ldr r0, [sp, #1020]\n"
-  " 24: dd f8 00 04 ldr.w r0, [sp, #1024]\n"
-  " 28: dd f8 fc cf ldr.w r12, [sp, #4092]\n"
-  " 2c: 0d f5 80 5c add.w r12, sp, #4096\n"
-  " 30: dc f8 00 c0 ldr.w r12, [r12]\n"
-  " 34: d9 f8 00 c2 ldr.w r12, [r9, #512]\n"
-  " 38: dc f8 80 00 ldr.w r0, [r12, #128]\n"
-  " 3c: 08 90 str r0, [sp, #32]\n"
-  " 3e: 1f 90 str r0, [sp, #124]\n"
-  " 40: 21 90 str r0, [sp, #132]\n"
-  " 42: ff 90 str r0, [sp, #1020]\n"
-  " 44: cd f8 00 04 str.w r0, [sp, #1024]\n"
-  " 48: cd f8 fc cf str.w r12, [sp, #4092]\n"
-  " 4c: 4d f8 04 5d str r5, [sp, #-4]!\n"
-  " 50: 0d f5 80 55 add.w r5, sp, #4096\n"
-  " 54: c5 f8 04 c0 str.w r12, [r5, #4]\n"
-  " 58: 5d f8 04 5b ldr r5, [sp], #4\n"
-  " 5c: 4f f0 ff 0c mov.w r12, #255\n"
-  " 60: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " 64: 6f f0 7f 4c mvn r12, #4278190080\n"
-  " 68: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " 6c: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " 70: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " 74: 0c 90 str r0, [sp, #48]\n"
-  " 76: dd f8 30 c0 ldr.w r12, [sp, #48]\n"
-  " 7a: cd f8 34 c0 str.w r12, [sp, #52]\n"
-  " 7e: 0d f5 80 5c add.w r12, sp, #4096\n"
-  " 82: c9 f8 00 c2 str.w r12, [r9, #512]\n"
-  " 86: c9 f8 00 d2 str.w sp, [r9, #512]\n"
-  " 8a: d0 f8 30 e0 ldr.w lr, [r0, #48]\n"
-  " 8e: f0 47 blx lr\n"
-  " 90: dd f8 2c c0 ldr.w r12, [sp, #44]\n"
-  " 94: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " 98: d9 f8 00 c2 ldr.w r12, [r9, #512]\n"
-  " 9c: cd f8 2c c0 str.w r12, [sp, #44]\n"
-  " a0: dd f8 2c c0 ldr.w r12, [sp, #44]\n"
-  " a4: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " a8: 48 46 mov r0, r9\n"
-  " aa: cd f8 30 90 str.w r9, [sp, #48]\n"
-  " ae: 04 46 mov r4, r0\n"
-  " b0: 0d f1 30 0c add.w r12, sp, #48\n"
-  " b4: bb f1 00 0f cmp.w r11, #0\n"
-  " b8: 18 bf it ne\n"
-  " ba: e3 46 movne r11, r12\n"
-  " bc: 0d f1 30 0b add.w r11, sp, #48\n"
-  " c0: 5f ea 0b 00 movs.w r0, r11\n"
-  " c4: 18 bf it ne\n"
-  " c6: 0c a8 addne r0, sp, #48\n"
-  " c8: dd f8 40 c0 ldr.w r12, [sp, #64]\n"
-  " cc: bc f1 00 0f cmp.w r12, #0\n"
-  " d0: 18 bf it ne\n"
-  " d2: 0d f1 40 0c addne.w r12, sp, #64\n"
-  " d6: cd f8 30 c0 str.w r12, [sp, #48]\n"
-  " da: 5f ea 0b 00 movs.w r0, r11\n"
-  " de: 18 bf it ne\n"
-  " e0: 00 a8 addne r0, sp, #0\n"
-  " e2: 0d f2 04 40 addw r0, sp, #1028\n"
-  " e6: bb f1 00 0f cmp.w r11, #0\n"
-  " ea: 08 bf it eq\n"
-  " ec: 58 46 moveq r0, r11\n"
-  " ee: 0d f2 04 4c addw r12, sp, #1028\n"
-  " f2: bb f1 00 0f cmp.w r11, #0\n"
-  " f6: 18 bf it ne\n"
-  " f8: e3 46 movne r11, r12\n"
-  " fa: d9 f8 94 c0 ldr.w r12, [r9, #148]\n"
-  " fe: bc f1 00 0f cmp.w r12, #0\n"
-  " 102: 71 d1 bne 0x1e8 @ imm = #226\n"
-  " 104: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 108: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 10c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 110: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 114: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 118: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 11c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 120: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 124: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 128: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 12c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 130: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 134: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 138: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 13c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 140: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 144: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 148: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 14c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 150: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 154: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 158: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 15c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 160: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 164: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 168: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 16c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 170: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 174: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 178: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 17c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 180: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 184: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 188: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 18c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 190: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 194: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 198: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 19c: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1a0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1a4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1a8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1ac: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1b0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1b4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1b8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1bc: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1c0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1c4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1c8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1cc: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1d0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1d4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1d8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1dc: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1e0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1e4: 00 f0 02 b8 b.w 0x1ec @ imm = #4\n"
-  " 1e8: 00 f0 1b b8 b.w 0x222 @ imm = #54\n"
-  " 1ec: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1f0: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1f4: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1f8: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 1fc: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 200: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 204: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 208: cd f8 ff c7 str.w r12, [sp, #2047]\n"
-  " 20c: 0d f5 80 5d add.w sp, sp, #4096\n"
-  " 210: 08 b0 add sp, #32\n"
-  " 212: 01 b0 add sp, #4\n"
-  " 214: bd ec 10 8a vpop {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
-  " 218: bd e8 e0 4d pop.w {r5, r6, r7, r8, r10, r11, lr}\n"
-  " 21c: d9 f8 24 80 ldr.w r8, [r9, #36]\n"
-  " 220: 70 47 bx lr\n"
-  " 222: d9 f8 94 00 ldr.w r0, [r9, #148]\n"
-  " 226: d9 f8 c8 e2 ldr.w lr, [r9, #712]\n"
-  " 22a: f0 47 blx lr\n"
+  " 0: e92d 4de0 push.w {r5, r6, r7, r8, r10, r11, lr}\n"
+  " 4: ed2d 8a10 vpush {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n"
+  " 8: b081 sub sp, #4\n"
+  " a: 9000 str r0, [sp]\n"
+  " c: 9119 str r1, [sp, #100]\n"
+  " e: ed8d 0a1a vstr s0, [sp, #104]\n"
+  " 12: 921b str r2, [sp, #108]\n"
+  " 14: 931c str r3, [sp, #112]\n"
+  " 16: b088 sub sp, #32\n"
+  " 18: f5ad 5d80 sub.w sp, sp, #4096\n"
+  " 1c: 9808 ldr r0, [sp, #32]\n"
+  " 1e: 981f ldr r0, [sp, #124]\n"
+  " 20: 9821 ldr r0, [sp, #132]\n"
+  " 22: 98ff ldr r0, [sp, #1020]\n"
+  " 24: f8dd 0400 ldr.w r0, [sp, #1024]\n"
+  " 28: f8dd cffc ldr.w r12, [sp, #4092]\n"
+  " 2c: f50d 5c80 add.w r12, sp, #4096\n"
+  " 30: f8dc c000 ldr.w r12, [r12]\n"
" 34: f8d9 c200 ldr.w r12, [r9, #512]\n" + " 38: 9008 str r0, [sp, #32]\n" + " 3a: 901f str r0, [sp, #124]\n" + " 3c: 9021 str r0, [sp, #132]\n" + " 3e: 90ff str r0, [sp, #1020]\n" + " 40: f8cd 0400 str.w r0, [sp, #1024]\n" + " 44: f8cd cffc str.w r12, [sp, #4092]\n" + " 48: f84d 5d04 str r5, [sp, #-4]!\n" + " 4c: f50d 5580 add.w r5, sp, #4096\n" + " 50: f8c5 c004 str.w r12, [r5, #4]\n" + " 54: f85d 5b04 ldr r5, [sp], #4\n" + " 58: f8cd c030 str.w r12, [sp, #48]\n" + " 5c: f8c9 d200 str.w sp, [r9, #512]\n" + " 60: f04d 0c02 orr r12, sp, #2\n" + " 64: f8c9 c200 str.w r12, [r9, #512]\n" + " 68: a909 add r1, sp, #36\n" + " 6a: e9cd 2300 strd r2, r3, [sp]\n" + " 6e: e9dd 020c ldrd r0, r2, [sp, #48]\n" + " 72: e9cd 0202 strd r0, r2, [sp, #8]\n" + " 76: e9dd 020e ldrd r0, r2, [sp, #56]\n" + " 7a: 2a00 cmp r2, #0\n" + " 7c: bf18 it ne\n" + " 7e: aa0f addne r2, sp, #60\n" + " 80: e9cd 0204 strd r0, r2, [sp, #16]\n" + " 84: 460a mov r2, r1\n" + " 86: e9dd 0108 ldrd r0, r1, [sp, #32]\n" + " 8a: e9cd 0100 strd r0, r1, [sp]\n" + " 8e: f8dd c028 ldr.w r12, [sp, #40]\n" + " 92: f8cd c008 str.w r12, [sp, #8]\n" + " 96: 4610 mov r0, r2\n" + " 98: 4619 mov r1, r3\n" + " 9a: 9a07 ldr r2, [sp, #28]\n" + " 9c: 9b08 ldr r3, [sp, #32]\n" + " 9e: f8d0 e030 ldr.w lr, [r0, #48]\n" + " a2: 47f0 blx lr\n" + " a4: f8dd c02c ldr.w r12, [sp, #44]\n" + " a8: f8cd c030 str.w r12, [sp, #48]\n" + " ac: 4648 mov r0, r9\n" + " ae: f8cd 9030 str.w r9, [sp, #48]\n" + " b2: 4604 mov r4, r0\n" + " b4: f10d 0c30 add.w r12, sp, #48\n" + " b8: f1bb 0f00 cmp.w r11, #0\n" + " bc: bf18 it ne\n" + " be: 46e3 movne r11, r12\n" + " c0: f10d 0b30 add.w r11, sp, #48\n" + " c4: ea5f 000b movs.w r0, r11\n" + " c8: bf18 it ne\n" + " ca: a80c addne r0, sp, #48\n" + " cc: ea5f 000b movs.w r0, r11\n" + " d0: bf18 it ne\n" + " d2: a800 addne r0, sp, #0\n" + " d4: f20d 4004 addw r0, sp, #1028\n" + " d8: f1bb 0f00 cmp.w r11, #0\n" + " dc: bf08 it eq\n" + " de: 4658 moveq r0, r11\n" + " e0: f20d 4c04 addw r12, sp, #1028\n" + " e4: f1bb 0f00 cmp.w r11, #0\n" + " e8: bf18 it ne\n" + " ea: 46e3 movne r11, r12\n" + " ec: f8d9 c09c ldr.w r12, [r9, #156]\n" + " f0: f1bc 0f00 cmp.w r12, #0\n" + " f4: d16f bne 0x1d6 @ imm = #222\n" + " f6: f8cd c7ff str.w r12, [sp, #2047]\n" + " fa: f8cd c7ff str.w r12, [sp, #2047]\n" + " fe: f8cd c7ff str.w r12, [sp, #2047]\n" + " 102: f8cd c7ff str.w r12, [sp, #2047]\n" + " 106: f8cd c7ff str.w r12, [sp, #2047]\n" + " 10a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 10e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 112: f8cd c7ff str.w r12, [sp, #2047]\n" + " 116: f8cd c7ff str.w r12, [sp, #2047]\n" + " 11a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 11e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 122: f8cd c7ff str.w r12, [sp, #2047]\n" + " 126: f8cd c7ff str.w r12, [sp, #2047]\n" + " 12a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 12e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 132: f8cd c7ff str.w r12, [sp, #2047]\n" + " 136: f8cd c7ff str.w r12, [sp, #2047]\n" + " 13a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 13e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 142: f8cd c7ff str.w r12, [sp, #2047]\n" + " 146: f8cd c7ff str.w r12, [sp, #2047]\n" + " 14a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 14e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 152: f8cd c7ff str.w r12, [sp, #2047]\n" + " 156: f8cd c7ff str.w r12, [sp, #2047]\n" + " 15a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 15e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 162: f8cd c7ff str.w r12, [sp, #2047]\n" + " 166: f8cd c7ff str.w r12, [sp, #2047]\n" + " 16a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 
16e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 172: f8cd c7ff str.w r12, [sp, #2047]\n" + " 176: f8cd c7ff str.w r12, [sp, #2047]\n" + " 17a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 17e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 182: f8cd c7ff str.w r12, [sp, #2047]\n" + " 186: f8cd c7ff str.w r12, [sp, #2047]\n" + " 18a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 18e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 192: f8cd c7ff str.w r12, [sp, #2047]\n" + " 196: f8cd c7ff str.w r12, [sp, #2047]\n" + " 19a: f8cd c7ff str.w r12, [sp, #2047]\n" + " 19e: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1a2: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1a6: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1aa: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1ae: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1b2: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1b6: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1ba: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1be: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1c2: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1c6: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1ca: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1ce: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1d2: f000 b803 b.w 0x1dc @ imm = #6\n" + " 1d6: f000 b81e b.w 0x216 @ imm = #60\n" + " 1da: 0000 movs r0, r0\n" + " 1dc: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1e0: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1e4: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1e8: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1ec: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1f0: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1f4: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1f8: f8cd c7ff str.w r12, [sp, #2047]\n" + " 1fc: f8cd c7ff str.w r12, [sp, #2047]\n" + " 200: f50d 5d80 add.w sp, sp, #4096\n" + " 204: b008 add sp, #32\n" + " 206: b001 add sp, #4\n" + " 208: ecbd 8a10 vpop {s16, s17, s18, s19, s20, s21, s22, s23, s24, s25, s26, s27, s28, s29, s30, s31}\n" + " 20c: e8bd 4de0 pop.w {r5, r6, r7, r8, r10, r11, lr}\n" + " 210: f8d9 8024 ldr.w r8, [r9, #36]\n" + " 214: 4770 bx lr\n" + " 216: f8d9 009c ldr.w r0, [r9, #156]\n" + " 21a: f8d9 e2d0 ldr.w lr, [r9, #720]\n" + " 21e: 47f0 blx lr\n" }; const char* const VixlLoadFromOffsetResults = { - " 0: e2 68 ldr r2, [r4, #12]\n" - " 2: d4 f8 ff 2f ldr.w r2, [r4, #4095]\n" - " 6: 04 f5 80 52 add.w r2, r4, #4096\n" - " a: 12 68 ldr r2, [r2]\n" - " c: 04 f5 80 12 add.w r2, r4, #1048576\n" - " 10: d2 f8 a4 20 ldr.w r2, [r2, #164]\n" - " 14: 4f f4 80 52 mov.w r2, #4096\n" - " 18: c0 f2 10 02 movt r2, #16\n" - " 1c: 22 44 add r2, r4\n" - " 1e: 12 68 ldr r2, [r2]\n" - " 20: 4f f4 80 5c mov.w r12, #4096\n" - " 24: c0 f2 10 0c movt r12, #16\n" - " 28: 64 44 add r4, r12\n" - " 2a: 24 68 ldr r4, [r4]\n" - " 2c: a2 89 ldrh r2, [r4, #12]\n" - " 2e: b4 f8 ff 2f ldrh.w r2, [r4, #4095]\n" - " 32: 04 f5 80 52 add.w r2, r4, #4096\n" - " 36: 12 88 ldrh r2, [r2]\n" - " 38: 04 f5 80 12 add.w r2, r4, #1048576\n" - " 3c: b2 f8 a4 20 ldrh.w r2, [r2, #164]\n" - " 40: 4f f4 80 52 mov.w r2, #4096\n" - " 44: c0 f2 10 02 movt r2, #16\n" - " 48: 22 44 add r2, r4\n" - " 4a: 12 88 ldrh r2, [r2]\n" - " 4c: 4f f4 80 5c mov.w r12, #4096\n" - " 50: c0 f2 10 0c movt r12, #16\n" - " 54: 64 44 add r4, r12\n" - " 56: 24 88 ldrh r4, [r4]\n" - " 58: d4 e9 03 23 ldrd r2, r3, [r4, #12]\n" - " 5c: d4 e9 ff 23 ldrd r2, r3, [r4, #1020]\n" - " 60: 04 f5 80 62 add.w r2, r4, #1024\n" - " 64: d2 e9 00 23 ldrd r2, r3, [r2]\n" - " 68: 04 f5 80 22 add.w r2, r4, #262144\n" - " 6c: d2 e9 29 23 ldrd r2, r3, [r2, #164]\n" - " 70: 4f f4 80 62 mov.w r2, #1024\n" - " 74: c0 f2 04 02 movt r2, #4\n" - " 78: 22 44 add r2, r4\n" - " 7a: d2 e9 00 23 
ldrd r2, r3, [r2]\n" - " 7e: 4f f4 80 6c mov.w r12, #1024\n" - " 82: c0 f2 04 0c movt r12, #4\n" - " 86: 64 44 add r4, r12\n" - " 88: d4 e9 00 45 ldrd r4, r5, [r4]\n" - " 8c: dc f8 0c 00 ldr.w r0, [r12, #12]\n" - " 90: a4 f5 80 12 sub.w r2, r4, #1048576\n" - " 94: d2 f8 a4 20 ldr.w r2, [r2, #164]\n" - " 98: 94 f9 0c 20 ldrsb.w r2, [r4, #12]\n" - " 9c: 22 7b ldrb r2, [r4, #12]\n" - " 9e: b4 f9 0c 20 ldrsh.w r2, [r4, #12]\n" + " 0: 68e2 ldr r2, [r4, #12]\n" + " 2: f8d4 2fff ldr.w r2, [r4, #4095]\n" + " 6: f504 5280 add.w r2, r4, #4096\n" + " a: 6812 ldr r2, [r2]\n" + " c: f504 1280 add.w r2, r4, #1048576\n" + " 10: f8d2 20a4 ldr.w r2, [r2, #164]\n" + " 14: f44f 5280 mov.w r2, #4096\n" + " 18: f2c0 0210 movt r2, #16\n" + " 1c: 4422 add r2, r4\n" + " 1e: 6812 ldr r2, [r2]\n" + " 20: f44f 5c80 mov.w r12, #4096\n" + " 24: f2c0 0c10 movt r12, #16\n" + " 28: 4464 add r4, r12\n" + " 2a: 6824 ldr r4, [r4]\n" + " 2c: 89a2 ldrh r2, [r4, #12]\n" + " 2e: f8b4 2fff ldrh.w r2, [r4, #4095]\n" + " 32: f504 5280 add.w r2, r4, #4096\n" + " 36: 8812 ldrh r2, [r2]\n" + " 38: f504 1280 add.w r2, r4, #1048576\n" + " 3c: f8b2 20a4 ldrh.w r2, [r2, #164]\n" + " 40: f44f 5280 mov.w r2, #4096\n" + " 44: f2c0 0210 movt r2, #16\n" + " 48: 4422 add r2, r4\n" + " 4a: 8812 ldrh r2, [r2]\n" + " 4c: f44f 5c80 mov.w r12, #4096\n" + " 50: f2c0 0c10 movt r12, #16\n" + " 54: 4464 add r4, r12\n" + " 56: 8824 ldrh r4, [r4]\n" + " 58: e9d4 2303 ldrd r2, r3, [r4, #12]\n" + " 5c: e9d4 23ff ldrd r2, r3, [r4, #1020]\n" + " 60: f504 6280 add.w r2, r4, #1024\n" + " 64: e9d2 2300 ldrd r2, r3, [r2]\n" + " 68: f504 2280 add.w r2, r4, #262144\n" + " 6c: e9d2 2329 ldrd r2, r3, [r2, #164]\n" + " 70: f44f 6280 mov.w r2, #1024\n" + " 74: f2c0 0204 movt r2, #4\n" + " 78: 4422 add r2, r4\n" + " 7a: e9d2 2300 ldrd r2, r3, [r2]\n" + " 7e: f44f 6c80 mov.w r12, #1024\n" + " 82: f2c0 0c04 movt r12, #4\n" + " 86: 4464 add r4, r12\n" + " 88: e9d4 4500 ldrd r4, r5, [r4]\n" + " 8c: f8dc 000c ldr.w r0, [r12, #12]\n" + " 90: f5a4 1280 sub.w r2, r4, #1048576\n" + " 94: f8d2 20a4 ldr.w r2, [r2, #164]\n" + " 98: f994 200c ldrsb.w r2, [r4, #12]\n" + " 9c: 7b22 ldrb r2, [r4, #12]\n" + " 9e: f9b4 200c ldrsh.w r2, [r4, #12]\n" }; const char* const VixlStoreToOffsetResults = { - " 0: e2 60 str r2, [r4, #12]\n" - " 2: c4 f8 ff 2f str.w r2, [r4, #4095]\n" - " 6: 04 f5 80 5c add.w r12, r4, #4096\n" - " a: cc f8 00 20 str.w r2, [r12]\n" - " e: 04 f5 80 1c add.w r12, r4, #1048576\n" - " 12: cc f8 a4 20 str.w r2, [r12, #164]\n" - " 16: 4f f4 80 5c mov.w r12, #4096\n" - " 1a: c0 f2 10 0c movt r12, #16\n" - " 1e: a4 44 add r12, r4\n" - " 20: cc f8 00 20 str.w r2, [r12]\n" - " 24: 4f f4 80 5c mov.w r12, #4096\n" - " 28: c0 f2 10 0c movt r12, #16\n" - " 2c: a4 44 add r12, r4\n" - " 2e: cc f8 00 40 str.w r4, [r12]\n" - " 32: a2 81 strh r2, [r4, #12]\n" - " 34: a4 f8 ff 2f strh.w r2, [r4, #4095]\n" - " 38: 04 f5 80 5c add.w r12, r4, #4096\n" - " 3c: ac f8 00 20 strh.w r2, [r12]\n" - " 40: 04 f5 80 1c add.w r12, r4, #1048576\n" - " 44: ac f8 a4 20 strh.w r2, [r12, #164]\n" - " 48: 4f f4 80 5c mov.w r12, #4096\n" - " 4c: c0 f2 10 0c movt r12, #16\n" - " 50: a4 44 add r12, r4\n" - " 52: ac f8 00 20 strh.w r2, [r12]\n" - " 56: 4f f4 80 5c mov.w r12, #4096\n" - " 5a: c0 f2 10 0c movt r12, #16\n" - " 5e: a4 44 add r12, r4\n" - " 60: ac f8 00 40 strh.w r4, [r12]\n" - " 64: c4 e9 03 23 strd r2, r3, [r4, #12]\n" - " 68: c4 e9 ff 23 strd r2, r3, [r4, #1020]\n" - " 6c: 04 f5 80 6c add.w r12, r4, #1024\n" - " 70: cc e9 00 23 strd r2, r3, [r12]\n" - " 74: 04 f5 80 2c add.w r12, r4, 
#262144\n" - " 78: cc e9 29 23 strd r2, r3, [r12, #164]\n" - " 7c: 4f f4 80 6c mov.w r12, #1024\n" - " 80: c0 f2 04 0c movt r12, #4\n" - " 84: a4 44 add r12, r4\n" - " 86: cc e9 00 23 strd r2, r3, [r12]\n" - " 8a: 4f f4 80 6c mov.w r12, #1024\n" - " 8e: c0 f2 04 0c movt r12, #4\n" - " 92: a4 44 add r12, r4\n" - " 94: cc e9 00 45 strd r4, r5, [r12]\n" - " 98: cc f8 0c 00 str.w r0, [r12, #12]\n" - " 9c: a4 f5 80 1c sub.w r12, r4, #1048576\n" - " a0: cc f8 a4 20 str.w r2, [r12, #164]\n" - " a4: 22 73 strb r2, [r4, #12]\n" + " 0: 60e2 str r2, [r4, #12]\n" + " 2: f8c4 2fff str.w r2, [r4, #4095]\n" + " 6: f504 5c80 add.w r12, r4, #4096\n" + " a: f8cc 2000 str.w r2, [r12]\n" + " e: f504 1c80 add.w r12, r4, #1048576\n" + " 12: f8cc 20a4 str.w r2, [r12, #164]\n" + " 16: f44f 5c80 mov.w r12, #4096\n" + " 1a: f2c0 0c10 movt r12, #16\n" + " 1e: 44a4 add r12, r4\n" + " 20: f8cc 2000 str.w r2, [r12]\n" + " 24: f44f 5c80 mov.w r12, #4096\n" + " 28: f2c0 0c10 movt r12, #16\n" + " 2c: 44a4 add r12, r4\n" + " 2e: f8cc 4000 str.w r4, [r12]\n" + " 32: 81a2 strh r2, [r4, #12]\n" + " 34: f8a4 2fff strh.w r2, [r4, #4095]\n" + " 38: f504 5c80 add.w r12, r4, #4096\n" + " 3c: f8ac 2000 strh.w r2, [r12]\n" + " 40: f504 1c80 add.w r12, r4, #1048576\n" + " 44: f8ac 20a4 strh.w r2, [r12, #164]\n" + " 48: f44f 5c80 mov.w r12, #4096\n" + " 4c: f2c0 0c10 movt r12, #16\n" + " 50: 44a4 add r12, r4\n" + " 52: f8ac 2000 strh.w r2, [r12]\n" + " 56: f44f 5c80 mov.w r12, #4096\n" + " 5a: f2c0 0c10 movt r12, #16\n" + " 5e: 44a4 add r12, r4\n" + " 60: f8ac 4000 strh.w r4, [r12]\n" + " 64: e9c4 2303 strd r2, r3, [r4, #12]\n" + " 68: e9c4 23ff strd r2, r3, [r4, #1020]\n" + " 6c: f504 6c80 add.w r12, r4, #1024\n" + " 70: e9cc 2300 strd r2, r3, [r12]\n" + " 74: f504 2c80 add.w r12, r4, #262144\n" + " 78: e9cc 2329 strd r2, r3, [r12, #164]\n" + " 7c: f44f 6c80 mov.w r12, #1024\n" + " 80: f2c0 0c04 movt r12, #4\n" + " 84: 44a4 add r12, r4\n" + " 86: e9cc 2300 strd r2, r3, [r12]\n" + " 8a: f44f 6c80 mov.w r12, #1024\n" + " 8e: f2c0 0c04 movt r12, #4\n" + " 92: 44a4 add r12, r4\n" + " 94: e9cc 4500 strd r4, r5, [r12]\n" + " 98: f8cc 000c str.w r0, [r12, #12]\n" + " 9c: f5a4 1c80 sub.w r12, r4, #1048576\n" + " a0: f8cc 20a4 str.w r2, [r12, #164]\n" + " a4: 7322 strb r2, [r4, #12]\n" }; diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h index 377b7fe352..5f68a7c701 100644 --- a/compiler/utils/atomic_dex_ref_map-inl.h +++ b/compiler/utils/atomic_dex_ref_map-inl.h @@ -21,12 +21,13 @@ #include <type_traits> +#include "base/macros.h" #include "dex/class_reference.h" #include "dex/dex_file-inl.h" #include "dex/method_reference.h" #include "dex/type_reference.h" -namespace art { +namespace art HIDDEN { template <typename DexFileReferenceType, typename Value> inline size_t AtomicDexRefMap<DexFileReferenceType, Value>::NumberOfDexIndices( diff --git a/compiler/utils/atomic_dex_ref_map.h b/compiler/utils/atomic_dex_ref_map.h index a8c285f765..b10fef50c5 100644 --- a/compiler/utils/atomic_dex_ref_map.h +++ b/compiler/utils/atomic_dex_ref_map.h @@ -19,10 +19,11 @@ #include "base/atomic.h" #include "base/dchecked_vector.h" +#include "base/macros.h" #include "base/safe_map.h" #include "dex/dex_file_reference.h" -namespace art { +namespace art HIDDEN { class DexFile; diff --git a/compiler/utils/atomic_dex_ref_map_test.cc b/compiler/utils/atomic_dex_ref_map_test.cc index 864531ed91..329735b796 100644 --- a/compiler/utils/atomic_dex_ref_map_test.cc +++ b/compiler/utils/atomic_dex_ref_map_test.cc @@ -18,12 +18,13 
@@ #include <memory> +#include "base/macros.h" #include "common_runtime_test.h" #include "dex/dex_file-inl.h" #include "dex/method_reference.h" #include "scoped_thread_state_change-inl.h" -namespace art { +namespace art HIDDEN { class AtomicDexRefMapTest : public CommonRuntimeTest {}; diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h index d4a9cc829b..db744c53f7 100644 --- a/compiler/utils/dedupe_set-inl.h +++ b/compiler/utils/dedupe_set-inl.h @@ -27,11 +27,12 @@ #include "android-base/stringprintf.h" #include "base/hash_set.h" +#include "base/macros.h" #include "base/mutex.h" #include "base/stl_util.h" #include "base/time_utils.h" -namespace art { +namespace art HIDDEN { template <typename InKey, typename StoreKey, diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h index a1ba208d2c..42db8e3ca0 100644 --- a/compiler/utils/dedupe_set.h +++ b/compiler/utils/dedupe_set.h @@ -23,7 +23,7 @@ #include "base/macros.h" -namespace art { +namespace art HIDDEN { class Thread; diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc index b390508ed4..89385e7c82 100644 --- a/compiler/utils/dedupe_set_test.cc +++ b/compiler/utils/dedupe_set_test.cc @@ -21,11 +21,12 @@ #include <vector> #include "base/array_ref.h" +#include "base/macros.h" #include "dedupe_set-inl.h" #include "gtest/gtest.h" #include "thread-current-inl.h" -namespace art { +namespace art HIDDEN { class DedupeSetTestHashFunc { public: diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc index d6d49f8faa..8b47b38e63 100644 --- a/compiler/utils/jni_macro_assembler.cc +++ b/compiler/utils/jni_macro_assembler.cc @@ -35,7 +35,7 @@ #include "base/globals.h" #include "base/memory_region.h" -namespace art { +namespace art HIDDEN { using MacroAsm32UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k32>>; @@ -58,6 +58,7 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator)); #endif default: + UNUSED(allocator); LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set; UNREACHABLE(); } diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h index 7022e3df92..0c729705dc 100644 --- a/compiler/utils/jni_macro_assembler.h +++ b/compiler/utils/jni_macro_assembler.h @@ -30,7 +30,7 @@ #include "managed_register.h" #include "offsets.h" -namespace art { +namespace art HIDDEN { class ArenaAllocator; class DebugFrameOpCodeWriterForAssembler; @@ -118,37 +118,18 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { // Store routines virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0; virtual void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) = 0; - virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0; virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0; - virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) = 0; - - virtual void StoreStackOffsetToThread(ThreadOffset<kPointerSize> thr_offs, - FrameOffset fr_offs) = 0; - - virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0; - - virtual void StoreSpanning(FrameOffset dest, - ManagedRegister src, - FrameOffset in_off) = 0; + // Stores stack pointer by tagging it if required so we can walk the stack. In debuggable runtimes + // we use tag to tell if we are using JITed code or AOT code. 
In non-debuggable runtimes we never + // use JITed code when AOT code is present. So checking for AOT code is sufficient to detect which + // code is being executed. We avoid tagging in non-debuggable runtimes to reduce instructions. + virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs, bool tag_sp) = 0; // Load routines virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0; virtual void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) = 0; - virtual void LoadFromThread(ManagedRegister dest, - ThreadOffset<kPointerSize> src, - size_t size) = 0; - - virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0; - // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference. - virtual void LoadRef(ManagedRegister dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) = 0; - - virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0; - virtual void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset<kPointerSize> offs) = 0; // Copying routines @@ -165,53 +146,7 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0; - virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset<kPointerSize> thr_offs) = 0; - - virtual void CopyRawPtrToThread(ThreadOffset<kPointerSize> thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch) = 0; - - virtual void CopyRef(FrameOffset dest, FrameOffset src) = 0; - virtual void CopyRef(FrameOffset dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) = 0; - - virtual void Copy(FrameOffset dest, FrameOffset src, size_t size) = 0; - - virtual void Copy(FrameOffset dest, - ManagedRegister src_base, - Offset src_offset, - ManagedRegister scratch, - size_t size) = 0; - - virtual void Copy(ManagedRegister dest_base, - Offset dest_offset, - FrameOffset src, - ManagedRegister scratch, - size_t size) = 0; - - virtual void Copy(FrameOffset dest, - FrameOffset src_base, - Offset src_offset, - ManagedRegister scratch, - size_t size) = 0; - - virtual void Copy(ManagedRegister dest, - Offset dest_offset, - ManagedRegister src, - Offset src_offset, - ManagedRegister scratch, - size_t size) = 0; - - virtual void Copy(FrameOffset dest, - Offset dest_offset, - FrameOffset src, - Offset src_offset, - ManagedRegister scratch, - size_t size) = 0; - - virtual void MemoryBarrier(ManagedRegister scratch) = 0; + virtual void Move(ManagedRegister dst, size_t value) = 0; // Sign extension virtual void SignExtend(ManagedRegister mreg, size_t size) = 0; @@ -223,20 +158,10 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { virtual void GetCurrentThread(ManagedRegister dest) = 0; virtual void GetCurrentThread(FrameOffset dest_offset) = 0; - // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value), - // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly - // stale reference that can be used to avoid loading the spilled value to - // see if the value is null. - virtual void CreateJObject(ManagedRegister out_reg, - FrameOffset spilled_reference_offset, - ManagedRegister in_reg, - bool null_allowed) = 0; - - // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value), - // or to be null if the value is null and `null_allowed`. 
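The new StoreStackPointerToThread(thr_offs, tag_sp) contract above is visible in the expected ARM output earlier in this change ("orr r12, sp, #2" followed by "str.w r12, [r9, #512]"). A minimal sketch of an ARM implementation, assuming the VIXL scratch-register scope plus the `___` macro and the tr (r9) thread register used elsewhere in these files:

    void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs,
                                                             bool tag_sp) {
      if (tag_sp) {
        // Store SP | 0x2 so the stack walker can recognize a JITed-code
        // transition in debuggable runtimes.
        UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
        vixl32::Register scratch = temps.Acquire();
        ___ Orr(scratch, sp, 0x2);
        asm_.StoreToOffset(kStoreWord, scratch, tr, thr_offs.Int32Value());
      } else {
        asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
      }
    }
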
- virtual void CreateJObject(FrameOffset out_off, - FrameOffset spilled_reference_offset, - bool null_allowed) = 0; + // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path. + virtual void DecodeJNITransitionOrLocalJObject(ManagedRegister reg, + JNIMacroLabel* slow_path, + JNIMacroLabel* resume) = 0; // Heap::VerifyObject on src. In some cases (such as a reference to this) we // know that src may not be null. @@ -282,6 +207,8 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> { virtual void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) = 0; + // Emit a conditional jump to label if the loaded value from specified locations is not zero. + virtual void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) = 0; // Code at this offset will serve as the target for the Jump call. virtual void Bind(JNIMacroLabel* label) = 0; diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h index e77177e43e..ac8e7d3010 100644 --- a/compiler/utils/jni_macro_assembler_test.h +++ b/compiler/utils/jni_macro_assembler_test.h @@ -20,6 +20,7 @@ #include "jni_macro_assembler.h" #include "assembler_test_base.h" +#include "base/macros.h" #include "base/malloc_arena_pool.h" #include "common_runtime_test.h" // For ScratchFile @@ -30,7 +31,7 @@ #include <fstream> #include <iterator> -namespace art { +namespace art HIDDEN { template<typename Ass> class JNIMacroAssemblerTest : public AssemblerTestBase { @@ -39,7 +40,7 @@ class JNIMacroAssemblerTest : public AssemblerTestBase { return assembler_.get(); } - typedef std::string (*TestFn)(JNIMacroAssemblerTest* assembler_test, Ass* assembler); + using TestFn = std::string (*)(JNIMacroAssemblerTest *, Ass *); void DriverFn(TestFn f, const std::string& test_name) { DriverWrapper(f(this, assembler_.get()), test_name); diff --git a/compiler/utils/label.h b/compiler/utils/label.h index 282500b1b7..0368d90a26 100644 --- a/compiler/utils/label.h +++ b/compiler/utils/label.h @@ -20,7 +20,9 @@ #include <android-base/logging.h> #include <android-base/macros.h> -namespace art { +#include "base/macros.h" + +namespace art HIDDEN { class Assembler; class AssemblerBuffer; diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h index a3b33ba94d..ba6b46b3b3 100644 --- a/compiler/utils/managed_register.h +++ b/compiler/utils/managed_register.h @@ -20,9 +20,10 @@ #include <type_traits> #include <vector> +#include "base/macros.h" #include "base/value_object.h" -namespace art { +namespace art HIDDEN { namespace arm { class ArmManagedRegister; @@ -31,6 +32,10 @@ namespace arm64 { class Arm64ManagedRegister; } // namespace arm64 +namespace riscv64 { +class Riscv64ManagedRegister; +} // namespace riscv64 + namespace x86 { class X86ManagedRegister; } // namespace x86 @@ -50,6 +55,7 @@ class ManagedRegister : public ValueObject { constexpr arm::ArmManagedRegister AsArm() const; constexpr arm64::Arm64ManagedRegister AsArm64() const; + constexpr riscv64::Riscv64ManagedRegister AsRiscv64() const; constexpr x86::X86ManagedRegister AsX86() const; constexpr x86_64::X86_64ManagedRegister AsX86_64() const; diff --git a/compiler/utils/riscv64/managed_register_riscv64.cc b/compiler/utils/riscv64/managed_register_riscv64.cc new file mode 100644 index 0000000000..560019ae09 --- /dev/null +++ b/compiler/utils/riscv64/managed_register_riscv64.cc @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "managed_register_riscv64.h" + +#include "base/globals.h" + +namespace art { +namespace riscv64 { + +bool Riscv64ManagedRegister::Overlaps(const Riscv64ManagedRegister& other) const { + if (IsNoRegister() || other.IsNoRegister()) { + return false; + } + CHECK(IsValidManagedRegister()); + CHECK(other.IsValidManagedRegister()); + + return Equals(other); +} + +void Riscv64ManagedRegister::Print(std::ostream& os) const { + if (!IsValidManagedRegister()) { + os << "No Register"; + } else if (IsXRegister()) { + os << "XRegister: " << static_cast<int>(AsXRegister()); + } else if (IsFRegister()) { + os << "FRegister: " << static_cast<int>(AsFRegister()); + } else { + os << "??: " << RegId(); + } +} + +std::ostream& operator<<(std::ostream& os, const Riscv64ManagedRegister& reg) { + reg.Print(os); + return os; +} + +} // namespace riscv64 +} // namespace art diff --git a/compiler/utils/riscv64/managed_register_riscv64.h b/compiler/utils/riscv64/managed_register_riscv64.h new file mode 100644 index 0000000000..8e02a9dcc8 --- /dev/null +++ b/compiler/utils/riscv64/managed_register_riscv64.h @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_RISCV64_MANAGED_REGISTER_RISCV64_H_ +#define ART_COMPILER_UTILS_RISCV64_MANAGED_REGISTER_RISCV64_H_ + +#include <android-base/logging.h> + +#include "arch/riscv64/registers_riscv64.h" +#include "base/globals.h" +#include "base/macros.h" +#include "utils/managed_register.h" + +namespace art { +namespace riscv64 { + +const int kNumberOfXRegIds = kNumberOfXRegisters; +const int kNumberOfXAllocIds = kNumberOfXRegisters; + +const int kNumberOfFRegIds = kNumberOfFRegisters; +const int kNumberOfFAllocIds = kNumberOfFRegisters; + +const int kNumberOfRegIds = kNumberOfXRegIds + kNumberOfFRegIds; +const int kNumberOfAllocIds = kNumberOfXAllocIds + kNumberOfFAllocIds; + +// Register ids map: +// [0..R[ core registers (enum XRegister) +// [R..F[ floating-point registers (enum FRegister) +// where +// R = kNumberOfXRegIds +// F = R + kNumberOfFRegIds + +// An instance of class 'ManagedRegister' represents a single Riscv64 register. +// A register can be one of the following: +// * core register (enum XRegister) +// * floating-point register (enum FRegister) +// +// 'ManagedRegister::NoRegister()' provides an invalid register. 
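In other words, an F-register id is simply the F-register number shifted past the X-register ids. A short usage sketch (mirroring the unit tests below), assuming the standard riscv64 register enums:

    Riscv64ManagedRegister x = Riscv64ManagedRegister::FromXRegister(A0);   // id in [0, kNumberOfXRegIds)
    Riscv64ManagedRegister f = Riscv64ManagedRegister::FromFRegister(FA0);  // id == kNumberOfXRegIds + FA0
    CHECK(x.IsXRegister());
    CHECK(f.IsFRegister());
    CHECK_EQ(FA0, f.AsFRegister());  // recovered by subtracting kNumberOfXRegIds
    CHECK(!x.Overlaps(f));           // on riscv64, registers overlap only if equal
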
+// There is a one-to-one mapping between ManagedRegister and register id. +class Riscv64ManagedRegister : public ManagedRegister { + public: + constexpr XRegister AsXRegister() const { + CHECK(IsXRegister()); + return static_cast<XRegister>(id_); + } + + constexpr FRegister AsFRegister() const { + CHECK(IsFRegister()); + return static_cast<FRegister>(id_ - kNumberOfXRegIds); + } + + constexpr bool IsXRegister() const { + CHECK(IsValidManagedRegister()); + return (0 <= id_) && (id_ < kNumberOfXRegIds); + } + + constexpr bool IsFRegister() const { + CHECK(IsValidManagedRegister()); + const int test = id_ - kNumberOfXRegIds; + return (0 <= test) && (test < kNumberOfFRegIds); + } + + void Print(std::ostream& os) const; + + // Returns true if the two managed-registers ('this' and 'other') overlap. + // Either managed-register may be the NoRegister. If both are the NoRegister + // then false is returned. + bool Overlaps(const Riscv64ManagedRegister& other) const; + + static constexpr Riscv64ManagedRegister FromXRegister(XRegister r) { + CHECK_NE(r, kNoXRegister); + return FromRegId(r); + } + + static constexpr Riscv64ManagedRegister FromFRegister(FRegister r) { + CHECK_NE(r, kNoFRegister); + return FromRegId(r + kNumberOfXRegIds); + } + + private: + constexpr bool IsValidManagedRegister() const { return (0 <= id_) && (id_ < kNumberOfRegIds); } + + constexpr int RegId() const { + CHECK(!IsNoRegister()); + return id_; + } + + int AllocId() const { + CHECK(IsValidManagedRegister()); + CHECK_LT(id_, kNumberOfAllocIds); + return id_; + } + + int AllocIdLow() const; + int AllocIdHigh() const; + + friend class ManagedRegister; + + explicit constexpr Riscv64ManagedRegister(int reg_id) : ManagedRegister(reg_id) {} + + static constexpr Riscv64ManagedRegister FromRegId(int reg_id) { + Riscv64ManagedRegister reg(reg_id); + CHECK(reg.IsValidManagedRegister()); + return reg; + } +}; + +std::ostream& operator<<(std::ostream& os, const Riscv64ManagedRegister& reg); + +} // namespace riscv64 + +constexpr inline riscv64::Riscv64ManagedRegister ManagedRegister::AsRiscv64() const { + riscv64::Riscv64ManagedRegister reg(id_); + CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister()); + return reg; +} + +} // namespace art + +#endif // ART_COMPILER_UTILS_RISCV64_MANAGED_REGISTER_RISCV64_H_ diff --git a/compiler/utils/riscv64/managed_register_riscv64_test.cc b/compiler/utils/riscv64/managed_register_riscv64_test.cc new file mode 100644 index 0000000000..c6ad2dc38a --- /dev/null +++ b/compiler/utils/riscv64/managed_register_riscv64_test.cc @@ -0,0 +1,204 @@ +/* + * Copyright (C) 2023 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "managed_register_riscv64.h" + +#include "base/globals.h" +#include "gtest/gtest.h" + +namespace art { +namespace riscv64 { + +TEST(Riscv64ManagedRegister, NoRegister) { + Riscv64ManagedRegister reg = ManagedRegister::NoRegister().AsRiscv64(); + EXPECT_TRUE(reg.IsNoRegister()); +} + +TEST(Riscv64ManagedRegister, XRegister) { + Riscv64ManagedRegister reg = Riscv64ManagedRegister::FromXRegister(Zero); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(Zero, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(RA); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(RA, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(SP); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(SP, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(GP); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(GP, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(T0); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(T0, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(T2); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(T2, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(S0); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(S0, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(A0); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(A0, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(A7); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(A7, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(S2); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(S2, reg.AsXRegister()); + + reg = Riscv64ManagedRegister::FromXRegister(T3); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_TRUE(reg.IsXRegister()); + EXPECT_FALSE(reg.IsFRegister()); + EXPECT_EQ(T3, reg.AsXRegister()); +} + +TEST(Riscv64ManagedRegister, FRegister) { + Riscv64ManagedRegister reg = Riscv64ManagedRegister::FromFRegister(FT0); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FT0, reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + + reg = Riscv64ManagedRegister::FromFRegister(FT1); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FT1, reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FT1))); + + reg = Riscv64ManagedRegister::FromFRegister(FS0); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FS0, reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FS0))); + + reg = Riscv64ManagedRegister::FromFRegister(FA0); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FA0, 
reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FA0))); + + reg = Riscv64ManagedRegister::FromFRegister(FA7); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FA7, reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FA7))); + + reg = Riscv64ManagedRegister::FromFRegister(FS4); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FS4, reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FS4))); + + reg = Riscv64ManagedRegister::FromFRegister(FT11); + EXPECT_FALSE(reg.IsNoRegister()); + EXPECT_FALSE(reg.IsXRegister()); + EXPECT_TRUE(reg.IsFRegister()); + EXPECT_EQ(FT11, reg.AsFRegister()); + EXPECT_TRUE(reg.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); +} + +TEST(Riscv64ManagedRegister, Equals) { + ManagedRegister no_reg = ManagedRegister::NoRegister(); + EXPECT_TRUE(no_reg.Equals(Riscv64ManagedRegister::NoRegister())); + EXPECT_FALSE(no_reg.Equals(Riscv64ManagedRegister::FromXRegister(Zero))); + EXPECT_FALSE(no_reg.Equals(Riscv64ManagedRegister::FromXRegister(A1))); + EXPECT_FALSE(no_reg.Equals(Riscv64ManagedRegister::FromXRegister(S2))); + EXPECT_FALSE(no_reg.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + EXPECT_FALSE(no_reg.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); + + Riscv64ManagedRegister reg_Zero = Riscv64ManagedRegister::FromXRegister(Zero); + EXPECT_FALSE(reg_Zero.Equals(Riscv64ManagedRegister::NoRegister())); + EXPECT_TRUE(reg_Zero.Equals(Riscv64ManagedRegister::FromXRegister(Zero))); + EXPECT_FALSE(reg_Zero.Equals(Riscv64ManagedRegister::FromXRegister(A1))); + EXPECT_FALSE(reg_Zero.Equals(Riscv64ManagedRegister::FromXRegister(S2))); + EXPECT_FALSE(reg_Zero.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + EXPECT_FALSE(reg_Zero.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); + + Riscv64ManagedRegister reg_A1 = Riscv64ManagedRegister::FromXRegister(A1); + EXPECT_FALSE(reg_A1.Equals(Riscv64ManagedRegister::NoRegister())); + EXPECT_FALSE(reg_A1.Equals(Riscv64ManagedRegister::FromXRegister(Zero))); + EXPECT_FALSE(reg_A1.Equals(Riscv64ManagedRegister::FromXRegister(A0))); + EXPECT_TRUE(reg_A1.Equals(Riscv64ManagedRegister::FromXRegister(A1))); + EXPECT_FALSE(reg_A1.Equals(Riscv64ManagedRegister::FromXRegister(S2))); + EXPECT_FALSE(reg_A1.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + EXPECT_FALSE(reg_A1.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); + + Riscv64ManagedRegister reg_S2 = Riscv64ManagedRegister::FromXRegister(S2); + EXPECT_FALSE(reg_S2.Equals(Riscv64ManagedRegister::NoRegister())); + EXPECT_FALSE(reg_S2.Equals(Riscv64ManagedRegister::FromXRegister(Zero))); + EXPECT_FALSE(reg_S2.Equals(Riscv64ManagedRegister::FromXRegister(A1))); + EXPECT_FALSE(reg_S2.Equals(Riscv64ManagedRegister::FromXRegister(S1))); + EXPECT_TRUE(reg_S2.Equals(Riscv64ManagedRegister::FromXRegister(S2))); + EXPECT_FALSE(reg_S2.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + EXPECT_FALSE(reg_S2.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); + + Riscv64ManagedRegister reg_F0 = Riscv64ManagedRegister::FromFRegister(FT0); + EXPECT_FALSE(reg_F0.Equals(Riscv64ManagedRegister::NoRegister())); + EXPECT_FALSE(reg_F0.Equals(Riscv64ManagedRegister::FromXRegister(Zero))); + EXPECT_FALSE(reg_F0.Equals(Riscv64ManagedRegister::FromXRegister(A1))); + EXPECT_FALSE(reg_F0.Equals(Riscv64ManagedRegister::FromXRegister(S2))); + 
EXPECT_TRUE(reg_F0.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + EXPECT_FALSE(reg_F0.Equals(Riscv64ManagedRegister::FromFRegister(FT1))); + EXPECT_FALSE(reg_F0.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); + + Riscv64ManagedRegister reg_F31 = Riscv64ManagedRegister::FromFRegister(FT11); + EXPECT_FALSE(reg_F31.Equals(Riscv64ManagedRegister::NoRegister())); + EXPECT_FALSE(reg_F31.Equals(Riscv64ManagedRegister::FromXRegister(Zero))); + EXPECT_FALSE(reg_F31.Equals(Riscv64ManagedRegister::FromXRegister(A1))); + EXPECT_FALSE(reg_F31.Equals(Riscv64ManagedRegister::FromXRegister(S2))); + EXPECT_FALSE(reg_F31.Equals(Riscv64ManagedRegister::FromFRegister(FT0))); + EXPECT_FALSE(reg_F31.Equals(Riscv64ManagedRegister::FromFRegister(FT1))); + EXPECT_TRUE(reg_F31.Equals(Riscv64ManagedRegister::FromFRegister(FT11))); +} + +} // namespace riscv64 +} // namespace art diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h index c348f2c8ee..d0fff73df3 100644 --- a/compiler/utils/stack_checks.h +++ b/compiler/utils/stack_checks.h @@ -18,8 +18,9 @@ #define ART_COMPILER_UTILS_STACK_CHECKS_H_ #include "arch/instruction_set.h" +#include "base/macros.h" -namespace art { +namespace art HIDDEN { // Size of a frame that we definitely consider large. Anything larger than this should // definitely get a stack overflow check. diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc deleted file mode 100644 index 6e0773bba4..0000000000 --- a/compiler/utils/swap_space.cc +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (C) 2014 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "swap_space.h" - -#include <sys/mman.h> - -#include <algorithm> -#include <numeric> - -#include "base/bit_utils.h" -#include "base/macros.h" -#include "base/mutex.h" -#include "thread-current-inl.h" - -namespace art { - -// The chunk size by which the swap file is increased and mapped. 
-static constexpr size_t kMininumMapSize = 16 * MB; - -static constexpr bool kCheckFreeMaps = false; - -template <typename FreeBySizeSet> -static void DumpFreeMap(const FreeBySizeSet& free_by_size) { - size_t last_size = static_cast<size_t>(-1); - for (const auto& entry : free_by_size) { - if (last_size != entry.size) { - last_size = entry.size; - LOG(INFO) << "Size " << last_size; - } - LOG(INFO) << " 0x" << std::hex << entry.free_by_start_entry->Start() - << " size=" << std::dec << entry.free_by_start_entry->size; - } -} - -void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) { - auto free_by_start_pos = free_by_size_pos->free_by_start_entry; - free_by_size_.erase(free_by_size_pos); - free_by_start_.erase(free_by_start_pos); -} - -inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) { - DCHECK_NE(chunk.size, 0u); - auto insert_result = free_by_start_.insert(chunk); - DCHECK(insert_result.second); - free_by_size_.emplace(chunk.size, insert_result.first); -} - -SwapSpace::SwapSpace(int fd, size_t initial_size) - : fd_(fd), - size_(0), - lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) { - // Assume that the file is unlinked. - - InsertChunk(NewFileChunk(initial_size)); -} - -SwapSpace::~SwapSpace() { - // Unmap all mmapped chunks. Nothing should be allocated anymore at - // this point, so there should be only full size chunks in free_by_start_. - for (const SpaceChunk& chunk : free_by_start_) { - if (munmap(chunk.ptr, chunk.size) != 0) { - PLOG(ERROR) << "Failed to unmap swap space chunk at " - << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size; - } - } - // All arenas are backed by the same file. Just close the descriptor. - close(fd_); -} - -template <typename FreeByStartSet, typename FreeBySizeSet> -static size_t CollectFree(const FreeByStartSet& free_by_start, const FreeBySizeSet& free_by_size) { - if (free_by_start.size() != free_by_size.size()) { - LOG(FATAL) << "Size: " << free_by_start.size() << " vs " << free_by_size.size(); - } - - // Calculate over free_by_size. - size_t sum1 = 0; - for (const auto& entry : free_by_size) { - sum1 += entry.free_by_start_entry->size; - } - - // Calculate over free_by_start. - size_t sum2 = 0; - for (const auto& entry : free_by_start) { - sum2 += entry.size; - } - - if (sum1 != sum2) { - LOG(FATAL) << "Sum: " << sum1 << " vs " << sum2; - } - return sum1; -} - -void* SwapSpace::Alloc(size_t size) { - MutexLock lock(Thread::Current(), lock_); - size = RoundUp(size, 8U); - - // Check the free list for something that fits. - // TODO: Smarter implementation. Global biggest chunk, ... - auto it = free_by_start_.empty() - ? free_by_size_.end() - : free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() }); - if (it != free_by_size_.end()) { - SpaceChunk old_chunk = *it->free_by_start_entry; - if (old_chunk.size == size) { - RemoveChunk(it); - } else { - // Avoid deallocating and allocating the std::set<> nodes. - // This would be much simpler if we could use replace() from Boost.Bimap. - - // The free_by_start_ map contains disjoint intervals ordered by the `ptr`. - // Shrinking the interval does not affect the ordering. - it->free_by_start_entry->ptr += size; - it->free_by_start_entry->size -= size; - - auto node = free_by_size_.extract(it); - node.value().size -= size; - free_by_size_.insert(std::move(node)); - } - return old_chunk.ptr; - } else { - // Not a big enough free chunk, need to increase file size. 
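Condensed, the removed allocator used best-fit over a size-ordered free set, growing the backing file only when no free chunk is large enough. A simplified sketch of that strategy (hypothetical single-map model, without the start-ordered twin set and without coalescing):

    std::multimap<size_t, uint8_t*> free_by_size;  // chunk size -> chunk start

    uint8_t* Alloc(size_t size) {
      auto it = free_by_size.lower_bound(size);  // smallest free chunk >= size
      if (it == free_by_size.end()) {
        return GrowFileAndMap(size);  // assumed helper: ftruncate + mmap, as above
      }
      uint8_t* ptr = it->second;
      size_t remainder = it->first - size;
      free_by_size.erase(it);
      if (remainder != 0u) {
        free_by_size.emplace(remainder, ptr + size);  // keep the tail on the free list
      }
      return ptr;
    }
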
- SpaceChunk new_chunk = NewFileChunk(size); - if (new_chunk.size != size) { - // Insert the remainder. - SpaceChunk remainder = { new_chunk.ptr + size, new_chunk.size - size }; - InsertChunk(remainder); - } - return new_chunk.ptr; - } -} - -SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) { -#if !defined(__APPLE__) - size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize)); - int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part)); - if (result != 0) { - PLOG(FATAL) << "Unable to increase swap file."; - } - uint8_t* ptr = reinterpret_cast<uint8_t*>( - mmap(nullptr, next_part, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, size_)); - if (ptr == MAP_FAILED) { - LOG(ERROR) << "Unable to mmap new swap file chunk."; - LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size; - LOG(ERROR) << "Free list:"; - DumpFreeMap(free_by_size_); - LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_); - PLOG(FATAL) << "Unable to mmap new swap file chunk."; - } - size_ += next_part; - SpaceChunk new_chunk = {ptr, next_part}; - return new_chunk; -#else - UNUSED(min_size, kMininumMapSize); - LOG(FATAL) << "No swap file support on the Mac."; - UNREACHABLE(); -#endif -} - -// TODO: Full coalescing. -void SwapSpace::Free(void* ptr, size_t size) { - MutexLock lock(Thread::Current(), lock_); - size = RoundUp(size, 8U); - - size_t free_before = 0; - if (kCheckFreeMaps) { - free_before = CollectFree(free_by_start_, free_by_size_); - } - - SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size }; - auto it = free_by_start_.lower_bound(chunk); - if (it != free_by_start_.begin()) { - auto prev = it; - --prev; - CHECK_LE(prev->End(), chunk.Start()); - if (prev->End() == chunk.Start()) { - // Merge *prev with this chunk. - chunk.size += prev->size; - chunk.ptr -= prev->size; - auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev }); - DCHECK(erase_pos != free_by_size_.end()); - RemoveChunk(erase_pos); - // "prev" is invalidated but "it" remains valid. - } - } - if (it != free_by_start_.end()) { - CHECK_LE(chunk.End(), it->Start()); - if (chunk.End() == it->Start()) { - // Merge *it with this chunk. - chunk.size += it->size; - auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it }); - DCHECK(erase_pos != free_by_size_.end()); - RemoveChunk(erase_pos); - // "it" is invalidated but we don't need it anymore. - } - } - InsertChunk(chunk); - - if (kCheckFreeMaps) { - size_t free_after = CollectFree(free_by_start_, free_by_size_); - - if (free_after != free_before + size) { - DumpFreeMap(free_by_size_); - CHECK_EQ(free_after, free_before + size) << "Should be " << size << " difference from " << free_before; - } - } -} - -} // namespace art diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h deleted file mode 100644 index 827e9a6366..0000000000 --- a/compiler/utils/swap_space.h +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright (C) 2014 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_COMPILER_UTILS_SWAP_SPACE_H_ -#define ART_COMPILER_UTILS_SWAP_SPACE_H_ - -#include <stddef.h> -#include <stdint.h> -#include <cstdlib> -#include <list> -#include <set> -#include <vector> - -#include <android-base/logging.h> - -#include "base/logging.h" -#include "base/macros.h" -#include "base/mutex.h" - -namespace art { - -// An arena pool that creates arenas backed by an mmaped file. -class SwapSpace { - public: - SwapSpace(int fd, size_t initial_size); - ~SwapSpace(); - void* Alloc(size_t size) REQUIRES(!lock_); - void Free(void* ptr, size_t size) REQUIRES(!lock_); - - size_t GetSize() { - return size_; - } - - private: - // Chunk of space. - struct SpaceChunk { - // We need mutable members as we keep these objects in a std::set<> (providing only const - // access) but we modify these members while carefully preserving the std::set<> ordering. - mutable uint8_t* ptr; - mutable size_t size; - - uintptr_t Start() const { - return reinterpret_cast<uintptr_t>(ptr); - } - uintptr_t End() const { - return reinterpret_cast<uintptr_t>(ptr) + size; - } - }; - - class SortChunkByPtr { - public: - bool operator()(const SpaceChunk& a, const SpaceChunk& b) const { - return reinterpret_cast<uintptr_t>(a.ptr) < reinterpret_cast<uintptr_t>(b.ptr); - } - }; - - using FreeByStartSet = std::set<SpaceChunk, SortChunkByPtr>; - - // Map size to an iterator to free_by_start_'s entry. - struct FreeBySizeEntry { - FreeBySizeEntry(size_t sz, FreeByStartSet::const_iterator entry) - : size(sz), free_by_start_entry(entry) { } - - // We need mutable members as we keep these objects in a std::set<> (providing only const - // access) but we modify these members while carefully preserving the std::set<> ordering. - mutable size_t size; - mutable FreeByStartSet::const_iterator free_by_start_entry; - }; - struct FreeBySizeComparator { - bool operator()(const FreeBySizeEntry& lhs, const FreeBySizeEntry& rhs) const { - if (lhs.size != rhs.size) { - return lhs.size < rhs.size; - } else { - return lhs.free_by_start_entry->Start() < rhs.free_by_start_entry->Start(); - } - } - }; - using FreeBySizeSet = std::set<FreeBySizeEntry, FreeBySizeComparator>; - - SpaceChunk NewFileChunk(size_t min_size) REQUIRES(lock_); - - void RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) REQUIRES(lock_); - void InsertChunk(const SpaceChunk& chunk) REQUIRES(lock_); - - int fd_; - size_t size_; - - // NOTE: Boost.Bimap would be useful for the two following members. - - // Map start of a free chunk to its size. - FreeByStartSet free_by_start_ GUARDED_BY(lock_); - // Free chunks ordered by size. 
- FreeBySizeSet free_by_size_ GUARDED_BY(lock_); - - mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - DISALLOW_COPY_AND_ASSIGN(SwapSpace); -}; - -template <typename T> class SwapAllocator; - -template <> -class SwapAllocator<void> { - public: - using value_type = void; - using pointer = void*; - using const_pointer = const void*; - - template <typename U> - struct rebind { - using other = SwapAllocator<U>; - }; - - explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {} - - template <typename U> - SwapAllocator(const SwapAllocator<U>& other) - : swap_space_(other.swap_space_) {} - - SwapAllocator(const SwapAllocator& other) = default; - SwapAllocator& operator=(const SwapAllocator& other) = default; - ~SwapAllocator() = default; - - private: - SwapSpace* swap_space_; - - template <typename U> - friend class SwapAllocator; - - template <typename U> - friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs); -}; - -template <typename T> -class SwapAllocator { - public: - using value_type = T; - using pointer = T*; - using reference = T&; - using const_pointer = const T*; - using const_reference = const T&; - using size_type = size_t; - using difference_type = ptrdiff_t; - - template <typename U> - struct rebind { - using other = SwapAllocator<U>; - }; - - explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {} - - template <typename U> - SwapAllocator(const SwapAllocator<U>& other) - : swap_space_(other.swap_space_) {} - - SwapAllocator(const SwapAllocator& other) = default; - SwapAllocator& operator=(const SwapAllocator& other) = default; - ~SwapAllocator() = default; - - size_type max_size() const { - return static_cast<size_type>(-1) / sizeof(T); - } - - pointer address(reference x) const { return &x; } - const_pointer address(const_reference x) const { return &x; } - - pointer allocate(size_type n, SwapAllocator<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) { - DCHECK_LE(n, max_size()); - if (swap_space_ == nullptr) { - T* result = reinterpret_cast<T*>(malloc(n * sizeof(T))); - CHECK_IMPLIES(result == nullptr, n == 0u); // Abort if malloc() fails. - return result; - } else { - return reinterpret_cast<T*>(swap_space_->Alloc(n * sizeof(T))); - } - } - void deallocate(pointer p, size_type n) { - if (swap_space_ == nullptr) { - free(p); - } else { - swap_space_->Free(p, n * sizeof(T)); - } - } - - void construct(pointer p, const_reference val) { - new (static_cast<void*>(p)) value_type(val); - } - template <class U, class... Args> - void construct(U* p, Args&&... 
args) { - ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...); - } - void destroy(pointer p) { - p->~value_type(); - } - - inline bool operator==(SwapAllocator const& other) { - return swap_space_ == other.swap_space_; - } - inline bool operator!=(SwapAllocator const& other) { - return !operator==(other); - } - - private: - SwapSpace* swap_space_; - - template <typename U> - friend class SwapAllocator; - - template <typename U> - friend bool operator==(const SwapAllocator<U>& lhs, const SwapAllocator<U>& rhs); -}; - -template <typename T> -inline bool operator==(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) { - return lhs.swap_space_ == rhs.swap_space_; -} - -template <typename T> -inline bool operator!=(const SwapAllocator<T>& lhs, const SwapAllocator<T>& rhs) { - return !(lhs == rhs); -} - -template <typename T> -using SwapVector = std::vector<T, SwapAllocator<T>>; -template <typename T, typename Comparator> -using SwapSet = std::set<T, Comparator, SwapAllocator<T>>; - -} // namespace art - -#endif // ART_COMPILER_UTILS_SWAP_SPACE_H_ diff --git a/compiler/utils/swap_space_test.cc b/compiler/utils/swap_space_test.cc deleted file mode 100644 index 1650080e66..0000000000 --- a/compiler/utils/swap_space_test.cc +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (C) 2014 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "utils/swap_space.h" - -#include <fcntl.h> -#include <sys/stat.h> -#include <sys/types.h> - -#include <cstdio> - -#include "gtest/gtest.h" - -#include "base/os.h" -#include "base/unix_file/fd_file.h" -#include "common_runtime_test.h" - -namespace art { - -class SwapSpaceTest : public CommonRuntimeTest { -}; - -static void SwapTest(bool use_file) { - ScratchFile scratch; - int fd = scratch.GetFd(); - unlink(scratch.GetFilename().c_str()); - - SwapSpace pool(fd, 1 * MB); - SwapAllocator<void> alloc(use_file ? &pool : nullptr); - - SwapVector<int32_t> v(alloc); - v.reserve(1000000); - for (int32_t i = 0; i < 1000000; ++i) { - v.push_back(i); - EXPECT_EQ(i, v[i]); - } - - SwapVector<int32_t> v2(alloc); - v2.reserve(1000000); - for (int32_t i = 0; i < 1000000; ++i) { - v2.push_back(i); - EXPECT_EQ(i, v2[i]); - } - - SwapVector<int32_t> v3(alloc); - v3.reserve(500000); - for (int32_t i = 0; i < 1000000; ++i) { - v3.push_back(i); - EXPECT_EQ(i, v2[i]); - } - - // Verify contents. 
- for (int32_t i = 0; i < 1000000; ++i) { - EXPECT_EQ(i, v[i]); - EXPECT_EQ(i, v2[i]); - EXPECT_EQ(i, v3[i]); - } - - scratch.Close(); -} - -TEST_F(SwapSpaceTest, Memory) { - SwapTest(false); -} - -TEST_F(SwapSpaceTest, Swap) { - SwapTest(true); -} - -} // namespace art diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 861b27e6af..a6b90114b2 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -21,7 +21,7 @@ #include "entrypoints/quick/quick_entrypoints.h" #include "thread.h" -namespace art { +namespace art HIDDEN { namespace x86 { std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) { diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index c346ba9235..0f7854dc5c 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -32,7 +32,7 @@ #include "offsets.h" #include "utils/assembler.h" -namespace art { +namespace art HIDDEN { namespace x86 { class Immediate : public ValueObject { diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index 89c73c0ade..5da6f04402 100644 --- a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -17,11 +17,12 @@ #include "assembler_x86.h" #include "base/arena_allocator.h" +#include "base/macros.h" #include "base/malloc_arena_pool.h" #include "base/stl_util.h" #include "utils/assembler_test.h" -namespace art { +namespace art HIDDEN { TEST(AssemblerX86, CreateBuffer) { MallocArenaPool pool; diff --git a/compiler/utils/x86/constants_x86.h b/compiler/utils/x86/constants_x86.h index 477b915bb9..0c0a7d4133 100644 --- a/compiler/utils/x86/constants_x86.h +++ b/compiler/utils/x86/constants_x86.h @@ -25,7 +25,7 @@ #include "base/globals.h" #include "base/macros.h" -namespace art { +namespace art HIDDEN { namespace x86 { enum ByteRegister { diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc index 685f5f1b48..154e50b4e4 100644 --- a/compiler/utils/x86/jni_macro_assembler_x86.cc +++ b/compiler/utils/x86/jni_macro_assembler_x86.cc @@ -18,11 +18,12 @@ #include "base/casts.h" #include "entrypoints/quick/quick_entrypoints.h" +#include "indirect_reference_table.h" #include "lock_word.h" #include "thread.h" #include "utils/assembler.h" -namespace art { +namespace art HIDDEN { namespace x86 { static Register GetScratchRegister() { @@ -165,36 +166,24 @@ void X86JNIMacroAssembler::Store(ManagedRegister mbase, } } -void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { - X86ManagedRegister src = msrc.AsX86(); - CHECK(src.IsCpuRegister()); - __ movl(Address(ESP, dest), src.AsCpuRegister()); -} - void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { X86ManagedRegister src = msrc.AsX86(); CHECK(src.IsCpuRegister()); __ movl(Address(ESP, dest), src.AsCpuRegister()); } -void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) { - __ movl(Address(ESP, dest), Immediate(imm)); -} - -void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) { - Register scratch = GetScratchRegister(); - __ leal(scratch, Address(ESP, fr_offs)); - __ fs()->movl(Address::Absolute(thr_offs), scratch); -} - -void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) { - __ fs()->movl(Address::Absolute(thr_offs), ESP); -} - -void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/, 
- ManagedRegister /*src*/, - FrameOffset /*in_off*/) { - UNIMPLEMENTED(FATAL); // this case only currently exists for ARM +void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) { + if (tag_sp) { + // There is no free register; store the register's contents on the stack and restore it later. + Register scratch = ECX; + __ movl(Address(ESP, -32), scratch); + __ movl(scratch, ESP); + __ orl(scratch, Immediate(0x2)); + __ fs()->movl(Address::Absolute(thr_offs), scratch); + __ movl(scratch, Address(ESP, -32)); + } else { + __ fs()->movl(Address::Absolute(thr_offs), ESP); + } } void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { @@ -233,61 +222,6 @@ void X86JNIMacroAssembler::Load(ManagedRegister mdest, } } -void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (dest.IsCpuRegister()) { - if (size == 1u) { - __ fs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src)); - } else { - CHECK_EQ(4u, size); - __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src)); - } - } else if (dest.IsRegisterPair()) { - CHECK_EQ(8u, size); - __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src)); - __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4))); - } else if (dest.IsX87Register()) { - if (size == 4) { - __ fs()->flds(Address::Absolute(src)); - } else { - __ fs()->fldl(Address::Absolute(src)); - } - } else { - CHECK(dest.IsXmmRegister()); - if (size == 4) { - __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src)); - } else { - __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src)); - } - } -} - -void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister()); - __ movl(dest.AsCpuRegister(), Address(ESP, src)); -} - -void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, - bool unpoison_reference) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); - __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); - if (unpoison_reference) { - __ MaybeUnpoisonHeapReference(dest.AsCpuRegister()); - } -} - -void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, - ManagedRegister base, - Offset offs) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); - __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); -} - void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) { X86ManagedRegister dest = mdest.AsX86(); CHECK(dest.IsCpuRegister()); @@ -402,37 +336,9 @@ void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, siz } } -void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) { - Register scratch = GetScratchRegister(); - __ movl(scratch, Address(ESP, src)); - __ movl(Address(ESP, dest), scratch); -} - -void X86JNIMacroAssembler::CopyRef(FrameOffset dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) { - Register scratch = GetScratchRegister(); - __ movl(scratch, Address(base.AsX86().AsCpuRegister(), offs)); - if (unpoison_reference) { - __ MaybeUnpoisonHeapReference(scratch); - } - __ movl(Address(ESP, dest), scratch); -} - -void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset
fr_offs, ThreadOffset32 thr_offs) { - Register scratch = GetScratchRegister(); - __ fs()->movl(scratch, Address::Absolute(thr_offs)); - __ movl(Address(ESP, fr_offs), scratch); -} - -void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - Load(scratch, fr_offs, 4); - __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); +void X86JNIMacroAssembler::Move(ManagedRegister mdest, size_t value) { + X86ManagedRegister dest = mdest.AsX86(); + __ movl(dest.AsCpuRegister(), Immediate(value)); } void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) { @@ -446,67 +352,6 @@ void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) } } -void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/, - ManagedRegister /*src_base*/, - Offset /*src_offset*/, - ManagedRegister /*scratch*/, - size_t /*size*/) { - UNIMPLEMENTED(FATAL); -} - -void X86JNIMacroAssembler::Copy(ManagedRegister dest_base, - Offset dest_offset, - FrameOffset src, - ManagedRegister scratch, - size_t size) { - CHECK(scratch.IsNoRegister()); - CHECK_EQ(size, 4u); - __ pushl(Address(ESP, src)); - __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset)); -} - -void X86JNIMacroAssembler::Copy(FrameOffset dest, - FrameOffset src_base, - Offset src_offset, - ManagedRegister mscratch, - size_t size) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - CHECK_EQ(size, 4u); - __ movl(scratch, Address(ESP, src_base)); - __ movl(scratch, Address(scratch, src_offset)); - __ movl(Address(ESP, dest), scratch); -} - -void X86JNIMacroAssembler::Copy(ManagedRegister dest, - Offset dest_offset, - ManagedRegister src, - Offset src_offset, - ManagedRegister scratch, - size_t size) { - CHECK_EQ(size, 4u); - CHECK(scratch.IsNoRegister()); - __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset)); - __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset)); -} - -void X86JNIMacroAssembler::Copy(FrameOffset dest, - Offset dest_offset, - FrameOffset src, - Offset src_offset, - ManagedRegister mscratch, - size_t size) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - CHECK_EQ(size, 4u); - CHECK_EQ(dest.Int32Value(), src.Int32Value()); - __ movl(scratch, Address(ESP, src)); - __ pushl(Address(scratch, src_offset)); - __ popl(Address(scratch, dest_offset)); -} - -void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) { - __ mfence(); -} - void X86JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg, FrameOffset spilled_reference_offset, ManagedRegister min_reg, @@ -547,6 +392,20 @@ void X86JNIMacroAssembler::CreateJObject(FrameOffset out_off, __ movl(Address(ESP, out_off), scratch); } +void X86JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister reg, + JNIMacroLabel* slow_path, + JNIMacroLabel* resume) { + constexpr uint32_t kGlobalOrWeakGlobalMask = + dchecked_integral_cast<uint32_t>(IndirectReferenceTable::GetGlobalOrWeakGlobalMask()); + constexpr uint32_t kIndirectRefKindMask = + dchecked_integral_cast<uint32_t>(IndirectReferenceTable::GetIndirectRefKindMask()); + __ testl(reg.AsX86().AsCpuRegister(), Immediate(kGlobalOrWeakGlobalMask)); + __ j(kNotZero, X86JNIMacroLabel::Cast(slow_path)->AsX86()); + __ andl(reg.AsX86().AsCpuRegister(), Immediate(~kIndirectRefKindMask)); + __ j(kZero, X86JNIMacroLabel::Cast(resume)->AsX86()); // Skip load for null. 
+ __ movl(reg.AsX86().AsCpuRegister(), Address(reg.AsX86().AsCpuRegister(), /*disp=*/ 0)); +} + void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { // TODO: not validating references } @@ -724,6 +583,12 @@ void X86JNIMacroAssembler::TestMarkBit(ManagedRegister mref, __ j(UnaryConditionToX86Condition(cond), X86JNIMacroLabel::Cast(label)->AsX86()); } + +void X86JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) { + __ cmpb(Address::Absolute(address), Immediate(0)); + __ j(kNotZero, X86JNIMacroLabel::Cast(label)->AsX86()); +} + void X86JNIMacroAssembler::Bind(JNIMacroLabel* label) { CHECK(label != nullptr); __ Bind(X86JNIMacroLabel::Cast(label)->AsX86()); diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h index 29fccfd386..6b177f533b 100644 --- a/compiler/utils/x86/jni_macro_assembler_x86.h +++ b/compiler/utils/x86/jni_macro_assembler_x86.h @@ -27,7 +27,7 @@ #include "offsets.h" #include "utils/jni_macro_assembler.h" -namespace art { +namespace art HIDDEN { namespace x86 { class X86JNIMacroLabel; @@ -59,30 +59,14 @@ class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, Poi // Store routines void Store(FrameOffset offs, ManagedRegister src, size_t size) override; void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override; - void StoreRef(FrameOffset dest, ManagedRegister src) override; void StoreRawPtr(FrameOffset dest, ManagedRegister src) override; - void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override; - - void StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) override; - - void StoreStackPointerToThread(ThreadOffset32 thr_offs) override; - - void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override; + void StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) override; // Load routines void Load(ManagedRegister dest, FrameOffset src, size_t size) override; void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override; - void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) override; - - void LoadRef(ManagedRegister dest, FrameOffset src) override; - - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, - bool unpoison_reference) override; - - void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override; - void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) override; // Copying routines @@ -92,35 +76,7 @@ class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, Poi void Move(ManagedRegister dest, ManagedRegister src, size_t size) override; - void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset32 thr_offs) override; - - void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) - override; - - void CopyRef(FrameOffset dest, FrameOffset src) override; - void CopyRef(FrameOffset dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) override; - - void Copy(FrameOffset dest, FrameOffset src, size_t size) override; - - void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch, - size_t size) override; - - void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch, - size_t size) override; - - void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, 
ManagedRegister scratch, - size_t size) override; - - void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) override; - - void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size) override; - - void MemoryBarrier(ManagedRegister) override; + void Move(ManagedRegister dest, size_t value) override; // Sign extension void SignExtend(ManagedRegister mreg, size_t size) override; @@ -132,20 +88,10 @@ class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, Poi void GetCurrentThread(ManagedRegister dest) override; void GetCurrentThread(FrameOffset dest_offset) override; - // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value), - // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly - // stale reference that can be used to avoid loading the spilled value to - // see if the value is null. - void CreateJObject(ManagedRegister out_reg, - FrameOffset spilled_reference_offset, - ManagedRegister in_reg, - bool null_allowed) override; - - void CreateJObject(FrameOffset out_off, - FrameOffset spilled_reference_offset, - bool null_allowed) override; + // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path. + void DecodeJNITransitionOrLocalJObject(ManagedRegister reg, + JNIMacroLabel* slow_path, + JNIMacroLabel* resume) override; // Heap::VerifyObject on src. In some cases (such as a reference to this) we // know that src may not be null. @@ -189,10 +135,29 @@ class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86Assembler, Poi void TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) override; // Emit a conditional jump to the label by applying a unary condition test to object's mark bit. void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) override; + // Emit a conditional jump to the label if the byte loaded from the specified address is not zero. + void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override; // Code at this offset will serve as the target for the Jump call. void Bind(JNIMacroLabel* label) override; private: + void Copy(FrameOffset dest, FrameOffset src, size_t size); + + // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value), + // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly + // stale reference that can be used to avoid loading the spilled value to + // see if the value is null. + void CreateJObject(ManagedRegister out_reg, + FrameOffset spilled_reference_offset, + ManagedRegister in_reg, + bool null_allowed); + + // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value), + // or to be null if the value is null and `null_allowed`.
+ void CreateJObject(FrameOffset out_off, + FrameOffset spilled_reference_offset, + bool null_allowed); + DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler); }; diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc index cc7cedf93e..bef948056f 100644 --- a/compiler/utils/x86/managed_register_x86.cc +++ b/compiler/utils/x86/managed_register_x86.cc @@ -18,7 +18,7 @@ #include "base/globals.h" -namespace art { +namespace art HIDDEN { namespace x86 { // Define register pairs. diff --git a/compiler/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h index 27555bfd32..def4f68b27 100644 --- a/compiler/utils/x86/managed_register_x86.h +++ b/compiler/utils/x86/managed_register_x86.h @@ -17,10 +17,11 @@ #ifndef ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_ #define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_ +#include "base/macros.h" #include "constants_x86.h" #include "utils/managed_register.h" -namespace art { +namespace art HIDDEN { namespace x86 { // Values for register pairs. diff --git a/compiler/utils/x86/managed_register_x86_test.cc b/compiler/utils/x86/managed_register_x86_test.cc index 28af5313c7..9f5e1970ac 100644 --- a/compiler/utils/x86/managed_register_x86_test.cc +++ b/compiler/utils/x86/managed_register_x86_test.cc @@ -17,9 +17,10 @@ #include "managed_register_x86.h" #include "base/globals.h" +#include "base/macros.h" #include "gtest/gtest.h" -namespace art { +namespace art HIDDEN { namespace x86 { TEST(X86ManagedRegister, NoRegister) { diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index 21a44810ba..3fdf05bed9 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -21,7 +21,7 @@ #include "entrypoints/quick/quick_entrypoints.h" #include "thread.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { std::ostream& operator<<(std::ostream& os, const CpuRegister& reg) { diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index ea944c200e..235ea03e2b 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -30,9 +30,8 @@ #include "managed_register_x86_64.h" #include "offsets.h" #include "utils/assembler.h" -#include "utils/jni_macro_assembler.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { // Encodes an immediate value for operands. 
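Note on the `StoreStackPointerToThread()` change above (the x86_64 variant appears further below): the old untagged store is replaced by a `tag_sp` variant that ORs `0x2` into the stack pointer before publishing it to the thread. A minimal C++ sketch of such a tagging scheme, assuming bit 1 is the tag as the emitted `orl`/`orq` with `Immediate(0x2)` suggests; the constant and helper names here are hypothetical, only the `0x2` mask is taken from the hunks above.

#include <cstdint>

// Hypothetical names illustrating the SP-tagging scheme; only the 0x2 mask
// comes from the emitted code above.
constexpr uintptr_t kTaggedSpMask = 0x2u;

// Tag the stack pointer before publishing it to the thread; the tag lets the
// runtime distinguish values stored with tag_sp=true from plain stack pointers.
inline uintptr_t TagSp(uintptr_t sp) { return sp | kTaggedSpMask; }

// Strip the tag before using the published value as a real pointer.
inline uintptr_t UntagSp(uintptr_t tagged_sp) { return tagged_sp & ~kTaggedSpMask; }

// Query whether a published stack pointer carries the tag.
inline bool IsSpTagged(uintptr_t tagged_sp) { return (tagged_sp & kTaggedSpMask) != 0u; }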
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index f7e890d112..a7c206afaa 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -21,13 +21,14 @@ #include <random> #include "base/bit_utils.h" +#include "base/macros.h" #include "base/malloc_arena_pool.h" #include "base/stl_util.h" #include "jni_macro_assembler_x86_64.h" #include "utils/assembler_test.h" #include "utils/jni_macro_assembler_test.h" -namespace art { +namespace art HIDDEN { TEST(AssemblerX86_64, CreateBuffer) { MallocArenaPool pool; diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h index 301c8fc09b..52ac987766 100644 --- a/compiler/utils/x86_64/constants_x86_64.h +++ b/compiler/utils/x86_64/constants_x86_64.h @@ -25,7 +25,7 @@ #include "base/globals.h" #include "base/macros.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { class CpuRegister { diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc index d5d1bbadc9..388845730e 100644 --- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc +++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc @@ -19,10 +19,11 @@ #include "base/casts.h" #include "base/memory_region.h" #include "entrypoints/quick/quick_entrypoints.h" +#include "indirect_reference_table.h" #include "lock_word.h" #include "thread.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { static dwarf::Reg DWARFReg(Register reg) { @@ -194,37 +195,21 @@ void X86_64JNIMacroAssembler::Store(ManagedRegister mbase, } } -void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { - X86_64ManagedRegister src = msrc.AsX86_64(); - CHECK(src.IsCpuRegister()); - __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister()); -} - void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { X86_64ManagedRegister src = msrc.AsX86_64(); CHECK(src.IsCpuRegister()); __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister()); } -void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm) { - __ movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq? 
-} - -void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs, - FrameOffset fr_offs) { - CpuRegister scratch = GetScratchRegister(); - __ leaq(scratch, Address(CpuRegister(RSP), fr_offs)); - __ gs()->movq(Address::Absolute(thr_offs, true), scratch); -} - -void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) { - __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP)); -} - -void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/, - ManagedRegister /*src*/, - FrameOffset /*in_off*/) { - UNIMPLEMENTED(FATAL); // this case only currently exists for ARM +void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) { + if (tag_sp) { + CpuRegister reg = GetScratchRegister(); + __ movq(reg, CpuRegister(RSP)); + __ orq(reg, Immediate(0x2)); + __ gs()->movq(Address::Absolute(thr_offs, true), reg); + } else { + __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP)); + } } void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { @@ -263,67 +248,6 @@ void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, } } -void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, - ThreadOffset64 src, size_t size) { - X86_64ManagedRegister dest = mdest.AsX86_64(); - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (dest.IsCpuRegister()) { - if (size == 1u) { - __ gs()->movzxb(dest.AsCpuRegister(), Address::Absolute(src, true)); - } else { - CHECK_EQ(4u, size); - __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true)); - } - } else if (dest.IsRegisterPair()) { - CHECK_EQ(8u, size); - __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true)); - } else if (dest.IsX87Register()) { - if (size == 4) { - __ gs()->flds(Address::Absolute(src, true)); - } else { - __ gs()->fldl(Address::Absolute(src, true)); - } - } else { - CHECK(dest.IsXmmRegister()); - if (size == 4) { - __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true)); - } else { - __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true)); - } - } -} - -void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { - X86_64ManagedRegister dest = mdest.AsX86_64(); - CHECK(dest.IsCpuRegister()); - __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src)); -} - -void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, - ManagedRegister mbase, - MemberOffset offs, - bool unpoison_reference) { - X86_64ManagedRegister base = mbase.AsX86_64(); - X86_64ManagedRegister dest = mdest.AsX86_64(); - CHECK(base.IsCpuRegister()); - CHECK(dest.IsCpuRegister()); - __ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs)); - if (unpoison_reference) { - __ MaybeUnpoisonHeapReference(dest.AsCpuRegister()); - } -} - -void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, - ManagedRegister mbase, - Offset offs) { - X86_64ManagedRegister base = mbase.AsX86_64(); - X86_64ManagedRegister dest = mdest.AsX86_64(); - CHECK(base.IsCpuRegister()); - CHECK(dest.IsCpuRegister()); - __ movq(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs)); -} - void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) { X86_64ManagedRegister dest = mdest.AsX86_64(); CHECK(dest.IsCpuRegister()); @@ -477,37 +401,10 @@ void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, } } -void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) { - CpuRegister scratch = 
GetScratchRegister(); - __ movl(scratch, Address(CpuRegister(RSP), src)); - __ movl(Address(CpuRegister(RSP), dest), scratch); -} - -void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) { - CpuRegister scratch = GetScratchRegister(); - __ movl(scratch, Address(base.AsX86_64().AsCpuRegister(), offs)); - if (unpoison_reference) { - __ MaybeUnpoisonHeapReference(scratch); - } - __ movl(Address(CpuRegister(RSP), dest), scratch); -} - -void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 thr_offs) { - CpuRegister scratch = GetScratchRegister(); - __ gs()->movq(scratch, Address::Absolute(thr_offs, true)); - __ movq(Address(CpuRegister(RSP), fr_offs), scratch); -} -void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - X86_64ManagedRegister scratch = mscratch.AsX86_64(); - CHECK(scratch.IsCpuRegister()); - Load(scratch, fr_offs, 8); - __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister()); +void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, size_t value) { + X86_64ManagedRegister dest = mdest.AsX86_64(); + __ movq(dest.AsCpuRegister(), Immediate(value)); } void X86_64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) { @@ -522,67 +419,6 @@ void X86_64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t siz } } -void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/, - ManagedRegister /*src_base*/, - Offset /*src_offset*/, - ManagedRegister /*scratch*/, - size_t /*size*/) { - UNIMPLEMENTED(FATAL); -} - -void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base, - Offset dest_offset, - FrameOffset src, - ManagedRegister scratch, - size_t size) { - CHECK(scratch.IsNoRegister()); - CHECK_EQ(size, 4u); - __ pushq(Address(CpuRegister(RSP), src)); - __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset)); -} - -void X86_64JNIMacroAssembler::Copy(FrameOffset dest, - FrameOffset src_base, - Offset src_offset, - ManagedRegister mscratch, - size_t size) { - CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister(); - CHECK_EQ(size, 4u); - __ movq(scratch, Address(CpuRegister(RSP), src_base)); - __ movq(scratch, Address(scratch, src_offset)); - __ movq(Address(CpuRegister(RSP), dest), scratch); -} - -void X86_64JNIMacroAssembler::Copy(ManagedRegister dest, - Offset dest_offset, - ManagedRegister src, - Offset src_offset, - ManagedRegister scratch, - size_t size) { - CHECK_EQ(size, 4u); - CHECK(scratch.IsNoRegister()); - __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset)); - __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset)); -} - -void X86_64JNIMacroAssembler::Copy(FrameOffset dest, - Offset dest_offset, - FrameOffset src, - Offset src_offset, - ManagedRegister mscratch, - size_t size) { - CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister(); - CHECK_EQ(size, 4u); - CHECK_EQ(dest.Int32Value(), src.Int32Value()); - __ movq(scratch, Address(CpuRegister(RSP), src)); - __ pushq(Address(scratch, src_offset)); - __ popq(Address(scratch, dest_offset)); -} - -void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) { - __ mfence(); -} - void X86_64JNIMacroAssembler::CreateJObject(ManagedRegister mout_reg, FrameOffset spilled_reference_offset, ManagedRegister min_reg, @@ -629,6 +465,19 @@ void X86_64JNIMacroAssembler::CreateJObject(FrameOffset out_off, __ movq(Address(CpuRegister(RSP), out_off), scratch); } +void 
X86_64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister reg, + JNIMacroLabel* slow_path, + JNIMacroLabel* resume) { + constexpr uint64_t kGlobalOrWeakGlobalMask = IndirectReferenceTable::GetGlobalOrWeakGlobalMask(); + constexpr uint64_t kIndirectRefKindMask = IndirectReferenceTable::GetIndirectRefKindMask(); + // TODO: Add `testq()` with `imm32` to assembler to avoid using 64-bit pointer as 32-bit value. + __ testl(reg.AsX86_64().AsCpuRegister(), Immediate(kGlobalOrWeakGlobalMask)); + __ j(kNotZero, X86_64JNIMacroLabel::Cast(slow_path)->AsX86_64()); + __ andq(reg.AsX86_64().AsCpuRegister(), Immediate(~kIndirectRefKindMask)); + __ j(kZero, X86_64JNIMacroLabel::Cast(resume)->AsX86_64()); // Skip load for null. + __ movl(reg.AsX86_64().AsCpuRegister(), Address(reg.AsX86_64().AsCpuRegister(), /*disp=*/ 0)); +} + void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { // TODO: not validating references } @@ -803,6 +652,13 @@ void X86_64JNIMacroAssembler::TestMarkBit(ManagedRegister mref, __ j(UnaryConditionToX86_64Condition(cond), X86_64JNIMacroLabel::Cast(label)->AsX86_64()); } +void X86_64JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) { + CpuRegister scratch = GetScratchRegister(); + __ movq(scratch, Immediate(address)); + __ cmpb(Address(scratch, 0), Immediate(0)); + __ j(kNotZero, X86_64JNIMacroLabel::Cast(label)->AsX86_64()); +} + void X86_64JNIMacroAssembler::Bind(JNIMacroLabel* label) { CHECK(label != nullptr); __ Bind(X86_64JNIMacroLabel::Cast(label)->AsX86_64()); diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h index e080f0b3df..da0aef9869 100644 --- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h +++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h @@ -28,7 +28,7 @@ #include "utils/assembler.h" #include "utils/jni_macro_assembler.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assembler, @@ -60,32 +60,14 @@ class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assemble // Store routines void Store(FrameOffset offs, ManagedRegister src, size_t size) override; void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override; - void StoreRef(FrameOffset dest, ManagedRegister src) override; void StoreRawPtr(FrameOffset dest, ManagedRegister src) override; - void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override; - - void StoreStackOffsetToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs) override; - - void StoreStackPointerToThread(ThreadOffset64 thr_offs) override; - - void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override; + void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override; // Load routines void Load(ManagedRegister dest, FrameOffset src, size_t size) override; void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override; - void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override; - - void LoadRef(ManagedRegister dest, FrameOffset src) override; - - void LoadRef(ManagedRegister dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) override; - - void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override; - void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override; // Copying routines 
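The `DecodeJNITransitionOrLocalJObject()` hunks above (x86 and x86_64) inline the fast path of `jobject` decoding: a reference with the global or weak-global bit set branches to a slow path; otherwise the indirect-reference kind bits are cleared and, for non-null values, the `StackReference<Object>` is loaded. A rough C++ rendering of that fast path; the mask values and the `DecodeViaSlowPath()` stub are placeholders, only the control flow mirrors the emitted code.

#include <cstdint>

// Placeholder mask values; the real ones come from IndirectReferenceTable's
// GetGlobalOrWeakGlobalMask() / GetIndirectRefKindMask() accessors.
constexpr uintptr_t kGlobalOrWeakGlobalMask = 0x1u;
constexpr uintptr_t kIndirectRefKindMask = 0x3u;

// Stub standing in for the runtime slow path the emitted code jumps to.
inline uintptr_t DecodeViaSlowPath(uintptr_t ref) { return ref; }

// Mirrors the emitted fast path: test, branch, mask, null-check, load.
inline uintptr_t DecodeJniTransitionOrLocalRef(uintptr_t ref) {
  if ((ref & kGlobalOrWeakGlobalMask) != 0u) {
    return DecodeViaSlowPath(ref);  // (Weak) global: take the slow path.
  }
  ref &= ~kIndirectRefKindMask;     // Clear the indirect-reference kind bits.
  if (ref == 0u) {
    return 0u;                      // Skip the load for null.
  }
  // The value now points at a StackReference<Object>; load the 32-bit reference.
  return *reinterpret_cast<const uint32_t*>(ref);
}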
@@ -95,52 +77,7 @@ class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assemble void Move(ManagedRegister dest, ManagedRegister src, size_t size) override; - void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 thr_offs) override; - - void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) - override; - - void CopyRef(FrameOffset dest, FrameOffset src) override; - void CopyRef(FrameOffset dest, - ManagedRegister base, - MemberOffset offs, - bool unpoison_reference) override; - - void Copy(FrameOffset dest, FrameOffset src, size_t size) override; - - void Copy(FrameOffset dest, - ManagedRegister src_base, - Offset src_offset, - ManagedRegister scratch, - size_t size) override; - - void Copy(ManagedRegister dest_base, - Offset dest_offset, - FrameOffset src, - ManagedRegister scratch, - size_t size) override; - - void Copy(FrameOffset dest, - FrameOffset src_base, - Offset src_offset, - ManagedRegister scratch, - size_t size) override; - - void Copy(ManagedRegister dest, - Offset dest_offset, - ManagedRegister src, - Offset src_offset, - ManagedRegister scratch, - size_t size) override; - - void Copy(FrameOffset dest, - Offset dest_offset, - FrameOffset src, - Offset src_offset, - ManagedRegister scratch, - size_t size) override; - - void MemoryBarrier(ManagedRegister) override; + void Move(ManagedRegister dest, size_t value) override; // Sign extension void SignExtend(ManagedRegister mreg, size_t size) override; @@ -152,20 +89,10 @@ class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assemble void GetCurrentThread(ManagedRegister dest) override; void GetCurrentThread(FrameOffset dest_offset) override; - // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value), - // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly - // stale reference that can be used to avoid loading the spilled value to - // see if the value is null. - void CreateJObject(ManagedRegister out_reg, - FrameOffset spilled_reference_offset, - ManagedRegister in_reg, - bool null_allowed) override; - - // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value), - // or to be null if the value is null and `null_allowed`. - void CreateJObject(FrameOffset out_off, - FrameOffset spilled_reference_offset, - bool null_allowed) override; + // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path. + void DecodeJNITransitionOrLocalJObject(ManagedRegister reg, + JNIMacroLabel* slow_path, + JNIMacroLabel* resume) override; // Heap::VerifyObject on src. In some cases (such as a reference to this) we // know that src may not be null. @@ -209,10 +136,29 @@ class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd<X86_64Assemble void TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) override; // Emit a conditional jump to the label by applying a unary condition test to object's mark bit. void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) override; + // Emit a conditional jump to the label if the byte loaded from the specified address is not zero. + void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override; // Code at this offset will serve as the target for the Jump call.
void Bind(JNIMacroLabel* label) override; private: + void Copy(FrameOffset dest, FrameOffset src, size_t size); + + // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value), + // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly + // stale reference that can be used to avoid loading the spilled value to + // see if the value is null. + void CreateJObject(ManagedRegister out_reg, + FrameOffset spilled_reference_offset, + ManagedRegister in_reg, + bool null_allowed); + + // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value), + // or to be null if the value is null and `null_allowed`. + void CreateJObject(FrameOffset out_off, + FrameOffset spilled_reference_offset, + bool null_allowed); + DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler); }; diff --git a/compiler/utils/x86_64/managed_register_x86_64.cc b/compiler/utils/x86_64/managed_register_x86_64.cc index c0eec9d86c..75ff8aaf1d 100644 --- a/compiler/utils/x86_64/managed_register_x86_64.cc +++ b/compiler/utils/x86_64/managed_register_x86_64.cc @@ -18,7 +18,7 @@ #include "base/globals.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { // Define register pairs. diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h index 62c0e373a7..7a1be0bd8f 100644 --- a/compiler/utils/x86_64/managed_register_x86_64.h +++ b/compiler/utils/x86_64/managed_register_x86_64.h @@ -17,10 +17,11 @@ #ifndef ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_ #define ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_ +#include "base/macros.h" #include "constants_x86_64.h" #include "utils/managed_register.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { // Values for register pairs. diff --git a/compiler/utils/x86_64/managed_register_x86_64_test.cc b/compiler/utils/x86_64/managed_register_x86_64_test.cc index 46a405ffaf..048268bf9b 100644 --- a/compiler/utils/x86_64/managed_register_x86_64_test.cc +++ b/compiler/utils/x86_64/managed_register_x86_64_test.cc @@ -16,9 +16,10 @@ #include "managed_register_x86_64.h" #include "base/globals.h" +#include "base/macros.h" #include "gtest/gtest.h" -namespace art { +namespace art HIDDEN { namespace x86_64 { TEST(X86_64ManagedRegister, NoRegister) { |