| author | 2016-07-26 09:02:02 -0700 |
|---|---|
| committer | 2016-08-01 18:54:48 -0700 |
| commit | 542451cc546779f5c67840e105c51205a1b0a8fd (patch) |
| tree | 11e09bb5abaee12dddffefbe7e425291076dfa7a /compiler/utils |
| parent | 85c4a4b8c9eabfe16e4e49f9b4aa78c1bf4be023 (diff) |
ART: Convert pointer size to enum
Move away from size_t to dedicated enum (class).
Bug: 30373134
Bug: 30419309
Test: m test-art-host
Change-Id: Id453c330f1065012e7d4f9fc24ac477cc9bb9269
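For context, the `PointerSize` enum class and the `ThreadOffset32`/`ThreadOffset64` aliases that this change switches to are defined outside `compiler/utils` (in `base/enums.h` and `thread.h`), so they do not appear in the diffstat below. A minimal, self-contained sketch of the idea — the exact upstream definitions may differ:

```cpp
#include <cstddef>
#include <cstdint>

// A dedicated enum class instead of a raw size_t: a pointer size can no
// longer be confused with an arbitrary byte count without an explicit cast.
enum class PointerSize : size_t {
  k32 = 4,
  k64 = 8
};

// Thread-local offsets are parameterized on the pointer size, so a 32-bit
// offset cannot silently be passed where a 64-bit one is expected.
// (Simplified stand-in for the real ThreadOffset template.)
template <PointerSize kPointerSize>
class ThreadOffset {
 public:
  explicit constexpr ThreadOffset(size_t value) : value_(value) {}
  constexpr int32_t Int32Value() const { return static_cast<int32_t>(value_); }

 private:
  size_t value_;
};

using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
using ThreadOffset64 = ThreadOffset<PointerSize::k64>;
```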
Diffstat (limited to 'compiler/utils')
| -rw-r--r-- | compiler/utils/arm/assembler_arm.cc | 43 |
| -rw-r--r-- | compiler/utils/arm/assembler_arm.h | 16 |
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.cc | 44 |
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.h | 16 |
| -rw-r--r-- | compiler/utils/assembler.cc | 44 |
| -rw-r--r-- | compiler/utils/assembler.h | 72 |
| -rw-r--r-- | compiler/utils/mips/assembler_mips.cc | 28 |
| -rw-r--r-- | compiler/utils/mips/assembler_mips.h | 18 |
| -rw-r--r-- | compiler/utils/mips64/assembler_mips64.cc | 26 |
| -rw-r--r-- | compiler/utils/mips64/assembler_mips64.h | 21 |
| -rw-r--r-- | compiler/utils/x86/assembler_x86.cc | 39 |
| -rw-r--r-- | compiler/utils/x86/assembler_x86.h | 18 |
| -rw-r--r-- | compiler/utils/x86_64/assembler_x86_64.cc | 34 |
| -rw-r--r-- | compiler/utils/x86_64/assembler_x86_64.h | 18 |
14 files changed, 232 insertions, 205 deletions
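One consequence of the enum class is visible throughout the diff: arithmetic on the pointer size now requires an explicit `static_cast<size_t>(...)`, which is exactly what the `kFramePointerSize` and `DCHECK_GE` changes below add. A tiny illustration, reusing the sketched `PointerSize` from above (the `main` is purely hypothetical):

```cpp
#include <cstddef>

enum class PointerSize : size_t { k32 = 4, k64 = 8 };  // sketched definition

constexpr PointerSize kArm64PointerSize = PointerSize::k64;

int main() {
  // size_t offset = 16 + kArm64PointerSize;  // would not compile: an enum
  //                                          // class has no implicit conversion
  size_t offset = 16 + static_cast<size_t>(kArm64PointerSize);  // offset == 24
  return static_cast<int>(offset);
}
```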
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index a7f4547514..1796b3940c 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -384,7 +384,7 @@ static dwarf::Reg DWARFReg(SRegister reg) {
   return dwarf::Reg::ArmFp(static_cast<int>(reg));
 }

-constexpr size_t kFramePointerSize = kArmPointerSize;
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);

 void ArmAssembler::BuildFrame(size_t frame_size,
                               ManagedRegister method_reg,
@@ -568,8 +568,9 @@ void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
   StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
 }

-void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
-                                            ManagedRegister mscratch) {
+void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+                                            uint32_t imm,
+                                            ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
   LoadImmediate(scratch.AsCoreRegister(), imm);
@@ -600,19 +601,19 @@ void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
   return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
 }

-void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
+void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
   return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
 }

-void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
+void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) {
   ArmManagedRegister dst = m_dst.AsArm();
   CHECK(dst.IsCoreRegister()) << dst;
   LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
 }

 void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
-                                          ThreadOffset<4> thr_offs,
-                                          ManagedRegister mscratch) {
+                                          ThreadOffset32 thr_offs,
+                                          ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
   LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -621,9 +622,9 @@ void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
                 SP, fr_offs.Int32Value());
 }

-void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
-                                        FrameOffset fr_offs,
-                                        ManagedRegister mscratch) {
+void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
   LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -632,9 +633,9 @@ void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
                 TR, thr_offs.Int32Value());
 }

-void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
-                                              FrameOffset fr_offs,
-                                              ManagedRegister mscratch) {
+void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+                                              FrameOffset fr_offs,
+                                              ManagedRegister mscratch) {
   ArmManagedRegister scratch = mscratch.AsArm();
   CHECK(scratch.IsCoreRegister()) << scratch;
   AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
@@ -642,7 +643,7 @@ void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                 TR, thr_offs.Int32Value());
 }

-void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
   StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
 }

@@ -831,7 +832,8 @@ void ArmAssembler::Call(FrameOffset base, Offset offset,
   // TODO: place reference map on call
 }

-void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
+void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+                                    ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

@@ -848,8 +850,10 @@ void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust)
   ArmManagedRegister scratch = mscratch.AsArm();
   ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
   buffer_.EnqueueSlowPath(slow);
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 TR, Thread::ExceptionOffset<4>().Int32Value());
+  LoadFromOffset(kLoadWord,
+                 scratch.AsCoreRegister(),
+                 TR,
+                 Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
   cmp(scratch.AsCoreRegister(), ShifterOperand(0));
   b(slow->Entry(), NE);
 }
@@ -865,7 +869,10 @@ void ArmExceptionSlowPath::Emit(Assembler* sasm) {
   // Don't care about preserving R0 as this call won't return.
   __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
   // Set up call to Thread::Current()->pDeliverException.
-  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
+  __ LoadFromOffset(kLoadWord,
+                    R12,
+                    TR,
+                    QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
   __ blx(R12);
 #undef __
 }
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 9cf72a2601..2b7414d892 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -904,13 +904,13 @@ class ArmAssembler : public Assembler {
   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;

-  void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+  void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
       OVERRIDE;

-  void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+  void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
                                   ManagedRegister scratch) OVERRIDE;

-  void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;

   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                      ManagedRegister scratch) OVERRIDE;
@@ -918,7 +918,7 @@ class ArmAssembler : public Assembler {
   // Load routines
   void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;

-  void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+  void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;

   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

@@ -927,15 +927,15 @@ class ArmAssembler : public Assembler {

   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

-  void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+  void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;

   // Copying routines
   void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;

-  void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+  void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
                               ManagedRegister scratch) OVERRIDE;

-  void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+  void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
       OVERRIDE;

   void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -990,7 +990,7 @@ class ArmAssembler : public Assembler {
   // Call to address held at [base+offset]
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;

   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 9f2027f0a2..d82caf57e3 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -164,24 +164,25 @@ void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
                 offs.Int32Value());
 }

-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
-                                              ManagedRegister m_scratch) {
+void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
+                                              uint32_t imm,
+                                              ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsXRegister()) << scratch;
   LoadImmediate(scratch.AsXRegister(), imm);
   StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
 }

-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
-                                                FrameOffset fr_offs,
-                                                ManagedRegister m_scratch) {
+void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
+                                                FrameOffset fr_offs,
+                                                ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsXRegister()) << scratch;
   AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
   StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
 }

-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
+void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
   UseScratchRegisterScope temps(vixl_masm_);
   Register temp = temps.AcquireX();
   ___ Mov(temp, reg_x(SP));
@@ -285,7 +286,7 @@ void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
   return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
 }

-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
+void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
   return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
 }

@@ -318,7 +319,7 @@ void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, O
   ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
 }

-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
+void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   CHECK(dst.IsXRegister()) << dst;
   LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
@@ -355,17 +356,17 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
 }

 void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
-                                            ThreadOffset<8> tr_offs,
-                                            ManagedRegister m_scratch) {
+                                            ThreadOffset64 tr_offs,
+                                            ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsXRegister()) << scratch;
   LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
   StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
 }

-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
-                                          FrameOffset fr_offs,
-                                          ManagedRegister m_scratch) {
+void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
+                                          FrameOffset fr_offs,
+                                          ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsXRegister()) << scratch;
   LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
@@ -542,7 +543,8 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat
   ___ Blr(reg_x(scratch.AsXRegister()));
 }

-void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
+void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+                                      ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
 }

@@ -612,7 +614,9 @@ void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjus
   CHECK_ALIGNED(stack_adjust, kStackAlignment);
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
-  LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
+  LoadFromOffset(scratch.AsXRegister(),
+                 TR,
+                 Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
   ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
 }

@@ -629,7 +633,9 @@ void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
   // Pass exception object as argument.
   // Don't care about preserving X0 as this won't return.
   ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
-  ___ Ldr(temp, MEM_OP(reg_x(TR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
+  ___ Ldr(temp,
+          MEM_OP(reg_x(TR),
+                 QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
   ___ Blr(temp);

   // Call should never return.
@@ -720,7 +726,7 @@ void Arm64Assembler::BuildFrame(size_t frame_size,

   // Increase frame to required size.
   DCHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
   IncreaseFrameSize(frame_size);

   // Save callee-saves.
@@ -734,7 +740,7 @@ void Arm64Assembler::BuildFrame(size_t frame_size,
   StoreToOffset(X0, SP, 0);

   // Write out entry spills
-  int32_t offset = frame_size + kArm64PointerSize;
+  int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
   for (size_t i = 0; i < entry_spills.size(); ++i) {
     Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
     if (reg.IsNoRegister()) {
@@ -776,7 +782,7 @@ void Arm64Assembler::RemoveFrame(size_t frame_size,
   // For now we only check that the size of the frame is large enough to hold spills and method
   // reference.
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
   DCHECK_ALIGNED(frame_size, kStackAlignment);

   DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index a48154424c..24b798201a 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -126,28 +126,28 @@ class Arm64Assembler FINAL : public Assembler {
   void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
   void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+  void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
       OVERRIDE;
-  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+  void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
                                   ManagedRegister scratch) OVERRIDE;
-  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                      ManagedRegister scratch) OVERRIDE;

   // Load routines.
   void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-  void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+  void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
               bool unpoison_reference) OVERRIDE;
   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;

   // Copying routines.
   void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
                               ManagedRegister scratch) OVERRIDE;
-  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+  void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
       OVERRIDE;
   void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
   void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
@@ -200,7 +200,7 @@ class Arm64Assembler FINAL : public Assembler {
   // Call to address held at [base+offset].
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;

   // Jump to address (not setting link register)
   void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index e6c3a18d04..0a1b7334b8 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -162,90 +162,94 @@ std::unique_ptr<Assembler> Assembler::Create(
   }
 }

-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED,
                                          uint32_t imm ATTRIBUTE_UNUSED,
                                          ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED,
                                          uint32_t imm ATTRIBUTE_UNUSED,
                                          ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
-                                           FrameOffset fr_offs ATTRIBUTE_UNUSED,
-                                           ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread32(
+    ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
+    FrameOffset fr_offs ATTRIBUTE_UNUSED,
+    ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
-                                           FrameOffset fr_offs ATTRIBUTE_UNUSED,
-                                           ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackOffsetToThread64(
+    ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
+    FrameOffset fr_offs ATTRIBUTE_UNUSED,
+    ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread32(
+    ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
+void Assembler::StoreStackPointerToThread64(
+    ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

 void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                 ThreadOffset<4> src ATTRIBUTE_UNUSED,
+                                 ThreadOffset32 src ATTRIBUTE_UNUSED,
                                  size_t size ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

 void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                 ThreadOffset<8> src ATTRIBUTE_UNUSED,
+                                 ThreadOffset64 src ATTRIBUTE_UNUSED,
                                  size_t size ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

 void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                       ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
+                                       ThreadOffset32 offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

 void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
-                                       ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
+                                       ThreadOffset64 offs ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

 void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
-                                       ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+                                       ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
                                        ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

 void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
-                                       ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+                                       ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
                                        ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
                                      FrameOffset fr_offs ATTRIBUTE_UNUSED,
                                      ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
                                      FrameOffset fr_offs ATTRIBUTE_UNUSED,
                                      ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
                                  ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }

-void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
                                  ManagedRegister scratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL);
 }
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 80aa630424..89f7947cd5 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
 #include "arm/constants_arm.h"
 #include "base/arena_allocator.h"
 #include "base/arena_object.h"
+#include "base/enums.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "debug/dwarf/debug_frame_opcode_writer.h"
@@ -382,8 +383,7 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
                           const ManagedRegisterEntrySpills& entry_spills) = 0;

   // Emit code that will remove an activation from the stack
-  virtual void RemoveFrame(size_t frame_size,
-                           ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+  virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;

   virtual void IncreaseFrameSize(size_t adjust) = 0;
   virtual void DecreaseFrameSize(size_t adjust) = 0;
@@ -393,23 +393,24 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
   virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
   virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

-  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
-                                     ManagedRegister scratch) = 0;
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;

-  virtual void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
+  virtual void StoreImmediateToThread32(ThreadOffset32 dest,
+                                        uint32_t imm,
                                         ManagedRegister scratch);
-  virtual void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
+  virtual void StoreImmediateToThread64(ThreadOffset64 dest,
+                                        uint32_t imm,
                                         ManagedRegister scratch);

-  virtual void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
+  virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister scratch);
-  virtual void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+  virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister scratch);

-  virtual void StoreStackPointerToThread32(ThreadOffset<4> thr_offs);
-  virtual void StoreStackPointerToThread64(ThreadOffset<8> thr_offs);
+  virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
+  virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);

   virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                              FrameOffset in_off, ManagedRegister scratch) = 0;
@@ -417,8 +418,8 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
   // Load routines
   virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

-  virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size);
-  virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size);
+  virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
+  virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);

   virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
   // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
@@ -427,24 +428,27 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {

   virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;

-  virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs);
-  virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs);
+  virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
+  virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);

   // Copying routines
   virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;

-  virtual void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+  virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
+                                      ThreadOffset32 thr_offs,
                                       ManagedRegister scratch);
-  virtual void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+  virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
+                                      ThreadOffset64 thr_offs,
                                       ManagedRegister scratch);

-  virtual void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+  virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+                                    FrameOffset fr_offs,
                                     ManagedRegister scratch);
-  virtual void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+  virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+                                    FrameOffset fr_offs,
                                     ManagedRegister scratch);

-  virtual void CopyRef(FrameOffset dest, FrameOffset src,
-                       ManagedRegister scratch) = 0;
+  virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;

   virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;

@@ -474,24 +478,26 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
   // Exploit fast access in managed code to Thread::Current()
   virtual void GetCurrentThread(ManagedRegister tr) = 0;
-  virtual void GetCurrentThread(FrameOffset dest_offset,
-                                ManagedRegister scratch) = 0;
+  virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;

   // Set up out_reg to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed. in_reg holds a possibly stale reference
   // that can be used to avoid loading the handle scope entry to see if the value is
   // null.
-  virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                                      ManagedRegister in_reg, bool null_allowed) = 0;
+  virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
+                                      FrameOffset handlescope_offset,
+                                      ManagedRegister in_reg,
+                                      bool null_allowed) = 0;

   // Set up out_off to hold a Object** into the handle scope, or to be null if the
   // value is null and null_allowed.
-  virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
-                                      ManagedRegister scratch, bool null_allowed) = 0;
+  virtual void CreateHandleScopeEntry(FrameOffset out_off,
+                                      FrameOffset handlescope_offset,
+                                      ManagedRegister scratch,
+                                      bool null_allowed) = 0;

   // src holds a handle scope entry (Object**) load this into dst
-  virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
-                                            ManagedRegister src) = 0;
+  virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;

   // Heap::VerifyObject on src. In some cases (such as a reference to this) we
   // know that src may not be null.
@@ -499,12 +505,10 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
   virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

   // Call to address held at [base+offset]
-  virtual void Call(ManagedRegister base, Offset offset,
-                    ManagedRegister scratch) = 0;
-  virtual void Call(FrameOffset base, Offset offset,
-                    ManagedRegister scratch) = 0;
-  virtual void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch);
-  virtual void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch);
+  virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+  virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
+  virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
+  virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);

   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 608b3bc23c..e6b32def55 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -26,6 +26,11 @@
 namespace art {
 namespace mips {

+static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
+              "Unexpected Mips pointer size.");
+static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");
+
+
 std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
   if (rhs >= D0 && rhs < kNumberOfDRegisters) {
     os << "d" << static_cast<int>(rhs);
@@ -2794,7 +2799,8 @@ void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
   StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
 }

-void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, uint32_t imm,
+void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
+                                             uint32_t imm,
                                              ManagedRegister mscratch) {
   MipsManagedRegister scratch = mscratch.AsMips();
   CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2803,7 +2809,7 @@ void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, u
   StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
 }

-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister mscratch) {
   MipsManagedRegister scratch = mscratch.AsMips();
@@ -2813,7 +2819,7 @@ void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_o
                 S1, thr_offs.Int32Value());
 }

-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) {
+void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
   StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
 }

@@ -2830,8 +2836,7 @@ void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
   return EmitLoad(mdest, SP, src.Int32Value(), size);
 }

-void MipsAssembler::LoadFromThread32(ManagedRegister mdest,
-                                     ThreadOffset<kMipsWordSize> src, size_t size) {
+void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
   return EmitLoad(mdest, S1, src.Int32Value(), size);
 }

@@ -2859,8 +2864,7 @@ void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offs
                  base.AsMips().AsCoreRegister(), offs.Int32Value());
 }

-void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest,
-                                           ThreadOffset<kMipsWordSize> offs) {
+void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) {
   MipsManagedRegister dest = mdest.AsMips();
   CHECK(dest.IsCoreRegister());
   LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -2915,7 +2919,7 @@ void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m
 }

 void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
-                                           ThreadOffset<kMipsWordSize> thr_offs,
+                                           ThreadOffset32 thr_offs,
                                            ManagedRegister mscratch) {
   MipsManagedRegister scratch = mscratch.AsMips();
   CHECK(scratch.IsCoreRegister()) << scratch;
@@ -2925,7 +2929,7 @@ void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
                 SP, fr_offs.Int32Value());
 }

-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister mscratch) {
   MipsManagedRegister scratch = mscratch.AsMips();
@@ -3099,7 +3103,7 @@ void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscrat
   // TODO: place reference map on call.
 }

-void MipsAssembler::CallFromThread32(ThreadOffset<kMipsWordSize> offset ATTRIBUTE_UNUSED,
+void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
                                      ManagedRegister mscratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
@@ -3117,7 +3121,7 @@ void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust)
   MipsManagedRegister scratch = mscratch.AsMips();
   exception_blocks_.emplace_back(scratch, stack_adjust);
   LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 S1, Thread::ExceptionOffset<kMipsWordSize>().Int32Value());
+                 S1, Thread::ExceptionOffset<kMipsPointerSize>().Int32Value());
   // TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), slow.Entry());
   // as the NAL instruction (occurring in long R2 branches) may become deprecated.
   // For now use common for R2 and R6 instructions as this code must execute on both.
@@ -3135,7 +3139,7 @@ void MipsAssembler::EmitExceptionPoll(MipsExceptionSlowPath* exception) {
   Move(A0, exception->scratch_.AsCoreRegister());
   // Set up call to Thread::Current()->pDeliverException.
   LoadFromOffset(kLoadWord, T9, S1,
-                 QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pDeliverException).Int32Value());
+                 QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException).Int32Value());
   Jr(T9);
   Nop();
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8367e68ebc..852ced6e25 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -500,15 +500,15 @@ class MipsAssembler FINAL : public Assembler {

   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;

-  void StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest,
+  void StoreImmediateToThread32(ThreadOffset32 dest,
                                 uint32_t imm,
                                 ManagedRegister mscratch) OVERRIDE;

-  void StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+  void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
                                   FrameOffset fr_offs,
                                   ManagedRegister mscratch) OVERRIDE;

-  void StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;

   void StoreSpanning(FrameOffset dest,
                      ManagedRegister msrc,
@@ -518,9 +518,7 @@ class MipsAssembler FINAL : public Assembler {
   // Load routines.
   void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;

-  void LoadFromThread32(ManagedRegister mdest,
-                        ThreadOffset<kMipsWordSize> src,
-                        size_t size) OVERRIDE;
+  void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;

   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

@@ -531,16 +529,16 @@ class MipsAssembler FINAL : public Assembler {

   void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

-  void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset<kMipsWordSize> offs) OVERRIDE;
+  void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;

   // Copying routines.
   void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;

   void CopyRawPtrFromThread32(FrameOffset fr_offs,
-                              ThreadOffset<kMipsWordSize> thr_offs,
+                              ThreadOffset32 thr_offs,
                               ManagedRegister mscratch) OVERRIDE;

-  void CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+  void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
                             FrameOffset fr_offs,
                             ManagedRegister mscratch) OVERRIDE;

@@ -619,7 +617,7 @@ class MipsAssembler FINAL : public Assembler {
   // Call to address held at [base+offset].
   void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void CallFromThread32(ThreadOffset<kMipsWordSize> offset, ManagedRegister mscratch) OVERRIDE;
+  void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;

   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 447ede5166..3fd77a06b1 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -26,6 +26,11 @@
 namespace art {
 namespace mips64 {

+static_assert(static_cast<size_t>(kMips64PointerSize) == kMips64DoublewordSize,
+              "Unexpected Mips64 pointer size.");
+static_assert(kMips64PointerSize == PointerSize::k64, "Unexpected Mips64 pointer size.");
+
+
 void Mips64Assembler::FinalizeCode() {
   for (auto& exception_block : exception_blocks_) {
     EmitExceptionPoll(&exception_block);
@@ -2110,7 +2115,7 @@ void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
   StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
 }

-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
                                                  FrameOffset fr_offs,
                                                  ManagedRegister mscratch) {
   Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2119,7 +2124,7 @@ void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordS
   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
 }

-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
   StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
 }

@@ -2136,9 +2141,7 @@ void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size)
   return EmitLoad(mdest, SP, src.Int32Value(), size);
 }

-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest,
-                                       ThreadOffset<kMips64DoublewordSize> src,
-                                       size_t size) {
+void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
   return EmitLoad(mdest, S1, src.Int32Value(), size);
 }

@@ -2171,8 +2174,7 @@ void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                  base.AsMips64().AsGpuRegister(), offs.Int32Value());
 }

-void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
-                                             ThreadOffset<kMips64DoublewordSize> offs) {
+void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
   Mips64ManagedRegister dest = mdest.AsMips64();
   CHECK(dest.IsGpuRegister());
   LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -2217,7 +2219,7 @@ void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
 }

 void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
-                                             ThreadOffset<kMips64DoublewordSize> thr_offs,
+                                             ThreadOffset64 thr_offs,
                                              ManagedRegister mscratch) {
   Mips64ManagedRegister scratch = mscratch.AsMips64();
   CHECK(scratch.IsGpuRegister()) << scratch;
@@ -2225,7 +2227,7 @@ void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
   StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
 }

-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs,
+void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
                                            FrameOffset fr_offs,
                                            ManagedRegister mscratch) {
   Mips64ManagedRegister scratch = mscratch.AsMips64();
@@ -2429,7 +2431,7 @@ void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr
   // TODO: place reference map on call
 }

-void Mips64Assembler::CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset ATTRIBUTE_UNUSED,
+void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
                                        ManagedRegister mscratch ATTRIBUTE_UNUSED) {
   UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
 }
@@ -2449,7 +2451,7 @@ void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjus
   LoadFromOffset(kLoadDoubleword,
                  scratch.AsGpuRegister(),
                  S1,
-                 Thread::ExceptionOffset<kMips64DoublewordSize>().Int32Value());
+                 Thread::ExceptionOffset<kMips64PointerSize>().Int32Value());
   Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
 }

@@ -2466,7 +2468,7 @@ void Mips64Assembler::EmitExceptionPoll(Mips64ExceptionSlowPath* exception) {
   LoadFromOffset(kLoadDoubleword,
                  T9,
                  S1,
-                 QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pDeliverException).Int32Value());
+                 QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pDeliverException).Int32Value());
   Jr(T9);
   Nop();
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 0cd07089d0..1ad05b038b 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -383,10 +383,11 @@ class Mips64Assembler FINAL : public Assembler {

   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;

-  void StoreStackOffsetToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+  void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
+                                  FrameOffset fr_offs,
                                   ManagedRegister mscratch) OVERRIDE;

-  void StoreStackPointerToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;

   void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
                      ManagedRegister mscratch) OVERRIDE;
@@ -394,9 +395,7 @@ class Mips64Assembler FINAL : public Assembler {
   // Load routines.
   void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;

-  void LoadFromThread64(ManagedRegister mdest,
-                        ThreadOffset<kMips64DoublewordSize> src,
-                        size_t size) OVERRIDE;
+  void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;

   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

@@ -405,16 +404,17 @@ class Mips64Assembler FINAL : public Assembler {

   void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

-  void LoadRawPtrFromThread64(ManagedRegister mdest,
-                              ThreadOffset<kMips64DoublewordSize> offs) OVERRIDE;
+  void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;

   // Copying routines.
   void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;

-  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<kMips64DoublewordSize> thr_offs,
+  void CopyRawPtrFromThread64(FrameOffset fr_offs,
+                              ThreadOffset64 thr_offs,
                               ManagedRegister mscratch) OVERRIDE;

-  void CopyRawPtrToThread64(ThreadOffset<kMips64DoublewordSize> thr_offs, FrameOffset fr_offs,
+  void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
+                            FrameOffset fr_offs,
                             ManagedRegister mscratch) OVERRIDE;

   void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
@@ -471,8 +471,7 @@ class Mips64Assembler FINAL : public Assembler {
   // Call to address held at [base+offset].
   void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
-  void CallFromThread64(ThreadOffset<kMips64DoublewordSize> offset,
-                        ManagedRegister mscratch) OVERRIDE;
+  void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;

   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index f931d75e77..87f5647b8d 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2051,21 +2051,20 @@ void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
   movl(Address(ESP, dest), Immediate(imm));
 }

-void X86Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
-                                            ManagedRegister) {
+void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) {
   fs()->movl(Address::Absolute(dest), Immediate(imm));
 }

-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
-                                              FrameOffset fr_offs,
-                                              ManagedRegister mscratch) {
+void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
+                                              FrameOffset fr_offs,
+                                              ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
   CHECK(scratch.IsCpuRegister());
   leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
   fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
 }

-void X86Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
   fs()->movl(Address::Absolute(thr_offs), ESP);
 }

@@ -2101,7 +2100,7 @@ void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
   }
 }

-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
+void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
   X86ManagedRegister dest = mdest.AsX86();
   if (dest.IsNoRegister()) {
     CHECK_EQ(0u, size);
@@ -2111,7 +2110,7 @@ void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src,
   } else if (dest.IsRegisterPair()) {
     CHECK_EQ(8u, size);
     fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
-    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset<4>(src.Int32Value()+4)));
+    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
   } else if (dest.IsX87Register()) {
     if (size == 4) {
       fs()->flds(Address::Absolute(src));
@@ -2152,7 +2151,7 @@ void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
 }

 void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
-                                          ThreadOffset<4> offs) {
+                                          ThreadOffset32 offs) {
   X86ManagedRegister dest = mdest.AsX86();
   CHECK(dest.IsCpuRegister());
   fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
@@ -2215,17 +2214,17 @@ void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
 }

 void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
-                                          ThreadOffset<4> thr_offs,
-                                          ManagedRegister mscratch) {
+                                          ThreadOffset32 thr_offs,
+                                          ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
   CHECK(scratch.IsCpuRegister());
   fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
   Store(fr_offs, scratch, 4);
 }

-void X86Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
-                                        FrameOffset fr_offs,
-                                        ManagedRegister mscratch) {
+void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
   CHECK(scratch.IsCpuRegister());
   Load(scratch, fr_offs, 4);
@@ -2371,26 +2370,26 @@ void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratc
   call(Address(scratch, offset));
 }

-void X86Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister /*mscratch*/) {
+void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
   fs()->call(Address::Absolute(offset));
 }

 void X86Assembler::GetCurrentThread(ManagedRegister tr) {
   fs()->movl(tr.AsX86().AsCpuRegister(),
-             Address::Absolute(Thread::SelfOffset<4>()));
+             Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
 }

 void X86Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
   X86ManagedRegister scratch = mscratch.AsX86();
-  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<4>()));
+  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
   movl(Address(ESP, offset), scratch.AsCpuRegister());
 }

 void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
   X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
   buffer_.EnqueueSlowPath(slow);
-  fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<4>()), Immediate(0));
+  fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
   j(kNotEqual, slow->Entry());
 }

@@ -2403,8 +2402,8 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) {
     __ DecreaseFrameSize(stack_adjust_);
   }
   // Pass exception as argument in EAX
-  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<4>()));
-  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException)));
+  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
   // this call should never return
   __ int3();
 #undef __
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index fa616620b6..75648f21a2 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -195,7 +195,7 @@ class Address : public Operand {
     return result;
   }

-  static Address Absolute(ThreadOffset<4> addr) {
+  static Address Absolute(ThreadOffset32 addr) {
     return Absolute(addr.Int32Value());
   }

@@ -652,13 +652,13 @@ class X86Assembler FINAL : public Assembler {
   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;

-  void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+  void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
       OVERRIDE;

-  void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+  void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
                                   ManagedRegister scratch) OVERRIDE;

-  void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;

   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                      ManagedRegister scratch) OVERRIDE;
@@ -666,7 +666,7 @@ class X86Assembler FINAL : public Assembler {
   // Load routines
   void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;

-  void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+  void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;

   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

@@ -675,15 +675,15 @@ class X86Assembler FINAL : public Assembler {

   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

-  void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+  void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;

   // Copying routines
   void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;

-  void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+  void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
                               ManagedRegister scratch) OVERRIDE;

-  void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+  void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
       OVERRIDE;

   void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -740,7 +740,7 @@ class X86Assembler FINAL : public Assembler {
   // Call to address held at [base+offset]
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;

   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 3046710603..977ce9dc0b 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2683,7 +2683,8 @@ void X86_64Assembler::BuildFrame(size_t frame_size,
     }
   }

-  DCHECK_EQ(kX86_64PointerSize, kFramePointerSize);
+  static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+                "Unexpected frame pointer size.");

   movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());

@@ -2803,12 +2804,11 @@ void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
   movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64) movq?
 }

-void X86_64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
-                                               ManagedRegister) {
+void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) {
   gs()->movl(Address::Absolute(dest, true), Immediate(imm));  // TODO(64) movq?
 }

-void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
                                                  FrameOffset fr_offs,
                                                  ManagedRegister mscratch) {
   X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -2817,7 +2817,7 @@ void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
   gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
 }

-void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
   gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
 }

@@ -2858,7 +2858,7 @@ void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size)
   }
 }

-void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
+void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
   X86_64ManagedRegister dest = mdest.AsX86_64();
   if (dest.IsNoRegister()) {
     CHECK_EQ(0u, size);
@@ -2907,7 +2907,7 @@ void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
   movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
 }

-void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) {
+void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
   X86_64ManagedRegister dest = mdest.AsX86_64();
   CHECK(dest.IsCpuRegister());
   gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
@@ -2969,7 +2969,7 @@ void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister
 }

 void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
-                                             ThreadOffset<8> thr_offs,
+                                             ThreadOffset64 thr_offs,
                                              ManagedRegister mscratch) {
   X86_64ManagedRegister scratch = mscratch.AsX86_64();
   CHECK(scratch.IsCpuRegister());
@@ -2977,7 +2977,7 @@ void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
   Store(fr_offs, scratch, 8);
 }

-void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
+void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
                                            FrameOffset fr_offs,
                                            ManagedRegister mscratch) {
   X86_64ManagedRegister scratch = mscratch.AsX86_64();
@@ -3130,17 +3130,19 @@ void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr
   call(Address(scratch, offset));
 }

-void X86_64Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister /*mscratch*/) {
+void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
   gs()->call(Address::Absolute(offset, true));
 }

 void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
-  gs()->movq(tr.AsX86_64().AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+  gs()->movq(tr.AsX86_64().AsCpuRegister(),
+             Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
 }

 void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
   X86_64ManagedRegister scratch = mscratch.AsX86_64();
-  gs()->movq(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<8>(), true));
+  gs()->movq(scratch.AsCpuRegister(),
+             Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
   movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
 }

@@ -3156,7 +3158,7 @@ class X86_64ExceptionSlowPath FINAL : public SlowPath {
 void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
   X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
   buffer_.EnqueueSlowPath(slow);
-  gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<8>(), true), Immediate(0));
+  gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
   j(kNotEqual, slow->Entry());
 }

@@ -3169,8 +3171,10 @@ void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
     __ DecreaseFrameSize(stack_adjust_);
   }
   // Pass exception as argument in RDI
-  __ gs()->movq(CpuRegister(RDI), Address::Absolute(Thread::ExceptionOffset<8>(), true));
-  __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), true));
+  __ gs()->movq(CpuRegister(RDI),
+                Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+  __ gs()->call(
+      Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
   // this call should never return
   __ int3();
 #undef __
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 361f73cffa..52e39cf7e6 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -258,7 +258,7 @@ class Address : public Operand {
   }

   // If no_rip is true then the Absolute address isn't RIP relative.
-  static Address Absolute(ThreadOffset<8> addr, bool no_rip = false) {
+  static Address Absolute(ThreadOffset64 addr, bool no_rip = false) {
     return Absolute(addr.Int32Value(), no_rip);
   }

@@ -723,13 +723,13 @@ class X86_64Assembler FINAL : public Assembler {
   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;

-  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
+  void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
       OVERRIDE;

-  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+  void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
                                   ManagedRegister scratch) OVERRIDE;

-  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;

   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                      ManagedRegister scratch) OVERRIDE;
@@ -737,7 +737,7 @@ class X86_64Assembler FINAL : public Assembler {
   // Load routines
   void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;

-  void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
+  void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;

   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

@@ -746,15 +746,15 @@ class X86_64Assembler FINAL : public Assembler {

   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;

-  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
+  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;

   // Copying routines
   void Move(ManagedRegister dest, ManagedRegister src, size_t size);

-  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
                               ManagedRegister scratch) OVERRIDE;

-  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+  void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
       OVERRIDE;

   void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
@@ -812,7 +812,7 @@ class X86_64Assembler FINAL : public Assembler {
   // Call to address held at [base+offset]
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;

   // Generate code to check if Thread::Current()->exception_ is non-null
   // and branch to a ExceptionSlowPath if it is.