Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arm/assembler_arm.cc          32
-rw-r--r--  compiler/utils/arm/assembler_arm.h           14
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc     117
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h       19
-rw-r--r--  compiler/utils/assembler.h                   14
-rw-r--r--  compiler/utils/mips/assembler_mips.cc        34
-rw-r--r--  compiler/utils/mips/assembler_mips.h         14
-rw-r--r--  compiler/utils/x86/assembler_x86.cc          22
-rw-r--r--  compiler/utils/x86/assembler_x86.h           14
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc    24
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h     14
11 files changed, 170 insertions, 148 deletions
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 5c839dd54e..64685c1714 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1752,53 +1752,53 @@ void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
#endif
}
-void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
}
-void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
ArmManagedRegister scratch = mscratch.AsArm();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
ArmManagedRegister out_reg = mout_reg.AsArm();
ArmManagedRegister in_reg = min_reg.AsArm();
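The null_allowed sequences above (and their MIPS/x86 counterparts below) all compute the same value: a handle scope entry is either 0, when the reference is null and null is allowed, or the stack address of the slot holding the reference. A minimal C++ sketch of what the emitted instructions compute, with illustrative names rather than ART's:

    #include <cstdint>

    // Value left in out_reg/scratch by the emitted code.
    uintptr_t HandleScopeEntryValue(uintptr_t sp, int32_t handle_scope_offset,
                                    uintptr_t ref, bool null_allowed) {
      if (null_allowed && ref == 0) {
        return 0;  // cmp ...; LoadImmediate(out, 0, EQ) path
      }
      return sp + handle_scope_offset;  // AddConstant(out, SP, offset, NE/AL) path
    }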
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index f5be04acb7..396e60330b 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -521,20 +521,20 @@ class ArmAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index b4bb979d42..27188b2331 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -50,11 +50,11 @@ void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
}
void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
- ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(TR1));
+ ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(ETR));
}
void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
- StoreToOffset(TR1, SP, offset.Int32Value());
+ StoreToOffset(ETR, SP, offset.Int32Value());
}
// See Arm64 PCS Section 5.2.2.1.
@@ -79,11 +79,13 @@ void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
// VIXL macro-assembler handles all variants.
___ Add(reg_x(rd), reg_x(rn), value);
} else {
- // ip1 = rd + value
- // rd = cond ? ip1 : rn
- CHECK_NE(rn, IP1);
- ___ Add(reg_x(IP1), reg_x(rn), value);
- ___ Csel(reg_x(rd), reg_x(IP1), reg_x(rd), COND_OP(cond));
+ // temp = rd + value
+ // rd = cond ? temp : rn
+ vixl::UseScratchRegisterScope temps(vixl_masm_);
+ temps.Exclude(reg_x(rd), reg_x(rn));
+ vixl::Register temp = temps.AcquireX();
+ ___ Add(temp, reg_x(rn), value);
+ ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
}
}
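The hunk above replaces the hard-coded IP1 temporary with a register borrowed from VIXL's scratch pool, excluding rd and rn first so the pool can never hand back a live register; that exclusion is what replaces the old CHECK_NE(rn, IP1). A minimal sketch of the RAII pattern under a bitmask model (an illustration with hypothetical names, not VIXL's implementation):

    #include <cassert>
    #include <cstdint>

    class ScratchScopeSketch {
     public:
      explicit ScratchScopeSketch(uint32_t* pool) : pool_(pool), saved_(*pool) {}
      ~ScratchScopeSketch() { *pool_ = saved_; }         // borrowed regs returned on scope exit
      void Exclude(int reg) { *pool_ &= ~(1u << reg); }  // keep a live register out of the pool
      int AcquireX() {                                   // hand out any register still in the pool
        assert(*pool_ != 0 && "scratch pool exhausted");
        int reg = __builtin_ctz(*pool_);                 // lowest free register (GCC/Clang builtin)
        *pool_ &= ~(1u << reg);
        return reg;
      }
     private:
      uint32_t* pool_;
      uint32_t saved_;
    };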
@@ -162,7 +164,7 @@ void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadImmediate(scratch.AsCoreRegister(), imm);
- StoreToOffset(scratch.AsCoreRegister(), TR1, offs.Int32Value());
+ StoreToOffset(scratch.AsCoreRegister(), ETR, offs.Int32Value());
}
void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
@@ -171,13 +173,14 @@ void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
}
void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
- // Arm64 does not support: "str sp, [dest]" therefore we use IP1 as a temp reg.
- ___ Mov(reg_x(IP1), reg_x(SP));
- StoreToOffset(IP1, TR1, tr_offs.Int32Value());
+ vixl::UseScratchRegisterScope temps(vixl_masm_);
+ vixl::Register temp = temps.AcquireX();
+ ___ Mov(temp, reg_x(SP));
+ ___ Str(temp, MEM_OP(reg_x(ETR), tr_offs.Int32Value()));
}
void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
@@ -195,12 +198,14 @@ void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
if ((cond == AL) || (cond == NV)) {
___ Mov(reg_x(dest), value);
} else {
- // ip1 = value
- // rd = cond ? ip1 : rd
+ // temp = value
+ // rd = cond ? temp : rd
if (value != 0) {
- CHECK_NE(dest, IP1);
- ___ Mov(reg_x(IP1), value);
- ___ Csel(reg_x(dest), reg_x(IP1), reg_x(dest), COND_OP(cond));
+ vixl::UseScratchRegisterScope temps(vixl_masm_);
+ temps.Exclude(reg_x(dest));
+ vixl::Register temp = temps.AcquireX();
+ ___ Mov(temp, value);
+ ___ Csel(reg_x(dest), temp, reg_x(dest), COND_OP(cond));
} else {
___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
}
@@ -276,7 +281,7 @@ void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
}
void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
- return Load(m_dst.AsArm64(), TR1, src.Int32Value(), size);
+ return Load(m_dst.AsArm64(), ETR, src.Int32Value(), size);
}
void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
@@ -298,13 +303,16 @@ void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, O
Arm64ManagedRegister dst = m_dst.AsArm64();
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
- LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
+ // Remove dst and base from the temp list - higher level API uses IP1, IP0.
+ vixl::UseScratchRegisterScope temps(vixl_masm_);
+ temps.Exclude(reg_x(dst.AsCoreRegister()), reg_x(base.AsCoreRegister()));
+ ___ Ldr(reg_x(dst.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
}
void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(dst.AsCoreRegister(), TR1, offs.Int32Value());
+ LoadFromOffset(dst.AsCoreRegister(), ETR, offs.Int32Value());
}
// Copying routines.
@@ -342,7 +350,7 @@ void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
+ LoadFromOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
}
@@ -352,7 +360,7 @@ void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
}
void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
@@ -511,7 +519,10 @@ void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(base.IsCoreRegister()) << base;
CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
+ // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
+ vixl::UseScratchRegisterScope temps(vixl_masm_);
+ temps.Exclude(reg_x(base.AsCoreRegister()), reg_x(scratch.AsCoreRegister()));
+ ___ Ldr(reg_x(scratch.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsCoreRegister()));
}
@@ -528,52 +539,52 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
-void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs,
+void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
ManagedRegister m_in_reg, bool null_allowed) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale sirt entries in x registers.
+ // For now we only hold stale handle scope entries in x registers.
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
- sirt_offs.Int32Value());
+ handle_scope_offs.Int32Value());
in_reg = out_reg;
}
___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
}
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL);
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
}
}
-void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
ManagedRegister m_scratch, bool null_allowed) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
// TODO: Move this logic into an AddConstant variant that sets flags.
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
}
StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg,
+void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
ManagedRegister m_in_reg) {
Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
@@ -595,13 +606,17 @@ void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjus
Arm64ManagedRegister scratch = m_scratch.AsArm64();
Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
exception_blocks_.push_back(current_exception);
- LoadFromOffset(scratch.AsCoreRegister(), TR1, Thread::ExceptionOffset<8>().Int32Value());
+ LoadFromOffset(scratch.AsCoreRegister(), ETR, Thread::ExceptionOffset<8>().Int32Value());
___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
___ B(current_exception->Entry(), COND_OP(NE));
}
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- // Bind exception poll entry.
+ vixl::UseScratchRegisterScope temps(vixl_masm_);
+ temps.Exclude(reg_x(exception->scratch_.AsCoreRegister()));
+ vixl::Register temp = temps.AcquireX();
+
+ // Bind exception poll entry.
___ Bind(exception->Entry());
if (exception->stack_adjust_ != 0) { // Fix up the frame.
DecreaseFrameSize(exception->stack_adjust_);
@@ -609,12 +624,14 @@ void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
// Pass exception object as argument.
// Don't care about preserving X0 as this won't return.
___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
- LoadFromOffset(IP1, TR1, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
+ ___ Ldr(temp, MEM_OP(reg_x(ETR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
- // FIXME: Temporary fix for TR (XSELF).
- ___ Mov(reg_x(TR), reg_x(TR1));
+ // Move ETR (callee-saved) back to TR (caller-saved). We use ETR on calls
+ // to external functions that might trash TR. We do not need the original
+ // X19 saved in BuildFrame().
+ ___ Mov(reg_x(TR), reg_x(ETR));
- ___ Blr(reg_x(IP1));
+ ___ Blr(temp);
// Call should never return.
___ Brk();
}
@@ -634,8 +651,10 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
___ PushCalleeSavedRegisters();
- // FIXME: Temporary fix for TR (XSELF).
- ___ Mov(reg_x(TR1), reg_x(TR));
+ // Move TR (caller-saved) to ETR (callee-saved). The original X19 has been
+ // saved by PushCalleeSavedRegisters(). This way we make sure that TR is not
+ // trashed by native code.
+ ___ Mov(reg_x(ETR), reg_x(TR));
// Increase frame to required size - must be at least space to push Method*.
CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
@@ -681,8 +700,10 @@ void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedReg
size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
DecreaseFrameSize(adjust);
- // FIXME: Temporary fix for TR (XSELF).
- ___ Mov(reg_x(TR), reg_x(TR1));
+ // We move ETR (callee-saved) back to TR (caller-saved), which might have
+ // been trashed in the native call. The original X19 (ETR) is restored as
+ // part of PopCalleeSavedRegisters().
+ ___ Mov(reg_x(TR), reg_x(ETR));
// Pop callee saved and return to LR.
___ PopCalleeSavedRegisters();
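BuildFrame() and RemoveFrame() now bracket native code with a TR-to-ETR round trip: the caller-saved thread register is copied into callee-saved X19 (ETR) on entry, survives whatever the native callee does, and is copied back on exit. The same invariant simulated with plain variables (hypothetical names, not ART code):

    struct Regs { long tr; long etr; };

    void NativeCall(Regs* r) { r->tr = 0; }  // native code may trash caller-saved TR

    long JniStubSketch(long thread) {
      Regs r = {thread, 0};
      long saved_x19 = r.etr;  // PushCalleeSavedRegisters() preserves the old X19
      r.etr = r.tr;            // BuildFrame: Mov(ETR, TR)
      NativeCall(&r);          // TR cannot be trusted past this point
      r.tr = r.etr;            // RemoveFrame: Mov(TR, ETR)
      r.etr = saved_x19;       // PopCalleeSavedRegisters() restores X19
      return r.tr;             // still the thread pointer
    }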
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 97fb93af82..c866b29224 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -85,6 +85,7 @@ class Arm64Assembler FINAL : public Assembler {
vixl_masm_(new vixl::MacroAssembler(vixl_buf_, kBufferSizeArm64)) {}
virtual ~Arm64Assembler() {
+ delete vixl_masm_;
delete[] vixl_buf_;
}
@@ -160,20 +161,20 @@ class Arm64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst.
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
@@ -237,8 +238,8 @@ class Arm64Assembler FINAL : public Assembler {
// Vixl buffer.
byte* vixl_buf_;
- // Unique ptr - vixl assembler.
- UniquePtr<vixl::MacroAssembler> vixl_masm_;
+ // Vixl assembler.
+ vixl::MacroAssembler* vixl_masm_;
// List of exception blocks to generate at the end of the code cache.
std::vector<Arm64Exception*> exception_blocks_;
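With UniquePtr gone, the destructor change earlier in this file carries the ownership contract: vixl_masm_ points into vixl_buf_, so deleting the macro-assembler before freeing the buffer is the safe order. A stand-in sketch of that pairing (MasmSketch/OwnerSketch are placeholders, not ART types):

    #include <cstdint>

    struct MasmSketch {
      explicit MasmSketch(uint8_t* buf) : buf_(buf) {}
      uint8_t* buf_;  // non-owning view into the code buffer
    };

    class OwnerSketch {
     public:
      OwnerSketch() : buf_(new uint8_t[4096]), masm_(new MasmSketch(buf_)) {}
      ~OwnerSketch() {
        delete masm_;   // first the assembler that points into the buffer
        delete[] buf_;  // then the buffer it assembled into
      }
     private:
      uint8_t* buf_;
      MasmSketch* masm_;
    };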
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 219c87fd79..19239e1256 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -453,20 +453,20 @@ class Assembler {
virtual void GetCurrentThread(FrameOffset dest_offset,
ManagedRegister scratch) = 0;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
ManagedRegister in_reg, bool null_allowed) = 0;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ virtual void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister scratch, bool null_allowed) = 0;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**); load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src) = 0;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 9001f8a41f..8001dcd2fb 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -827,8 +827,8 @@ void MipsAssembler::MemoryBarrier(ManagedRegister) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
-void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
@@ -836,27 +836,27 @@ void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
Label null_arg;
- // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
- // the address in the SIRT holding the reference.
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
// e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, sirt_offset.Int32Value());
+ SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
LoadImmediate(out_reg.AsCoreRegister(), 0);
}
EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
}
-void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
MipsManagedRegister scratch = mscratch.AsMips();
@@ -864,21 +864,21 @@ void MipsAssembler::CreateSirtEntry(FrameOffset out_off,
if (null_allowed) {
Label null_arg;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- sirt_offset.Int32Value());
- // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is
- // the address in the SIRT holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
Bind(&null_arg, false);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
-// Given a SIRT entry, load the associated reference.
-void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
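MIPS has no conditional select, so the null check that ARM expresses with cmp/csel is emitted as a short forward branch instead. What the branchy sequence computes, sketched in C++ with illustrative names:

    #include <cstdint>

    uintptr_t MipsEntrySketch(uintptr_t sp, int32_t handle_scope_offset, uintptr_t ref) {
      uintptr_t out = 0;                 // LoadImmediate(out, 0)
      if (ref != 0) {                    // EmitBranch(in, ZERO, &null_arg, true)
        out = sp + handle_scope_offset;  // AddConstant(out, SP, offset)
      }
      return out;                        // Bind(&null_arg)
    }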
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 75ee8b95bd..216cb4164e 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -238,20 +238,20 @@ class MipsAssembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister mscratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister mscratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 6a3efc5431..0791c63f90 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1727,8 +1727,8 @@ void X86Assembler::MemoryBarrier(ManagedRegister) {
#endif
}
-void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
@@ -1742,34 +1742,34 @@ void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
}
-void X86Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86ManagedRegister scratch = mscratch.AsX86();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
Bind(&null_arg);
} else {
- leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
}
Store(out_off, scratch, 4);
}
-// Given a SIRT entry, load the associated reference.
-void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86ManagedRegister out_reg = mout_reg.AsX86();
X86ManagedRegister in_reg = min_reg.AsX86();
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 057c80af91..2fc6049042 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -541,20 +541,20 @@ class X86Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+ // src holds a handle scope entry (Object**); load this into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 8eaeae17ac..0ede8755e3 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1989,8 +1989,8 @@ void X86_64Assembler::MemoryBarrier(ManagedRegister) {
#endif
}
-void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
ManagedRegister min_reg, bool null_allowed) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
@@ -1998,7 +1998,7 @@ void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
// Use out_reg as indicator of NULL
in_reg = out_reg;
// TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
CHECK(in_reg.IsCpuRegister());
CHECK(out_reg.IsCpuRegister());
@@ -2010,34 +2010,34 @@ void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
}
testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
}
-void X86_64Assembler::CreateSirtEntry(FrameOffset out_off,
- FrameOffset sirt_offset,
+void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
ManagedRegister mscratch,
bool null_allowed) {
X86_64ManagedRegister scratch = mscratch.AsX86_64();
CHECK(scratch.IsCpuRegister());
if (null_allowed) {
Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
Bind(&null_arg);
} else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), sirt_offset));
+ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
}
Store(out_off, scratch, 8);
}
-// Given a SIRT entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+// Given a handle scope entry, load the associated reference.
+void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
ManagedRegister min_reg) {
X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
X86_64ManagedRegister in_reg = min_reg.AsX86_64();
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 87fb359112..548d379346 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -566,20 +566,20 @@ class X86_64Assembler FINAL : public Assembler {
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
- // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the SIRT entry to see if the value is
+ // that can be used to avoid loading the handle scope entry to see if the value is
// NULL.
- void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset, ManagedRegister in_reg,
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, ManagedRegister in_reg,
bool null_allowed) OVERRIDE;
- // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
// value is null and null_allowed.
- void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset, ManagedRegister scratch,
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister scratch,
bool null_allowed) OVERRIDE;
- // src holds a SIRT entry (Object**) load this into dst
- virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ // src holds a handle scope entry (Object**); load this into dst
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
ManagedRegister src);
// Heap::VerifyObject on src. In some cases (such as a reference to this) we