| author | 2015-04-22 13:56:20 -0700 |
|---|---|
| committer | 2015-06-02 09:21:27 -0700 |
| commit | 3d21bdf8894e780d349c481e5c9e29fe1556051c (patch) |
| tree | 61a5231f36c0dabd73457fec81df103462a05aff /compiler/utils/arm64/assembler_arm64.cc |
| parent | 71f0a8a123fa27bdc857a98afebbaf0ed09dac15 (diff) |
Move mirror::ArtMethod to native
Optimizing + quick tests are passing, devices boot.
TODO: Test and fix bugs in mips64.
Saves 16 bytes on most ArtMethods, a 7.5MB reduction in system PSS.
Some of the savings come from removing the virtual-methods and
direct-methods object arrays (see the sketch after this message).
Bug: 19264997
(cherry picked from commit e401d146407d61eeb99f8d6176b2ac13c4df1e33)
Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d
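A minimal sketch of where per-method savings can come from when runtime metadata moves off the managed heap: a native struct drops the managed-object header and no longer has to be reachable through mirror object arrays. The layouts and field names below are invented for illustration; they are not ART's real ArtMethod layout.

```cpp
#include <cstdint>
#include <cstdio>

struct ManagedObjectHeader {   // every mirror:: object carries a header
  uint32_t klass_;
  uint32_t monitor_;
};

struct MirrorStyleMethod {     // hypothetical managed layout
  ManagedObjectHeader header_;
  uint32_t declaring_class_;
  uint32_t access_flags_;
  uint32_t dex_method_index_;
  uint32_t method_index_;
  uint64_t entry_point_;
};

struct NativeStyleMethod {     // hypothetical native layout: no header needed
  uint32_t declaring_class_;
  uint32_t access_flags_;
  uint32_t dex_method_index_;
  uint32_t method_index_;
  uint64_t entry_point_;
};

int main() {
  std::printf("mirror-style: %zu bytes, native-style: %zu bytes\n",
              sizeof(MirrorStyleMethod), sizeof(NativeStyleMethod));
  return 0;
}
```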
Fix some ArtMethod-related bugs
Added root visiting for runtime methods, not currently required
since the GcRoots in these methods are null.
Added a missing GetInterfaceMethodIfProxy in GetMethodLine; this fixes
run-tests 005 and 044 under --trace.
Fixed an optimizing compiler bug on ARM64 where we used a normal stack
location instead of a double one; this fixes the debuggable tests (see
the sketch after this message).
TODO: Fix JDWP tests.
Bug: 19264997
Change-Id: I7c55f69c61d1b45351fd0dc7185ffe5efad82bd3
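A hedged illustration of the ARM64 stack-location bug mentioned above: a 64-bit value such as a double spans two 32-bit stack slots, so reading it back through a normal single-slot location only recovers half of it. The layout below is invented for the example (little-endian assumed, as on ARM64 Android).

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t stack[16] = {};
  const double stored = 3.5;
  std::memcpy(stack, &stored, sizeof(stored));  // occupies two 4-byte slots

  uint32_t single_slot;                          // a "normal" stack location
  std::memcpy(&single_slot, stack, sizeof(single_slot));
  uint64_t double_slot;                          // a "double" stack location
  std::memcpy(&double_slot, stack, sizeof(double_slot));

  double reloaded;
  std::memcpy(&reloaded, &double_slot, sizeof(reloaded));
  assert(reloaded == stored);           // the double slot round-trips correctly
  assert(single_slot != double_slot);   // the single slot lost the high half
  return 0;
}
```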
ART: Fix casts for 64-bit pointers on 32-bit compiler.
Bug: 19264997
Change-Id: Ief45cdd4bae5a43fc8bfdfa7cf744e2c57529457
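A hedged sketch of the casting problem a 32-bit build of the compiler hits when it models 64-bit target pointers: values travel as uint64_t, and casting a 64-bit integer straight to a 32-bit pointer type is either rejected or silently truncating. Routing through uintptr_t with an explicit width check is the usual pattern; this helper is illustrative, not ART's actual code.

```cpp
#include <cassert>
#include <cstdint>

template <typename T>
T* ToPointer(uint64_t value) {
  // On a 32-bit host, uintptr_t is 32 bits: verify no bits are dropped.
  assert(value <= static_cast<uint64_t>(UINTPTR_MAX));
  return reinterpret_cast<T*>(static_cast<uintptr_t>(value));
}

int main() {
  int x = 0;
  uint64_t raw = reinterpret_cast<uintptr_t>(&x);  // widen via uintptr_t
  assert(ToPointer<int>(raw) == &x);
  return 0;
}
```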
Fix JDWP tests after ArtMethod change
Fixes Throwable::GetStackDepth for exception event detection after the
internal stack trace representation change.
Adds a missing ArtMethod::GetInterfaceMethodIfProxy call for the proxy
method case (see the sketch after this message).
Bug: 19264997
Change-Id: I363e293796848c3ec491c963813f62d868da44d2
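A hedged sketch of the GetInterfaceMethodIfProxy pattern referenced above: a proxy method has no dex-file metadata of its own, so callers that need dex info must first unwrap it to the interface method it implements. The Method type here is a stand-in, not ART's ArtMethod API.

```cpp
#include <cassert>

struct Method {
  bool is_proxy = false;
  Method* interface_method = nullptr;  // the real method behind a proxy

  Method* GetInterfaceMethodIfProxy() {
    return is_proxy ? interface_method : this;
  }
};

int main() {
  Method real;
  Method proxy{/*is_proxy=*/true, /*interface_method=*/&real};
  assert(proxy.GetInterfaceMethodIfProxy() == &real);  // proxy is unwrapped
  assert(real.GetInterfaceMethodIfProxy() == &real);   // non-proxy unchanged
  return 0;
}
```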
Fix accidental IMT and root marking regression
We were always using the conflict trampoline. Also includes a fix for a
regression in GC time caused by extra roots; most of the regression was
from IMT (see the sketch after this message).
Fixed a bug in DumpGcPerformanceInfo where we would get SIGABRT due to
a detached thread.
EvaluateAndApplyChanges: from ~2500 -> ~1980.
GC time: 8.2s -> 7.2s, due to 1s less spent in MarkConcurrentRoots.
Bug: 19264997
Change-Id: I4333e80a8268c2ed1284f87f25b9f113d4f2c7e0
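A hedged sketch of IMT dispatch as described above: interface calls index a small fixed-size table, and only slots where two interface methods collide should hold the conflict trampoline (the slow path). The regression was that every call took the trampoline. All names and sizes below are illustrative.

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kImtSize = 64;

enum class Target { kDirect, kConflictTrampoline };

struct Imt {
  Target entries[kImtSize];

  Target Lookup(size_t method_hash) const {
    return entries[method_hash % kImtSize];  // fast path: one table load
  }
};

int main() {
  Imt imt = {};
  for (size_t i = 0; i < kImtSize; ++i) imt.entries[i] = Target::kDirect;
  imt.entries[7] = Target::kConflictTrampoline;  // a genuine collision

  assert(imt.Lookup(3) == Target::kDirect);              // direct dispatch
  assert(imt.Lookup(7) == Target::kConflictTrampoline);  // slow path only here
  return 0;
}
```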
Fix bogus image test assert
Previously we compared the size of the non-moving space to the size of
the image file. Now we properly compare the size of the image space
against the size of the image file (see the sketch after this message).
Bug: 19264997
Change-Id: I7359f1f73ae3df60c5147245935a24431c04808a
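A hedged sketch of the corrected assertion: the comparison should involve the image space, not the non-moving space. The direction of the comparison and all names here are assumptions for illustration, not the actual test code.

```cpp
#include <cassert>
#include <cstdint>

struct Space { uint64_t size_bytes; };

void CheckImageSpaceSize(const Space& image_space, uint64_t image_file_size) {
  // Before the fix, a non-moving space was compared here by mistake.
  assert(image_space.size_bytes <= image_file_size);
}

int main() {
  Space image_space{4 * 1024 * 1024};
  CheckImageSpaceSize(image_space, 4 * 1024 * 1024);
  return 0;
}
```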
[MIPS64] Fix art_quick_invoke_stub argument offsets.
The ArtMethod reference got bigger, so we need to move the other
arguments and leave enough space for the ArtMethod* and the 'this'
pointer (see the arithmetic sketch after this message).
This fixes mips64 boot.
Bug: 19264997
Change-Id: I47198d5f39a4caab30b3b77479d5eedaad5006ab
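A hedged arithmetic sketch of the offset fix described above: once the method is passed as a full native pointer (8 bytes on mips64) instead of a 4-byte StackReference, every argument stored behind it in the invoke stub's frame must shift. The constants are illustrative, not the stub's real layout.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kStackRefSize = 4;  // old: sizeof(StackReference<ArtMethod>)
  const uint32_t kPointerSize  = 8;  // new: sizeof(ArtMethod*) on mips64
  const uint32_t kThisRefSize  = 4;  // compressed 'this' reference

  std::printf("first argument offset: old=%u, new=%u\n",
              kStackRefSize + kThisRefSize,   // 8 before the change
              kPointerSize + kThisRefSize);   // 12 after the change
  return 0;
}
```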
Diffstat (limited to 'compiler/utils/arm64/assembler_arm64.cc')
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.cc | 28 |
1 file changed, 14 insertions, 14 deletions
```diff
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 3ee79a103f..7d98a30ff3 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -293,14 +293,14 @@ void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
   LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
 }
 
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
-                             MemberOffset offs) {
+void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
+                             bool poison_reference) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   Arm64ManagedRegister base = m_base.AsArm64();
   CHECK(dst.IsXRegister() && base.IsXRegister());
   LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
                   offs.Int32Value());
-  if (kPoisonHeapReferences) {
+  if (kPoisonHeapReferences && poison_reference) {
     WRegister ref_reg = dst.AsOverlappingWRegister();
     ___ Neg(reg_w(ref_reg), vixl::Operand(reg_w(ref_reg)));
   }
@@ -535,7 +535,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsXRegister()) << scratch;
   // Call *(*(SP + base) + offset)
-  LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, base.Int32Value());
+  LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
   LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
   ___ Blr(reg_x(scratch.AsXRegister()));
 }
@@ -544,8 +544,9 @@ void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegiste
   UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
 }
 
-void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
-                                            ManagedRegister m_in_reg, bool null_allowed) {
+void Arm64Assembler::CreateHandleScopeEntry(
+    ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
+    bool null_allowed) {
   Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
   Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
   // For now we only hold stale handle scope entries in x registers.
@@ -571,7 +572,7 @@ void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffs
 }
 
 void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
-                                           ManagedRegister m_scratch, bool null_allowed) {
+                                            ManagedRegister m_scratch, bool null_allowed) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsXRegister()) << scratch;
   if (null_allowed) {
@@ -590,7 +591,7 @@ void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset han
 }
 
 void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
-                                                 ManagedRegister m_in_reg) {
+                                                  ManagedRegister m_in_reg) {
   Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
   Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
   CHECK(out_reg.IsXRegister()) << out_reg;
@@ -706,7 +707,7 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
 
   // Increase frame to required size.
   DCHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
   IncreaseFrameSize(frame_size);
 
   // Save callee-saves.
@@ -720,13 +721,12 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
   DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
   ___ Mov(reg_x(ETR), reg_x(TR));
 
-  // Write StackReference<Method>.
+  // Write ArtMethod*
   DCHECK(X0 == method_reg.AsArm64().AsXRegister());
-  DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
-  StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);
+  StoreToOffset(X0, SP, 0);
 
   // Write out entry spills
-  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+  int32_t offset = frame_size + kArm64PointerSize;
   for (size_t i = 0; i < entry_spills.size(); ++i) {
     Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
     if (reg.IsNoRegister()) {
@@ -768,7 +768,7 @@ void Arm64Assembler::RemoveFrame(size_t frame_size,
 
   // For now we only check that the size of the frame is large enough to hold spills and method
   // reference.
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + kArm64PointerSize);
   DCHECK_ALIGNED(frame_size, kStackAlignment);
 
   // Note: This is specific to JNI method frame.
```
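A hedged model of heap-reference poisoning, which the new poison_reference flag in LoadRef() above gates: with kPoisonHeapReferences enabled, stored references are negated, and a load must negate again to recover the real value (the Neg instruction emitted in the diff). This is an illustrative model, not ART's code.

```cpp
#include <cassert>
#include <cstdint>

constexpr bool kPoisonHeapReferences = true;

uint32_t PoisonReference(uint32_t ref) {
  return kPoisonHeapReferences ? 0u - ref : ref;  // negate modulo 2^32
}

uint32_t UnpoisonReference(uint32_t ref) {
  return kPoisonHeapReferences ? 0u - ref : ref;  // negation is self-inverse
}

int main() {
  const uint32_t ref = 0x12345678u;
  assert(UnpoisonReference(PoisonReference(ref)) == ref);
  // Callers that want the raw (still-poisoned) bits pass
  // poison_reference = false to LoadRef() and skip the Neg.
  return 0;
}
```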