From fb1b08cbb9c6ac149d75de16c14fdaa8b68baaa4 Mon Sep 17 00:00:00 2001
From: Mythri Alle
Date: Thu, 12 May 2022 15:59:11 +0000
Subject: Revert "Reland^2 "Don't use AOT code for native methods for java debuggable runtime""

This reverts commit 5da52cd20ea0d24b038ae20c6c96aa22ac3a24a0.

Reason for revert: https://ci.chromium.org/ui/p/art/builders/ci/host-x86_64-cdex-fast/5172/overview

Change-Id: I9cebbaa145810547531a90af9da7961c0b6255d1
---
 compiler/jni/jni_compiler_test.cc                  |   2 +-
 compiler/jni/quick/jni_compiler.cc                 |  11 +--
 compiler/utils/arm/jni_macro_assembler_arm_vixl.cc |  11 +--
 compiler/utils/arm/jni_macro_assembler_arm_vixl.h  |   2 +-
 compiler/utils/arm64/jni_macro_assembler_arm64.cc  |   5 +-
 compiler/utils/arm64/jni_macro_assembler_arm64.h   |   2 +-
 compiler/utils/assembler_thumb_test.cc             |   3 +-
 .../utils/assembler_thumb_test_expected.cc.inc     | 110 ++++++++++-----------
 compiler/utils/jni_macro_assembler.h               |   6 +-
 compiler/utils/x86/jni_macro_assembler_x86.cc      |  14 +--
 compiler/utils/x86/jni_macro_assembler_x86.h       |   2 +-
 .../utils/x86_64/jni_macro_assembler_x86_64.cc     |  11 +--
 compiler/utils/x86_64/jni_macro_assembler_x86_64.h |   2 +-
 13 files changed, 70 insertions(+), 111 deletions(-)

(limited to 'compiler')

diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index d05324baee..0a1f017828 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -414,7 +414,7 @@ void JniCompilerTest::AssertCallerObjectLocked(JNIEnv* env) {
   CHECK(!caller->IsCriticalNative());
   CHECK(caller->IsSynchronized());
   ObjPtr<mirror::Object> lock;
-  if (self->GetManagedStack()->GetTopQuickFrameGenericJniTag()) {
+  if (self->GetManagedStack()->GetTopQuickFrameTag()) {
     // Generic JNI.
     lock = GetGenericJniSynchronizationObject(self, caller);
   } else if (caller->IsStatic()) {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b88ebaf956..6cb50211e1 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -95,13 +95,6 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
   const InstructionSetFeatures* instruction_set_features =
       compiler_options.GetInstructionSetFeatures();
 
-  // When walking the stack the top frame doesn't have a pc associated with it. We then depend on
-  // the invariant that we don't have JITed code when AOT code is available. In debuggable runtimes
-  // this invariant doesn't hold. So we tag the SP for JITed code to indentify if we are executing
-  // JITed code or AOT code. Since tagging involves additional instructions we tag only in
-  // debuggable runtimes.
-  bool should_tag_sp = compiler_options.GetDebuggable() && compiler_options.IsJitCompiler();
-
   // i.e. if the method was annotated with @FastNative
   const bool is_fast_native = (access_flags & kAccFastNative) != 0u;
 
@@ -226,7 +219,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
   // because garbage collections are disabled within the execution of a
   // @CriticalNative method.
   if (LIKELY(!is_critical_native)) {
-    __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>(), should_tag_sp);
+    __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
   }
 
   // 2. Lock the object (if synchronized) and transition out of Runnable (if normal native).
@@ -612,7 +605,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
   if (reference_return) {
     // Suspend check entry point overwrites top of managed stack and leaves it clobbered.
     // We need to restore the top for subsequent runtime call to `JniDecodeReferenceResult()`.
-    __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>(), should_tag_sp);
+    __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset<kPointerSize>());
   }
   if (reference_return && main_out_arg_size != 0) {
     __ IncreaseFrameSize(main_out_arg_size);
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 61151fe6be..6e6d40dc92 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -428,15 +428,8 @@ void ArmVIXLJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
   asm_.StoreToOffset(kStoreWord, scratch, tr, thr_offs.Int32Value());
 }
 
-void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) {
-  if (tag_sp) {
-    UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
-    vixl32::Register reg = temps.Acquire();
-    ___ Orr(reg, sp, 0x2);
-    asm_.StoreToOffset(kStoreWord, reg, tr, thr_offs.Int32Value());
-  } else {
-    asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
-  }
+void ArmVIXLJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+  asm_.StoreToOffset(kStoreWord, sp, tr, thr_offs.Int32Value());
 }
 
 void ArmVIXLJNIMacroAssembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 980de41392..ed453ae8ff 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -70,7 +70,7 @@ class ArmVIXLJNIMacroAssembler final
   void StoreStackOffsetToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs) override;
 
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) override;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
 
   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 323a01e60b..50ca468499 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -218,13 +218,10 @@ void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs, Fr
   ___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
 }
 
-void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs, bool tag_sp) {
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   Register scratch = temps.AcquireX();
   ___ Mov(scratch, reg_x(SP));
-  if (tag_sp) {
-    ___ Orr(scratch, scratch, 0x2);
-  }
   ___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
 }
 
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index daea95ded8..2c04184848 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -72,7 +72,7 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd
-  void StoreStackPointerToThread(ThreadOffset64 tr_offs, bool tag_sp) override;
+  void StoreStackPointerToThread(ThreadOffset64 tr_offs) override;
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
   virtual void StoreStackOffsetToThread(ThreadOffset<kPointerSize> thr_offs, FrameOffset fr_offs) = 0;
 
-  // Stores stack pointer by tagging it if required so we can walk the stack. In debuggable runtimes
-  // we use tag to tell if we are using JITed code or AOT code. In non-debuggable runtimes we never
-  // use JITed code when AOT code is present. So checking for AOT code is sufficient to detect which
-  // code is being executed. We avoid tagging in non-debuggable runtimes to reduce instructions.
-  virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs, bool tag_sp) = 0;
+  virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0;
 
   virtual void StoreSpanning(FrameOffset dest,
                              ManagedRegister src,
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 55d54283fa..685f5f1b48 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -187,18 +187,8 @@ void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, Fra
   __ fs()->movl(Address::Absolute(thr_offs), scratch);
 }
 
-void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) {
-  if (tag_sp) {
-    // There is no free register, store contents onto stack and restore back later.
-    Register scratch = ECX;
-    __ movl(Address(ESP, -32), scratch);
-    __ movl(scratch, ESP);
-    __ orl(scratch, Immediate(0x2));
-    __ fs()->movl(Address::Absolute(thr_offs), scratch);
-    __ movl(scratch, Address(ESP, -32));
-  } else {
-    __ fs()->movl(Address::Absolute(thr_offs), ESP);
-  }
+void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+  __ fs()->movl(Address::Absolute(thr_offs), ESP);
 }
 
 void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index f8ce38b541..29fccfd386 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -66,7 +66,7 @@ class X86JNIMacroAssembler final : public JNIMacroAssemblerFwd
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs, bool tag_sp) override;
+  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
   __ gs()->movq(Address::Absolute(thr_offs, true), scratch);
 }
 
-void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) {
-  if (tag_sp) {
-    CpuRegister reg = GetScratchRegister();
-    __ movq(reg, CpuRegister(RSP));
-    __ orq(reg, Immediate(0x2));
-    __ gs()->movq(Address::Absolute(thr_offs, true), reg);
-  } else {
-    __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
-  }
+void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
+  __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
 }
 
 void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index feaf27e53d..e080f0b3df 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -67,7 +67,7 @@ class X86_64JNIMacroAssembler final : public JNIMacroAssemblerFwd
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
+  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
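
The comments removed by this revert describe the scheme being undone: when walking the stack, the top quick frame has no PC, and in a debuggable runtime JITed code can coexist with AOT code, so the JNI stub stores the stack pointer with bit 0x2 OR'ed in to mark JITed code, and the reader strips that bit to recover the real SP. Below is a minimal standalone C++ sketch of that idea, not part of the patch; FakeThread, StoreTopOfManagedStack, kJitTagBit and the other names are invented for illustration and are not ART's API.

// Standalone illustration of SP tagging: the low bit 0x2 marks a saved stack
// pointer as belonging to JIT-compiled code; stripping it recovers the real SP.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

constexpr uintptr_t kJitTagBit = 0x2;  // Bit OR'ed into the saved SP for JITed code.

struct FakeThread {
  // Plays the role of the per-thread "top of managed stack" slot saved on native transitions.
  uintptr_t top_of_managed_stack = 0;
};

// What the stub does before calling out: save SP, tagging it only when the code
// was JIT-compiled in a debuggable runtime; otherwise the plain SP is stored.
void StoreTopOfManagedStack(FakeThread* self, uintptr_t sp, bool jit_debuggable) {
  self->top_of_managed_stack = jit_debuggable ? (sp | kJitTagBit) : sp;
}

// What a stack walker does later: strip the tag to get the real SP and use the
// tag to decide whether the top frame belongs to JIT or AOT code.
uintptr_t GetTopSp(const FakeThread& self) { return self.top_of_managed_stack & ~kJitTagBit; }
bool TopFrameIsJited(const FakeThread& self) { return (self.top_of_managed_stack & kJitTagBit) != 0; }

}  // namespace

int main() {
  alignas(16) char fake_frame[64];  // Stack pointers are aligned, so bit 0x2 is free for tagging.
  const uintptr_t sp = reinterpret_cast<uintptr_t>(fake_frame);

  FakeThread thread;
  StoreTopOfManagedStack(&thread, sp, /*jit_debuggable=*/true);
  assert(TopFrameIsJited(thread));
  assert(GetTopSp(thread) == sp);  // Untagging recovers the original stack pointer.

  StoreTopOfManagedStack(&thread, sp, /*jit_debuggable=*/false);
  assert(!TopFrameIsJited(thread));
  assert(GetTopSp(thread) == sp);

  std::printf("both stores resolve to the same SP: 0x%zx\n", static_cast<size_t>(sp));
  return 0;
}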