Make suspend checks test specific flags.
Make 20 bits in `Thread.tls32_.state_and_flags` available
for new uses.
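For orientation, here is a minimal sketch of the layout this enables and
of the mask the fast paths below now test. The flag and function names
match this CL, but the exact bit positions are illustrative; thread.h
has the authoritative definitions.

```cpp
#include <cstdint>

// Sketch only: request flags live in the low bits and the ThreadState
// in the top 8 bits, leaving bits 4-23 (20 bits) free for new uses.
enum class ThreadFlag : uint32_t {
  kSuspendRequest         = 1u << 0,
  kCheckpointRequest      = 1u << 1,
  kEmptyCheckpointRequest = 1u << 2,
  kActiveSuspendBarrier   = 1u << 3,
};

// The mask tested by the rewritten suspend-check fast paths.
constexpr uint32_t SuspendOrCheckpointRequestFlags() {
  return static_cast<uint32_t>(ThreadFlag::kSuspendRequest) |
         static_cast<uint32_t>(ThreadFlag::kCheckpointRequest) |
         static_cast<uint32_t>(ThreadFlag::kEmptyCheckpointRequest);
}
```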
Code size changes per suspend check:
- x86/x86-64: +3B (CMP r/m32, imm8 -> TEST r/m32, imm32; TEST has no imm8 form)
- arm: none (CMP -> TST; with a high register both are 32-bit encodings)
- arm64: +4B (CBNZ/CBZ -> TST+B.NE/B.EQ)
Note: Using implicit suspend checks on arm64 would sidestep
this code size increase entirely.
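In C++ terms, each rewritten fast path below is roughly equivalent to
the following sketch; `LoadThreadFlags` and `GoToSuspendSlowPath` are
stand-ins for the per-architecture load and branch, and the mask value
is illustrative.

```cpp
#include <cstdint>

uint32_t LoadThreadFlags();   // stand-in for the per-arch load from the thread register
void GoToSuspendSlowPath();   // stand-in for the branch to the SuspendCheck slow path

// Illustrative value of Thread::SuspendOrCheckpointRequestFlags().
constexpr uint32_t kSuspendOrCheckpointRequestFlags = 0b111u;

void SuspendCheckFastPath() {
  // Old: compare the whole word against zero; correct only while
  // ThreadState::kRunnable == 0 (the static_asserts removed below)
  // and while no unrelated bits live in state_and_flags.
  // New: test just the request flags, so the remaining bits can hold
  // arbitrary data without triggering spurious slow paths.
  if ((LoadThreadFlags() & kSuspendOrCheckpointRequestFlags) != 0u) {
    GoToSuspendSlowPath();
  }
}
```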
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Bug: 172332525
Change-Id: If5b0be0183efba3f397596b22e03a8b7afb87f85
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 933e270..775bfcf 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1994,12 +1994,12 @@
Register temp = temps.AcquireW();
__ Ldr(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64PointerSize>().SizeValue()));
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
+ __ Tst(temp, Thread::SuspendOrCheckpointRequestFlags());
if (successor == nullptr) {
- __ Cbnz(temp, slow_path->GetEntryLabel());
+ __ B(ne, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
} else {
- __ Cbz(temp, codegen_->GetLabelOf(successor));
+ __ B(eq, codegen_->GetLabelOf(successor));
__ B(slow_path->GetEntryLabel());
// slow_path will return to GetLabelOf(successor).
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index c514c22..841d59b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7168,12 +7168,12 @@
vixl32::Register temp = temps.Acquire();
GetAssembler()->LoadFromOffset(
kLoadWord, temp, tr, Thread::ThreadFlagsOffset<kArmPointerSize>().Int32Value());
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
+ __ Tst(temp, Thread::SuspendOrCheckpointRequestFlags());
if (successor == nullptr) {
- __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
+ __ B(ne, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
} else {
- __ CompareAndBranchIfZero(temp, codegen_->GetLabelOf(successor));
+ __ B(eq, codegen_->GetLabelOf(successor));
__ B(slow_path->GetEntryLabel());
}
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f19eaae..5434407 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -6683,14 +6683,13 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
- __ fs()->cmpl(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>().Int32Value()),
- Immediate(0));
+ __ fs()->testl(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>().Int32Value()),
+ Immediate(Thread::SuspendOrCheckpointRequestFlags()));
if (successor == nullptr) {
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotZero, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
} else {
- __ j(kEqual, codegen_->GetLabelOf(successor));
+ __ j(kZero, codegen_->GetLabelOf(successor));
__ jmp(slow_path->GetEntryLabel());
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index b0bdffe..fa61c67 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6018,15 +6018,14 @@
DCHECK_EQ(slow_path->GetSuccessor(), successor);
}
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
- __ gs()->cmpl(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip= */ true),
- Immediate(0));
+ __ gs()->testl(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
+ /* no_rip= */ true),
+ Immediate(Thread::SuspendOrCheckpointRequestFlags()));
if (successor == nullptr) {
- __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ j(kNotZero, slow_path->GetEntryLabel());
__ Bind(slow_path->GetReturnLabel());
} else {
- __ j(kEqual, codegen_->GetLabelOf(successor));
+ __ j(kZero, codegen_->GetLabelOf(successor));
__ jmp(slow_path->GetEntryLabel());
}
}
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 2b3c2dd..3d45abd 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -1058,8 +1058,7 @@
scratch,
tr,
Thread::ThreadFlagsOffset<kArmPointerSize>().Int32Value());
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
- ___ Cmp(scratch, 0);
+ ___ Tst(scratch, Thread::SuspendOrCheckpointRequestFlags());
___ BPreferNear(ne, ArmVIXLJNIMacroLabel::Cast(label)->AsArm());
// TODO: think about using CBNZ here.
}
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index e2d29fd..a505db0 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -893,8 +893,8 @@
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
Register scratch = temps.AcquireW();
___ Ldr(scratch, MEM_OP(reg_x(TR), Thread::ThreadFlagsOffset<kArm64PointerSize>().Int32Value()));
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
- ___ Cbnz(scratch, Arm64JNIMacroLabel::Cast(label)->AsArm64());
+ ___ Tst(scratch, Thread::SuspendOrCheckpointRequestFlags());
+ ___ B(ne, Arm64JNIMacroLabel::Cast(label)->AsArm64());
}

void Arm64JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 904cca4..4ba3aa1 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -590,9 +590,9 @@
}

void X86JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
- __ fs()->cmpl(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>()), Immediate(0));
- __ j(kNotEqual, X86JNIMacroLabel::Cast(label)->AsX86());
+ __ fs()->testl(Address::Absolute(Thread::ThreadFlagsOffset<kX86PointerSize>()),
+ Immediate(Thread::SuspendOrCheckpointRequestFlags()));
+ __ j(kNotZero, X86JNIMacroLabel::Cast(label)->AsX86());
}

void X86JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 2fb2797..de99e74 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -672,10 +672,9 @@
}

void X86_64JNIMacroAssembler::SuspendCheck(JNIMacroLabel* label) {
- static_assert(static_cast<std::underlying_type_t<ThreadState>>(ThreadState::kRunnable) == 0u);
- __ gs()->cmpl(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>(), true),
- Immediate(0));
- __ j(kNotEqual, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
+ __ gs()->testl(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>(), true),
+ Immediate(Thread::SuspendOrCheckpointRequestFlags()));
+ __ j(kNotZero, X86_64JNIMacroLabel::Cast(label)->AsX86_64());
}

void X86_64JNIMacroAssembler::ExceptionPoll(JNIMacroLabel* label) {