No need to lock when calling Thread.interrupted.

Also intrinsify the Thread.interrupted call. The rationale behind this
optimization is that the interrupted flag can only hold two values, and only
the thread itself can set it to false, so a racy read followed by a
conditional clear is safe (see the sketch below).

Test: libcore, jdwp, run-tests, 050-sync-test
Change-Id: I5c2b43bf872ba0bfafcb54b2cfcd19181864bc4c
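
For reference, a minimal C++ sketch of the lock-free fast path that the
backends below emit. ThreadSelf and its interrupted field are illustrative
stand-ins, not the runtime's actual Thread declarations; the real flag lives
at Thread::InterruptedOffset() from the thread register (TR on ARM/ARM64,
fs:/gs: on x86). Each backend picks its own fences (dmb ISH on ARM,
Ldar/Stlr on ARM64, a fence after the store on x86); seq_cst is used here
as a conservative stand-in.

    #include <atomic>
    #include <cstdint>

    struct ThreadSelf {
      std::atomic<int32_t> interrupted{0};  // hypothetical flag field

      // Lock-free equivalent of the intrinsified Thread.interrupted().
      bool Interrupted() {
        // Racy read: if the flag is clear we perform no store, so a
        // concurrent interrupt can never be lost on this path.
        if (interrupted.load(std::memory_order_acquire) == 0) {
          return false;
        }
        // The flag was set, and only this thread ever stores false. A
        // concurrent re-interrupt can at worst coalesce into the true
        // result returned here, which interrupted() permits.
        interrupted.store(0, std::memory_order_seq_cst);
        return true;
      }
    };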
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 69cf9a1..1df884e 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2753,6 +2753,27 @@
}
}
+void IntrinsicLocationsBuilderARM::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM::VisitThreadInterrupted(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+ int32_t offset = Thread::InterruptedOffset<kArmPointerSize>().Int32Value();
+ __ LoadFromOffset(kLoadWord, out, TR, offset);
+ Label done;
+ __ CompareAndBranchIfZero(out, &done);
+ __ dmb(ISH);
+ __ LoadImmediate(IP, 0);
+ __ StoreToOffset(kStoreWord, IP, TR, offset);
+ __ dmb(ISH);
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble)
UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat)
UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 65a8222..b511c5a 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3033,6 +3033,28 @@
}
}
+void IntrinsicLocationsBuilderARM64::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitThreadInterrupted(HInvoke* invoke) {
+ MacroAssembler* masm = GetVIXLAssembler();
+ Register out = RegisterFrom(invoke->GetLocations()->Out(), Primitive::kPrimInt);
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+
+ __ Add(temp, tr, Thread::InterruptedOffset<kArm64PointerSize>().Int32Value());
+ __ Ldar(out.W(), MemOperand(temp));
+
+ vixl::aarch64::Label done;
+ __ Cbz(out.W(), &done);
+ __ Stlr(wzr, MemOperand(temp));
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit)
UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit)
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 356d5bc..2d9781a 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3157,6 +3157,29 @@
}
}
+void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
+ ArmVIXLAssembler* assembler = GetAssembler();
+ vixl32::Register out = RegisterFrom(invoke->GetLocations()->Out());
+ int32_t offset = Thread::InterruptedOffset<kArmPointerSize>().Int32Value();
+ __ Ldr(out, MemOperand(tr, offset));
+ UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ vixl32::Label done;
+ __ CompareAndBranchIfZero(out, &done, /* far_target */ false);
+ __ Dmb(vixl32::ISH);
+ __ Mov(temp, 0);
+ assembler->StoreToOffset(kStoreWord, temp, tr, offset);
+ __ Dmb(vixl32::ISH);
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index abf5b12..3077e98 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -3248,6 +3248,8 @@
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS, ThreadInterrupted)
+
UNREACHABLE_INTRINSICS(MIPS)
#undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 9dce59b..e5ee845 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2664,6 +2664,8 @@
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
+UNIMPLEMENTED_INTRINSIC(MIPS64, ThreadInterrupted)
+
UNREACHABLE_INTRINSICS(MIPS64)
#undef __
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8e45747..57adcc3 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3407,6 +3407,27 @@
}
}
+void IntrinsicLocationsBuilderX86::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86::VisitThreadInterrupted(HInvoke* invoke) {
+ X86Assembler* assembler = GetAssembler();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+ Address address = Address::Absolute(Thread::InterruptedOffset<kX86PointerSize>().Int32Value());
+ NearLabel done;
+ __ fs()->movl(out, address);
+ __ testl(out, out);
+ __ j(kEqual, &done);
+ __ fs()->movl(address, Immediate(0));
+ codegen_->MemoryFence();
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index af0b193..773383e 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -3085,6 +3085,27 @@
}
}
+void IntrinsicLocationsBuilderX86_64::VisitThreadInterrupted(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
+  Address address = Address::Absolute(
+      Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+ NearLabel done;
+ __ gs()->movl(out, address);
+ __ testl(out, out);
+ __ j(kEqual, &done);
+ __ gs()->movl(address, Immediate(0));
+ codegen_->MemoryFence();
+ __ Bind(&done);
+}
+
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)