Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator_mips.cc    | 19
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  | 19
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc        | 22
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc      | 22
4 files changed, 74 insertions(+), 8 deletions(-)
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 60c7a8f2ef..d2cfa4f187 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -461,6 +461,10 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
 
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
 
+  HBasicBlock* GetSuccessor() const {
+    return successor_;
+  }
+
  private:
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
@@ -1996,8 +2000,19 @@ void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATT
 void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                         HBasicBlock* successor) {
   SuspendCheckSlowPathMIPS* slow_path =
-      new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
-  codegen_->AddSlowPath(slow_path);
+      down_cast<SuspendCheckSlowPathMIPS*>(instruction->GetSlowPath());
+
+  if (slow_path == nullptr) {
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
+    instruction->SetSlowPath(slow_path);
+    codegen_->AddSlowPath(slow_path);
+    if (successor != nullptr) {
+      DCHECK(successor->IsLoopHeader());
+    }
+  } else {
+    DCHECK_EQ(slow_path->GetSuccessor(), successor);
+  }
 
   __ LoadFromOffset(kLoadUnsignedHalfword,
                     TMP,
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5292638c07..28ca7cb94a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -415,6 +415,10 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
 
+  HBasicBlock* GetSuccessor() const {
+    return successor_;
+  }
+
  private:
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
@@ -1834,8 +1838,19 @@ void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind A
 void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                           HBasicBlock* successor) {
   SuspendCheckSlowPathMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
-  codegen_->AddSlowPath(slow_path);
+      down_cast<SuspendCheckSlowPathMIPS64*>(instruction->GetSlowPath());
+
+  if (slow_path == nullptr) {
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
+    instruction->SetSlowPath(slow_path);
+    codegen_->AddSlowPath(slow_path);
+    if (successor != nullptr) {
+      DCHECK(successor->IsLoopHeader());
+    }
+  } else {
+    DCHECK_EQ(slow_path->GetSuccessor(), successor);
+  }
 
   __ LoadFromOffset(kLoadUnsignedHalfword,
                     TMP,
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 4a8fbf26ce..98ccce79e4 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -3203,6 +3203,26 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
   }
 }
 
+// static boolean java.lang.Thread.interrupted()
+void IntrinsicLocationsBuilderMIPS::VisitThreadInterrupted(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitThreadInterrupted(HInvoke* invoke) {
+  MipsAssembler* assembler = GetAssembler();
+  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+  int32_t offset = Thread::InterruptedOffset<kMipsPointerSize>().Int32Value();
+  __ LoadFromOffset(kLoadWord, out, TR, offset);
+  MipsLabel done;
+  __ Beqz(out, &done);
+  __ Sync(0);
+  __ StoreToOffset(kStoreWord, ZERO, TR, offset);
+  __ Sync(0);
+  __ Bind(&done);
+}
+
 // Unimplemented intrinsics.
 
 UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
@@ -3232,8 +3252,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
 UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
 
-UNIMPLEMENTED_INTRINSIC(MIPS, ThreadInterrupted)
-
 UNREACHABLE_INTRINSICS(MIPS)
 
 #undef __
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 512fb68fad..f62913430e 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2586,6 +2586,26 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
   }
 }
 
+// static boolean java.lang.Thread.interrupted()
+void IntrinsicLocationsBuilderMIPS64::VisitThreadInterrupted(HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitThreadInterrupted(HInvoke* invoke) {
+  Mips64Assembler* assembler = GetAssembler();
+  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
+  int32_t offset = Thread::InterruptedOffset<kMips64PointerSize>().Int32Value();
+  __ LoadFromOffset(kLoadWord, out, TR, offset);
+  Mips64Label done;
+  __ Beqzc(out, &done);
+  __ Sync(0);
+  __ StoreToOffset(kStoreWord, ZERO, TR, offset);
+  __ Sync(0);
+  __ Bind(&done);
+}
+
 UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
 
 UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
@@ -2605,8 +2625,6 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
 UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
 
-UNIMPLEMENTED_INTRINSIC(MIPS64, ThreadInterrupted)
-
 UNREACHABLE_INTRINSICS(MIPS64)
 
 #undef __
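For readers following the new intrinsic: the emitted sequence is a straight-line read-and-clear of the thread's interrupted flag, with a conditional skip so the common uninterrupted case costs only the load. Below is a minimal C++ sketch of the same logic; the ThreadSelf type and field name are illustrative stand-ins for the per-thread state that Thread::InterruptedOffset<>() actually addresses through the TR register, not part of the diff.

#include <atomic>
#include <cstdint>

// Illustrative stand-in for the per-thread state reached through TR + offset.
struct ThreadSelf {
  std::atomic<int32_t> interrupted;
};

// Mirrors the emitted sequence: load the flag; only if it was set, clear it
// between two full fences (the two Sync(0) instructions); return the old value.
int32_t ThreadInterruptedSketch(ThreadSelf* self) {
  int32_t out = self->interrupted.load(std::memory_order_relaxed);  // LoadFromOffset(kLoadWord, out, TR, offset)
  if (out != 0) {                                           // Beqz/Beqzc skips the clear when the flag is zero
    std::atomic_thread_fence(std::memory_order_seq_cst);    // Sync(0)
    self->interrupted.store(0, std::memory_order_relaxed);  // StoreToOffset(kStoreWord, ZERO, TR, offset)
    std::atomic_thread_fence(std::memory_order_seq_cst);    // Sync(0)
  }
  return out;  // non-zero old value means Thread.interrupted() returns true
}

Guarding the store behind the branch is what keeps the fast path barrier-free; an unconditional clear would put two Sync(0) barriers on every call.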
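The code-generator half of the change is independent of the intrinsic: GenerateSuspendCheck can be reached more than once for the same HSuspendCheck (for example, from multiple back edges of a loop), and caching the slow path on the instruction avoids allocating and emitting a duplicate. A schematic C++ sketch of the get-or-create pattern follows, using simplified stand-in types rather than ART's real HSuspendCheck and SuspendCheckSlowPathMIPS classes.

#include <cassert>

struct BasicBlock {};

struct SlowPath {
  const BasicBlock* successor;  // block to branch to after the check; may be null
};

struct SuspendCheck {
  SlowPath* slow_path = nullptr;  // cached on the instruction itself
};

SlowPath* GetOrCreateSlowPath(SuspendCheck* check, const BasicBlock* successor) {
  SlowPath* path = check->slow_path;
  if (path == nullptr) {
    path = new SlowPath{successor};  // first visit: allocate exactly once
    check->slow_path = path;
  } else {
    assert(path->successor == successor);  // later visits must agree, cf. the DCHECK_EQ in the diff
  }
  return path;
}

The real code allocates from the code generator's scoped arena (GetScopedAllocator()) rather than the heap, so the cached object's lifetime is tied to the compilation and nothing is freed explicitly.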