summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc32
-rw-r--r--compiler/optimizing/intrinsics_utils.h5
-rw-r--r--test/082-inline-execute/src/Main.java1
3 files changed, 37 insertions, 1 deletion
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 0419625cff..b02b32ea8c 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -239,6 +239,15 @@ static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invok
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
+static void CreateIntIntToIntSlowPathCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Force kOutputOverlap; see comments in IntrinsicSlowPath::EmitNativeCode.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
static void GenReverseBytes(LocationSummary* locations,
DataType::Type type,
MacroAssembler* masm) {
@@ -3700,8 +3709,29 @@ void IntrinsicCodeGeneratorARM64::VisitVarHandleSet(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
+ CreateIntIntToIntSlowPathCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitIntegerDivideUnsigned(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ MacroAssembler* masm = GetVIXLAssembler();
+ Register dividend = WRegisterFrom(locations->InAt(0));
+ Register divisor = WRegisterFrom(locations->InAt(1));
+ Register out = WRegisterFrom(locations->Out());
+
+ // Check if divisor is zero, bail to managed implementation to handle.
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Cbz(divisor, slow_path->GetEntryLabel());
+
+ __ Udiv(out, dividend, divisor);
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(ARM64, IntegerDivideUnsigned)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index 8c9dd14b19..b4ef5ddde3 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -78,6 +78,11 @@ class IntrinsicSlowPath : public TSlowPathCode {
Location out = invoke_->GetLocations()->Out();
if (out.IsValid()) {
DCHECK(out.IsRegisterKind()); // TODO: Replace this when we support output in memory.
+ // We want to double-check that we don't overwrite a live register with the return
+ // value.
+ // Note: For the possible kNoOutputOverlap case we can't simply remove the OUT register
+ // from the GetLiveRegisters() - theoretically it might be needed after the return from
+ // the slow path.
DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->OverlapsRegisters(out));
codegen->MoveFromReturnRegister(out, invoke_->GetType());
}
diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java
index 338a3ddf00..b565985ea7 100644
--- a/test/082-inline-execute/src/Main.java
+++ b/test/082-inline-execute/src/Main.java
@@ -1388,6 +1388,7 @@ public class Main {
Assert.assertEquals(Integer.divideUnsigned(100000, -1), 0);
Assert.assertEquals(Integer.divideUnsigned(Integer.MAX_VALUE, -1), 0);
Assert.assertEquals(Integer.divideUnsigned(-2, -1), 0);
+ Assert.assertEquals(Integer.divideUnsigned(-1, -2), 1);
Assert.assertEquals(Integer.divideUnsigned(-173448, 13), 330368757);
Assert.assertEquals(Integer.divideUnsigned(Integer.MIN_VALUE, 2), (1 << 30));
Assert.assertEquals(Integer.divideUnsigned(-1, Integer.MIN_VALUE), 1);