Add Math.multiplyHigh intrinsic

Test: ./art/test/testrunner/testrunner.py --target --optimizing --64 -t 082-inline-execute
Test: ./art/test/testrunner/testrunner.py --host --optimizing --64 -t 082-inline-execute
Change-Id: I4b6cafa8b9e513eca7c5c139440024d87a7ef758
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index d58d8f4..d0c64c2 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3856,6 +3856,23 @@
   GenerateDivideUnsigned(invoke, codegen_);
 }
 
+void IntrinsicLocationsBuilderARM64::VisitMathMultiplyHigh(HInvoke* invoke) {
+  CreateIntIntToIntLocations(allocator_, invoke);  // Two core-register inputs, one core-register output.
+}
+
+void IntrinsicCodeGeneratorARM64::VisitMathMultiplyHigh(HInvoke* invoke) {
+  LocationSummary* locations = invoke->GetLocations();
+  MacroAssembler* masm = codegen_->GetVIXLAssembler();
+  DataType::Type type = invoke->GetType();
+  DCHECK(type == DataType::Type::kInt64);  // Only the long overload of Math.multiplyHigh is intrinsified.
+
+  Register x = RegisterFrom(locations->InAt(0), type);
+  Register y = RegisterFrom(locations->InAt(1), type);
+  Register out = RegisterFrom(locations->Out(), type);
+
+  __ Smulh(out, x, y);  // SMULH: signed high 64 bits of the 128-bit product x * y.
+}
+
 class VarHandleSlowPathARM64 : public IntrinsicSlowPathARM64 {
  public:
   VarHandleSlowPathARM64(HInvoke* invoke, std::memory_order order)