ARM: Fix breaking changes from recent VIXL update.

Test: test-art-target, test-art-host

Change-Id: I31de1e2075226542b9919f6ca054fd5bf237e690
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 1d8ea09..99b8b5d 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1458,30 +1458,38 @@
   locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
 }
 
+// Forward declaration.
+//
+// The ART build system imposes a size limit (deviceFrameSizeLimit) on the stack frames
+// generated by the compiler for every C++ function. If this function were inlined into
+// IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo, that limit would be exceeded,
+// resulting in a build failure. That is why the NO_INLINE attribute is used.
+static void NO_INLINE GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
+                                                  HInvoke* invoke,
+                                                  vixl32::Label* end,
+                                                  vixl32::Label* different_compression);
+
 void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
   ArmVIXLAssembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
 
-  vixl32::Register str = InputRegisterAt(invoke, 0);
-  vixl32::Register arg = InputRegisterAt(invoke, 1);
-  vixl32::Register out = OutputRegister(invoke);
+  const vixl32::Register str = InputRegisterAt(invoke, 0);
+  const vixl32::Register arg = InputRegisterAt(invoke, 1);
+  const vixl32::Register out = OutputRegister(invoke);
 
-  vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));
-  vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
-  vixl32::Register temp2 = RegisterFrom(locations->GetTemp(2));
+  const vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));
+  const vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
+  const vixl32::Register temp2 = RegisterFrom(locations->GetTemp(2));
   vixl32::Register temp3;
   if (mirror::kUseStringCompression) {
     temp3 = RegisterFrom(locations->GetTemp(3));
   }
 
-  vixl32::Label loop;
-  vixl32::Label find_char_diff;
   vixl32::Label end;
   vixl32::Label different_compression;
 
   // Get offsets of count and value fields within a string object.
   const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
 
   // Note that the null check must have been done earlier.
   DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
@@ -1546,6 +1554,38 @@
     __ add(ne, temp0, temp0, temp0);
   }
 
+
+  GenerateStringCompareToLoop(assembler, invoke, &end, &different_compression);
+
+  __ Bind(&end);
+
+  if (can_slow_path) {
+    __ Bind(slow_path->GetExitLabel());
+  }
+}
+
+static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
+                                        HInvoke* invoke,
+                                        vixl32::Label* end,
+                                        vixl32::Label* different_compression) {
+  LocationSummary* locations = invoke->GetLocations();
+
+  const vixl32::Register str = InputRegisterAt(invoke, 0);
+  const vixl32::Register arg = InputRegisterAt(invoke, 1);
+  const vixl32::Register out = OutputRegister(invoke);
+
+  const vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));
+  const vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
+  const vixl32::Register temp2 = RegisterFrom(locations->GetTemp(2));
+  vixl32::Register temp3;
+  if (mirror::kUseStringCompression) {
+    temp3 = RegisterFrom(locations->GetTemp(3));
+  }
+
+  vixl32::Label loop;
+  vixl32::Label find_char_diff;
+
+  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
   // Store offset of string value in preparation for comparison loop.
   __ Mov(temp1, value_offset);
 
@@ -1577,12 +1617,12 @@
   // With string compression, we have compared 8 bytes, otherwise 4 chars.
   __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
   __ B(hi, &loop, /* far_target */ false);
-  __ B(&end);
+  __ B(end);
 
   __ Bind(&find_char_diff_2nd_cmp);
   if (mirror::kUseStringCompression) {
     __ Subs(temp0, temp0, 4);  // 4 bytes previously compared.
-    __ B(ls, &end, /* far_target */ false);  // Was the second comparison fully beyond the end?
+    __ B(ls, end, /* far_target */ false);  // Was the second comparison fully beyond the end?
   } else {
     // Without string compression, we can start treating temp0 as signed
     // and rely on the signed comparison below.
@@ -1610,7 +1650,7 @@
   // the remaining string data, so just return length diff (out).
   // The comparison is unsigned for string compression, otherwise signed.
   __ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
-  __ B((mirror::kUseStringCompression ? ls : le), &end, /* far_target */ false);
+  __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false);
 
   // Extract the characters and calculate the difference.
   if (mirror::kUseStringCompression) {
@@ -1637,8 +1677,8 @@
   temps.Release(temp_reg);
 
   if (mirror::kUseStringCompression) {
-    __ B(&end);
-    __ Bind(&different_compression);
+    __ B(end);
+    __ Bind(different_compression);
 
     // Comparison for different compression style.
     const size_t c_char_size = DataType::Size(DataType::Type::kInt8);
@@ -1680,7 +1720,7 @@
     __ B(ne, &different_compression_diff, /* far_target */ false);
     __ Subs(temp0, temp0, 2);
     __ B(hi, &different_compression_loop, /* far_target */ false);
-    __ B(&end);
+    __ B(end);
 
     // Calculate the difference.
     __ Bind(&different_compression_diff);
@@ -1698,12 +1738,6 @@
     __ it(cc);
     __ rsb(cc, out, out, 0);
   }
-
-  __ Bind(&end);
-
-  if (can_slow_path) {
-    __ Bind(slow_path->GetExitLabel());
-  }
 }
 
 // The cut off for unrolling the loop in String.equals() intrinsic for const strings.
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 34849cd..d6b24da 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -483,13 +483,9 @@
 
 void ArmVIXLMacroAssembler::B(vixl32::Label* label) {
   if (!label->IsBound()) {
-    // Try to use 16-bit T2 encoding of B instruction.
+    // Try to use a 16-bit encoding of the B instruction.
     DCHECK(OutsideITBlock());
-    ExactAssemblyScope guard(this,
-                             k16BitT32InstructionSizeInBytes,
-                             CodeBufferCheckScope::kMaximumSize);
-    b(al, Narrow, label);
-    AddBranchLabel(label);
+    BPreferNear(label);
     return;
   }
   MacroAssembler::B(label);
@@ -497,18 +493,11 @@
 
 void ArmVIXLMacroAssembler::B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target) {
   if (!label->IsBound() && !is_far_target) {
-    // Try to use 16-bit T2 encoding of B instruction.
+    // Try to use a 16-bit encoding of the B instruction.
     DCHECK(OutsideITBlock());
-    ExactAssemblyScope guard(this,
-                             k16BitT32InstructionSizeInBytes,
-                             CodeBufferCheckScope::kMaximumSize);
-    b(cond, Narrow, label);
-    AddBranchLabel(label);
+    BPreferNear(cond, label);
     return;
   }
-  // To further reduce the Bcc encoding size and use 16-bit T1 encoding,
-  // we can provide a hint to this function: i.e. far_target=false.
-  // By default this function uses 'EncodingSizeType::Best' which generates 32-bit T3 encoding.
   MacroAssembler::B(cond, label);
 }
 
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 0bae4d4..2b3e979 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -624,14 +624,8 @@
                       Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
 
   ___ Cmp(scratch.AsVIXLRegister(), 0);
-  {
-    ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
-                             vixl32::kMaxInstructionSizeInBytes,
-                             CodeBufferCheckScope::kMaximumSize);
-    vixl32::Label* label = exception_blocks_.back()->Entry();
-    ___ b(ne, Narrow, label);
-    ___ AddBranchLabel(label);
-  }
+  vixl32::Label* label = exception_blocks_.back()->Entry();
+  ___ BPreferNear(ne, label);
   // TODO: think about using CBNZ here.
 }