Small changes in ARM and x86-64 SystemArrayCopy intrinsics.

Have these intrinsics share a more uniform style with the
ARM64 SystemArrayCopy intrinsic.

Also make some related improvements in:
- art::IntrinsicOptimizations
- art::arm64::GenSystemArrayCopyAddresses

Change-Id: Ieeb224795229580f8e5f7219c586d04786d8c705
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 3da8285..863dd1c 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -124,11 +124,12 @@
 void Set##name() { SetBit(k##name); }                  \
 bool Get##name() const { return IsBitSet(k##name); }   \
 private:                                               \
-static constexpr int k##name = bit
+static constexpr size_t k##name = bit
 
 class IntrinsicOptimizations : public ValueObject {
  public:
-  explicit IntrinsicOptimizations(HInvoke* invoke) : value_(invoke->GetIntrinsicOptimizations()) {}
+  explicit IntrinsicOptimizations(HInvoke* invoke)
+      : value_(invoke->GetIntrinsicOptimizations()) {}
   explicit IntrinsicOptimizations(const HInvoke& invoke)
       : value_(invoke.GetIntrinsicOptimizations()) {}
 
@@ -138,15 +139,17 @@
 
  protected:
   bool IsBitSet(uint32_t bit) const {
+    DCHECK_LT(bit, sizeof(uint32_t) * kBitsPerByte);
     return (*value_ & (1 << bit)) != 0u;
   }
 
   void SetBit(uint32_t bit) {
-    *(const_cast<uint32_t*>(value_)) |= (1 << bit);
+    DCHECK_LT(bit, sizeof(uint32_t) * kBitsPerByte);
+    *(const_cast<uint32_t* const>(value_)) |= (1 << bit);
   }
 
  private:
-  const uint32_t *value_;
+  const uint32_t* const value_;
 
   DISALLOW_COPY_AND_ASSIGN(IntrinsicOptimizations);
 };
@@ -158,7 +161,7 @@
 void Set##name() { SetBit(k##name); }                                 \
 bool Get##name() const { return IsBitSet(k##name); }                  \
 private:                                                              \
-static constexpr int k##name = bit + kNumberOfGenericOptimizations
+static constexpr size_t k##name = bit + kNumberOfGenericOptimizations
 
 class StringEqualsOptimizations : public IntrinsicOptimizations {
  public:
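
Note on the intrinsics.h hunks above: the optimization flags are a bit set
stored inline in the HInvoke node, and the new DCHECK_LT guards against
shifting past the 32-bit storage. Below is a minimal, self-contained sketch
of the same pattern; the names are illustrative (not ART's), and it uses a
plain mutable pointee where the real class keeps a pointer-to-const and
const_casts in SetBit:

  #include <cassert>
  #include <cstdint>

  class FlagSet {
   public:
    explicit FlagSet(uint32_t* storage) : value_(storage) {}

    bool IsBitSet(uint32_t bit) const {
      assert(bit < sizeof(uint32_t) * 8u);  // same guard as the new DCHECK_LT
      return (*value_ & (1u << bit)) != 0u;
    }

    void SetBit(uint32_t bit) {
      assert(bit < sizeof(uint32_t) * 8u);
      *value_ |= (1u << bit);
    }

   private:
    uint32_t* const value_;  // const pointer, as in the new declaration
  };
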
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 4b94c94..86b7bc1 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1394,15 +1394,13 @@
   SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
   codegen_->AddSlowPath(slow_path);
 
-  Label ok;
+  Label conditions_on_positions_validated;
   SystemArrayCopyOptimizations optimizations(invoke);
 
-  if (!optimizations.GetDestinationIsSource()) {
-    if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
-      __ cmp(src, ShifterOperand(dest));
-    }
+  if (!optimizations.GetDestinationIsSource() &&
+      (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
+    __ cmp(src, ShifterOperand(dest));
   }
-
   // If source and destination are the same, we go to slow path if we need to do
   // forward copying.
   if (src_pos.IsConstant()) {
@@ -1413,14 +1411,14 @@
              || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
     } else {
       if (!optimizations.GetDestinationIsSource()) {
-        __ b(&ok, NE);
+        __ b(&conditions_on_positions_validated, NE);
       }
       __ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
       __ b(slow_path->GetEntryLabel(), GT);
     }
   } else {
     if (!optimizations.GetDestinationIsSource()) {
-      __ b(&ok, NE);
+      __ b(&conditions_on_positions_validated, NE);
     }
     if (dest_pos.IsConstant()) {
       int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
@@ -1431,7 +1429,7 @@
     __ b(slow_path->GetEntryLabel(), LT);
   }
 
-  __ Bind(&ok);
+  __ Bind(&conditions_on_positions_validated);
 
   if (!optimizations.GetSourceIsNotNull()) {
     // Bail out if the source is null.
@@ -1482,7 +1480,7 @@
     bool did_unpoison = false;
     if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
         !optimizations.GetSourceIsNonPrimitiveArray()) {
-      // One or two of the references need to be unpoisoned. Unpoisoned them
+      // One or two of the references need to be unpoisoned. Unpoison them
       // both to make the identity check valid.
       __ MaybeUnpoisonHeapReference(temp1);
       __ MaybeUnpoisonHeapReference(temp2);
@@ -1491,6 +1489,7 @@
 
     if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
       // Bail out if the destination is not a non primitive array.
+      // /* HeapReference<Class> */ temp3 = temp1->component_type_
       __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
       __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
       __ MaybeUnpoisonHeapReference(temp3);
@@ -1501,7 +1500,7 @@
 
     if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       // Bail out if the source is not a non primitive array.
-      // Bail out if the destination is not a non primitive array.
+      // /* HeapReference<Class> */ temp3 = temp2->component_type_
       __ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
       __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
       __ MaybeUnpoisonHeapReference(temp3);
@@ -1518,8 +1517,10 @@
       if (!did_unpoison) {
         __ MaybeUnpoisonHeapReference(temp1);
       }
+      // /* HeapReference<Class> */ temp1 = temp1->component_type_
       __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
       __ MaybeUnpoisonHeapReference(temp1);
+      // /* HeapReference<Class> */ temp1 = temp1->super_class_
       __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
       // No need to unpoison the result, we're comparing against null.
       __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
@@ -1530,8 +1531,10 @@
   } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
     DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
     // Bail out if the source is not a non primitive array.
+    // /* HeapReference<Class> */ temp1 = src->klass_
     __ LoadFromOffset(kLoadWord, temp1, src, class_offset);
     __ MaybeUnpoisonHeapReference(temp1);
+    // /* HeapReference<Class> */ temp3 = temp1->component_type_
     __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
     __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
     __ MaybeUnpoisonHeapReference(temp3);
@@ -1585,7 +1588,7 @@
                        temp2,
                        dest,
                        Register(kNoRegister),
-                       /* can_be_null */ false);
+                       /* value_can_be_null */ false);
 
   __ Bind(slow_path->GetExitLabel());
 }
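
For context on the renamed label: the code up to
conditions_on_positions_validated checks whether a forward (low-to-high)
element copy is safe. A sketch of the rule being validated, in plain C++
rather than the emitted assembly (the generated code additionally folds
constant positions and the GetDestinationIsSource() optimization flag):

  #include <cstdint>

  // Forward copying is only unsafe when both ranges belong to the same
  // array and the destination starts after the source; in that case the
  // intrinsic branches to the slow path instead.
  bool ForwardCopyIsSafe(const void* src, const void* dest,
                         int32_t src_pos, int32_t dest_pos) {
    return src != dest || dest_pos <= src_pos;
  }
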
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 927e2ec..04ae3a6 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1819,39 +1819,32 @@
                                         const Register& dst_base,
                                         const Register& src_end) {
   DCHECK(type == Primitive::kPrimNot || type == Primitive::kPrimChar)
-         << "Unexpected element type: "
-         << type;
-  const int32_t char_size = Primitive::ComponentSize(type);
-  const int32_t char_size_shift = Primitive::ComponentSizeShift(type);
+      << "Unexpected element type: " << type;
+  const int32_t element_size = Primitive::ComponentSize(type);
+  const int32_t element_size_shift = Primitive::ComponentSizeShift(type);
 
-  uint32_t offset = mirror::Array::DataOffset(char_size).Uint32Value();
+  uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();
   if (src_pos.IsConstant()) {
     int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
-    __ Add(src_base, src, char_size * constant + offset);
+    __ Add(src_base, src, element_size * constant + data_offset);
   } else {
-    __ Add(src_base, src, offset);
-    __ Add(src_base,
-           src_base,
-           Operand(XRegisterFrom(src_pos), LSL, char_size_shift));
+    __ Add(src_base, src, data_offset);
+    __ Add(src_base, src_base, Operand(XRegisterFrom(src_pos), LSL, element_size_shift));
   }
 
   if (dst_pos.IsConstant()) {
     int32_t constant = dst_pos.GetConstant()->AsIntConstant()->GetValue();
-    __ Add(dst_base, dst, char_size * constant + offset);
+    __ Add(dst_base, dst, element_size * constant + data_offset);
   } else {
-    __ Add(dst_base, dst, offset);
-    __ Add(dst_base,
-           dst_base,
-           Operand(XRegisterFrom(dst_pos), LSL, char_size_shift));
+    __ Add(dst_base, dst, data_offset);
+    __ Add(dst_base, dst_base, Operand(XRegisterFrom(dst_pos), LSL, element_size_shift));
   }
 
   if (copy_length.IsConstant()) {
     int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue();
-    __ Add(src_end, src_base, char_size * constant);
+    __ Add(src_end, src_base, element_size * constant);
   } else {
-    __ Add(src_end,
-           src_base,
-           Operand(XRegisterFrom(copy_length), LSL, char_size_shift));
+    __ Add(src_end, src_base, Operand(XRegisterFrom(copy_length), LSL, element_size_shift));
   }
 }
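
The reworked GenSystemArrayCopyAddresses emits the equivalent of the
following arithmetic (a sketch in plain C++, with an illustrative helper
name, not an ART function):

  #include <cstdint>

  // Address of element `pos` in an array whose payload starts `data_offset`
  // bytes in and whose elements are (1 << element_size_shift) bytes wide.
  uintptr_t ElementAddress(uintptr_t array, uint32_t data_offset,
                           int32_t pos, int32_t element_size_shift) {
    return array + data_offset +
           (static_cast<uintptr_t>(pos) << element_size_shift);
  }

so that src_base = ElementAddress(src, data_offset, src_pos, shift),
dst_base = ElementAddress(dst, data_offset, dst_pos, shift), and
src_end = src_base + (copy_length << shift).
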
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 9ca4ef0..1d32dc7 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1150,15 +1150,13 @@
   SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);
 
-  NearLabel ok;
+  NearLabel conditions_on_positions_validated;
   SystemArrayCopyOptimizations optimizations(invoke);
 
-  if (!optimizations.GetDestinationIsSource()) {
-    if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
-      __ cmpl(src, dest);
-    }
+  if (!optimizations.GetDestinationIsSource() &&
+      (!src_pos.IsConstant() || !dest_pos.IsConstant())) {
+    __ cmpl(src, dest);
   }
-
   // If source and destination are the same, we go to slow path if we need to do
   // forward copying.
   if (src_pos.IsConstant()) {
@@ -1169,14 +1167,14 @@
              || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
     } else {
       if (!optimizations.GetDestinationIsSource()) {
-        __ j(kNotEqual, &ok);
+        __ j(kNotEqual, &conditions_on_positions_validated);
       }
       __ cmpl(dest_pos.AsRegister<CpuRegister>(), Immediate(src_pos_constant));
       __ j(kGreater, slow_path->GetEntryLabel());
     }
   } else {
     if (!optimizations.GetDestinationIsSource()) {
-      __ j(kNotEqual, &ok);
+      __ j(kNotEqual, &conditions_on_positions_validated);
     }
     if (dest_pos.IsConstant()) {
       int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
@@ -1188,7 +1186,7 @@
     }
   }
 
-  __ Bind(&ok);
+  __ Bind(&conditions_on_positions_validated);
 
   if (!optimizations.GetSourceIsNotNull()) {
     // Bail out if the source is null.
@@ -1241,7 +1239,7 @@
     bool did_unpoison = false;
     if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
         !optimizations.GetSourceIsNonPrimitiveArray()) {
-      // One or two of the references need to be unpoisoned. Unpoisoned them
+      // One or two of the references need to be unpoisoned. Unpoison them
       // both to make the identity check valid.
       __ MaybeUnpoisonHeapReference(temp1);
       __ MaybeUnpoisonHeapReference(temp2);
@@ -1250,6 +1248,7 @@
 
     if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
       // Bail out if the destination is not a non primitive array.
+      // /* HeapReference<Class> */ TMP = temp1->component_type_
       __ movl(CpuRegister(TMP), Address(temp1, component_offset));
       __ testl(CpuRegister(TMP), CpuRegister(TMP));
       __ j(kEqual, slow_path->GetEntryLabel());
@@ -1260,6 +1259,7 @@
 
     if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       // Bail out if the source is not a non primitive array.
+      // /* HeapReference<Class> */ TMP = temp2->component_type_
       __ movl(CpuRegister(TMP), Address(temp2, component_offset));
       __ testl(CpuRegister(TMP), CpuRegister(TMP));
       __ j(kEqual, slow_path->GetEntryLabel());
@@ -1276,8 +1276,10 @@
       if (!did_unpoison) {
         __ MaybeUnpoisonHeapReference(temp1);
       }
+      // /* HeapReference<Class> */ temp1 = temp1->component_type_
       __ movl(temp1, Address(temp1, component_offset));
       __ MaybeUnpoisonHeapReference(temp1);
+      // /* HeapReference<Class> */ temp1 = temp1->super_class_
       __ movl(temp1, Address(temp1, super_offset));
       // No need to unpoison the result, we're comparing against null.
       __ testl(temp1, temp1);
@@ -1289,8 +1291,10 @@
   } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
     DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
     // Bail out if the source is not a non primitive array.
+    // /* HeapReference<Class> */ temp1 = src->klass_
     __ movl(temp1, Address(src, class_offset));
     __ MaybeUnpoisonHeapReference(temp1);
+    // /* HeapReference<Class> */ TMP = temp1->component_type_
     __ movl(CpuRegister(TMP), Address(temp1, component_offset));
     __ testl(CpuRegister(TMP), CpuRegister(TMP));
     __ j(kEqual, slow_path->GetEntryLabel());
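
For reference, the repeated klass_/component_type_ loads above (after
MaybeUnpoisonHeapReference has decoded each heap reference) implement the
predicate sketched below. The struct is a hypothetical stand-in for the two
mirror::Class fields the generated code reads, kept only so the sketch
compiles; the real layouts live in ART's runtime:

  #include <cstdint>

  struct Class {
    Class* component_type_;    // null when the class is not an array class
    uint16_t primitive_type_;  // non-zero when the component is primitive
  };

  // True when `klass` describes an array of references, i.e. the case where
  // the fast path may skip the slow-path element type checks.
  bool IsNonPrimitiveArray(const Class* klass) {
    const Class* component = klass->component_type_;
    return component != nullptr && component->primitive_type_ == 0;
  }
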