Fix cyclic region allocation for large regions in ART's RegionSpace.

When using the cyclic region allocation strategy,
art::gc::space::RegionSpace::AllocLarge (large region allocation)
calls art::gc::space::RegionSpace::AllocLargeInRange once (if it
succeeds in allocating in the area located past the current cyclic
alloc region index) or twice (if the first call fails and it then tries to
allocate from the beginning of the region space).
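
For illustration, the shape of this two-pass scan is roughly the
following (a simplified, standalone C++ sketch; `FindFreeRun` and
`AllocLargeCyclic` are hypothetical stand-ins for AllocLargeInRange
and AllocLarge that model only the index arithmetic, not the actual
region bookkeeping):

  #include <algorithm>
  #include <cstddef>
  #include <optional>
  #include <vector>

  // Hypothetical stand-in for AllocLargeInRange: scan [begin, end) of
  // a free-region bitmap for `num_regs` consecutive free regions and
  // return the start index of the run, if found. Assumes num_regs > 0.
  std::optional<std::size_t> FindFreeRun(const std::vector<bool>& is_free,
                                         std::size_t begin,
                                         std::size_t end,
                                         std::size_t num_regs) {
    std::size_t left = begin;
    while (left + num_regs - 1 < end) {
      std::size_t right = left;
      while (right < left + num_regs && is_free[right]) {
        ++right;
      }
      if (right == left + num_regs) {
        return left;  // Found a large-enough run of free regions.
      }
      left = right + 1;  // Restart the scan past the non-free region.
    }
    return std::nullopt;
  }

  // Hypothetical stand-in for AllocLarge under the cyclic strategy.
  std::optional<std::size_t> AllocLargeCyclic(const std::vector<bool>& is_free,
                                              std::size_t cyclic_index,
                                              std::size_t num_regs) {
    const std::size_t num_regions = is_free.size();
    // First call: scan the area past the current cyclic alloc region
    // index, i.e. [cyclic_index, num_regions).
    if (auto run = FindFreeRun(is_free, cyclic_index, num_regions, num_regs)) {
      return run;
    }
    // Second call: wrap around to the beginning of the region space.
    // The `end` bound shown here is the corrected one; the incorrect
    // bound it replaces is discussed below.
    return FindFreeRun(is_free, 0,
                       std::min(cyclic_index + num_regs - 1, num_regions),
                       num_regs);
  }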

The upper bound (`cyclic_alloc_region_index_ + num_regions_ - 1`) of
the range passed to that second call to
art::gc::space::RegionSpace::AllocLargeInRange was incorrect for two
reasons:
1. `num_regions_` (the total number of regions in the region space)
   was used instead of `num_regs` (the number of regions in the large
   region being allocated);
2. the upper bound was not clamped to `num_regions_`, so it could
   point past the end of the region space.

This change addresses both issues, as the worked example below
illustrates, and renames `num_regs` to `num_regs_in_large_region` to
remove the ambiguity behind the first one.
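
As a worked example (a standalone sketch with hypothetical values;
the constants mirror the fields they are named after):

  #include <algorithm>
  #include <cstddef>

  // Hypothetical values, chosen for illustration only.
  constexpr std::size_t kNumRegions = 256;          // num_regions_
  constexpr std::size_t kCyclicIndex = 250;         // cyclic_alloc_region_index_
  constexpr std::size_t kNumRegsInLargeRegion = 3;  // num_regs_in_large_region

  // Old (incorrect) upper bound: uses the total region count and is
  // not clamped, so the scan could read past the end of the region table.
  constexpr std::size_t kOldEnd = kCyclicIndex + kNumRegions - 1;  // 505
  static_assert(kOldEnd > kNumRegions, "old bound overruns the region space");

  // New upper bound: uses the size of the requested large region and
  // is clamped to the number of regions.
  constexpr std::size_t kNewEnd =
      std::min(kCyclicIndex + kNumRegsInLargeRegion - 1, kNumRegions);  // 252
  static_assert(kNewEnd <= kNumRegions, "new bound stays within the space");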

This change also fixes the argument names in
art::gc::space::RegionSpace::AllocLargeInRange's declaration, which
did not match the order used in its definition.

Test: art/test/testrunner/testrunner.py --gcstress
Bug: 112246149
Bug: 111766751
Bug: 74064045
Change-Id: I3fb29c8db7a3d00a98ef318e840a17b3443bb940
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index c6ec174..436eb2c 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -247,14 +247,14 @@
                                                /* out */ size_t* bytes_tl_bulk_allocated) {
   DCHECK_ALIGNED(num_bytes, kAlignment);
   DCHECK_GT(num_bytes, kRegionSize);
-  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
-  DCHECK_GT(num_regs, 0U);
-  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
-  DCHECK_LE(num_bytes, num_regs * kRegionSize);
+  size_t num_regs_in_large_region = RoundUp(num_bytes, kRegionSize) / kRegionSize;
+  DCHECK_GT(num_regs_in_large_region, 0U);
+  DCHECK_LT((num_regs_in_large_region - 1) * kRegionSize, num_bytes);
+  DCHECK_LE(num_bytes, num_regs_in_large_region * kRegionSize);
   MutexLock mu(Thread::Current(), region_lock_);
   if (!kForEvac) {
     // Retain sufficient free regions for full evacuation.
-    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
+    if ((num_non_free_regions_ + num_regs_in_large_region) * 2 > num_regions_) {
       return nullptr;
     }
   }
@@ -265,7 +265,7 @@
     size_t next_region1 = -1;
     mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
                                                           num_regions_,
-                                                          num_regs,
+                                                          num_regs_in_large_region,
                                                           bytes_allocated,
                                                           usable_size,
                                                           bytes_tl_bulk_allocated,
@@ -280,16 +280,16 @@
     }
 
     // If the previous attempt failed, try to find a range of free regions within
-    // [0, cyclic_alloc_region_index_ + num_regions_ - 1).
+    // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
     size_t next_region2 = -1;
-    mirror::Object* region2 =
-        AllocLargeInRange<kForEvac>(0,
-                                    cyclic_alloc_region_index_ + num_regions_ - 1,
-                                    num_regs,
-                                    bytes_allocated,
-                                    usable_size,
-                                    bytes_tl_bulk_allocated,
-                                    &next_region2);
+    mirror::Object* region2 = AllocLargeInRange<kForEvac>(
+            0,
+            std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
+            num_regs_in_large_region,
+            bytes_allocated,
+            usable_size,
+            bytes_tl_bulk_allocated,
+            &next_region2);
     if (region2 != nullptr) {
       DCHECK_LT(0u, next_region2);
       DCHECK_LE(next_region2, num_regions_);
@@ -302,7 +302,7 @@
     // Try to find a range of free regions within [0, num_regions_).
     mirror::Object* region = AllocLargeInRange<kForEvac>(0,
                                                          num_regions_,
-                                                         num_regs,
+                                                         num_regs_in_large_region,
                                                          bytes_allocated,
                                                          usable_size,
                                                          bytes_tl_bulk_allocated);
@@ -316,17 +316,21 @@
 template<bool kForEvac>
 inline mirror::Object* RegionSpace::AllocLargeInRange(size_t begin,
                                                       size_t end,
-                                                      size_t num_regs,
+                                                      size_t num_regs_in_large_region,
                                                       /* out */ size_t* bytes_allocated,
                                                       /* out */ size_t* usable_size,
                                                       /* out */ size_t* bytes_tl_bulk_allocated,
                                                       /* out */ size_t* next_region) {
+  DCHECK_LE(0u, begin);
+  DCHECK_LT(begin, end);
+  DCHECK_LE(end, num_regions_);
   size_t left = begin;
-  while (left + num_regs - 1 < end) {
+  while (left + num_regs_in_large_region - 1 < end) {
     bool found = true;
     size_t right = left;
-    DCHECK_LT(right, left + num_regs) << "The inner loop should iterate at least once";
-    while (right < left + num_regs) {
+    DCHECK_LT(right, left + num_regs_in_large_region)
+        << "The inner loop should iterate at least once";
+    while (right < left + num_regs_in_large_region) {
       if (regions_[right].IsFree()) {
         ++right;
         // Ensure `right` is not going beyond the past-the-end index of the region space.
@@ -338,7 +342,7 @@
     }
     if (found) {
       // `right` points to the one region past the last free region.
-      DCHECK_EQ(left + num_regs, right);
+      DCHECK_EQ(left + num_regs_in_large_region, right);
       Region* first_reg = &regions_[left];
       DCHECK(first_reg->IsFree());
       first_reg->UnfreeLarge(this, time_);
@@ -347,7 +351,7 @@
       } else {
         ++num_non_free_regions_;
       }
-      size_t allocated = num_regs * kRegionSize;
+      size_t allocated = num_regs_in_large_region * kRegionSize;
       // We make 'top' all usable bytes, as the caller of this
       // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
       first_reg->SetTop(first_reg->Begin() + allocated);
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index a129171..fa33a8a 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -586,17 +586,17 @@
   Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
 
   // Scan region range [`begin`, `end`) in increasing order to try to
-  // allocate a large region having a size of `num_regs` regions. If
-  // there is no space in the region space to allocate this large
-  // region, return null.
+  // allocate a large region having a size of `num_regs_in_large_region`
+  // regions. If there is no space in the region space to allocate this
+  // large region, return null.
   //
   // If argument `next_region` is not null, use `*next_region` to
   // return the index to the region next to the allocated large region
   // returned by this method.
   template<bool kForEvac>
-  mirror::Object* AllocLargeInRange(size_t num_regs,
-                                    size_t begin,
+  mirror::Object* AllocLargeInRange(size_t begin,
                                     size_t end,
+                                    size_t num_regs_in_large_region,
                                     /* out */ size_t* bytes_allocated,
                                     /* out */ size_t* usable_size,
                                     /* out */ size_t* bytes_tl_bulk_allocated,