Store inline caches in StackHandleScope<> in HInliner.

Avoid a managed heap allocation: the compiler no longer needs to allocate
an ObjectArray<mirror::Class> just to read the classes out of an inline
cache.
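
For reference, callers now fill a stack handle scope instead of a managed
array. A minimal sketch of the intended usage (identifiers such as `self`,
`code_cache` and `ic` are placeholders, not part of this change):

  StackHandleScope<InlineCache::kIndividualCacheSize> classes(self);
  code_cache->CopyInlineCacheInto(*ic, &classes);
  // NewHandle() fills slots in order and unused slots stay null, so a
  // caller can walk GetReference(i) and stop at the first null entry.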

Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 181943478
Change-Id: I7ce65c93ad2f59490dbfa2aaccba98b6ca1fd585
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index adfee74..9d9a7d3 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -43,6 +43,7 @@
 #include "gc/allocator/dlmalloc.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "handle.h"
+#include "handle_scope-inl.h"
 #include "instrumentation.h"
 #include "intern_table.h"
 #include "jit/jit.h"
@@ -591,17 +592,20 @@
   is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
 }
 
-void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
-                                       Handle<mirror::ObjectArray<mirror::Class>> array) {
+void JitCodeCache::CopyInlineCacheInto(
+    const InlineCache& ic,
+    /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes) {
+  static_assert(arraysize(ic.classes_) == InlineCache::kIndividualCacheSize);
+  DCHECK_EQ(classes->NumberOfReferences(), InlineCache::kIndividualCacheSize);
+  DCHECK_EQ(classes->RemainingSlots(), InlineCache::kIndividualCacheSize);
   WaitUntilInlineCacheAccessible(Thread::Current());
   // Note that we don't need to lock `lock_` here, the compiler calling
   // this method has already ensured the inline cache will not be deleted.
-  for (size_t in_cache = 0, in_array = 0;
-       in_cache < InlineCache::kIndividualCacheSize;
-       ++in_cache) {
-    mirror::Class* object = ic.classes_[in_cache].Read();
+  for (const GcRoot<mirror::Class>& root : ic.classes_) {
+    mirror::Class* object = root.Read();
     if (object != nullptr) {
-      array->Set(in_array++, object);
+      DCHECK_NE(classes->RemainingSlots(), 0u);
+      classes->NewHandle(object);
     }
   }
 }
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index ea1e924..76b7f77 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -34,6 +34,7 @@
 #include "base/safe_map.h"
 #include "compilation_kind.h"
 #include "jit_memory_region.h"
+#include "profiling_info.h"
 
 namespace art {
 
@@ -305,7 +306,8 @@
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
+  void CopyInlineCacheInto(const InlineCache& ic,
+                           /*out*/StackHandleScope<InlineCache::kIndividualCacheSize>* classes)
       REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);