Introduce an enum for the compilation kind.

Test: test.py
Change-Id: I5329e50a6b4521933b6b171c8c0fbc618c3f67cd
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 3cf255c..e3200c4 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -485,6 +485,7 @@
         "base/callee_save_type.h",
         "base/locks.h",
         "class_status.h",
+        "compilation_kind.h",
         "gc_root.h",
         "gc/allocator_type.h",
         "gc/allocator/rosalloc.h",
diff --git a/runtime/compilation_kind.h b/runtime/compilation_kind.h
new file mode 100644
index 0000000..c289e98
--- /dev/null
+++ b/runtime/compilation_kind.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_COMPILATION_KIND_H_
+#define ART_RUNTIME_COMPILATION_KIND_H_
+
+#include <iosfwd>
+#include <stdint.h>
+
+namespace art {
+
+enum class CompilationKind {
+  kOsr,
+  kBaseline,
+  kOptimized,
+};
+
+std::ostream& operator<<(std::ostream& os, CompilationKind rhs);
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_COMPILATION_KIND_H_
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index c7db749..8aae7bf 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -28,6 +28,7 @@
 #include "base/scoped_flock.h"
 #include "base/utils.h"
 #include "class_root-inl.h"
+#include "compilation_kind.h"
 #include "debugger.h"
 #include "dex/type_lookup_table.h"
 #include "gc/space/image_space.h"
@@ -289,7 +290,10 @@
   return true;
 }
 
-bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit) {
+bool Jit::CompileMethod(ArtMethod* method,
+                        Thread* self,
+                        CompilationKind compilation_kind,
+                        bool prejit) {
   DCHECK(Runtime::Current()->UseJitCompilation());
   DCHECK(!method->IsRuntimeMethod());
 
@@ -319,7 +323,7 @@
   }
 
   JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
-  if (osr && GetCodeCache()->IsSharedRegion(*region)) {
+  if ((compilation_kind == CompilationKind::kOsr) && GetCodeCache()->IsSharedRegion(*region)) {
     VLOG(jit) << "JIT not osr compiling "
               << method->PrettyMethod()
               << " due to using shared region";
@@ -329,20 +333,20 @@
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, baseline, region)) {
+  if (!code_cache_->NotifyCompilationOf(
+          method_to_compile, self, compilation_kind, prejit, region)) {
     return false;
   }
 
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
-            << " osr=" << std::boolalpha << osr
-            << " baseline=" << std::boolalpha << baseline;
-  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, baseline, osr);
-  code_cache_->DoneCompiling(method_to_compile, self, osr, baseline);
+            << " kind=" << compilation_kind;
+  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
+  code_cache_->DoneCompiling(method_to_compile, self, compilation_kind);
   if (!success) {
     VLOG(jit) << "Failed to compile method "
               << ArtMethod::PrettyMethod(method_to_compile)
-              << " osr=" << std::boolalpha << osr;
+              << " kind=" << compilation_kind;
   }
   if (kIsDebugBuild) {
     if (self->IsExceptionPending()) {
@@ -758,12 +762,11 @@
   enum class TaskKind {
     kAllocateProfile,
     kCompile,
-    kCompileBaseline,
-    kCompileOsr,
     kPreCompile,
   };
 
-  JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind), klass_(nullptr) {
+  JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind)
+      : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) {
     ScopedObjectAccess soa(Thread::Current());
     // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
     // until compilation is done.
@@ -787,15 +790,12 @@
     {
       ScopedObjectAccess soa(self);
       switch (kind_) {
-        case TaskKind::kPreCompile:
         case TaskKind::kCompile:
-        case TaskKind::kCompileBaseline:
-        case TaskKind::kCompileOsr: {
+        case TaskKind::kPreCompile: {
           Runtime::Current()->GetJit()->CompileMethod(
               method_,
               self,
-              /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
-              /* osr= */ (kind_ == TaskKind::kCompileOsr),
+              compilation_kind_,
               /* prejit= */ (kind_ == TaskKind::kPreCompile));
           break;
         }
@@ -817,6 +817,7 @@
  private:
   ArtMethod* const method_;
   const TaskKind kind_;
+  const CompilationKind compilation_kind_;
   jobject klass_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
@@ -1343,9 +1344,10 @@
       (entry_point == GetQuickResolutionStub())) {
     method->SetPreCompiled();
     if (!add_to_queue) {
-      CompileMethod(method, self, /* baseline= */ false, /* osr= */ false, /* prejit= */ true);
+      CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true);
     } else {
-      Task* task = new JitCompileTask(method, JitCompileTask::TaskKind::kPreCompile);
+      Task* task = new JitCompileTask(
+          method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized);
       if (compile_after_boot) {
         MutexLock mu(Thread::Current(), boot_completed_lock_);
         if (!boot_completed_) {
@@ -1553,7 +1555,10 @@
         // We failed allocating. Instead of doing the collection on the Java thread, we push
         // an allocation to a compiler thread, that will do the collection.
         thread_pool_->AddTask(
-            self, new JitCompileTask(method, JitCompileTask::TaskKind::kAllocateProfile));
+            self,
+            new JitCompileTask(method,
+                               JitCompileTask::TaskKind::kAllocateProfile,
+                               CompilationKind::kOptimized));  // Dummy compilation kind.
       }
     }
   }
@@ -1561,11 +1566,13 @@
     if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
       if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
-        JitCompileTask::TaskKind kind =
+        CompilationKind compilation_kind =
             (options_->UseTieredJitCompilation() || options_->UseBaselineCompiler())
-                ? JitCompileTask::TaskKind::kCompileBaseline
-                : JitCompileTask::TaskKind::kCompile;
-        thread_pool_->AddTask(self, new JitCompileTask(method, kind));
+                ? CompilationKind::kBaseline
+                : CompilationKind::kOptimized;
+        thread_pool_->AddTask(
+            self,
+            new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, compilation_kind));
       }
     }
     if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
@@ -1576,7 +1583,8 @@
       if (!code_cache_->IsOsrCompiled(method)) {
         DCHECK(thread_pool_ != nullptr);
         thread_pool_->AddTask(
-            self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
+            self,
+            new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
       }
     }
   }
@@ -1592,7 +1600,10 @@
-  // task that will compile optimize the method.
+  // task that will compile and optimize the method.
   if (options_->UseTieredJitCompilation()) {
     thread_pool_->AddTask(
-        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+        self,
+        new JitCompileTask(method,
+                           JitCompileTask::TaskKind::kCompile,
+                           CompilationKind::kOptimized));
   }
 }
 
@@ -1623,7 +1634,8 @@
       }
       // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
       // conflicts with jitzygote optimizations.
-      JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
+      JitCompileTask compile_task(
+          method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized);
       // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
       ScopedSetRuntimeThread ssrt(thread);
       compile_task.Run(thread);
@@ -1852,16 +1864,21 @@
     // If we already have compiled code for it, nterp may be stuck in a loop.
     // Compile OSR.
     thread_pool_->AddTask(
-        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
+        self,
+        new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
     return;
   }
   if (GetCodeCache()->CanAllocateProfilingInfo()) {
     ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
     thread_pool_->AddTask(
-        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileBaseline));
+        self,
+        new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
   } else {
     thread_pool_->AddTask(
-        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+        self,
+        new JitCompileTask(method,
+                           JitCompileTask::TaskKind::kCompile,
+                           CompilationKind::kOptimized));
   }
 }
 
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e9fd915..853db10 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -24,6 +24,7 @@
 #include "base/mutex.h"
 #include "base/runtime_debug.h"
 #include "base/timing_logger.h"
+#include "compilation_kind.h"
 #include "handle.h"
 #include "offsets.h"
 #include "interpreter/mterp/mterp.h"
@@ -192,7 +193,7 @@
  public:
   virtual ~JitCompilerInterface() {}
   virtual bool CompileMethod(
-      Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr)
+      Thread* self, JitMemoryRegion* region, ArtMethod* method, CompilationKind compilation_kind)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
   virtual void TypesLoaded(mirror::Class**, size_t count)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
@@ -243,7 +244,7 @@
   // Create JIT itself.
   static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
 
-  bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit)
+  bool CompileMethod(ArtMethod* method, Thread* self, CompilationKind compilation_kind, bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const JitCodeCache* GetCodeCache() const {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 166beef..4ea61c6 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -663,10 +663,10 @@
                           ArrayRef<const uint8_t> stack_map,
                           const std::vector<uint8_t>& debug_info,
                           bool is_full_debug_info,
-                          bool osr,
+                          CompilationKind compilation_kind,
                           bool has_should_deoptimize_flag,
                           const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
-  DCHECK(!method->IsNative() || !osr);
+  DCHECK(!method->IsNative() || (compilation_kind != CompilationKind::kOsr));
 
   if (!method->IsNative()) {
     // We need to do this before grabbing the lock_ because it needs to be able to see the string
@@ -749,7 +749,7 @@
       } else {
         method_code_map_.Put(code_ptr, method);
       }
-      if (osr) {
+      if (compilation_kind == CompilationKind::kOsr) {
         number_of_osr_compilations_++;
         osr_code_map_.Put(method, code_ptr);
       } else if (NeedsClinitCheckBeforeCall(method) &&
@@ -773,7 +773,7 @@
       GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
     }
     VLOG(jit)
-        << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
+        << "JIT added (kind=" << compilation_kind << ") "
         << ArtMethod::PrettyMethod(method) << "@" << method
         << " ccache_size=" << PrettySize(CodeCacheSizeLocked()) << ": "
         << " dcache_size=" << PrettySize(DataCacheSizeLocked()) << ": "
@@ -1287,32 +1287,45 @@
   }
 }
 
-void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) {
-  DCHECK(IsMethodBeingCompiled(method, osr, baseline));
-  if (osr) {
-    current_osr_compilations_.erase(method);
-  } else if (baseline) {
-    current_baseline_compilations_.erase(method);
-  } else {
-    current_optimized_compilations_.erase(method);
+void JitCodeCache::RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+  DCHECK(IsMethodBeingCompiled(method, kind));
+  switch (kind) {
+    case CompilationKind::kOsr:
+      current_osr_compilations_.erase(method);
+      break;
+    case CompilationKind::kBaseline:
+      current_baseline_compilations_.erase(method);
+      break;
+    case CompilationKind::kOptimized:
+      current_optimized_compilations_.erase(method);
+      break;
   }
 }
 
-void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) {
-  DCHECK(!IsMethodBeingCompiled(method, osr, baseline));
-  if (osr) {
-    current_osr_compilations_.insert(method);
-  } else if (baseline) {
-    current_baseline_compilations_.insert(method);
-  } else {
-    current_optimized_compilations_.insert(method);
+void JitCodeCache::AddMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+  DCHECK(!IsMethodBeingCompiled(method, kind));
+  switch (kind) {
+    case CompilationKind::kOsr:
+      current_osr_compilations_.insert(method);
+      break;
+    case CompilationKind::kBaseline:
+      current_baseline_compilations_.insert(method);
+      break;
+    case CompilationKind::kOptimized:
+      current_optimized_compilations_.insert(method);
+      break;
   }
 }
 
-bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline) {
-  return osr ? ContainsElement(current_osr_compilations_, method)
-             : baseline ? ContainsElement(current_baseline_compilations_, method)
-                        : ContainsElement(current_optimized_compilations_, method);
+bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method, CompilationKind kind) {
+  switch (kind) {
+    case CompilationKind::kOsr:
+      return ContainsElement(current_osr_compilations_, method);
+    case CompilationKind::kBaseline:
+      return ContainsElement(current_baseline_compilations_, method);
+    case CompilationKind::kOptimized:
+      return ContainsElement(current_optimized_compilations_, method);
+  }
 }
 
 bool JitCodeCache::IsMethodBeingCompiled(ArtMethod* method) {
@@ -1679,19 +1692,19 @@
 
 bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
                                        Thread* self,
-                                       bool osr,
+                                       CompilationKind compilation_kind,
                                        bool prejit,
-                                       bool baseline,
                                        JitMemoryRegion* region) {
   const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
-  if (!osr && ContainsPc(existing_entry_point)) {
+  if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
     OatQuickMethodHeader* method_header =
         OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
-    if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == baseline) {
+    bool is_baseline = (compilation_kind == CompilationKind::kBaseline);
+    if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == is_baseline) {
       VLOG(jit) << "Not compiling "
                 << method->PrettyMethod()
                 << " because it has already been compiled"
-                << " baseline=" << std::boolalpha << baseline;
+                << " kind=" << compilation_kind;
       return false;
     }
   }
@@ -1719,7 +1732,7 @@
     }
   }
 
-  if (osr) {
+  if (compilation_kind == CompilationKind::kOsr) {
     MutexLock mu(self, *Locks::jit_lock_);
     if (osr_code_map_.find(method) != osr_code_map_.end()) {
       return false;
@@ -1756,7 +1769,9 @@
     return new_compilation;
   } else {
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-    if (CanAllocateProfilingInfo() && baseline && info == nullptr) {
+    if (CanAllocateProfilingInfo() &&
+        (compilation_kind == CompilationKind::kBaseline) &&
+        (info == nullptr)) {
       // We can retry allocation here as we're the JIT thread.
       if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
         info = method->GetProfilingInfo(kRuntimePointerSize);
@@ -1773,10 +1788,10 @@
       }
     }
     MutexLock mu(self, *Locks::jit_lock_);
-    if (IsMethodBeingCompiled(method, osr, baseline)) {
+    if (IsMethodBeingCompiled(method, compilation_kind)) {
       return false;
     }
-    AddMethodBeingCompiled(method, osr, baseline);
+    AddMethodBeingCompiled(method, compilation_kind);
     return true;
   }
 }
@@ -1800,7 +1815,9 @@
   info->DecrementInlineUse();
 }
 
-void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool baseline) {
+void JitCodeCache::DoneCompiling(ArtMethod* method,
+                                 Thread* self,
+                                 CompilationKind compilation_kind) {
   DCHECK_EQ(Thread::Current(), self);
   MutexLock mu(self, *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
@@ -1813,7 +1830,7 @@
       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
     }  // else Commit() updated entrypoints of all methods in the JniStubData.
   } else {
-    RemoveMethodBeingCompiled(method, osr, baseline);
+    RemoveMethodBeingCompiled(method, compilation_kind);
   }
 }
 
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 7e00bcb..4340603 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -32,6 +32,7 @@
 #include "base/mem_map.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
+#include "compilation_kind.h"
 #include "jit_memory_region.h"
 
 namespace art {
@@ -195,9 +196,8 @@
 
   bool NotifyCompilationOf(ArtMethod* method,
                            Thread* self,
-                           bool osr,
+                           CompilationKind compilation_kind,
                            bool prejit,
-                           bool baseline,
                            JitMemoryRegion* region)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
@@ -214,7 +214,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
-  void DoneCompiling(ArtMethod* method, Thread* self, bool osr, bool baseline)
+  void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::jit_lock_);
 
@@ -268,7 +268,7 @@
               ArrayRef<const uint8_t> stack_map,      // Compiler output (source).
               const std::vector<uint8_t>& debug_info,
               bool is_full_debug_info,
-              bool osr,
+              CompilationKind compilation_kind,
               bool has_should_deoptimize_flag,
               const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -500,16 +500,15 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Record that `method` is being compiled with the given mode.
-  // TODO: introduce an enum for the mode.
-  void AddMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline)
+  void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
       REQUIRES(Locks::jit_lock_);
 
-  // Remove `method` from the list of methods meing compiled with the given mode.
+  // Remove `method` from the list of methods being compiled with the given mode.
-  void RemoveMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline)
+  void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
       REQUIRES(Locks::jit_lock_);
 
   // Return whether `method` is being compiled with the given mode.
-  bool IsMethodBeingCompiled(ArtMethod* method, bool osr, bool baseline)
+  bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
       REQUIRES(Locks::jit_lock_);
 
   // Return whether `method` is being compiled in any mode.