Add support for collecting ProfilingInfo objects.
Change-Id: I8bb6069530253a7372acdf2b5aee71e1de644822
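A minimal usage sketch (not part of the patch) of the new AddProfilingInfo
entry point declared below; 'code_cache', 'self', 'method', and the dex-pc
entries are hypothetical stand-ins for whatever the calling JIT component
holds:

  // Sketch only: names below are assumptions for illustration.
  std::vector<uint32_t> entries = {0x10, 0x24};  // dex pcs to profile (made up)
  ProfilingInfo* info = code_cache->AddProfilingInfo(
      self, method, entries, /* retry_allocation= */ false);
  if (info == nullptr) {
    // First allocation failed; a later call with retry_allocation == true
    // would trigger a cache collection and try again.
  }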
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 4e415b8..e10f962 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -35,6 +35,7 @@
class ArtMethod;
class LinearAlloc;
+class ProfilingInfo;
namespace jit {
@@ -109,11 +110,21 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
+ // run a code cache collection and retry if the first allocation fails.
+ ProfilingInfo* AddProfilingInfo(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& entries,
+ bool retry_allocation)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
// Take ownership of code_mem_map.
JitCodeCache(MemMap* code_map, MemMap* data_map);
@@ -133,6 +144,12 @@
REQUIRES(!lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ ProfilingInfo* AddProfilingInfoInternal(Thread* self,
+ ArtMethod* method,
+ const std::vector<uint32_t>& entries)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
@@ -157,8 +174,10 @@
void* data_mspace_ GUARDED_BY(lock_);
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
- // This map holds compiled code associated to the ArtMethod
+ // This map holds compiled code associated to the ArtMethod.
SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
+ // ProfilingInfo objects we have allocated.
+ std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
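
For context, a plausible implementation of the retry contract in
jit_code_cache.cc (not included in this diff) might look like the sketch
below; GarbageCollectCache is an assumption standing in for whatever
collection entry point the cache exposes:

  ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                                ArtMethod* method,
                                                const std::vector<uint32_t>& entries,
                                                bool retry_allocation) {
    // First attempt; AddProfilingInfoInternal takes lock_ itself.
    ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries);
    if (info == nullptr && retry_allocation) {
      // Assumed collection entry point: free up data space, then retry once.
      GarbageCollectCache(self);
      info = AddProfilingInfoInternal(self, method, entries);
    }
    return info;
  }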