Use DlMallocSpace for the JIT code cache.
- Also tidy up some code in the JIT compiler.
- And mprotect code space to be writable only when allocating.
Change-Id: I46ea5c029aec489f2af63452de31db3736aebc20
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index f485e4a..fa90c18 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
#include "oat_file.h"
@@ -48,34 +49,26 @@
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- const uint8_t* CodeCachePtr() const {
- return code_cache_ptr_;
- }
-
- size_t CodeCacheSize() const {
- return code_cache_ptr_ - code_cache_begin_;
- }
-
- size_t CodeCacheRemain() const {
- return code_cache_end_ - code_cache_ptr_;
- }
-
- const uint8_t* DataCachePtr() const {
- return data_cache_ptr_;
- }
-
- size_t DataCacheSize() const {
- return data_cache_ptr_ - data_cache_begin_;
- }
-
- size_t DataCacheRemain() const {
- return data_cache_end_ - data_cache_ptr_;
- }
-
size_t NumMethods() const {
return num_methods_;
}
+ size_t CodeCacheSize() REQUIRES(!lock_);
+
+ size_t DataCacheSize() REQUIRES(!lock_);
+
+ // Allocate and write code and its metadata to the code cache.
+ uint8_t* CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_);
+
// Return true if the code cache contains the code pointer which is the entrypoint of the method.
bool ContainsMethod(ArtMethod* method) const
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -83,9 +76,6 @@
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
-
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
@@ -105,25 +95,19 @@
private:
// Takes ownership of code_mem_map.
- explicit JitCodeCache(MemMap* code_mem_map);
-
- // Unimplemented, TODO: Determine if it is necessary.
- void FlushInstructionCache();
+ JitCodeCache(MemMap* code_map, MemMap* data_map);
// Lock which guards.
Mutex lock_;
- // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
- // headers in code cache which point to things in the data cache. If the maps are more than 4GB
- // apart, having multiple maps wouldn't work.
- std::unique_ptr<MemMap> mem_map_;
- // Code cache section.
- uint8_t* code_cache_ptr_;
- const uint8_t* code_cache_begin_;
- const uint8_t* code_cache_end_;
- // Data cache section.
- uint8_t* data_cache_ptr_;
- const uint8_t* data_cache_begin_;
- const uint8_t* data_cache_end_;
+ // Mem map which holds code.
+ std::unique_ptr<MemMap> code_map_;
+ // Mem map which holds data (stack maps and profiling info).
+ std::unique_ptr<MemMap> data_map_;
+ // The opaque mspace for allocating code.
+ void* code_mspace_;
+ // The opaque mspace for allocating data.
+ void* data_mspace_;
+ // Number of compiled methods.
size_t num_methods_;
// This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.