Pass the memory region to allocate into explicitly to the compiler entrypoint and to the code cache allocation methods.
Test: test.py
Bug: 119800099
Change-Id: Ie3cba5abe3dd4f8756af5ecfd6c26320de314fe8
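
Sketch of the resulting call shape (illustrative only, not part of the patch; it
assumes the ART JIT internals touched below, e.g. GetCodeCache()->GetPrivateRegion()
and the updated jit_compile_method_ entrypoint):

    // The caller picks a region once (here the code cache's private region) and
    // threads it through compilation, so CommitCode/ReserveData/ClearData
    // allocate into that region rather than implicitly using private_region_.
    JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
    bool success = jit_compile_method_(
        jit_compiler_handle_, region, method_to_compile, self, baseline, osr);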
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 2cfcf5c..27ac3ff 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -63,7 +63,8 @@
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
+bool (*Jit::jit_compile_method_)(void*, JitMemoryRegion*, ArtMethod*, Thread*, bool, bool)
+ = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
void (*Jit::jit_update_options_)(void*) = nullptr;
@@ -273,10 +274,13 @@
return false;
}
+ JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
+
VLOG(jit) << "Compiling method "
<< ArtMethod::PrettyMethod(method_to_compile)
<< " osr=" << std::boolalpha << osr;
- bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
+ bool success = jit_compile_method_(
+ jit_compiler_handle_, region, method_to_compile, self, baseline, osr);
code_cache_->DoneCompiling(method_to_compile, self, osr);
if (!success) {
VLOG(jit) << "Failed to compile method "
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 92d2b55..fcfddfe 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -44,6 +44,7 @@
namespace jit {
class JitCodeCache;
+class JitMemoryRegion;
class JitOptions;
static constexpr int16_t kJitCheckForOSR = -1;
@@ -340,7 +341,7 @@
static void* jit_compiler_handle_;
static void* (*jit_load_)(void);
static void (*jit_unload_)(void*);
- static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
+ static bool (*jit_compile_method_)(void*, JitMemoryRegion*, ArtMethod*, Thread*, bool, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
static void (*jit_update_options_)(void*);
static bool (*jit_generate_debug_info_)(void*);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f430d58..15553d4 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -322,6 +322,7 @@
}
uint8_t* JitCodeCache::CommitCode(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,
@@ -333,6 +334,7 @@
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
uint8_t* result = CommitCodeInternal(self,
+ region,
method,
stack_map,
roots_data,
@@ -347,6 +349,7 @@
// Retry.
GarbageCollectCache(self);
result = CommitCodeInternal(self,
+ region,
method,
stack_map,
roots_data,
@@ -671,6 +674,7 @@
}
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,
@@ -698,7 +702,7 @@
// finish.
WaitForPotentialCollectionToCompleteRunnable(self);
{
- ScopedCodeCacheWrite scc(private_region_);
+ ScopedCodeCacheWrite scc(*region);
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
@@ -707,7 +711,7 @@
// AllocateCode allocates memory in non-executable region for alignment header and code. The
// header size may include alignment padding.
- uint8_t* nox_memory = private_region_.AllocateCode(total_size);
+ uint8_t* nox_memory = region->AllocateCode(total_size);
if (nox_memory == nullptr) {
return nullptr;
}
@@ -718,7 +722,7 @@
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
// From here code_ptr points to executable code.
- code_ptr = private_region_.GetExecutableAddress(code_ptr);
+ code_ptr = region->GetExecutableAddress(code_ptr);
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? code_ptr - stack_map : 0u,
@@ -730,7 +734,7 @@
}
// Update method_header pointer to executable code region.
- method_header = private_region_.GetExecutableAddress(method_header);
+ method_header = region->GetExecutableAddress(method_header);
// Both instruction and data caches need flushing to the point of unification where both share
// a common view of memory. Flushing the data cache ensures the dirty cachelines from the
@@ -747,7 +751,7 @@
// For reference, this behavior is caused by this commit:
// https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
//
- if (private_region_.HasDualCodeMapping()) {
+ if (region->HasDualCodeMapping()) {
// Flush the data cache lines associated with the non-executable copy of the code just added.
FlushDataCache(nox_memory, nox_memory + total_size);
}
@@ -1025,14 +1029,16 @@
}
void JitCodeCache::ClearData(Thread* self,
+ JitMemoryRegion* region,
uint8_t* stack_map_data,
uint8_t* roots_data) {
DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
MutexLock mu(self, *Locks::jit_lock_);
- private_region_.FreeData(reinterpret_cast<uint8_t*>(roots_data));
+ region->FreeData(reinterpret_cast<uint8_t*>(roots_data));
}
size_t JitCodeCache::ReserveData(Thread* self,
+ JitMemoryRegion* region,
size_t stack_map_size,
size_t number_of_roots,
ArtMethod* method,
@@ -1046,7 +1052,7 @@
ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
- result = private_region_.AllocateData(size);
+ result = region->AllocateData(size);
}
if (result == nullptr) {
@@ -1055,7 +1061,7 @@
ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
- result = private_region_.AllocateData(size);
+ result = region->AllocateData(size);
}
MutexLock mu(self, *Locks::jit_lock_);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a56f6f0..9683b48 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -125,6 +125,7 @@
// even if `has_should_deoptimize_flag` is false, which can happen due to CHA
// guard elimination.
uint8_t* CommitCode(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,
@@ -155,6 +156,7 @@
// for storing `number_of_roots` roots. Returns null if there is no more room.
// Return the number of bytes allocated.
size_t ReserveData(Thread* self,
+ JitMemoryRegion* region,
size_t stack_map_size,
size_t number_of_roots,
ArtMethod* method,
@@ -164,7 +166,8 @@
REQUIRES(!Locks::jit_lock_);
// Clear data from the data portion of the code cache.
- void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
+ void ClearData(
+ Thread* self, JitMemoryRegion* region, uint8_t* stack_map_data, uint8_t* roots_data)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -273,12 +276,15 @@
// is debuggable.
void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
+ JitMemoryRegion* GetPrivateRegion() { return &private_region_; }
+
private:
JitCodeCache();
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
uint8_t* CommitCodeInternal(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,