Pass the memory region to allocate into down to the compiler and the JIT code-cache allocation methods.
Test: test.py
Bug: 119800099
Change-Id: Ie3cba5abe3dd4f8756af5ecfd6c26320de314fe8
diff --git a/compiler/compiler.h b/compiler/compiler.h
index a496c6c..e363e70 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -29,6 +29,7 @@
namespace jit {
class JitCodeCache;
class JitLogger;
+class JitMemoryRegion;
} // namespace jit
namespace mirror {
class ClassLoader;
@@ -41,7 +42,6 @@
class CompilerOptions;
class DexFile;
template<class T> class Handle;
-class OatWriter;
class Thread;
class Compiler {
@@ -73,6 +73,7 @@
virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
+ jit::JitMemoryRegion* region ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
bool baseline ATTRIBUTE_UNUSED,
bool osr ATTRIBUTE_UNUSED,
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b42e9f2..f19de4e 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -129,11 +129,11 @@
}
extern "C" bool jit_compile_method(
- void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
+ void* handle, JitMemoryRegion* region, ArtMethod* method, Thread* self, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method, baseline, osr);
+ return jit_compiler->CompileMethod(self, region, method, baseline, osr);
}
extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -181,7 +181,8 @@
}
}
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
+bool JitCompiler::CompileMethod(
+ Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) {
SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
DCHECK(!method->IsProxyMethod());
@@ -198,7 +199,8 @@
TimingLogger::ScopedTiming t2("Compiling", &logger);
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
uint64_t start_ns = NanoTime();
- success = compiler_->JitCompile(self, code_cache, method, baseline, osr, jit_logger_.get());
+ success = compiler_->JitCompile(
+ self, code_cache, region, method, baseline, osr, jit_logger_.get());
uint64_t duration_ns = NanoTime() - start_ns;
VLOG(jit) << "Compilation of "
<< method->PrettyMethod()
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d008de4..06315a5 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -22,7 +22,6 @@
namespace art {
class ArtMethod;
-class CompiledMethod;
class Compiler;
class CompilerOptions;
class Thread;
@@ -30,6 +29,7 @@
namespace jit {
class JitLogger;
+class JitMemoryRegion;
class JitCompiler {
public:
@@ -37,7 +37,8 @@
virtual ~JitCompiler();
// Compilation entrypoint. Returns whether the compilation succeeded.
- bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
+ bool CompileMethod(
+ Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
const CompilerOptions& GetCompilerOptions() const {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c799b12..9da282b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -295,6 +295,7 @@
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
+ jit::JitMemoryRegion* region,
ArtMethod* method,
bool baseline,
bool osr,
@@ -1248,6 +1249,7 @@
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
+ jit::JitMemoryRegion* region,
ArtMethod* method,
bool baseline,
bool osr,
@@ -1282,6 +1284,7 @@
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
+ region,
stack_map.size(),
/* number_of_roots= */ 0,
method,
@@ -1295,6 +1298,7 @@
const void* code = code_cache->CommitCode(
self,
+ region,
method,
stack_map_data,
roots_data,
@@ -1306,6 +1310,7 @@
/* has_should_deoptimize_flag= */ false,
cha_single_implementation_list);
if (code == nullptr) {
+ code_cache->ClearData(self, region, stack_map_data, roots_data);
return false;
}
@@ -1379,6 +1384,7 @@
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
+ region,
stack_map.size(),
number_of_roots,
method,
@@ -1400,6 +1406,7 @@
const void* code = code_cache->CommitCode(
self,
+ region,
method,
stack_map_data,
roots_data,
@@ -1413,7 +1420,7 @@
if (code == nullptr) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
- code_cache->ClearData(self, stack_map_data, roots_data);
+ code_cache->ClearData(self, region, stack_map_data, roots_data);
return false;
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 2cfcf5c..27ac3ff 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -63,7 +63,8 @@
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
+bool (*Jit::jit_compile_method_)(void*, JitMemoryRegion*, ArtMethod*, Thread*, bool, bool)
+ = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
void (*Jit::jit_update_options_)(void*) = nullptr;
@@ -273,10 +274,13 @@
return false;
}
+ JitMemoryRegion* region = GetCodeCache()->GetPrivateRegion();
+
VLOG(jit) << "Compiling method "
<< ArtMethod::PrettyMethod(method_to_compile)
<< " osr=" << std::boolalpha << osr;
- bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
+ bool success = jit_compile_method_(
+ jit_compiler_handle_, region, method_to_compile, self, baseline, osr);
code_cache_->DoneCompiling(method_to_compile, self, osr);
if (!success) {
VLOG(jit) << "Failed to compile method "
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 92d2b55..fcfddfe 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -44,6 +44,7 @@
namespace jit {
class JitCodeCache;
+class JitMemoryRegion;
class JitOptions;
static constexpr int16_t kJitCheckForOSR = -1;
@@ -340,7 +341,7 @@
static void* jit_compiler_handle_;
static void* (*jit_load_)(void);
static void (*jit_unload_)(void*);
- static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
+ static bool (*jit_compile_method_)(void*, JitMemoryRegion*, ArtMethod*, Thread*, bool, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
static void (*jit_update_options_)(void*);
static bool (*jit_generate_debug_info_)(void*);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index f430d58..15553d4 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -322,6 +322,7 @@
}
uint8_t* JitCodeCache::CommitCode(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,
@@ -333,6 +334,7 @@
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
uint8_t* result = CommitCodeInternal(self,
+ region,
method,
stack_map,
roots_data,
@@ -347,6 +349,7 @@
// Retry.
GarbageCollectCache(self);
result = CommitCodeInternal(self,
+ region,
method,
stack_map,
roots_data,
@@ -671,6 +674,7 @@
}
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,
@@ -698,7 +702,7 @@
// finish.
WaitForPotentialCollectionToCompleteRunnable(self);
{
- ScopedCodeCacheWrite scc(private_region_);
+ ScopedCodeCacheWrite scc(*region);
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
@@ -707,7 +711,7 @@
// AllocateCode allocates memory in non-executable region for alignment header and code. The
// header size may include alignment padding.
- uint8_t* nox_memory = private_region_.AllocateCode(total_size);
+ uint8_t* nox_memory = region->AllocateCode(total_size);
if (nox_memory == nullptr) {
return nullptr;
}
@@ -718,7 +722,7 @@
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
// From here code_ptr points to executable code.
- code_ptr = private_region_.GetExecutableAddress(code_ptr);
+ code_ptr = region->GetExecutableAddress(code_ptr);
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? code_ptr - stack_map : 0u,
@@ -730,7 +734,7 @@
}
// Update method_header pointer to executable code region.
- method_header = private_region_.GetExecutableAddress(method_header);
+ method_header = region->GetExecutableAddress(method_header);
// Both instruction and data caches need flushing to the point of unification where both share
// a common view of memory. Flushing the data cache ensures the dirty cachelines from the
@@ -747,7 +751,7 @@
// For reference, this behavior is caused by this commit:
// https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
//
- if (private_region_.HasDualCodeMapping()) {
+ if (region->HasDualCodeMapping()) {
// Flush the data cache lines associated with the non-executable copy of the code just added.
FlushDataCache(nox_memory, nox_memory + total_size);
}
@@ -1025,14 +1029,16 @@
}
void JitCodeCache::ClearData(Thread* self,
+ JitMemoryRegion* region,
uint8_t* stack_map_data,
uint8_t* roots_data) {
DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
MutexLock mu(self, *Locks::jit_lock_);
- private_region_.FreeData(reinterpret_cast<uint8_t*>(roots_data));
+ region->FreeData(reinterpret_cast<uint8_t*>(roots_data));
}
size_t JitCodeCache::ReserveData(Thread* self,
+ JitMemoryRegion* region,
size_t stack_map_size,
size_t number_of_roots,
ArtMethod* method,
@@ -1046,7 +1052,7 @@
ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
- result = private_region_.AllocateData(size);
+ result = region->AllocateData(size);
}
if (result == nullptr) {
@@ -1055,7 +1061,7 @@
ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, *Locks::jit_lock_);
WaitForPotentialCollectionToComplete(self);
- result = private_region_.AllocateData(size);
+ result = region->AllocateData(size);
}
MutexLock mu(self, *Locks::jit_lock_);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a56f6f0..9683b48 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -125,6 +125,7 @@
// even if `has_should_deoptimize_flag` is false, which can happen due to CHA
// guard elimination.
uint8_t* CommitCode(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,
@@ -155,6 +156,7 @@
// for storing `number_of_roots` roots. Returns null if there is no more room.
// Return the number of bytes allocated.
size_t ReserveData(Thread* self,
+ JitMemoryRegion* region,
size_t stack_map_size,
size_t number_of_roots,
ArtMethod* method,
@@ -164,7 +166,8 @@
REQUIRES(!Locks::jit_lock_);
// Clear data from the data portion of the code cache.
- void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
+ void ClearData(
+ Thread* self, JitMemoryRegion* region, uint8_t* stack_map_data, uint8_t* roots_data)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -273,12 +276,15 @@
// is debuggable.
void ClearEntryPointsInZygoteExecSpace() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
+ JitMemoryRegion* GetPrivateRegion() { return &private_region_; }
+
private:
JitCodeCache();
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
uint8_t* CommitCodeInternal(Thread* self,
+ JitMemoryRegion* region,
ArtMethod* method,
uint8_t* stack_map,
uint8_t* roots_data,