Diffstat (limited to 'runtime/jit/jit_code_cache.cc')
-rw-r--r-- | runtime/jit/jit_code_cache.cc | 48
1 file changed, 9 insertions, 39 deletions
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index d227cecf2a..dd1dbeabc2 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -206,10 +206,8 @@ JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
   if (is_zygote) {
     // Zygote should never collect code to share the memory with the children.
     jit_code_cache->garbage_collect_code_ = false;
-    jit_code_cache->shared_region_ = std::move(region);
-  } else {
-    jit_code_cache->private_region_ = std::move(region);
   }
+  jit_code_cache->private_region_ = std::move(region);
 
   VLOG(jit) << "Created jit code cache: initial capacity="
             << PrettySize(initial_capacity)
@@ -385,8 +383,7 @@ static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
 }
-static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
-                                bool is_shared_region)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
     return;
   }
@@ -399,10 +396,6 @@ static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
-    // Ensure that we don't put movable objects in the shared region.
-    if (is_shared_region) {
-      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
-    }
   }
 }
 
@@ -671,7 +664,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
   if (!method->IsNative()) {
     // We need to do this before grabbing the lock_ because it needs to be able to see the string
     // InternTable. Native methods do not have roots.
-    DCheckRootsAreValid(roots, IsSharedRegion(*region));
+    DCheckRootsAreValid(roots);
   }
 
   size_t root_table_size = ComputeRootTableSize(roots.size());
@@ -1408,13 +1401,6 @@ ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
                                               bool retry_allocation)
     // No thread safety analysis as we are using TryLock/Unlock explicitly.
     NO_THREAD_SAFETY_ANALYSIS {
-  // If we don't have a private region, we cannot allocate a profiling info.
-  // A shared region doesn't support in general GC objects, which a profiling info
-  // can reference.
-  if (!private_region_.IsValid()) {
-    return nullptr;
-  }
-
   ProfilingInfo* info = nullptr;
   if (!retry_allocation) {
     // If we are allocating for the interpreter, just try to lock, to avoid
@@ -1468,9 +1454,7 @@ ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNU
 }
 
 void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
-  return shared_region_.OwnsSpace(mspace)
-      ? shared_region_.MoreCore(mspace, increment)
-      : private_region_.MoreCore(mspace, increment);
+  return private_region_.MoreCore(mspace, increment);
 }
 
 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
@@ -1562,11 +1546,7 @@ bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
   return osr_code_map_.find(method) != osr_code_map_.end();
 }
 
-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
-                                       Thread* self,
-                                       bool osr,
-                                       bool prejit,
-                                       JitMemoryRegion* region) {
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr, bool prejit) {
   if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
     return false;
   }
@@ -1628,7 +1608,7 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
     if (info == nullptr) {
       // When prejitting, we don't allocate a profiling info.
-      if (!prejit && !IsSharedRegion(*region)) {
+      if (!prejit) {
        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
        // Because the counter is not atomic, there are some rare cases where we may not hit the
        // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
@@ -1737,19 +1717,13 @@ void JitCodeCache::Dump(std::ostream& os) {
 
 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
   if (is_zygote) {
-    // Don't create a private region for a child zygote. Regions are usually map shared
-    // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
+    // Don't transition if this is for a child zygote.
     return;
   }
-
-  if (private_region_.IsValid()) {
-    // In case the zygote was running with its own private region (happens for
-    // unit tests), move the region to the shared one.
-    CHECK(!shared_region_.IsValid());
-    std::swap(shared_region_, private_region_);
-  }
 
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  shared_region_ = std::move(private_region_);
+
   // Reset all statistics to be specific to this process.
   number_of_compilations_ = 0;
   number_of_osr_compilations_ = 0;
@@ -1767,9 +1741,5 @@ void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
   }
 }
 
-JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
-  return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
-}
-
 }  // namespace jit
 }  // namespace art
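
Taken together, the hunks above drop the shared/private region split at these call sites: the cache is created with a single private region, MoreCore() no longer dispatches on which region owns the mspace, GetCurrentRegion() is removed, and PostForkChildAction() simply moves the zygote's private region into the shared slot. Below is a minimal standalone sketch of that post-fork handoff, assuming simplified stand-ins (Region, Cache) for JitMemoryRegion and JitCodeCache; it is not ART code, only an illustration of the std::move in the PostForkChildAction() hunk.

#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical stand-in for JitMemoryRegion: just owns a block of bytes.
struct Region {
  std::vector<char> storage;
  bool IsValid() const { return !storage.empty(); }
};

// Hypothetical stand-in for JitCodeCache, reduced to the two region members.
struct Cache {
  Region private_region;  // region the zygote allocates into
  Region shared_region;   // slot a forked child publishes the region under

  // Mirrors the shape of PostForkChildAction() after this change:
  // a child zygote keeps things as-is; a regular child moves the
  // inherited private region into the shared slot.
  void PostForkChildAction(bool is_zygote) {
    if (is_zygote) {
      return;
    }
    shared_region = std::move(private_region);
  }
};

int main() {
  Cache cache;
  cache.private_region.storage.resize(64 * 1024);  // pretend the zygote mapped 64 KiB
  cache.PostForkChildAction(/*is_zygote=*/false);
  std::printf("shared valid: %d\n", static_cast<int>(cache.shared_region.IsValid()));
  return 0;
}

In the real JitCodeCache the move happens under Locks::jit_lock_ and is followed by resetting the per-process statistics, as shown in the PostForkChildAction() hunk above.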