 build/Android.gtest.mk                     |   1
 compiler/driver/compiler_driver.cc         |   9
 compiler/jit/jit_compiler.cc               | 155
 compiler/jit/jit_compiler.h                |  17
 compiler/optimizing/optimizing_compiler.cc |  28
 runtime/gc/space/malloc_space.cc           |   2
 runtime/jit/jit_code_cache.cc              | 173
 runtime/jit/jit_code_cache.h               |  68
 runtime/jit/jit_code_cache_test.cc         | 106
 9 files changed, 249 insertions(+), 310 deletions(-)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1b54a510fd..6295e1527b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -204,7 +204,6 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
   runtime/interpreter/safe_math_test.cc \
   runtime/interpreter/unstarted_runtime_test.cc \
   runtime/java_vm_ext_test.cc \
-  runtime/jit/jit_code_cache_test.cc \
   runtime/lambda/closure_test.cc \
   runtime/lambda/shorty_field_type_test.cc \
   runtime/leb128_test.cc \
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8324bf30d6..b9565846ce 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -594,7 +594,7 @@ static void CompileMethod(Thread* self,
     }
   } else if ((access_flags & kAccAbstract) != 0) {
     // Abstract methods don't have code.
-  } else {
+  } else if (Runtime::Current()->IsAotCompiler()) {
     const VerifiedMethod* verified_method =
         driver->GetVerificationResults()->GetVerifiedMethod(method_ref);
     bool compile = compilation_enabled &&
@@ -633,6 +633,13 @@ static void CompileMethod(Thread* self,
           ? dex_to_dex_compilation_level
           : optimizer::DexToDexCompilationLevel::kRequired);
     }
+  } else {
+    // This is for the JIT compiler, which has already ensured the class is verified.
+    // We can go straight to compiling.
+    DCHECK(Runtime::Current()->UseJit());
+    compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
+                                                     class_def_idx, method_idx, class_loader,
+                                                     dex_file, dex_cache);
   }
   if (kTimeCompileMethod) {
     uint64_t duration_ns = NanoTime() - start_ns;
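The compiler_driver.cc hunk above (and the matching optimizing_compiler.cc hunk further down) splits one compile path into an AOT branch and a JIT branch. The rationale is in the added comment: the JIT only sees methods whose classes the runtime has already verified, so the VerifiedMethod lookup applies to AOT only. A condensed sketch of the shared shape, with hypothetical helper names standing in for the real call-site bodies:

    // Sketch only: CompileWithVerificationResults/CompileDirectly are
    // placeholders for the AOT and JIT branch bodies shown in the hunks.
    CompiledMethod* Dispatch(/* dex method description */) {
      if (Runtime::Current()->IsAotCompiler()) {
        // AOT: consult stored verification results before deciding to compile.
        return CompileWithVerificationResults(/* ... */);
      }
      // JIT: the runtime verified (and will initialize) the class before
      // handing the method to the compiler, so compile unconditionally.
      DCHECK(Runtime::Current()->UseJit());
      return CompileDirectly(/* ... */);
    }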
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index fcbd483f7a..3d1b42f51c 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -158,61 +158,66 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
   StackHandleScope<2> hs(self);
   self->AssertNoPendingException();
   Runtime* runtime = Runtime::Current();
+
+  // Check if the method is already compiled.
   if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
     VLOG(jit) << "Already compiled " << PrettyMethod(method);
-    return true;  // Already compiled
+    return true;
   }
-  Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-  {
-    TimingLogger::ScopedTiming t2("Initializing", &logger);
-    if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
-      VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
-      return false;
-    }
+
+  // Don't compile the method if we are supposed to be deoptimized.
+  if (runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+    return false;
   }
-  const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
-  MethodReference method_ref(dex_file, method->GetDexMethodIndex());
-  // Only verify if we don't already have verification results.
-  if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
-    TimingLogger::ScopedTiming t2("Verifying", &logger);
-    std::string error;
-    if (verifier::MethodVerifier::VerifyMethod(method, true, &error) ==
-        verifier::MethodVerifier::kHardFailure) {
-      VLOG(jit) << "Not compile method " << PrettyMethod(method)
-                << " due to verification failure " << error;
-      return false;
-    }
+
+  // Ensure the class is initialized.
+  Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+  if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+    VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
+    return false;
   }
+
+  // Do the compilation.
   CompiledMethod* compiled_method = nullptr;
   {
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     compiled_method = compiler_driver_->CompileArtMethod(self, method);
   }
+
+  // Trim maps to reduce memory usage.
+  // TODO: measure how much this increases compile time.
   {
     TimingLogger::ScopedTiming t2("TrimMaps", &logger);
-    // Trim maps to reduce memory usage, TODO: measure how much this increases compile time.
     runtime->GetArenaPool()->TrimMaps();
   }
+
+  // Check if we failed compiling.
   if (compiled_method == nullptr) {
     return false;
   }
+
   total_time_ += NanoTime() - start_time;
-  // Don't add the method if we are supposed to be deoptimized.
   bool result = false;
-  if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
-    const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
-    if (code != nullptr) {
-      // Already have some compiled code, just use this instead of linking.
-      // TODO: Fix recompilation.
-      method->SetEntryPointFromQuickCompiledCode(code);
+  const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
+
+  if (code != nullptr) {
+    // Already have some compiled code, just use this instead of linking.
+    // TODO: Fix recompilation.
+    method->SetEntryPointFromQuickCompiledCode(code);
+    result = true;
+  } else {
+    TimingLogger::ScopedTiming t2("LinkCode", &logger);
+    OatFile::OatMethod oat_method(nullptr, 0);
+    if (AddToCodeCache(method, compiled_method, &oat_method)) {
+      oat_method.LinkMethod(method);
+      CHECK(runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) << PrettyMethod(method);
       result = true;
-    } else {
-      TimingLogger::ScopedTiming t2("MakeExecutable", &logger);
-      result = MakeExecutable(compiled_method, method);
     }
   }
+
   // Remove the compiled method to save memory.
-  compiler_driver_->RemoveCompiledMethod(method_ref);
+  compiler_driver_->RemoveCompiledMethod(
+      MethodReference(h_class->GetDexCache()->GetDexFile(), method->GetDexMethodIndex()));
   runtime->GetJit()->AddTimingLogger(logger);
   return result;
 }
@@ -221,41 +226,8 @@ CompilerCallbacks* JitCompiler::GetCompilerCallbacks() const {
   return callbacks_.get();
 }
 
-uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_method,
-                                               uint8_t* reserve_begin, uint8_t* reserve_end,
-                                               const uint8_t* mapping_table,
-                                               const uint8_t* vmap_table,
-                                               const uint8_t* gc_map) {
-  reserve_begin += sizeof(OatQuickMethodHeader);
-  reserve_begin = reinterpret_cast<uint8_t*>(
-      compiled_method->AlignCode(reinterpret_cast<uintptr_t>(reserve_begin)));
-  const auto* quick_code = compiled_method->GetQuickCode();
-  CHECK_LE(reserve_begin, reserve_end);
-  CHECK_LE(quick_code->size(), static_cast<size_t>(reserve_end - reserve_begin));
-  auto* code_ptr = reserve_begin;
-  OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
-  // Construct the header last.
-  const auto frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
-  const auto core_spill_mask = compiled_method->GetCoreSpillMask();
-  const auto fp_spill_mask = compiled_method->GetFpSpillMask();
-  const auto code_size = quick_code->size();
-  CHECK_NE(code_size, 0U);
-  std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
-  // After we are done writing we need to update the method header.
-  // Write out the method header last.
-  method_header = new(method_header) OatQuickMethodHeader(
-      (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
-      (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
-      (gc_map == nullptr) ? 0 : code_ptr - gc_map,
-      frame_size_in_bytes,
-      core_spill_mask,
-      fp_spill_mask,
-      code_size);
-  // Return the code ptr.
-  return code_ptr;
-}
-
-bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
+bool JitCompiler::AddToCodeCache(ArtMethod* method,
+                                 const CompiledMethod* compiled_method,
                                  OatFile::OatMethod* out_method) {
   Runtime* runtime = Runtime::Current();
   JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
@@ -265,7 +237,6 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil
   }
   const auto code_size = quick_code->size();
   Thread* const self = Thread::Current();
-  const uint8_t* base = code_cache->CodeCachePtr();
   auto* const mapping_table = compiled_method->GetMappingTable();
   auto* const vmap_table = compiled_method->GetVmapTable();
   auto* const gc_map = compiled_method->GetGcMap();
@@ -298,45 +269,35 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil
     }
   }
 
-  // Don't touch this until you protect / unprotect the code.
-  const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
-  uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
-  if (code_reserve == nullptr) {
+  uint8_t* const code = code_cache->CommitCode(self,
+                                               mapping_table_ptr,
+                                               vmap_table_ptr,
+                                               gc_map_ptr,
+                                               compiled_method->GetFrameSizeInBytes(),
+                                               compiled_method->GetCoreSpillMask(),
+                                               compiled_method->GetFpSpillMask(),
+                                               compiled_method->GetQuickCode()->data(),
+                                               compiled_method->GetQuickCode()->size());
+
+  if (code == nullptr) {
     return false;
   }
-  auto* code_ptr = WriteMethodHeaderAndCode(
-      compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
-      vmap_table_ptr, gc_map_ptr);
-
-  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
-                          reinterpret_cast<char*>(code_ptr + quick_code->size()));
 
   const size_t thumb_offset = compiled_method->CodeDelta();
-  const uint32_t code_offset = code_ptr - base + thumb_offset;
-  *out_method = OatFile::OatMethod(base, code_offset);
+  const uint32_t code_offset = sizeof(OatQuickMethodHeader) + thumb_offset;
+  *out_method = OatFile::OatMethod(code, code_offset);
   DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
   DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
   DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
   DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
   DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
   DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
-  VLOG(jit) << "JIT added " << PrettyMethod(method) << "@" << method << " ccache_size="
-            << PrettySize(code_cache->CodeCacheSize()) << ": " << reinterpret_cast<void*>(code_ptr)
-            << "," << reinterpret_cast<void*>(code_ptr + code_size);
-  return true;
-}
-
-bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) {
-  CHECK(method != nullptr);
-  CHECK(compiled_method != nullptr);
-  OatFile::OatMethod oat_method(nullptr, 0);
-  if (!AddToCodeCache(method, compiled_method, &oat_method)) {
-    return false;
-  }
-  // TODO: Flush instruction cache.
-  oat_method.LinkMethod(method);
-  CHECK(Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method))
-      << PrettyMethod(method);
+  VLOG(jit)
+      << "JIT added "
+      << PrettyMethod(method) << "@" << method
+      << " ccache_size=" << PrettySize(code_cache->CodeCacheSize()) << ": "
+      << reinterpret_cast<void*>(code + code_offset)
+      << "," << reinterpret_cast<void*>(code + code_offset + code_size);
   return true;
 }
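A note on the new contract in AddToCodeCache: JitCodeCache::CommitCode (introduced in jit_code_cache.cc below) returns a pointer to the OatQuickMethodHeader it wrote, and the header sits immediately before the machine code. That is why the OatMethod can now be built from the constant offset sizeof(OatQuickMethodHeader) plus the Thumb bit, rather than an offset from the cache base. A minimal sketch of the arithmetic, using a stand-in header type since only its size matters here:

    #include <cstddef>
    #include <cstdint>

    // Stand-in for OatQuickMethodHeader; the real type has table offsets,
    // spill masks, frame size, and code size.
    struct HeaderStandIn { uint32_t fields_[7]; };

    // CommitCode hands back the header address; the entry point is the first
    // byte after the header (plus the Thumb bit on 32-bit ARM).
    const void* EntryPoint(const uint8_t* committed, size_t thumb_offset) {
      return committed + sizeof(HeaderStandIn) + thumb_offset;
    }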
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index ef68caa5fa..757f3f386a 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -39,10 +39,6 @@ class JitCompiler {
   virtual ~JitCompiler();
   bool CompileMethod(Thread* self, ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
-  // This is in the compiler since the runtime doesn't have access to the compiled method
-  // structures.
-  bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
-                      OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
   CompilerCallbacks* GetCompilerCallbacks() const;
   size_t GetTotalCompileTime() const {
     return total_time_;
@@ -58,12 +54,13 @@ class JitCompiler {
   std::unique_ptr<CompilerDriver> compiler_driver_;
   std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
 
-  explicit JitCompiler();
-  uint8_t* WriteMethodHeaderAndCode(
-      const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
-      const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
-  bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+  JitCompiler();
+
+  // This is in the compiler since the runtime doesn't have access to the compiled method
+  // structures.
+  bool AddToCodeCache(ArtMethod* method,
+                      const CompiledMethod* compiled_method,
+                      OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(JitCompiler);
 };
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 17a4743290..5404e56a5b 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -838,18 +838,26 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                             Handle<mirror::DexCache> dex_cache) const {
   CompilerDriver* compiler_driver = GetCompilerDriver();
   CompiledMethod* method = nullptr;
-  const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
-  DCHECK(!verified_method->HasRuntimeThrow());
-  if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
-      || CanHandleVerificationFailure(verified_method)) {
-    method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
-                        method_idx, jclass_loader, dex_file, dex_cache);
-  } else {
-    if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
-      MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+  if (Runtime::Current()->IsAotCompiler()) {
+    const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+    DCHECK(!verified_method->HasRuntimeThrow());
+    if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
+        || CanHandleVerificationFailure(verified_method)) {
+      method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+                          method_idx, jclass_loader, dex_file, dex_cache);
     } else {
-      MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+      if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+        MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+      } else {
+        MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+      }
     }
+  } else {
+    // This is for the JIT compiler, which has already ensured the class is verified.
+    // We can go straight to compiling.
+    DCHECK(Runtime::Current()->UseJit());
+    method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+                        method_idx, jclass_loader, dex_file, dex_cache);
   }
 
   if (kIsDebugBuild &&
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 3a0d814a20..b1572cc7ea 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -56,7 +56,7 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
     mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
         Begin(), NonGrowthLimitCapacity()));
-    CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
+    CHECK(mark_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
         << bitmap_index;
   }
   for (auto& freed : recent_freed_objects_) {
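The jit_code_cache.cc rewrite that follows replaces the bump-pointer cache with two dlmalloc mspaces, one per mapping. Create() reserves a single region and splits it with MemMap::RemapAtEnd, which keeps data and code adjacent so the 32-bit offsets in each OatQuickMethodHeader can always reach their tables, and mspace_set_footprint_limit pins each mspace to its map so dlmalloc never asks for more memory. A rough sketch of the carve-up using plain mmap (illustration only, not the MemMap API):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Illustration of the RemapAtEnd split: one contiguous reservation, the
    // low quarter for data and the remainder for code, so every code-to-data
    // offset fits comfortably in 32 bits.
    bool SplitCache(size_t capacity, size_t page_size,
                    uint8_t** data_begin, size_t* data_size,
                    uint8_t** code_begin, size_t* code_size) {
      void* base = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) {
        return false;
      }
      *data_size = (capacity / 4 + page_size - 1) / page_size * page_size;  // RoundUp
      *code_size = capacity - *data_size;
      *data_begin = static_cast<uint8_t*>(base);
      *code_begin = *data_begin + *data_size;
      return true;
    }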
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4c5316227c..7e95e71a5b 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -25,37 +25,77 @@
 namespace art {
 namespace jit {
 
+static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtData = PROT_READ | PROT_WRITE;
+static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+
+#define CHECKED_MPROTECT(memory, size, prot)               \
+  do {                                                     \
+    int rc = mprotect(memory, size, prot);                 \
+    if (UNLIKELY(rc != 0)) {                               \
+      errno = rc;                                          \
+      PLOG(FATAL) << "Failed to mprotect jit code cache";  \
+    }                                                      \
+  } while (false)                                          \
+
 JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
   CHECK_GT(capacity, 0U);
   CHECK_LT(capacity, kMaxCapacity);
   std::string error_str;
   // Map name specific for android_os_Debug.cpp accounting.
-  MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
-                                     PROT_READ | PROT_WRITE | PROT_EXEC, false, false, &error_str);
-  if (map == nullptr) {
+  MemMap* data_map = MemMap::MapAnonymous(
+      "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
+  if (data_map == nullptr) {
+    std::ostringstream oss;
+    oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+    *error_msg = oss.str();
+    return nullptr;
+  }
+
+  // Data cache is 1 / 4 of the map.
+  // TODO: Make this variable?
+  size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+  size_t code_size = data_map->Size() - data_size;
+  uint8_t* divider = data_map->Begin() + data_size;
+
+  // We need to have 32 bit offsets from method headers in code cache which point to things
+  // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
+  MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
+  if (code_map == nullptr) {
     std::ostringstream oss;
     oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
     *error_msg = oss.str();
     return nullptr;
   }
-  return new JitCodeCache(map);
+  DCHECK_EQ(code_map->Size(), code_size);
+  DCHECK_EQ(code_map->Begin(), divider);
+  return new JitCodeCache(code_map, data_map);
 }
 
-JitCodeCache::JitCodeCache(MemMap* mem_map)
-    : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
-  VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
-  mem_map_.reset(mem_map);
-  uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
-  // Data cache is 1 / 4 of the map. TODO: Make this variable?
-  // Put data at the start.
-  data_cache_ptr_ = mem_map->Begin();
-  data_cache_end_ = divider;
-  data_cache_begin_ = data_cache_ptr_;
-  mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
-  // Code cache after.
-  code_cache_begin_ = divider;
-  code_cache_ptr_ = divider;
-  code_cache_end_ = mem_map->End();
+JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
+    : lock_("Jit code cache", kJitCodeCacheLock),
+      code_map_(code_map),
+      data_map_(data_map),
+      num_methods_(0) {
+
+  VLOG(jit) << "Created jit code cache: data size="
+            << PrettySize(data_map_->Size())
+            << ", code size="
+            << PrettySize(code_map_->Size());
+
+  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
+  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
+
+  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
+    PLOG(FATAL) << "create_mspace_with_base failed";
+  }
+
+  // Prevent morecore requests from the mspace.
+  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
+  mspace_set_footprint_limit(data_mspace_, data_map_->Size());
+
+  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
 }
 
 bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
@@ -63,44 +103,93 @@ bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
 }
 
 bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
-  return ptr >= code_cache_begin_ && ptr < code_cache_end_;
+  return code_map_->Begin() <= ptr && ptr < code_map_->End();
 }
 
-void JitCodeCache::FlushInstructionCache() {
-  UNIMPLEMENTED(FATAL);
-  // TODO: Investigate if we need to do this.
-  // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
-}
+class ScopedCodeCacheWrite {
+ public:
+  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
+    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
+  }
+  ~ScopedCodeCacheWrite() {
+    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+  }
+
+ private:
+  MemMap* const code_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
+};
+
+uint8_t* JitCodeCache::CommitCode(Thread* self,
+                                  const uint8_t* mapping_table,
+                                  const uint8_t* vmap_table,
+                                  const uint8_t* gc_map,
+                                  size_t frame_size_in_bytes,
+                                  size_t core_spill_mask,
+                                  size_t fp_spill_mask,
+                                  const uint8_t* code,
+                                  size_t code_size) {
+  size_t total_size = RoundUp(sizeof(OatQuickMethodHeader) + code_size + 32, sizeof(void*));
+  OatQuickMethodHeader* method_header = nullptr;
+  uint8_t* code_ptr;
 
-uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
   MutexLock mu(self, lock_);
-  if (size > CodeCacheRemain()) {
-    return nullptr;
+  {
+    ScopedCodeCacheWrite scc(code_map_.get());
+    uint8_t* result = reinterpret_cast<uint8_t*>(mspace_malloc(code_mspace_, total_size));
+    if (result == nullptr) {
+      return nullptr;
+    }
+    code_ptr = reinterpret_cast<uint8_t*>(
+        RoundUp(reinterpret_cast<size_t>(result + sizeof(OatQuickMethodHeader)),
+                GetInstructionSetAlignment(kRuntimeISA)));
+
+    std::copy(code, code + code_size, code_ptr);
+    method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+    new (method_header) OatQuickMethodHeader(
+        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
+        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
+        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
+        frame_size_in_bytes,
+        core_spill_mask,
+        fp_spill_mask,
+        code_size);
   }
+
+  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
+                          reinterpret_cast<char*>(code_ptr + code_size));
+
   ++num_methods_;  // TODO: This is hacky but works since each method has exactly one code region.
-  code_cache_ptr_ += size;
-  return code_cache_ptr_ - size;
+  return reinterpret_cast<uint8_t*>(method_header);
+}
+
+size_t JitCodeCache::CodeCacheSize() {
+  MutexLock mu(Thread::Current(), lock_);
+  size_t bytes_allocated = 0;
+  mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+  return bytes_allocated;
+}
+
+size_t JitCodeCache::DataCacheSize() {
+  MutexLock mu(Thread::Current(), lock_);
+  size_t bytes_allocated = 0;
+  mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+  return bytes_allocated;
 }
 
 uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
-  MutexLock mu(self, lock_);
   size = RoundUp(size, sizeof(void*));
-  if (size > DataCacheRemain()) {
-    return nullptr;
-  }
-  data_cache_ptr_ += size;
-  return data_cache_ptr_ - size;
+  MutexLock mu(self, lock_);
+  return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
 }
 
 uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
-  MutexLock mu(self, lock_);
-  const size_t size = RoundUp(end - begin, sizeof(void*));
-  if (size > DataCacheRemain()) {
+  uint8_t* result = ReserveData(self, end - begin);
+  if (result == nullptr) {
    return nullptr;  // Out of space in the data cache.
   }
-  std::copy(begin, end, data_cache_ptr_);
-  data_cache_ptr_ += size;
-  return data_cache_ptr_ - size;
+  std::copy(begin, end, result);
+  return result;
 }
 
 const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
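The protection dance above is the heart of the change: outside of ScopedCodeCacheWrite the code map stays readable and executable only, and CommitCode flips it to RWX just long enough to write the header and code, with the destructor restoring R+X even on the early-return path. The same RAII pattern in a self-contained form, using raw mprotect in place of MemMap and abort standing in for PLOG(FATAL):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Minimal analogue of ScopedCodeCacheWrite: RWX while in scope, back to
    // R+X in the destructor, so no exit path can leave the cache writable
    // and executable at once.
    class ScopedWritableRegion {
     public:
      ScopedWritableRegion(void* begin, size_t size) : begin_(begin), size_(size) {
        Protect(PROT_READ | PROT_WRITE | PROT_EXEC);
      }
      ~ScopedWritableRegion() { Protect(PROT_READ | PROT_EXEC); }
      ScopedWritableRegion(const ScopedWritableRegion&) = delete;
      ScopedWritableRegion& operator=(const ScopedWritableRegion&) = delete;

     private:
      void Protect(int prot) {
        if (mprotect(begin_, size_, prot) != 0) {
          perror("mprotect");  // the real code logs PLOG(FATAL)
          abort();
        }
      }
      void* const begin_;
      const size_t size_;
    };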
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index f485e4aded..fa90c1806f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
 #include "atomic.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "gc/allocator/dlmalloc.h"
 #include "gc_root.h"
 #include "jni.h"
 #include "oat_file.h"
@@ -48,33 +49,25 @@ class JitCodeCache {
   // in the out arg error_msg.
   static JitCodeCache* Create(size_t capacity, std::string* error_msg);
 
-  const uint8_t* CodeCachePtr() const {
-    return code_cache_ptr_;
-  }
-
-  size_t CodeCacheSize() const {
-    return code_cache_ptr_ - code_cache_begin_;
-  }
-
-  size_t CodeCacheRemain() const {
-    return code_cache_end_ - code_cache_ptr_;
-  }
-
-  const uint8_t* DataCachePtr() const {
-    return data_cache_ptr_;
+  size_t NumMethods() const {
+    return num_methods_;
   }
 
-  size_t DataCacheSize() const {
-    return data_cache_ptr_ - data_cache_begin_;
-  }
+  size_t CodeCacheSize() REQUIRES(!lock_);
 
-  size_t DataCacheRemain() const {
-    return data_cache_end_ - data_cache_ptr_;
-  }
+  size_t DataCacheSize() REQUIRES(!lock_);
 
-  size_t NumMethods() const {
-    return num_methods_;
-  }
+  // Allocate and write code and its metadata to the code cache.
+  uint8_t* CommitCode(Thread* self,
+                      const uint8_t* mapping_table,
+                      const uint8_t* vmap_table,
+                      const uint8_t* gc_map,
+                      size_t frame_size_in_bytes,
+                      size_t core_spill_mask,
+                      size_t fp_spill_mask,
+                      const uint8_t* code,
+                      size_t code_size)
+      REQUIRES(!lock_);
 
   // Return true if the code cache contains the code pointer which is the entrypoint of the method.
   bool ContainsMethod(ArtMethod* method) const
@@ -83,9 +76,6 @@ class JitCodeCache {
   // Return true if the code cache contains a code ptr.
   bool ContainsCodePtr(const void* ptr) const;
 
-  // Reserve a region of code of size at least "size". Returns null if there is no more room.
-  uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
-
   // Reserve a region of data of size at least "size". Returns null if there is no more room.
   uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
 
@@ -105,25 +95,19 @@ class JitCodeCache {
 
  private:
   // Takes ownership of code_mem_map.
-  explicit JitCodeCache(MemMap* code_mem_map);
-
-  // Unimplemented, TODO: Determine if it is necessary.
-  void FlushInstructionCache();
+  JitCodeCache(MemMap* code_map, MemMap* data_map);
 
   // Lock which guards.
   Mutex lock_;
-  // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
-  // headers in code cache which point to things in the data cache. If the maps are more than 4GB
-  // apart, having multiple maps wouldn't work.
-  std::unique_ptr<MemMap> mem_map_;
-  // Code cache section.
-  uint8_t* code_cache_ptr_;
-  const uint8_t* code_cache_begin_;
-  const uint8_t* code_cache_end_;
-  // Data cache section.
-  uint8_t* data_cache_ptr_;
-  const uint8_t* data_cache_begin_;
-  const uint8_t* data_cache_end_;
+  // Mem map which holds code.
+  std::unique_ptr<MemMap> code_map_;
+  // Mem map which holds data (stack maps and profiling info).
+  std::unique_ptr<MemMap> data_map_;
+  // The opaque mspace for allocating code.
+  void* code_mspace_;
+  // The opaque mspace for allocating data.
+  void* data_mspace_;
+  // Number of compiled methods.
   size_t num_methods_;
   // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
   // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
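Taken together, the new public surface is deliberately small: callers stage the side tables through ReserveData/AddDataArray first, then hand everything to CommitCode in one shot. A hedged sketch of the calling sequence, mirroring JitCompiler::AddToCodeCache above (locals such as quick_code and the table vectors are assumed in scope; both calls return null when their cache is exhausted):

    // Stage the metadata in the data cache first...
    uint8_t* mapping_table_ptr =
        code_cache->AddDataArray(self, mapping_table->data(),
                                 mapping_table->data() + mapping_table->size());
    // ... (vmap table and GC map staged the same way) ...

    // ...then commit header + code into the executable map in one call.
    uint8_t* committed = code_cache->CommitCode(self,
                                                mapping_table_ptr, vmap_table_ptr, gc_map_ptr,
                                                frame_size_in_bytes, core_spill_mask, fp_spill_mask,
                                                quick_code->data(), quick_code->size());
    if (committed == nullptr) {
      // Out of cache: leave the method to the interpreter.
    }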
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
deleted file mode 100644
index c76dc1110a..0000000000
--- a/runtime/jit/jit_code_cache_test.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_runtime_test.h"
-
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "jit_code_cache.h"
-#include "scoped_thread_state_change.h"
-#include "thread-inl.h"
-
-namespace art {
-namespace jit {
-
-class JitCodeCacheTest : public CommonRuntimeTest {
- public:
-};
-
-TEST_F(JitCodeCacheTest, TestCoverage) {
-  std::string error_msg;
-  constexpr size_t kSize = 1 * MB;
-  std::unique_ptr<JitCodeCache> code_cache(
-      JitCodeCache::Create(kSize, &error_msg));
-  ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
-  ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
-  ASSERT_EQ(code_cache->CodeCacheSize(), 0u);
-  ASSERT_GT(code_cache->CodeCacheRemain(), 0u);
-  ASSERT_TRUE(code_cache->DataCachePtr() != nullptr);
-  ASSERT_EQ(code_cache->DataCacheSize(), 0u);
-  ASSERT_GT(code_cache->DataCacheRemain(), 0u);
-  ASSERT_EQ(code_cache->CodeCacheRemain() + code_cache->DataCacheRemain(), kSize);
-  ASSERT_EQ(code_cache->NumMethods(), 0u);
-  ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<1> hs(soa.Self());
-  uint8_t* const reserved_code = code_cache->ReserveCode(soa.Self(), 4 * KB);
-  ASSERT_TRUE(reserved_code != nullptr);
-  ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
-  ASSERT_EQ(code_cache->NumMethods(), 1u);
-  Runtime* const runtime = Runtime::Current();
-  ClassLinker* const class_linker = runtime->GetClassLinker();
-  ArtMethod* method = &class_linker->AllocArtMethodArray(soa.Self(),
-                                                         runtime->GetLinearAlloc(),
-                                                         1)->At(0);
-  ASSERT_FALSE(code_cache->ContainsMethod(method));
-  method->SetEntryPointFromQuickCompiledCode(reserved_code);
-  ASSERT_TRUE(code_cache->ContainsMethod(method));
-  ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
-  // Save the code and then change it.
-  code_cache->SaveCompiledCode(method, reserved_code);
-  method->SetEntryPointFromQuickCompiledCode(nullptr);
-  ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
-  const uint8_t data_arr[] = {1, 2, 3, 4, 5};
-  uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr));
-  ASSERT_TRUE(data_ptr != nullptr);
-  ASSERT_EQ(memcmp(data_ptr, data_arr, sizeof(data_arr)), 0);
-}
-
-TEST_F(JitCodeCacheTest, TestOverflow) {
-  std::string error_msg;
-  constexpr size_t kSize = 1 * MB;
-  std::unique_ptr<JitCodeCache> code_cache(
-      JitCodeCache::Create(kSize, &error_msg));
-  ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
-  ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
-  size_t code_bytes = 0;
-  size_t data_bytes = 0;
-  constexpr size_t kCodeArrSize = 4 * KB;
-  constexpr size_t kDataArrSize = 4 * KB;
-  uint8_t data_arr[kDataArrSize];
-  std::fill_n(data_arr, arraysize(data_arr), 53);
-  // Add code and data until we are full.
-  uint8_t* code_ptr = nullptr;
-  uint8_t* data_ptr = nullptr;
-  do {
-    code_ptr = code_cache->ReserveCode(Thread::Current(), kCodeArrSize);
-    data_ptr = code_cache->AddDataArray(Thread::Current(), data_arr, data_arr + kDataArrSize);
-    if (code_ptr != nullptr) {
-      code_bytes += kCodeArrSize;
-    }
-    if (data_ptr != nullptr) {
-      data_bytes += kDataArrSize;
-    }
-  } while (code_ptr != nullptr || data_ptr != nullptr);
-  // Make sure we added a reasonable amount.
-  CHECK_GT(code_bytes, 0u);
-  CHECK_LE(code_bytes, kSize);
-  CHECK_GT(data_bytes, 0u);
-  CHECK_LE(data_bytes, kSize);
-  CHECK_GE(code_bytes + data_bytes, kSize * 4 / 5);
-}
-
-}  // namespace jit
-}  // namespace art