author     2020-03-18 21:19:06 +0000
committer  2020-03-25 14:10:23 +0000
commit     41617b18f1c09e3031710d58fdb93c5aa43399ac
tree       36a0f3e3dc27e97980b96e1150ede718aee775fa
parent     842555d72ee7511c193a65f34841cc92170a1850
Add more logging and sanity checks for JIT mini-debug-info.
Used when diagnosing b/151137723. Keep it around.
Bug: 151137723
Test: test.py -r --jit
Change-Id: I10cc613c7396607e221fdc1f5972d26c1ac03fa8
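For orientation, the core of the new sanity check (added to JitCodeCache::FreeAllMethodHeaders in the diff below) is a two-way comparison: every compiled entry point must have exactly one native-debug-info symbol, and every symbol must map back to a compiled method; in the real change the check is guarded by kIsDebugBuild and aligns symbol addresses down to instruction alignment to strip the Thumb bit on ARM. The following standalone sketch only illustrates that shape; the VisitAllMethods/ForEachNativeDebugSymbol stand-ins and the toy address tables are hypothetical simplifications, not the ART runtime's actual data structures.

// Standalone sketch (not ART code) of the debug-build cross-check added by this
// change: the set of compiled entry points and the set of native-debug-info
// symbols must match exactly. VisitAllMethods() and ForEachNativeDebugSymbol()
// below are simplified stand-ins for the runtime's visitors.
#include <cassert>
#include <functional>
#include <iostream>
#include <map>
#include <set>
#include <string>

namespace {

// Toy "code addresses" and registries standing in for the JIT maps and symbols.
int dummy_code_a = 0;
int dummy_code_b = 0;
const std::map<const void*, std::string> kCompiledMethods = {
    {&dummy_code_a, "void Foo.bar()"},
    {&dummy_code_b, "int Baz.qux(int)"},
};
const std::map<const void*, std::string> kDebugSymbols = kCompiledMethods;

void VisitAllMethods(const std::function<void(const void*, const std::string&)>& cb) {
  for (const auto& entry : kCompiledMethods) cb(entry.first, entry.second);
}

void ForEachNativeDebugSymbol(const std::function<void(const void*, const std::string&)>& cb) {
  for (const auto& entry : kDebugSymbols) cb(entry.first, entry.second);
}

// Mirrors the structure of the new kIsDebugBuild consistency check.
void CheckDebugInfoMatchesCompiledCode() {
  std::map<const void*, std::string> compiled;
  VisitAllMethods([&](const void* addr, const std::string& name) {
    compiled.emplace(addr, name);
  });

  std::set<const void*> debug_info;
  ForEachNativeDebugSymbol([&](const void* addr, const std::string& name) {
    bool inserted = debug_info.insert(addr).second;
    assert(inserted && "Duplicate debug info symbol");
    assert(compiled.count(addr) == 1 && "Debug info for code that is not in the cache");
    (void)inserted;
    (void)name;
  });

  if (!debug_info.empty()) {  // Only meaningful when debug-info generation is enabled.
    for (const auto& entry : compiled) {
      assert(debug_info.count(entry.first) == 1 && "Compiled method without debug info");
    }
  }
  std::cout << "Debug info matches compiled code for " << compiled.size() << " methods\n";
}

}  // namespace

int main() {
  CheckDebugInfoMatchesCompiledCode();
  return 0;
}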
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 22
-rw-r--r--  runtime/jit/debugger_interface.cc          | 17
-rw-r--r--  runtime/jit/debugger_interface.h           |  3
-rw-r--r--  runtime/jit/jit_code_cache.cc              | 69
-rw-r--r--  runtime/jit/jit_code_cache.h               |  6
5 files changed, 103 insertions, 14 deletions
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 9978a6fa18..346f60d4dc 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -406,7 +406,7 @@ class OptimizingCompiler final : public Compiler {
                                 PassObserver* pass_observer,
                                 VariableSizedHandleScope* handles) const;
 
-  void GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
+  std::vector<uint8_t> GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
 
   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
 
@@ -1262,6 +1262,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
 
     // Add debug info after we know the code location but before we update entry-point.
+    std::vector<uint8_t> debug_info;
     if (compiler_options.GenerateAnyDebugInfo()) {
       debug::MethodDebugInfo info = {};
       info.custom_name = "art_jni_trampoline";
@@ -1280,7 +1281,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
       info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
       info.code_info = nullptr;
       info.cfi = jni_compiled_method.GetCfi();
-      GenerateJitDebugInfo(info);
+      debug_info = GenerateJitDebugInfo(info);
     }
 
     if (!code_cache->Commit(self,
@@ -1291,6 +1292,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                             reserved_data,
                             roots,
                             ArrayRef<const uint8_t>(stack_map),
+                            debug_info,
+                            /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                             osr,
                             /* has_should_deoptimize_flag= */ false,
                             cha_single_implementation_list)) {
@@ -1370,6 +1373,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
 
   // Add debug info after we know the code location but before we update entry-point.
   const CompilerOptions& compiler_options = GetCompilerOptions();
+  std::vector<uint8_t> debug_info;
   if (compiler_options.GenerateAnyDebugInfo()) {
     debug::MethodDebugInfo info = {};
     DCHECK(info.custom_name.empty());
@@ -1388,7 +1392,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
     info.frame_size_in_bytes = codegen->GetFrameSize();
     info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
-    GenerateJitDebugInfo(info);
+    debug_info = GenerateJitDebugInfo(info);
   }
 
   if (!code_cache->Commit(self,
@@ -1399,6 +1403,8 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                           reserved_data,
                           roots,
                           ArrayRef<const uint8_t>(stack_map),
+                          debug_info,
+                          /* is_full_debug_info= */ compiler_options.GetGenerateDebugInfo(),
                           osr,
                           codegen->GetGraph()->HasShouldDeoptimizeFlag(),
                           codegen->GetGraph()->GetCHASingleImplementationList())) {
@@ -1427,7 +1433,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   return true;
 }
 
-void OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
+std::vector<uint8_t> OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
   const CompilerOptions& compiler_options = GetCompilerOptions();
   if (compiler_options.GenerateAnyDebugInfo()) {
     // If both flags are passed, generate full debug info.
@@ -1436,13 +1442,9 @@ void OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info
     // Create entry for the single method that we just compiled.
     InstructionSet isa = compiler_options.GetInstructionSet();
     const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
-    std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
-
-    // NB: Don't allow packing of full info since it would remove non-backtrace data.
-    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
-    const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
-    AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
+    return debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
   }
+  return std::vector<uint8_t>();
 }
 
 }  // namespace art
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 0efa4d2cdd..2f929bb919 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -25,6 +25,7 @@
 #include "base/time_utils.h"
 #include "base/utils.h"
 #include "dex/dex_file.h"
+#include "elf/elf_debug_reader.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jit/jit_memory_region.h"
@@ -606,6 +607,8 @@ void RemoveNativeDebugInfoForJit(const void* code_ptr) {
   // Method removal is very expensive since we need to decompress and read ELF files.
   // Collect methods to be removed and do the removal in bulk later.
   g_removed_jit_functions.push_back(code_ptr);
+
+  VLOG(jit) << "JIT mini-debug-info removed for " << code_ptr;
 }
 
 void RepackNativeDebugInfoForJitLocked() {
@@ -645,4 +648,18 @@ Mutex* GetNativeDebugInfoLock() {
   return &g_jit_debug_lock;
 }
 
+void ForEachNativeDebugSymbol(std::function<void(const void*, size_t, const char*)> cb) {
+  MutexLock mu(Thread::Current(), g_jit_debug_lock);
+  using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
+  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
+    ArrayRef<const uint8_t> buffer(it->symfile_addr_, it->symfile_size_);
+    if (!buffer.empty()) {
+      ElfDebugReader<ElfRuntimeTypes> reader(buffer);
+      reader.VisitFunctionSymbols([&](ElfRuntimeTypes::Sym sym, const char* name) {
+        cb(reinterpret_cast<const void*>(sym.st_value), sym.st_size, name);
+      });
+    }
+  }
+}
+
 }  // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 8433e693d7..d6a8063864 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -72,6 +72,9 @@ size_t GetJitMiniDebugInfoMemUsage() REQUIRES_SHARED(Locks::jit_lock_);
 // TODO: Unwinding should be race-free. Remove this.
 Mutex* GetNativeDebugInfoLock();
 
+// Call given callback for every stored symbol. The callback parameters are (address, size, name).
+void ForEachNativeDebugSymbol(std::function<void(const void*, size_t, const char*)> cb);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 75ce1c06e2..29951a7c76 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -173,17 +173,21 @@ class JitCodeCache::JniStubData {
     return methods_;
   }
 
-  void RemoveMethodsIn(const LinearAlloc& alloc) {
-    auto kept_end = std::remove_if(
+  void RemoveMethodsIn(const LinearAlloc& alloc) REQUIRES_SHARED(Locks::mutator_lock_) {
+    auto kept_end = std::partition(
         methods_.begin(),
         methods_.end(),
-        [&alloc](ArtMethod* method) { return alloc.ContainsUnsafe(method); });
+        [&alloc](ArtMethod* method) { return !alloc.ContainsUnsafe(method); });
+    for (auto it = kept_end; it != methods_.end(); it++) {
+      VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
+    }
     methods_.erase(kept_end, methods_.end());
   }
 
-  bool RemoveMethod(ArtMethod* method) {
+  bool RemoveMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
     auto it = std::find(methods_.begin(), methods_.end(), method);
     if (it != methods_.end()) {
+      VLOG(jit) << "JIT removed (JNI) " << (*it)->PrettyMethod() << ": " << code_;
       methods_.erase(it);
       return true;
     } else {
@@ -497,6 +501,26 @@ void JitCodeCache::FreeAllMethodHeaders(
 
   // We have potentially removed a lot of debug info. Do maintenance pass to save space.
   RepackNativeDebugInfoForJit();
+
+  // Check that the set of compiled methods exactly matches native debug information.
+  if (kIsDebugBuild) {
+    std::map<const void*, ArtMethod*> compiled_methods;
+    VisitAllMethods([&](const void* addr, ArtMethod* method) {
+      CHECK(addr != nullptr && method != nullptr);
+      compiled_methods.emplace(addr, method);
+    });
+    std::set<const void*> debug_info;
+    ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
+      addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA));  // Thumb-bit.
+      CHECK(debug_info.emplace(addr).second) << "Duplicate debug info: " << addr << " " << name;
+      CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
+    });
+    if (!debug_info.empty()) {  // If debug-info generation is enabled.
+      for (auto it : compiled_methods) {
+        CHECK_EQ(debug_info.count(it.first), 1u) << "No debug info: " << it.second->PrettyMethod();
+      }
+    }
+  }
 }
 
 void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
@@ -525,6 +549,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
     for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
       if (alloc.ContainsUnsafe(it->second)) {
         method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
+        VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
         it = method_code_map_.erase(it);
       } else {
         ++it;
       }
@@ -636,6 +661,8 @@ bool JitCodeCache::Commit(Thread* self,
                           ArrayRef<const uint8_t> reserved_data,
                           const std::vector<Handle<mirror::Object>>& roots,
                           ArrayRef<const uint8_t> stack_map,
+                          const std::vector<uint8_t>& debug_info,
+                          bool is_full_debug_info,
                           bool osr,
                           bool has_should_deoptimize_flag,
                           const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
@@ -669,6 +696,13 @@ bool JitCodeCache::Commit(Thread* self,
 
       number_of_compilations_++;
 
+      // We need to update the debug info before the entry point gets set.
+      // At the same time we want to do it under JIT lock so that debug info and JIT maps are in sync.
+      if (!debug_info.empty()) {
+        // NB: Don't allow packing of full info since it would remove non-backtrace data.
+        AddNativeDebugInfoForJit(code_ptr, debug_info, /*allow_packing=*/ !is_full_debug_info);
+      }
+
       // We need to update the entry point in the runnable state for the instrumentation.
       {
         // The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
@@ -811,6 +845,7 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
         if (release_memory) {
          FreeCodeAndData(it->first);
         }
+        VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
         it = method_code_map_.erase(it);
       } else {
         ++it;
       }
@@ -1198,6 +1233,9 @@ void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
        ++it;
      } else {
        method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
+       for (ArtMethod* method : data->GetMethods()) {
+         VLOG(jit) << "JIT removed (JNI) " << method->PrettyMethod() << ": " << data->GetCode();
+       }
        it = jni_stubs_map_.erase(it);
      }
    }
@@ -1209,6 +1247,7 @@
      } else {
        OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
        method_headers.insert(header);
+       VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
        it = method_code_map_.erase(it);
      }
    }
@@ -1873,6 +1912,28 @@ JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
   return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
 }
 
+void JitCodeCache::VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb) {
+  for (const auto& it : jni_stubs_map_) {
+    const JniStubData& data = it.second;
+    if (data.IsCompiled()) {
+      for (ArtMethod* method : data.GetMethods()) {
+        cb(data.GetCode(), method);
+      }
+    }
+  }
+  for (auto it : method_code_map_) {  // Includes OSR methods.
+    cb(it.first, it.second);
+  }
+  for (auto it : saved_compiled_methods_map_) {
+    cb(it.second, it.first);
+  }
+  for (auto it : zygote_map_) {
+    if (it.code_ptr != nullptr && it.method != nullptr) {
+      cb(it.code_ptr, it.method);
+    }
+  }
+}
+
 void ZygoteMap::Initialize(uint32_t number_of_methods) {
   MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   // Allocate for 40-80% capacity. This will offer OK lookup times, and termination
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 9ef1e4f98b..50e1e2b179 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -266,6 +266,8 @@ class JitCodeCache {
               ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
               const std::vector<Handle<mirror::Object>>& roots,
               ArrayRef<const uint8_t> stack_map,  // Compiler output (source).
+              const std::vector<uint8_t>& debug_info,
+              bool is_full_debug_info,
               bool osr,
               bool has_should_deoptimize_flag,
               const ArenaSet<ArtMethod*>& cha_single_implementation_list)
@@ -440,6 +442,10 @@ class JitCodeCache {
       REQUIRES(Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
+  // Call given callback for every compiled method in the code cache.
+  void VisitAllMethods(const std::function<void(const void*, ArtMethod*)>& cb)
+      REQUIRES(Locks::jit_lock_);
+
   // Free code and data allocations for `code_ptr`.
   void FreeCodeAndData(const void* code_ptr)
       REQUIRES(Locks::jit_lock_)