/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger_interface.h"

#include <android-base/logging.h>

#include "base/array_ref.h"
#include "base/mutex.h"
#include "base/time_utils.h"
#include "thread-current-inl.h"
#include "thread.h"

#include <atomic>
#include <cstddef>
#include <cstring>  // For memset/memcpy used below.
#include <unordered_map>

//
// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
//
// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
//
// There are two ways for native tools to access the debug data safely:
//
// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
//    method, which is called after every modification of the linked list.
//    GDB does this, but it is complex to set up and it stops the process.
//
// 2) Asynchronously, by monitoring the action_seqlock_.
//   * The seqlock is a monotonically increasing counter which is incremented
//     before and after every modification of the linked list. An odd value of
//     the counter means the linked list is being modified (it is locked).
//   * The tool should read the value of the seqlock both before and after
//     copying the linked list. If the seqlock values match and are even,
//     the copy is consistent. Otherwise, the reader should try again.
//   * Note that using the data directly while it is being modified
//     might crash the tool. Therefore, the only safe way is to make
//     a copy and use the copy only after the seqlock has been checked.
//   * Note that the process might even free and munmap the data while
//     it is being copied, therefore the reader should either handle
//     SEGV or use OS calls to read the memory (e.g. process_vm_readv).
//   * The seqlock can be used to determine the number of modifications of
//     the linked list, which can be used to intelligently cache the data.
//     Note the possible overflow of the seqlock. It is intentionally
//     32-bit, since 64-bit atomics can be tricky on some architectures.
//   * The timestamps on the entry record the time when the entry was
//     created, which is relevant if the unwinding is not live and is
//     postponed until much later. All timestamps must be unique.
//   * Memory barriers are used to make it possible to reason about
//     the data even when it is being modified (e.g. the process crashed
//     while that data was locked, and thus it will never be unlocked).
//   * In particular, it should be possible to (see the reader sketch below):
//     1) read the seqlock and then the linked list head pointer.
//     2) copy the entry and check that the seqlock has not changed.
//     3) copy the symfile and check that the seqlock has not changed.
//     4) go back to step 2 using the next pointer (if non-null).
//     This safely creates a copy of all symfiles, although other data
//     might be inconsistent/unusable (e.g. prev_, action_timestamp_).
//   * For full conformance with the C++ memory model, all seqlock-
//     protected accesses should be atomic. We currently do this in the
//     more critical cases. The rest will have to be fixed before
//     attempting to run TSAN on this code.
//
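// For illustration, a minimal in-process reader could copy all symfiles as
// sketched below. This is only a sketch, not part of ART: a real
// out-of-process tool would replace the direct memory accesses with
// process_vm_readv (or equivalent) and handle SEGV, as noted above.
//
//   std::vector<std::vector<uint8_t>> CopyAllSymfiles(const JITDescriptor* d) {
//     while (true) {  // Retry until a consistent snapshot is obtained.
//       // Step 1: read the seqlock and then the linked list head pointer.
//       uint32_t seq = d->action_seqlock_.load(std::memory_order_acquire);
//       if ((seq & 1) != 0) continue;  // Odd: the list is locked; try again.
//       std::vector<std::vector<uint8_t>> copies;
//       const JITCodeEntry* entry = d->head_.load(std::memory_order_acquire);
//       bool ok = true;
//       while (ok && entry != nullptr) {
//         // Step 2: copy the entry fields, then re-check the seqlock.
//         const uint8_t* addr = entry->symfile_addr_;
//         uint64_t size = entry->symfile_size_;
//         const JITCodeEntry* next = entry->next_.load(std::memory_order_acquire);
//         ok = (d->action_seqlock_.load(std::memory_order_acquire) == seq);
//         if (!ok) break;
//         // Step 3: copy the symfile, then re-check the seqlock.
//         copies.emplace_back(addr, addr + size);
//         ok = (d->action_seqlock_.load(std::memory_order_acquire) == seq);
//         // Step 4: go back to step 2 using the next pointer.
//         entry = next;
//       }
//       if (ok) return copies;  // Seqlock unchanged and even: copy is consistent.
//     }
//   }
//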

namespace art {
extern "C" {
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  struct JITCodeEntry {
    // Atomic to ensure the reader can always iterate over the linked list
    // (e.g. the process could crash in the middle of writing this field).
    std::atomic<JITCodeEntry*> next_;
    // Non-atomic. The reader should not use it. It is only used for deletion.
    JITCodeEntry* prev_;
    const uint8_t* symfile_addr_;
    uint64_t symfile_size_;  // Beware of the offset (12 on x86; but 16 on ARM32).

    // Android-specific fields:
    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
  };

  struct JITDescriptor {
    uint32_t version_ = 1;                      // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;       // One of the JITAction enum values.
    JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of the linked list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
    uint32_t sizeof_entry = sizeof(JITCodeEntry);
    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
    uint64_t action_timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
  };

  // Check that std::atomic has the expected layout.
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");

  // GDB may set a breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }
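
  // For example, a debugger typically arranges to stop on every modification
  // with something like the standard GDB JIT-interface setup:
  //   (gdb) break __jit_debug_register_code
  // and re-reads __jit_debug_descriptor each time the breakpoint is hit.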

  // Alternatively, native tools may overwrite this function pointer to execute a custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;
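
  // For instance, an in-process tool could install its own hook (a sketch;
  // MyJitEventHook is a hypothetical function provided by the tool):
  //   void MyJitEventHook() { /* inspect __jit_debug_descriptor here */ }
  //   __jit_debug_register_code_ptr = MyJitEventHook;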

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor {};
}

// Mark the descriptor as "locked", so native tools know the data is being modified.
static void ActionSeqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}

// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
static void ActionSequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
}

static JITCodeEntry* CreateJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    const ArrayRef<const uint8_t>& symfile)
    REQUIRES(Locks::native_debug_interface_lock_) {
  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
  JITCodeEntry* entry = new JITCodeEntry;
  CHECK(entry != nullptr);
  entry->symfile_addr_ = symfile.data();
  entry->symfile_size_ = symfile.size();
  entry->prev_ = nullptr;
  entry->next_.store(head, std::memory_order_relaxed);
  entry->register_timestamp_ = timestamp;

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  if (head != nullptr) {
    head->prev_ = entry;
  }
  descriptor.head_.store(entry, std::memory_order_relaxed);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();
  return entry;
}

static void DeleteJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    JITCodeEntry* entry)
    REQUIRES(Locks::native_debug_interface_lock_) {
  CHECK(entry != nullptr);

  // Ensure the timestamp is monotonically increasing even in the presence of a
  // low-granularity system timer. This ensures each entry has a unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
  if (entry->prev_ != nullptr) {
    entry->prev_->next_.store(next, std::memory_order_relaxed);
  } else {
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  if (next != nullptr) {
    next->prev_ = entry->prev_;
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  (*register_code_ptr)();

  // Ensure that the clear below cannot be reordered above the unlock above.
  std::atomic_thread_fence(std::memory_order_release);

  // Aggressively clear the entry as an extra check of the synchronisation.
  memset(entry, 0, sizeof(*entry));

  delete entry;
}

static std::unordered_map<const void*, JITCodeEntry*> __dex_debug_entries
    GUARDED_BY(Locks::native_debug_interface_lock_);

void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
  DCHECK(dexfile.data() != nullptr);
  // This is just a defensive check. The class linker should not register the dex file twice.
  if (__dex_debug_entries.count(dexfile.data()) == 0) {
    JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
                                                     __dex_debug_register_code_ptr,
                                                     dexfile);
    __dex_debug_entries.emplace(dexfile.data(), entry);
  }
}

void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
  MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
  auto it = __dex_debug_entries.find(dexfile.data());
  // We register dex files in the class linker and free them in DexFile_closeDexFile, but
  // there might be cases where we load the dex file without using it in the class linker.
  if (it != __dex_debug_entries.end()) {
    DeleteJITCodeEntryInternal(__dex_debug_descriptor,
                               __dex_debug_register_code_ptr,
                               it->second);
    __dex_debug_entries.erase(it);
  }
}

static size_t __jit_debug_mem_usage
    GUARDED_BY(Locks::native_debug_interface_lock_) = 0;

// Mapping from handle to entry. Used to manage the lifetime of the entries.
static std::unordered_map<const void*, JITCodeEntry*> __jit_debug_entries
    GUARDED_BY(Locks::native_debug_interface_lock_);

void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile) {
  DCHECK_NE(symfile.size(), 0u);

  // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
  uint8_t* copy = new uint8_t[symfile.size()];
  CHECK(copy != nullptr);
  memcpy(copy, symfile.data(), symfile.size());

  JITCodeEntry* entry = CreateJITCodeEntryInternal(
      __jit_debug_descriptor,
      __jit_debug_register_code_ptr,
      ArrayRef<const uint8_t>(copy, symfile.size()));
  __jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;

  // We don't provide a handle for type debug info, which means we cannot free it later.
  // (This only happens when the --generate-debug-info flag is enabled for the purpose
  // of being debugged with gdb; it does not happen for debuggable apps by default.)
  bool ok = handle == nullptr || __jit_debug_entries.emplace(handle, entry).second;
  DCHECK(ok) << "Native debug entry already exists for " << std::hex << handle;
}

void RemoveNativeDebugInfoForJit(const void* handle) {
  auto it = __jit_debug_entries.find(handle);
  // We generate JIT native debug info only if the right runtime flags are enabled,
  // but we try to remove it unconditionally whenever code is freed from the JIT cache.
  if (it != __jit_debug_entries.end()) {
    JITCodeEntry* entry = it->second;
    const uint8_t* symfile_addr = entry->symfile_addr_;
    uint64_t symfile_size = entry->symfile_size_;
    DeleteJITCodeEntryInternal(__jit_debug_descriptor,
                               __jit_debug_register_code_ptr,
                               entry);
    __jit_debug_entries.erase(it);
    __jit_debug_mem_usage -= sizeof(JITCodeEntry) + symfile_size;
    delete[] symfile_addr;
  }
}

size_t GetJitNativeDebugInfoMemUsage() {
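  // The second term is a rough estimate of the bookkeeping overhead of
  // __jit_debug_entries: one key/value pointer pair per map entry.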
  return __jit_debug_mem_usage + __jit_debug_entries.size() * 2 * sizeof(void*);
}

}  // namespace art