blob: 6cd719a55caa87da85485dbd4bd0ade1c391cdcc [file] [log] [blame]
David Srbecky67feb172015-12-17 19:57:44 +00001/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "debugger_interface.h"
18
Andreas Gampe57943812017-12-06 21:39:13 -080019#include <android-base/logging.h>
20
David Srbecky440a9b32018-02-15 17:47:29 +000021#include "base/array_ref.h"
David Srbecky5cc349f2015-12-18 15:04:48 +000022#include "base/mutex.h"
David Srbecky440a9b32018-02-15 17:47:29 +000023#include "base/time_utils.h"
Andreas Gampeb486a982017-06-01 13:45:54 -070024#include "thread-current-inl.h"
David Srbecky5cc349f2015-12-18 15:04:48 +000025#include "thread.h"
26
David Srbeckyd767f2d2018-02-26 16:18:40 +000027#include <atomic>
David Srbecky5cc349f2015-12-18 15:04:48 +000028#include <unordered_map>
David Srbeckyd767f2d2018-02-26 16:18:40 +000029#include <cstddef>
David Srbecky5cc349f2015-12-18 15:04:48 +000030
David Srbecky440a9b32018-02-15 17:47:29 +000031//
32// Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
33//
34// See http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
35//
36// There are two ways for native tools to access the debug data safely:
37//
38// 1) Synchronously, by setting a breakpoint in the __*_debug_register_code
39// method, which is called after every modification of the linked list.
40// GDB does this, but it is complex to set up and it stops the process.
41//
David Srbeckyd767f2d2018-02-26 16:18:40 +000042// 2) Asynchronously, by monitoring the action_seqlock_.
43// * The seqlock is a monotonically increasing counter which is incremented
44// before and after every modification of the linked list. Odd value of
45// the counter means the linked list is being modified (it is locked).
46// * The tool should read the value of the seqlock both before and after
47// copying the linked list. If the seqlock values match and are even,
48// the copy is consistent. Otherwise, the reader should try again.
// * Note that using the data directly while it is being modified
50// might crash the tool. Therefore, the only safe way is to make
51// a copy and use the copy only after the seqlock has been checked.
52// * Note that the process might even free and munmap the data while
53// it is being copied, therefore the reader should either handle
54// SEGV or use OS calls to read the memory (e.g. process_vm_readv).
55// * The seqlock can be used to determine the number of modifications of
56// the linked list, which can be used to intelligently cache the data.
57// Note the possible overflow of the seqlock. It is intentionally
58// 32-bit, since 64-bit atomics can be tricky on some architectures.
59// * The timestamps on the entry record the time when the entry was
60// created which is relevant if the unwinding is not live and is
61// postponed until much later. All timestamps must be unique.
62// * Memory barriers are used to make it possible to reason about
63// the data even when it is being modified (e.g. the process crashed
64// while that data was locked, and thus it will be never unlocked).
65// * In particular, it should be possible to:
66// 1) read the seqlock and then the linked list head pointer.
67// 2) copy the entry and check that seqlock has not changed.
68// 3) copy the symfile and check that seqlock has not changed.
69// 4) go back to step 2 using the next pointer (if non-null).
70// This safely creates copy of all symfiles, although other data
71// might be inconsistent/unusable (e.g. prev_, action_timestamp_).
72// * For full conformance with the C++ memory model, all seqlock
73// protected accesses should be atomic. We currently do this in the
74// more critical cases. The rest will have to be fixed before
75// attempting to run TSAN on this code.
David Srbecky440a9b32018-02-15 17:47:29 +000076//
David Srbecky67feb172015-12-17 19:57:44 +000077
David Srbecky440a9b32018-02-15 17:47:29 +000078namespace art {
extern "C" {
  // Actions recorded in JITDescriptor::action_flag_, as defined by the
  // GDB JIT compilation interface. They describe the last list modification.
  enum JITAction {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  };

  // One node of the linked list of registered symfiles.
  // NOTE(review): this layout is read directly by external tools (GDB and
  // Android unwinders) — do not reorder or resize fields.
  struct JITCodeEntry {
    // Atomic to ensure the reader can always iterate over the linked list
    // (e.g. the process could crash in the middle of writing this field).
    std::atomic<JITCodeEntry*> next_;
    // Non-atomic. The reader should not use it. It is only used for deletion.
    JITCodeEntry* prev_;
    const uint8_t* symfile_addr_;
    uint64_t symfile_size_;  // Beware of the offset (12 on x86; but 16 on ARM32).

    // Android-specific fields:
    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
  };

  // The root descriptor of the debug interface, found by tools via the
  // well-known __jit_debug_descriptor / __dex_debug_descriptor symbols.
  struct JITDescriptor {
    uint32_t version_ = 1;                      // NB: GDB supports only version 1.
    uint32_t action_flag_ = JIT_NOACTION;       // One of the JITAction enum values.
    JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of link list of all entries.

    // Android-specific fields:
    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
    uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
    uint32_t sizeof_entry = sizeof(JITCodeEntry);
    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
    uint64_t action_timestamp_ = 1;  // CLOCK_MONOTONIC time of last action.
  };

  // Check that std::atomic has the expected layout (so that external readers
  // can treat the atomic fields as plain uint32_t / pointer values).
  static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
  static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
  static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
  static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");

  // GDB may set breakpoint here. We must ensure it is not removed or deduplicated.
  void __attribute__((noinline)) __jit_debug_register_code() {
    __asm__("");
  }

  // Alternatively, native tools may overwrite this field to execute custom handler.
  void (*__jit_debug_register_code_ptr)() = __jit_debug_register_code;

  // The root data structure describing all JITed methods.
  JITDescriptor __jit_debug_descriptor {};

  // The following globals mirror the ones above, but are used to register dex files.
  void __attribute__((noinline)) __dex_debug_register_code() {
    __asm__("");
  }
  void (*__dex_debug_register_code_ptr)() = __dex_debug_register_code;
  JITDescriptor __dex_debug_descriptor {};
}
138
// Mark the descriptor as "locked", so native tools know the data is being modified.
// Makes the seqlock value odd; paired with ActionSequnlock below.
static void ActionSeqlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
  // Ensure that any writes within the locked section cannot be reordered before the increment.
  std::atomic_thread_fence(std::memory_order_release);
}
David Srbeckyc684f332018-01-19 17:38:06 +0000146
// Mark the descriptor as "unlocked", so native tools know the data is safe to read.
// Makes the seqlock value even again; paired with ActionSeqlock above.
static void ActionSequnlock(JITDescriptor& descriptor) {
  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
  // Ensure that any writes within the locked section cannot be reordered after the increment.
  std::atomic_thread_fence(std::memory_order_release);
  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
}
Vladimir Marko93205e32016-04-13 11:59:46 +0100154
// Allocate a new JITCodeEntry pointing at |symfile|, insert it at the head of
// the descriptor's linked list under the seqlock, and notify native tools by
// calling |register_code_ptr| (the GDB-breakpoint hook). Returns the new entry.
// The entry only stores a pointer to |symfile|; the memory itself is not
// copied or owned here — the caller manages its lifetime.
static JITCodeEntry* CreateJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    const ArrayRef<const uint8_t>& symfile)
    REQUIRES(Locks::native_debug_interface_lock_) {
  // Ensure the timestamp is monotonically increasing even in presence of low
  // granularity system timer.  This ensures each entry has unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // The new entry is fully initialized before it is linked in, so an
  // asynchronous reader can never observe a partially-written node.
  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
  JITCodeEntry* entry = new JITCodeEntry;
  CHECK(entry != nullptr);
  entry->symfile_addr_ = symfile.data();
  entry->symfile_size_ = symfile.size();
  entry->prev_ = nullptr;
  entry->next_.store(head, std::memory_order_relaxed);
  entry->register_timestamp_ = timestamp;

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  if (head != nullptr) {
    head->prev_ = entry;
  }
  descriptor.head_.store(entry, std::memory_order_relaxed);
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_REGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  // Synchronous notification for attached debuggers (breakpoint hook).
  (*register_code_ptr)();
  return entry;
}
187
// Unlink |entry| from the descriptor's linked list under the seqlock, notify
// native tools via |register_code_ptr|, and delete the entry itself.
// Does NOT free the symfile memory the entry points to; that is the caller's
// responsibility (see RemoveNativeDebugInfoForJit).
static void DeleteJITCodeEntryInternal(
    JITDescriptor& descriptor,
    void (*register_code_ptr)(),
    JITCodeEntry* entry)
    REQUIRES(Locks::native_debug_interface_lock_) {
  CHECK(entry != nullptr);

  // Ensure the timestamp is monotonically increasing even in presence of low
  // granularity system timer. This ensures each entry has unique timestamp.
  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());

  // We are going to modify the linked list, so take the seqlock.
  ActionSeqlock(descriptor);
  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
  if (entry->prev_ != nullptr) {
    entry->prev_->next_.store(next, std::memory_order_relaxed);
  } else {
    // The entry was the list head.
    descriptor.head_.store(next, std::memory_order_relaxed);
  }
  if (next != nullptr) {
    next->prev_ = entry->prev_;
  }
  descriptor.relevant_entry_ = entry;
  descriptor.action_flag_ = JIT_UNREGISTER_FN;
  descriptor.action_timestamp_ = timestamp;
  ActionSequnlock(descriptor);

  // Synchronous notification for attached debuggers (breakpoint hook).
  (*register_code_ptr)();

  // Ensure that clear below can not be reordered above the unlock above.
  std::atomic_thread_fence(std::memory_order_release);

  // Aggressively clear the entry as an extra check of the synchronisation.
  memset(entry, 0, sizeof(*entry));

  delete entry;
}
225
// Mapping from dex file memory address to entry. Used to manage life-time of the entries.
static std::unordered_map<const void*, JITCodeEntry*> __dex_debug_entries
    GUARDED_BY(Locks::native_debug_interface_lock_);
David Srbeckyc684f332018-01-19 17:38:06 +0000228
David Srbecky440a9b32018-02-15 17:47:29 +0000229void AddNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
230 MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
231 DCHECK(dexfile.data() != nullptr);
232 // This is just defensive check. The class linker should not register the dex file twice.
233 if (__dex_debug_entries.count(dexfile.data()) == 0) {
234 JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
235 __dex_debug_register_code_ptr,
236 dexfile);
237 __dex_debug_entries.emplace(dexfile.data(), entry);
David Srbecky5cc349f2015-12-18 15:04:48 +0000238 }
David Srbeckyc684f332018-01-19 17:38:06 +0000239}
240
David Srbecky440a9b32018-02-15 17:47:29 +0000241void RemoveNativeDebugInfoForDex(Thread* current_thread, ArrayRef<const uint8_t> dexfile) {
242 MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
243 auto it = __dex_debug_entries.find(dexfile.data());
244 // We register dex files in the class linker and free them in DexFile_closeDexFile, but
245 // there might be cases where we load the dex file without using it in the class linker.
246 if (it != __dex_debug_entries.end()) {
247 DeleteJITCodeEntryInternal(__dex_debug_descriptor,
248 __dex_debug_register_code_ptr,
249 it->second);
250 __dex_debug_entries.erase(it);
251 }
David Srbeckyc684f332018-01-19 17:38:06 +0000252}
253
// Total bytes used by registered JIT entries and their symfiles (bookkeeping only).
static size_t __jit_debug_mem_usage
    GUARDED_BY(Locks::native_debug_interface_lock_) = 0;

// Mapping from handle to entry. Used to manage life-time of the entries.
static std::unordered_map<const void*, JITCodeEntry*> __jit_debug_entries
    GUARDED_BY(Locks::native_debug_interface_lock_);
260
261void AddNativeDebugInfoForJit(const void* handle, const std::vector<uint8_t>& symfile) {
262 DCHECK_NE(symfile.size(), 0u);
263
264 // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
265 uint8_t* copy = new uint8_t[symfile.size()];
266 CHECK(copy != nullptr);
267 memcpy(copy, symfile.data(), symfile.size());
268
269 JITCodeEntry* entry = CreateJITCodeEntryInternal(
270 __jit_debug_descriptor,
271 __jit_debug_register_code_ptr,
272 ArrayRef<const uint8_t>(copy, symfile.size()));
273 __jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;
274
275 // We don't provide handle for type debug info, which means we cannot free it later.
276 // (this only happens when --generate-debug-info flag is enabled for the purpose
277 // of being debugged with gdb; it does not happen for debuggable apps by default).
278 bool ok = handle == nullptr || __jit_debug_entries.emplace(handle, entry).second;
279 DCHECK(ok) << "Native debug entry already exists for " << std::hex << handle;
280}
281
282void RemoveNativeDebugInfoForJit(const void* handle) {
283 auto it = __jit_debug_entries.find(handle);
284 // We generate JIT native debug info only if the right runtime flags are enabled,
285 // but we try to remove it unconditionally whenever code is freed from JIT cache.
286 if (it != __jit_debug_entries.end()) {
287 JITCodeEntry* entry = it->second;
288 const uint8_t* symfile_addr = entry->symfile_addr_;
289 uint64_t symfile_size = entry->symfile_size_;
290 DeleteJITCodeEntryInternal(__jit_debug_descriptor,
291 __jit_debug_register_code_ptr,
292 entry);
293 __jit_debug_entries.erase(it);
294 __jit_debug_mem_usage -= sizeof(JITCodeEntry) + symfile_size;
295 delete[] symfile_addr;
296 }
297}
298
299size_t GetJitNativeDebugInfoMemUsage() {
300 return __jit_debug_mem_usage + __jit_debug_entries.size() * 2 * sizeof(void*);
David Srbecky5cc349f2015-12-18 15:04:48 +0000301}
302
David Srbecky67feb172015-12-17 19:57:44 +0000303} // namespace art