blob: 067c7c153daae0a2f550001268e37430904415a1 [file] [log] [blame]
Andreas Gampeb5eb94a2016-10-27 19:23:09 -07001/* Copyright (C) 2016 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32#include "ti_stack.h"
33
Andreas Gampeeba32fb2017-01-12 17:40:05 -080034#include <algorithm>
Andreas Gampea1a27c62017-01-11 16:37:16 -080035#include <list>
36#include <unordered_map>
37#include <vector>
38
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070039#include "art_jvmti.h"
40#include "art_method-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080041#include "base/bit_utils.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070042#include "base/enums.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080043#include "base/mutex.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070044#include "dex_file.h"
45#include "dex_file_annotations.h"
Andreas Gampeeba32fb2017-01-12 17:40:05 -080046#include "handle_scope-inl.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070047#include "jni_env_ext.h"
Andreas Gampe13b27842016-11-07 16:48:23 -080048#include "jni_internal.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070049#include "mirror/class.h"
50#include "mirror/dex_cache.h"
51#include "scoped_thread_state_change-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080052#include "ScopedLocalRef.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070053#include "stack.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080054#include "thread-inl.h"
55#include "thread_list.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070056#include "thread_pool.h"
57
58namespace openjdkjvmti {
59
60struct GetStackTraceVisitor : public art::StackVisitor {
61 GetStackTraceVisitor(art::Thread* thread_in,
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070062 size_t start_,
63 size_t stop_)
64 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070065 start(start_),
66 stop(stop_) {}
67
68 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
69 art::ArtMethod* m = GetMethod();
70 if (m->IsRuntimeMethod()) {
71 return true;
72 }
73
74 if (start == 0) {
75 m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
Andreas Gampe13b27842016-11-07 16:48:23 -080076 jmethodID id = art::jni::EncodeArtMethod(m);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070077
Andreas Gampe2340e3f2016-12-12 19:37:19 -080078 uint32_t dex_pc = GetDexPc(false);
79 jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070080
Andreas Gampe2340e3f2016-12-12 19:37:19 -080081 jvmtiFrameInfo info = { id, dex_location };
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070082 frames.push_back(info);
83
84 if (stop == 1) {
85 return false; // We're done.
86 } else if (stop > 0) {
87 stop--;
88 }
89 } else {
90 start--;
91 }
92
93 return true;
94 }
95
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070096 std::vector<jvmtiFrameInfo> frames;
97 size_t start;
98 size_t stop;
99};
100
101struct GetStackTraceClosure : public art::Closure {
102 public:
103 GetStackTraceClosure(size_t start, size_t stop)
104 : start_input(start),
105 stop_input(stop),
106 start_result(0),
107 stop_result(0) {}
108
Andreas Gampea1a27c62017-01-11 16:37:16 -0800109 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
110 GetStackTraceVisitor visitor(self, start_input, stop_input);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700111 visitor.WalkStack(false);
112
113 frames.swap(visitor.frames);
114 start_result = visitor.start;
115 stop_result = visitor.stop;
116 }
117
118 const size_t start_input;
119 const size_t stop_input;
120
121 std::vector<jvmtiFrameInfo> frames;
122 size_t start_result;
123 size_t stop_result;
124};
125
Andreas Gampea1a27c62017-01-11 16:37:16 -0800126static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
127 jint start_depth,
128 size_t start_result,
129 jint max_frame_count,
130 jvmtiFrameInfo* frame_buffer,
131 jint* count_ptr) {
132 size_t collected_frames = frames.size();
133
134 // Assume we're here having collected something.
135 DCHECK_GT(max_frame_count, 0);
136
137 // Frames from the top.
138 if (start_depth >= 0) {
139 if (start_result != 0) {
140 // Not enough frames.
141 return ERR(ILLEGAL_ARGUMENT);
142 }
143 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
144 if (frames.size() > 0) {
145 memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
146 }
147 *count_ptr = static_cast<jint>(frames.size());
148 return ERR(NONE);
149 }
150
151 // Frames from the bottom.
152 if (collected_frames < static_cast<size_t>(-start_depth)) {
153 return ERR(ILLEGAL_ARGUMENT);
154 }
155
156 size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
157 memcpy(frame_buffer,
158 &frames.data()[collected_frames + start_depth],
159 count * sizeof(jvmtiFrameInfo));
160 *count_ptr = static_cast<jint>(count);
161 return ERR(NONE);
162}
163
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800164static jvmtiError GetThread(JNIEnv* env, jthread java_thread, art::Thread** thread) {
165 if (java_thread == nullptr) {
166 *thread = art::Thread::Current();
167 if (*thread == nullptr) {
168 // GetStackTrace can only be run during the live phase, so the current thread should be
169 // attached and thus available. Getting a null for current means we're starting up or
170 // dying.
171 return ERR(WRONG_PHASE);
172 }
173 } else {
174 if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
175 return ERR(INVALID_THREAD);
176 }
177
178 // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
179 art::ScopedObjectAccess soa(art::Thread::Current());
180 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
181 *thread = art::Thread::FromManagedThread(soa, java_thread);
182 if (*thread == nullptr) {
183 return ERR(THREAD_NOT_ALIVE);
184 }
185 }
186 return ERR(NONE);
187}
188
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700189jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
190 jthread java_thread,
191 jint start_depth,
192 jint max_frame_count,
193 jvmtiFrameInfo* frame_buffer,
194 jint* count_ptr) {
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700195 art::Thread* thread;
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800196 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
197 if (thread_error != ERR(NONE)) {
198 return thread_error;
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700199 }
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800200 DCHECK(thread != nullptr);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700201
202 art::ThreadState state = thread->GetState();
203 if (state == art::ThreadState::kStarting ||
204 state == art::ThreadState::kTerminated ||
205 thread->IsStillStarting()) {
206 return ERR(THREAD_NOT_ALIVE);
207 }
208
209 if (max_frame_count < 0) {
210 return ERR(ILLEGAL_ARGUMENT);
211 }
212 if (frame_buffer == nullptr || count_ptr == nullptr) {
213 return ERR(NULL_POINTER);
214 }
215
216 if (max_frame_count == 0) {
217 *count_ptr = 0;
218 return ERR(NONE);
219 }
220
221 GetStackTraceClosure closure(start_depth >= 0 ? static_cast<size_t>(start_depth) : 0,
Andreas Gampea1a27c62017-01-11 16:37:16 -0800222 start_depth >= 0 ? static_cast<size_t>(max_frame_count) : 0);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700223 thread->RequestSynchronousCheckpoint(&closure);
224
Andreas Gampea1a27c62017-01-11 16:37:16 -0800225 return TranslateFrameVector(closure.frames,
226 start_depth,
227 closure.start_result,
228 max_frame_count,
229 frame_buffer,
230 count_ptr);
231}
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700232
Andreas Gampea1a27c62017-01-11 16:37:16 -0800233struct GetAllStackTraceClosure : public art::Closure {
234 public:
235 explicit GetAllStackTraceClosure(size_t stop)
236 : start_input(0),
237 stop_input(stop),
238 frames_lock("GetAllStackTraceGuard", art::LockLevel::kAbortLock),
239 start_result(0),
240 stop_result(0) {}
241
242 void Run(art::Thread* self)
243 OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) REQUIRES(!frames_lock) {
244 // self should be live here (so it could be suspended). No need to filter.
245
246 art::Thread* current = art::Thread::Current();
247 std::vector<jvmtiFrameInfo> self_frames;
248
249 GetStackTraceVisitor visitor(self, start_input, stop_input);
250 visitor.WalkStack(false);
251
252 self_frames.swap(visitor.frames);
253
254 art::MutexLock mu(current, frames_lock);
255 frames.emplace(self, self_frames);
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700256 }
257
Andreas Gampea1a27c62017-01-11 16:37:16 -0800258 const size_t start_input;
259 const size_t stop_input;
260
261 art::Mutex frames_lock;
262 std::unordered_map<art::Thread*, std::vector<jvmtiFrameInfo>> frames GUARDED_BY(frames_lock);
263 size_t start_result;
264 size_t stop_result;
265};
266
267
268
// JVMTI GetAllStackTraces: suspends all threads, collects up to
// max_frame_count frames per thread, and returns a single env-allocated chunk
// containing a jvmtiStackInfo array followed by all frame buffers.
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }


  art::Thread* current = art::Thread::Current();
  art::ScopedObjectAccess soa(current);      // Now we know we have the shared lock.
  art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
  art::ScopedSuspendAll ssa("GetAllStackTraces");

  std::vector<art::Thread*> threads;
  std::vector<std::vector<jvmtiFrameInfo>> frames;
  {
    // Snapshot the thread list under the lock, then walk each thread's stack.
    std::list<art::Thread*> thread_list;
    {
      art::MutexLock mu(current, *art::Locks::thread_list_lock_);
      thread_list = art::Runtime::Current()->GetThreadList()->GetList();
    }

    for (art::Thread* thread : thread_list) {
      // Skip threads that are still starting.
      if (thread->IsStillStarting()) {
        continue;
      }

      GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
      thread->RequestSynchronousCheckpoint(&closure);

      // threads[i] and frames[i] stay index-aligned.
      threads.push_back(thread);
      frames.emplace_back();
      frames.back().swap(closure.frames);
    }
  }

  // Convert the data into our output format. Note: we need to keep the threads suspended,
  // as we need to access them for their peers.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    art::Thread* self = threads[index];
    const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];

    // For the time being, set the thread to null. We don't have good ScopedLocalRef
    // infrastructure.
    DCHECK(self->GetPeerFromOtherThread() != nullptr);
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    // Temporary per-thread buffer; copied into the single chunk below.
    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  // Layout: [jvmtiStackInfo x N][padding to alignof(jvmtiFrameInfo)][all frames].
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Create the local reference to the peer now that we are returning data.
    jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
        threads[i]->GetPeerFromOtherThread());
    new_stack_info.thread = thread_peer;

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(frames.size());

  return ERR(NONE);
}
398
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800399jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
400 jint thread_count,
401 const jthread* thread_list,
402 jint max_frame_count,
403 jvmtiStackInfo** stack_info_ptr) {
404 if (max_frame_count < 0) {
405 return ERR(ILLEGAL_ARGUMENT);
406 }
407 if (thread_count < 0) {
408 return ERR(ILLEGAL_ARGUMENT);
409 }
410 if (thread_count == 0) {
411 *stack_info_ptr = nullptr;
412 return ERR(NONE);
413 }
414 if (stack_info_ptr == nullptr || stack_info_ptr == nullptr) {
415 return ERR(NULL_POINTER);
416 }
417
418 art::Thread* current = art::Thread::Current();
419 art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
420
421 // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
422 art::VariableSizedHandleScope hs(current);
423 std::vector<art::Handle<art::mirror::Object>> handles;
424 for (jint i = 0; i != thread_count; ++i) {
425 if (thread_list[i] == nullptr) {
426 return ERR(INVALID_THREAD);
427 }
428 if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
429 return ERR(INVALID_THREAD);
430 }
431 handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
432 }
433
434 std::vector<art::Thread*> threads;
435 std::vector<size_t> thread_list_indices;
436 std::vector<std::vector<jvmtiFrameInfo>> frames;
437
438 {
439 art::ScopedThreadSuspension sts(current, art::kWaitingForDebuggerSuspension);
440 art::ScopedSuspendAll ssa("GetThreadListStackTraces");
441
442 {
443 std::list<art::Thread*> art_thread_list;
444 {
445 art::MutexLock mu(current, *art::Locks::thread_list_lock_);
446 art_thread_list = art::Runtime::Current()->GetThreadList()->GetList();
447 }
448
449 for (art::Thread* thread : art_thread_list) {
450 if (thread->IsStillStarting()) {
451 // Skip this. We can't get the jpeer, and if it is for a thread in the thread_list,
452 // we'll just report STARTING.
453 continue;
454 }
455
456 // Get the peer, and check whether we know it.
Andreas Gampe202f85a2017-02-06 10:23:26 -0800457 art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800458 for (size_t index = 0; index != handles.size(); ++index) {
459 if (peer == handles[index].Get()) {
460 // Found the thread.
461 GetStackTraceClosure closure(0u, static_cast<size_t>(max_frame_count));
462 thread->RequestSynchronousCheckpoint(&closure);
463
464 threads.push_back(thread);
465 thread_list_indices.push_back(index);
466 frames.emplace_back();
467 frames.back().swap(closure.frames);
468
469 continue;
470 }
471 }
472
473 // Must be not started, or dead. We'll deal with it at the end.
474 }
475 }
476 }
477
478 // Convert the data into our output format.
479
480 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
481 // allocate one big chunk for this and the actual frames, which means we need
482 // to either be conservative or rearrange things later (the latter is implemented).
483 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[frames.size()]);
484 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
485 frame_infos.reserve(frames.size());
486
487 // Now run through and add data for each thread.
488 size_t sum_frames = 0;
489 for (size_t index = 0; index < frames.size(); ++index) {
490 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
491 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
492
493 art::Thread* self = threads[index];
494 const std::vector<jvmtiFrameInfo>& thread_frames = frames[index];
495
496 // For the time being, set the thread to null. We don't have good ScopedLocalRef
497 // infrastructure.
Nicolas Geoffrayffc8cad2017-02-10 10:59:22 +0000498 DCHECK(self->GetPeerFromOtherThread() != nullptr);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800499 stack_info.thread = nullptr;
500 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
501
502 size_t collected_frames = thread_frames.size();
503 if (max_frame_count == 0 || collected_frames == 0) {
504 stack_info.frame_count = 0;
505 stack_info.frame_buffer = nullptr;
506 continue;
507 }
508 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
509
510 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
511 frame_infos.emplace_back(frame_info);
512
513 jint count;
514 jvmtiError translate_result = TranslateFrameVector(thread_frames,
515 0,
516 0,
517 static_cast<jint>(collected_frames),
518 frame_info,
519 &count);
520 DCHECK(translate_result == JVMTI_ERROR_NONE);
521 stack_info.frame_count = static_cast<jint>(collected_frames);
522 stack_info.frame_buffer = frame_info;
523 sum_frames += static_cast<size_t>(count);
524 }
525
526 // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
527 // potentially.
528 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
529 alignof(jvmtiFrameInfo));
530 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
531 unsigned char* chunk_data;
532 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
533 if (alloc_result != ERR(NONE)) {
534 return alloc_result;
535 }
536
537 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
538 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
539 chunk_data + rounded_stack_info_size);
540
541 for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
542 // Check whether we found a running thread for this.
543 // Note: For simplicity, and with the expectation that the list is usually small, use a simple
544 // search. (The list is *not* sorted!)
545 auto it = std::find(thread_list_indices.begin(), thread_list_indices.end(), i);
546 if (it == thread_list_indices.end()) {
547 // No native thread. Must be new or dead. We need to fill out the stack info now.
548 // (Need to read the Java "started" field to know whether this is starting or terminated.)
549 art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
550 art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
551 art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
552 CHECK(started_field != nullptr);
553 bool started = started_field->GetBoolean(peer) != 0;
554 constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
555 constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
556 JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
557 stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
558 stack_info[i].state = started ? kTerminatedState : kStartedState;
559 stack_info[i].frame_count = 0;
560 stack_info[i].frame_buffer = nullptr;
561 } else {
562 // Had a native thread and frames.
563 size_t f_index = it - thread_list_indices.begin();
564
565 jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
566 jvmtiStackInfo& new_stack_info = stack_info[i];
567
568 memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
569 new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
570 if (old_stack_info.frame_count > 0) {
571 // Only copy when there's data - leave the nullptr alone.
572 size_t frames_size =
573 static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
574 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
575 new_stack_info.frame_buffer = frame_info;
576 frame_info += old_stack_info.frame_count;
577 }
578 }
579 }
580
581 * stack_info_ptr = stack_info;
582
583 return ERR(NONE);
584}
585
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800586// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
587// runtime methods and transitions must not be counted.
588struct GetFrameCountVisitor : public art::StackVisitor {
589 explicit GetFrameCountVisitor(art::Thread* thread)
590 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
591 count(0) {}
592
593 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
594 art::ArtMethod* m = GetMethod();
595 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
596 if (do_count) {
597 count++;
598 }
599 return true;
600 }
601
602 size_t count;
603};
604
605struct GetFrameCountClosure : public art::Closure {
606 public:
607 GetFrameCountClosure() : count(0) {}
608
609 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
610 GetFrameCountVisitor visitor(self);
611 visitor.WalkStack(false);
612
613 count = visitor.count;
614 }
615
616 size_t count;
617};
618
619jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
620 jthread java_thread,
621 jint* count_ptr) {
622 art::Thread* thread;
623 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
624 if (thread_error != ERR(NONE)) {
625 return thread_error;
626 }
627 DCHECK(thread != nullptr);
628
629 if (count_ptr == nullptr) {
630 return ERR(NULL_POINTER);
631 }
632
633 GetFrameCountClosure closure;
634 thread->RequestSynchronousCheckpoint(&closure);
635
636 *count_ptr = closure.count;
637 return ERR(NONE);
638}
639
640// Walks up the stack 'n' callers, when used with Thread::WalkStack.
641struct GetLocationVisitor : public art::StackVisitor {
642 GetLocationVisitor(art::Thread* thread, size_t n_in)
643 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
644 n(n_in),
645 count(0),
646 caller(nullptr),
647 caller_dex_pc(0) {}
648
649 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
650 art::ArtMethod* m = GetMethod();
651 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
652 if (do_count) {
653 DCHECK(caller == nullptr);
654 if (count == n) {
655 caller = m;
656 caller_dex_pc = GetDexPc(false);
657 return false;
658 }
659 count++;
660 }
661 return true;
662 }
663
664 const size_t n;
665 size_t count;
666 art::ArtMethod* caller;
667 uint32_t caller_dex_pc;
668};
669
670struct GetLocationClosure : public art::Closure {
671 public:
672 explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
673
674 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
675 GetLocationVisitor visitor(self, n);
676 visitor.WalkStack(false);
677
678 method = visitor.caller;
679 dex_pc = visitor.caller_dex_pc;
680 }
681
682 const size_t n;
683 art::ArtMethod* method;
684 uint32_t dex_pc;
685};
686
687jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
688 jthread java_thread,
689 jint depth,
690 jmethodID* method_ptr,
691 jlocation* location_ptr) {
692 art::Thread* thread;
693 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(), java_thread, &thread);
694 if (thread_error != ERR(NONE)) {
695 return thread_error;
696 }
697 DCHECK(thread != nullptr);
698
699 if (depth < 0) {
700 return ERR(ILLEGAL_ARGUMENT);
701 }
702 if (method_ptr == nullptr || location_ptr == nullptr) {
703 return ERR(NULL_POINTER);
704 }
705
706 GetLocationClosure closure(static_cast<size_t>(depth));
707 thread->RequestSynchronousCheckpoint(&closure);
708
709 if (closure.method == nullptr) {
710 return ERR(NO_MORE_FRAMES);
711 }
712
713 *method_ptr = art::jni::EncodeArtMethod(closure.method);
714 if (closure.method->IsNative()) {
715 *location_ptr = -1;
716 } else {
717 if (closure.dex_pc == art::DexFile::kDexNoIndex) {
718 return ERR(INTERNAL);
719 }
720 *location_ptr = static_cast<jlocation>(closure.dex_pc);
721 }
722
723 return ERR(NONE);
724}
725
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700726} // namespace openjdkjvmti