blob: 20de9aa8fdf9e6bc5ad1262143488fe85d563db8 [file] [log] [blame]
Andreas Gampeb5eb94a2016-10-27 19:23:09 -07001/* Copyright (C) 2016 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32#include "ti_stack.h"
33
Andreas Gampeeba32fb2017-01-12 17:40:05 -080034#include <algorithm>
Andreas Gampea1a27c62017-01-11 16:37:16 -080035#include <list>
36#include <unordered_map>
37#include <vector>
38
Andreas Gampea1d2f952017-04-20 22:53:58 -070039#include "art_field-inl.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070040#include "art_jvmti.h"
Steven Morelande431e272017-07-18 16:53:49 -070041#include "art_method-inl.h"
Andreas Gampe6237cd32017-06-22 22:17:38 -070042#include "barrier.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080043#include "base/bit_utils.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070044#include "base/enums.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080045#include "base/mutex.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070046#include "dex_file.h"
47#include "dex_file_annotations.h"
Alex Light88e1ddd2017-08-21 13:09:55 -070048#include "gc_root.h"
Andreas Gampeeba32fb2017-01-12 17:40:05 -080049#include "handle_scope-inl.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070050#include "jni_env_ext.h"
Andreas Gampe13b27842016-11-07 16:48:23 -080051#include "jni_internal.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070052#include "mirror/class.h"
53#include "mirror/dex_cache.h"
Steven Morelande431e272017-07-18 16:53:49 -070054#include "nativehelper/ScopedLocalRef.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070055#include "scoped_thread_state_change-inl.h"
56#include "stack.h"
Andreas Gampeb486a982017-06-01 13:45:54 -070057#include "thread-current-inl.h"
Andreas Gampea1a27c62017-01-11 16:37:16 -080058#include "thread_list.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070059#include "thread_pool.h"
Alex Light88e1ddd2017-08-21 13:09:55 -070060#include "ti_thread.h"
Andreas Gampea1d2f952017-04-20 22:53:58 -070061#include "well_known_classes.h"
Andreas Gampeb5eb94a2016-10-27 19:23:09 -070062
63namespace openjdkjvmti {
64
// Stack visitor that reports each visited Java frame to a caller-supplied
// callback |fn|, skipping the first |start| frames and stopping after |stop|
// frames have been reported (stop == 0 means "no limit"). Runtime method
// frames are never counted or reported. Templated on FrameFn so lambda
// callbacks are invoked directly, without std::function overhead.
template <typename FrameFn>
struct GetStackTraceVisitor : public art::StackVisitor {
  GetStackTraceVisitor(art::Thread* thread_in,
                       size_t start_,
                       size_t stop_,
                       FrameFn fn_)
      : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        fn(fn_),
        start(start_),
        stop(stop_) {}
  GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
  GetStackTraceVisitor(GetStackTraceVisitor&&) = default;

  // Called once per frame by WalkStack. Returns false to terminate the walk.
  bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
    art::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      // Runtime frames are invisible to JVMTI; keep walking.
      return true;
    }

    if (start == 0) {
      // Past the requested start depth: report this frame.
      m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
      jmethodID id = art::jni::EncodeArtMethod(m);

      // A frame without a valid dex pc (e.g. native) is reported as -1.
      uint32_t dex_pc = GetDexPc(false);
      jlong dex_location = (dex_pc == art::DexFile::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);

      jvmtiFrameInfo info = { id, dex_location };
      fn(info);

      if (stop == 1) {
        return false;  // We're done.
      } else if (stop > 0) {
        stop--;
      }
    } else {
      // Still skipping frames up to the start depth.
      start--;
    }

    return true;
  }

  FrameFn fn;
  // Remaining frames to skip; a non-zero value after the walk means the stack
  // was shallower than the requested start depth.
  size_t start;
  // Remaining frames to report (0 = unlimited).
  size_t stop;
};
110
Andreas Gampe6db6b4d2017-06-12 16:36:33 -0700111template <typename FrameFn>
112GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
113 size_t start,
114 size_t stop,
115 FrameFn fn) {
116 return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
117}
118
// Checkpoint closure that collects the target thread's frames into a growable
// vector. Used by GetStackTrace for the slow path (negative start_depth),
// where the total number of frames is not known in advance.
struct GetStackTraceVectorClosure : public art::Closure {
 public:
  GetStackTraceVectorClosure(size_t start, size_t stop)
      : start_input(start),
        stop_input(stop),
        start_result(0),
        stop_result(0) {}

  // Runs on the target thread at a checkpoint; |self| is that thread.
  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // Append every reported frame.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      frames.push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);

    // Leftover skip/limit counters after the walk. A non-zero start_result
    // means the stack had fewer frames than the requested start depth.
    start_result = visitor.start;
    stop_result = visitor.stop;
  }

  const size_t start_input;
  const size_t stop_input;

  // Results; only read after the checkpoint has completed.
  std::vector<jvmtiFrameInfo> frames;
  size_t start_result;
  size_t stop_result;
};
145
Andreas Gampea1a27c62017-01-11 16:37:16 -0800146static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
147 jint start_depth,
148 size_t start_result,
149 jint max_frame_count,
150 jvmtiFrameInfo* frame_buffer,
151 jint* count_ptr) {
152 size_t collected_frames = frames.size();
153
154 // Assume we're here having collected something.
155 DCHECK_GT(max_frame_count, 0);
156
157 // Frames from the top.
158 if (start_depth >= 0) {
159 if (start_result != 0) {
160 // Not enough frames.
161 return ERR(ILLEGAL_ARGUMENT);
162 }
163 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
164 if (frames.size() > 0) {
165 memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
166 }
167 *count_ptr = static_cast<jint>(frames.size());
168 return ERR(NONE);
169 }
170
171 // Frames from the bottom.
172 if (collected_frames < static_cast<size_t>(-start_depth)) {
173 return ERR(ILLEGAL_ARGUMENT);
174 }
175
176 size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
177 memcpy(frame_buffer,
178 &frames.data()[collected_frames + start_depth],
179 count * sizeof(jvmtiFrameInfo));
180 *count_ptr = static_cast<jint>(count);
181 return ERR(NONE);
182}
183
Andreas Gampe850a0fe2017-06-12 18:37:19 -0700184struct GetStackTraceDirectClosure : public art::Closure {
185 public:
186 GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
187 : frame_buffer(frame_buffer_),
188 start_input(start),
189 stop_input(stop),
190 index(0) {
191 DCHECK_GE(start_input, 0u);
192 }
193
194 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
195 auto frames_fn = [&](jvmtiFrameInfo info) {
196 frame_buffer[index] = info;
197 ++index;
198 };
199 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
200 visitor.WalkStack(/* include_transitions */ false);
201 }
202
203 jvmtiFrameInfo* frame_buffer;
204
205 const size_t start_input;
206 const size_t stop_input;
207
208 size_t index = 0;
209};
210
// Resolves |java_thread| to its native art::Thread, writing the result to
// |*thread|. A null |java_thread| selects the current thread. The caller must
// hold the thread list lock so the resolved thread cannot die concurrently.
// Returns WRONG_PHASE, INVALID_THREAD, or THREAD_NOT_ALIVE on failure.
static jvmtiError GetThread(JNIEnv* env,
                            art::ScopedObjectAccessAlreadyRunnable& soa,
                            jthread java_thread,
                            art::Thread** thread)
    REQUIRES_SHARED(art::Locks::mutator_lock_)  // Needed for FromManagedThread.
    REQUIRES(art::Locks::thread_list_lock_) {  // Needed for FromManagedThread.
  if (java_thread == nullptr) {
    *thread = art::Thread::Current();
    if (*thread == nullptr) {
      // GetStackTrace can only be run during the live phase, so the current thread should be
      // attached and thus available. Getting a null for current means we're starting up or
      // dying.
      return ERR(WRONG_PHASE);
    }
  } else {
    // Reject peers that are not java.lang.Thread instances before decoding.
    if (!env->IsInstanceOf(java_thread, art::WellKnownClasses::java_lang_Thread)) {
      return ERR(INVALID_THREAD);
    }

    // TODO: Need non-aborting call here, to return JVMTI_ERROR_INVALID_THREAD.
    *thread = art::Thread::FromManagedThread(soa, java_thread);
    if (*thread == nullptr) {
      // Peer exists but has no native thread: not started yet or already dead.
      return ERR(THREAD_NOT_ALIVE);
    }
  }
  return ERR(NONE);
}
238
// JVMTI GetStackTrace: copies up to |max_frame_count| frames of
// |java_thread|'s stack (starting at |start_depth|; negative values count
// from the bottom) into |frame_buffer|, reporting the number written via
// |count_ptr|. The walk happens on the target thread itself via a
// synchronous checkpoint, so no suspension is required.
jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint start_depth,
                                    jint max_frame_count,
                                    jvmtiFrameInfo* frame_buffer,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);
  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  // Only live threads have a walkable stack.
  art::ThreadState state = thread->GetState();
  if (state == art::ThreadState::kStarting ||
      state == art::ThreadState::kTerminated ||
      thread->IsStillStarting()) {
    return ERR(THREAD_NOT_ALIVE);
  }

  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (frame_buffer == nullptr || count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  if (max_frame_count == 0) {
    // Nothing requested; report zero frames without walking.
    *count_ptr = 0;
    return ERR(NONE);
  }

  if (start_depth >= 0) {
    // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
    GetStackTraceDirectClosure closure(frame_buffer,
                                       static_cast<size_t>(start_depth),
                                       static_cast<size_t>(max_frame_count));
    thread->RequestSynchronousCheckpoint(&closure);
    *count_ptr = static_cast<jint>(closure.index);
    if (closure.index < static_cast<size_t>(start_depth)) {
      // Stack was shallower than the requested start depth.
      return ERR(ILLEGAL_ARGUMENT);
    }
    return ERR(NONE);
  }

  // Slow path (negative start_depth): collect the whole stack, then copy the
  // bottom-most frames in TranslateFrameVector.
  GetStackTraceVectorClosure closure(0, 0);
  thread->RequestSynchronousCheckpoint(&closure);

  return TranslateFrameVector(closure.frames,
                              start_depth,
                              closure.start_result,
                              max_frame_count,
                              frame_buffer,
                              count_ptr);
}
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700302
// Checkpoint closure broadcast to every runtime thread. Each thread collects
// its own frames (up to |stop_input|) into per-thread storage obtained from
// |data|, then passes the barrier so the initiator can wait for completion.
// Data must provide GetFrameStorageFor(self, thread) and a |mutex| member.
template <typename Data>
struct GetAllStackTracesVectorClosure : public art::Closure {
  GetAllStackTracesVectorClosure(size_t stop, Data* data_)
      : barrier(0), stop_input(stop), data(data_) {}

  // Runs on each target thread; signals the barrier when done so
  // RunCheckpointAndWait can rendezvous.
  void Run(art::Thread* thread) OVERRIDE
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    art::Thread* self = art::Thread::Current();
    Work(thread, self);
    barrier.Pass(self);
  }

  // The actual collection, separated from Run so the barrier is always passed
  // even when a thread is skipped.
  void Work(art::Thread* thread, art::Thread* self)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(!data->mutex) {
    // Skip threads that are still starting.
    if (thread->IsStillStarting()) {
      return;
    }

    // A null return means |data| is not interested in this thread.
    std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
    if (thread_frames == nullptr) {
      return;
    }

    // Now collect the data.
    auto frames_fn = [&](jvmtiFrameInfo info) {
      thread_frames->push_back(info);
    };
    auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
    visitor.WalkStack(/* include_transitions */ false);
  }

  // Counted up by the initiator; each checkpointed thread passes it once.
  art::Barrier barrier;
  const size_t stop_input;
  Data* data;
};
341
// Broadcasts a GetAllStackTracesVectorClosure to all threads and blocks until
// every thread has run it (barrier rendezvous). After return, |data|'s
// storage may be read without synchronization.
template <typename Data>
static void RunCheckpointAndWait(Data* data, size_t max_frame_count) {
  GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
  // RunCheckpoint returns the number of threads the closure was posted to.
  size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
  if (barrier_count == 0) {
    return;
  }
  art::Thread* self = art::Thread::Current();
  // Mark this thread as waiting while blocking on the barrier.
  art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
  closure.barrier.Increment(self, barrier_count);
}
353
// JVMTI GetAllStackTraces: collects up to |max_frame_count| frames for every
// live thread via a checkpoint broadcast, then packs the result into a single
// env->Allocate'd chunk (jvmtiStackInfo array followed by all frame arrays),
// as the JVMTI spec requires the caller to free everything with one
// Deallocate.
jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
                                        jint max_frame_count,
                                        jvmtiStackInfo** stack_info_ptr,
                                        jint* thread_count_ptr) {
  if (max_frame_count < 0) {
    return ERR(ILLEGAL_ARGUMENT);
  }
  if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  // Per-thread collection state handed to the checkpoint closure. Threads
  // register themselves under |mutex|; their peers are pinned with JNI global
  // refs (released in the destructor) so they survive until translated to
  // local refs below.
  struct AllStackTracesData {
    AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
    ~AllStackTracesData() {
      // Release the global refs taken in GetFrameStorageFor.
      JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
      for (jthread global_thread_ref : thread_peers) {
        jni_env->DeleteGlobalRef(global_thread_ref);
      }
    }

    // Called by each checkpointed thread; registers |thread| and returns the
    // vector it should fill with its frames.
    std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
        REQUIRES_SHARED(art::Locks::mutator_lock_)
        REQUIRES(!mutex) {
      art::MutexLock mu(self, mutex);

      threads.push_back(thread);

      jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
          self, thread->GetPeerFromOtherThread());
      thread_peers.push_back(peer);

      frames.emplace_back(new std::vector<jvmtiFrameInfo>());
      return frames.back().get();
    }

    art::Mutex mutex;

    // Storage. Only access directly after completion.

    std::vector<art::Thread*> threads;
    // "thread_peers" contains global references to their peers.
    std::vector<jthread> thread_peers;

    std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
  };

  AllStackTracesData data;
  RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));

  art::Thread* current = art::Thread::Current();

  // Convert the data into our output format.

  // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
  // allocate one big chunk for this and the actual frames, which means we need
  // to either be conservative or rearrange things later (the latter is implemented).
  std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
  std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
  frame_infos.reserve(data.frames.size());

  // Now run through and add data for each thread.
  size_t sum_frames = 0;
  for (size_t index = 0; index < data.frames.size(); ++index) {
    jvmtiStackInfo& stack_info = stack_info_array.get()[index];
    memset(&stack_info, 0, sizeof(jvmtiStackInfo));

    const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();

    // For the time being, set the thread to null. We'll fix it up in the second stage.
    stack_info.thread = nullptr;
    stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;

    size_t collected_frames = thread_frames.size();
    if (max_frame_count == 0 || collected_frames == 0) {
      stack_info.frame_count = 0;
      stack_info.frame_buffer = nullptr;
      continue;
    }
    DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));

    // Temporary per-thread buffer; copied into the final chunk below.
    jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
    frame_infos.emplace_back(frame_info);

    jint count;
    jvmtiError translate_result = TranslateFrameVector(thread_frames,
                                                       0,
                                                       0,
                                                       static_cast<jint>(collected_frames),
                                                       frame_info,
                                                       &count);
    DCHECK(translate_result == JVMTI_ERROR_NONE);
    stack_info.frame_count = static_cast<jint>(collected_frames);
    stack_info.frame_buffer = frame_info;
    sum_frames += static_cast<size_t>(count);
  }

  // No errors, yet. Now put it all into an output buffer.
  // Align the frame array region so jvmtiFrameInfo entries are well-aligned.
  size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
                                                alignof(jvmtiFrameInfo));
  size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
  unsigned char* chunk_data;
  jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
  if (alloc_result != ERR(NONE)) {
    return alloc_result;
  }

  jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
  // First copy in all the basic data.
  memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());

  // Now copy the frames and fix up the pointers.
  jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
      chunk_data + rounded_stack_info_size);
  for (size_t i = 0; i < data.frames.size(); ++i) {
    jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
    jvmtiStackInfo& new_stack_info = stack_info[i];

    // Translate the global ref into a local ref.
    new_stack_info.thread =
        static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);

    if (old_stack_info.frame_count > 0) {
      // Only copy when there's data - leave the nullptr alone.
      size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
      memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
      new_stack_info.frame_buffer = frame_info;
      frame_info += old_stack_info.frame_count;
    }
  }

  *stack_info_ptr = stack_info;
  *thread_count_ptr = static_cast<jint>(data.frames.size());

  return ERR(NONE);
}
489
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800490jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
491 jint thread_count,
492 const jthread* thread_list,
493 jint max_frame_count,
494 jvmtiStackInfo** stack_info_ptr) {
495 if (max_frame_count < 0) {
496 return ERR(ILLEGAL_ARGUMENT);
497 }
498 if (thread_count < 0) {
499 return ERR(ILLEGAL_ARGUMENT);
500 }
501 if (thread_count == 0) {
502 *stack_info_ptr = nullptr;
503 return ERR(NONE);
504 }
505 if (stack_info_ptr == nullptr || stack_info_ptr == nullptr) {
506 return ERR(NULL_POINTER);
507 }
508
509 art::Thread* current = art::Thread::Current();
510 art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
511
Andreas Gampef1221a12017-06-21 21:20:47 -0700512 struct SelectStackTracesData {
513 SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}
514
515 std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
516 REQUIRES_SHARED(art::Locks::mutator_lock_)
517 REQUIRES(!mutex) {
518 art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
519 for (size_t index = 0; index != handles.size(); ++index) {
520 if (peer == handles[index].Get()) {
521 // Found the thread.
522 art::MutexLock mu(self, mutex);
523
524 threads.push_back(thread);
525 thread_list_indices.push_back(index);
526
Andreas Gampead9173d2017-06-22 16:33:08 -0700527 frames.emplace_back(new std::vector<jvmtiFrameInfo>());
528 return frames.back().get();
Andreas Gampef1221a12017-06-21 21:20:47 -0700529 }
530 }
531 return nullptr;
532 }
533
534 art::Mutex mutex;
535
536 // Selection data.
537
538 std::vector<art::Handle<art::mirror::Object>> handles;
539
540 // Storage. Only access directly after completion.
541
542 std::vector<art::Thread*> threads;
543 std::vector<size_t> thread_list_indices;
544
Andreas Gampead9173d2017-06-22 16:33:08 -0700545 std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
Andreas Gampef1221a12017-06-21 21:20:47 -0700546 };
547
548 SelectStackTracesData data;
549
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800550 // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
551 art::VariableSizedHandleScope hs(current);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800552 for (jint i = 0; i != thread_count; ++i) {
553 if (thread_list[i] == nullptr) {
554 return ERR(INVALID_THREAD);
555 }
556 if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
557 return ERR(INVALID_THREAD);
558 }
Andreas Gampef1221a12017-06-21 21:20:47 -0700559 data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800560 }
561
Andreas Gampe6237cd32017-06-22 22:17:38 -0700562 RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800563
564 // Convert the data into our output format.
565
566 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
567 // allocate one big chunk for this and the actual frames, which means we need
568 // to either be conservative or rearrange things later (the latter is implemented).
Andreas Gampef1221a12017-06-21 21:20:47 -0700569 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800570 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
Andreas Gampef1221a12017-06-21 21:20:47 -0700571 frame_infos.reserve(data.frames.size());
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800572
573 // Now run through and add data for each thread.
574 size_t sum_frames = 0;
Andreas Gampef1221a12017-06-21 21:20:47 -0700575 for (size_t index = 0; index < data.frames.size(); ++index) {
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800576 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
577 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
578
Andreas Gampef1221a12017-06-21 21:20:47 -0700579 art::Thread* self = data.threads[index];
Andreas Gampead9173d2017-06-22 16:33:08 -0700580 const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800581
582 // For the time being, set the thread to null. We don't have good ScopedLocalRef
583 // infrastructure.
Nicolas Geoffrayffc8cad2017-02-10 10:59:22 +0000584 DCHECK(self->GetPeerFromOtherThread() != nullptr);
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800585 stack_info.thread = nullptr;
586 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
587
588 size_t collected_frames = thread_frames.size();
589 if (max_frame_count == 0 || collected_frames == 0) {
590 stack_info.frame_count = 0;
591 stack_info.frame_buffer = nullptr;
592 continue;
593 }
594 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
595
596 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
597 frame_infos.emplace_back(frame_info);
598
599 jint count;
600 jvmtiError translate_result = TranslateFrameVector(thread_frames,
601 0,
602 0,
603 static_cast<jint>(collected_frames),
604 frame_info,
605 &count);
606 DCHECK(translate_result == JVMTI_ERROR_NONE);
607 stack_info.frame_count = static_cast<jint>(collected_frames);
608 stack_info.frame_buffer = frame_info;
609 sum_frames += static_cast<size_t>(count);
610 }
611
612 // No errors, yet. Now put it all into an output buffer. Note that this is not frames.size(),
613 // potentially.
614 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
615 alignof(jvmtiFrameInfo));
616 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
617 unsigned char* chunk_data;
618 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
619 if (alloc_result != ERR(NONE)) {
620 return alloc_result;
621 }
622
623 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
624 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
625 chunk_data + rounded_stack_info_size);
626
627 for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
628 // Check whether we found a running thread for this.
629 // Note: For simplicity, and with the expectation that the list is usually small, use a simple
630 // search. (The list is *not* sorted!)
Andreas Gampef1221a12017-06-21 21:20:47 -0700631 auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
632 if (it == data.thread_list_indices.end()) {
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800633 // No native thread. Must be new or dead. We need to fill out the stack info now.
634 // (Need to read the Java "started" field to know whether this is starting or terminated.)
635 art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
636 art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
637 art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
638 CHECK(started_field != nullptr);
639 bool started = started_field->GetBoolean(peer) != 0;
640 constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
641 constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
642 JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
643 stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
644 stack_info[i].state = started ? kTerminatedState : kStartedState;
645 stack_info[i].frame_count = 0;
646 stack_info[i].frame_buffer = nullptr;
647 } else {
648 // Had a native thread and frames.
Andreas Gampef1221a12017-06-21 21:20:47 -0700649 size_t f_index = it - data.thread_list_indices.begin();
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800650
651 jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
652 jvmtiStackInfo& new_stack_info = stack_info[i];
653
654 memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
655 new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
656 if (old_stack_info.frame_count > 0) {
657 // Only copy when there's data - leave the nullptr alone.
658 size_t frames_size =
659 static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
660 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
661 new_stack_info.frame_buffer = frame_info;
662 frame_info += old_stack_info.frame_count;
663 }
664 }
665 }
666
Andreas Gampef1221a12017-06-21 21:20:47 -0700667 *stack_info_ptr = stack_info;
Andreas Gampeeba32fb2017-01-12 17:40:05 -0800668
669 return ERR(NONE);
670}
671
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800672// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
673// runtime methods and transitions must not be counted.
674struct GetFrameCountVisitor : public art::StackVisitor {
675 explicit GetFrameCountVisitor(art::Thread* thread)
676 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
677 count(0) {}
678
679 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
680 art::ArtMethod* m = GetMethod();
681 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
682 if (do_count) {
683 count++;
684 }
685 return true;
686 }
687
688 size_t count;
689};
690
// Checkpoint closure that counts the Java frames of the thread it runs on.
// The result is read from |count| after the checkpoint completes.
struct GetFrameCountClosure : public art::Closure {
 public:
  GetFrameCountClosure() : count(0) {}

  // Runs on the target thread; |self| is that thread.
  void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
    GetFrameCountVisitor visitor(self);
    visitor.WalkStack(false);

    count = visitor.count;
  }

  size_t count;
};
704
// JVMTI GetFrameCount: reports the number of Java frames on |java_thread|'s
// stack via |count_ptr|. The count is taken on the target thread itself
// through a synchronous checkpoint.
jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
                                    jthread java_thread,
                                    jint* count_ptr) {
  // It is not great that we have to hold these locks for so long, but it is necessary to ensure
  // that the thread isn't dying on us.
  art::ScopedObjectAccess soa(art::Thread::Current());
  art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);

  art::Thread* thread;
  jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
                                      soa,
                                      java_thread,
                                      &thread);

  if (thread_error != ERR(NONE)) {
    return thread_error;
  }
  DCHECK(thread != nullptr);

  if (count_ptr == nullptr) {
    return ERR(NULL_POINTER);
  }

  GetFrameCountClosure closure;
  thread->RequestSynchronousCheckpoint(&closure);

  *count_ptr = closure.count;
  return ERR(NONE);
}
734
735// Walks up the stack 'n' callers, when used with Thread::WalkStack.
736struct GetLocationVisitor : public art::StackVisitor {
737 GetLocationVisitor(art::Thread* thread, size_t n_in)
738 : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
739 n(n_in),
740 count(0),
741 caller(nullptr),
742 caller_dex_pc(0) {}
743
744 bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
745 art::ArtMethod* m = GetMethod();
746 const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
747 if (do_count) {
748 DCHECK(caller == nullptr);
749 if (count == n) {
750 caller = m;
751 caller_dex_pc = GetDexPc(false);
752 return false;
753 }
754 count++;
755 }
756 return true;
757 }
758
759 const size_t n;
760 size_t count;
761 art::ArtMethod* caller;
762 uint32_t caller_dex_pc;
763};
764
765struct GetLocationClosure : public art::Closure {
766 public:
767 explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
768
769 void Run(art::Thread* self) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
770 GetLocationVisitor visitor(self, n);
771 visitor.WalkStack(false);
772
773 method = visitor.caller;
774 dex_pc = visitor.caller_dex_pc;
775 }
776
777 const size_t n;
778 art::ArtMethod* method;
779 uint32_t dex_pc;
780};
781
782jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
783 jthread java_thread,
784 jint depth,
785 jmethodID* method_ptr,
786 jlocation* location_ptr) {
Andreas Gampe28c4a232017-06-21 21:21:31 -0700787 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
788 // that the thread isn't dying on us.
789 art::ScopedObjectAccess soa(art::Thread::Current());
790 art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
791
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800792 art::Thread* thread;
Andreas Gampe28c4a232017-06-21 21:21:31 -0700793 jvmtiError thread_error = GetThread(art::Thread::Current()->GetJniEnv(),
794 soa,
795 java_thread,
796 &thread);
Andreas Gampef6f3b5f2017-01-13 09:21:42 -0800797 if (thread_error != ERR(NONE)) {
798 return thread_error;
799 }
800 DCHECK(thread != nullptr);
801
802 if (depth < 0) {
803 return ERR(ILLEGAL_ARGUMENT);
804 }
805 if (method_ptr == nullptr || location_ptr == nullptr) {
806 return ERR(NULL_POINTER);
807 }
808
809 GetLocationClosure closure(static_cast<size_t>(depth));
810 thread->RequestSynchronousCheckpoint(&closure);
811
812 if (closure.method == nullptr) {
813 return ERR(NO_MORE_FRAMES);
814 }
815
816 *method_ptr = art::jni::EncodeArtMethod(closure.method);
817 if (closure.method->IsNative()) {
818 *location_ptr = -1;
819 } else {
820 if (closure.dex_pc == art::DexFile::kDexNoIndex) {
821 return ERR(INTERNAL);
822 }
823 *location_ptr = static_cast<jlocation>(closure.dex_pc);
824 }
825
826 return ERR(NONE);
827}
828
Alex Light88e1ddd2017-08-21 13:09:55 -0700829struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
830 // We need a context because VisitLocks needs it retrieve the monitor objects.
831 explicit MonitorVisitor(art::Thread* thread)
832 REQUIRES_SHARED(art::Locks::mutator_lock_)
833 : art::StackVisitor(thread,
834 art::Context::Create(),
835 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
836 hs(art::Thread::Current()),
837 current_stack_depth(0) {}
838
839 ~MonitorVisitor() {
840 delete context_;
841 }
842
843 bool VisitFrame() OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
844 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
845 if (!GetMethod()->IsRuntimeMethod()) {
846 art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
847 ++current_stack_depth;
848 }
849 return true;
850 }
851
852 static void AppendOwnedMonitors(art::mirror::Object* owned_monitor, void* arg)
853 REQUIRES_SHARED(art::Locks::mutator_lock_) {
854 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
855 MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
856 art::ObjPtr<art::mirror::Object> mon(owned_monitor);
857 // Filter out duplicates.
858 for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
859 if (monitor.Get() == mon.Ptr()) {
860 return;
861 }
862 }
863 visitor->monitors.push_back(visitor->hs.NewHandle(mon));
864 visitor->stack_depths.push_back(visitor->current_stack_depth);
865 }
866
867 void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
868 OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
869 for (const art::Handle<art::mirror::Object>& m : monitors) {
870 if (m.Get() == obj) {
871 return;
872 }
873 }
874 monitors.push_back(hs.NewHandle(obj));
875 stack_depths.push_back(-1);
876 }
877
878 art::VariableSizedHandleScope hs;
879 jint current_stack_depth;
880 std::vector<art::Handle<art::mirror::Object>> monitors;
881 std::vector<jint> stack_depths;
882};
883
884template<typename Fn>
885struct MonitorInfoClosure : public art::Closure {
886 public:
887 MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
888 : soa_(soa), err_(OK), handle_results_(handle_results) {}
889
890 void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
891 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
892 // Find the monitors on the stack.
893 MonitorVisitor visitor(target);
894 visitor.WalkStack(/* include_transitions */ false);
895 // Find any other monitors, including ones acquired in native code.
896 art::RootInfo root_info(art::kRootVMInternal);
897 target->GetJniEnv()->monitors.VisitRoots(&visitor, root_info);
898 err_ = handle_results_(soa_, visitor);
899 }
900
901 jvmtiError GetError() {
902 return err_;
903 }
904
905 private:
906 art::ScopedObjectAccess& soa_;
907 jvmtiError err_;
908 Fn handle_results_;
909};
910
911
912template <typename Fn>
913static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
914 art::Thread* self = art::Thread::Current();
915 art::ScopedObjectAccess soa(self);
916 MonitorInfoClosure<Fn> closure(soa, handle_results);
917 bool called_method = false;
918 {
919 art::MutexLock mu(self, *art::Locks::thread_list_lock_);
920 art::Thread* target = ThreadUtil::GetNativeThread(thread, soa);
921 if (target == nullptr && thread == nullptr) {
922 return ERR(INVALID_THREAD);
923 }
924 if (target == nullptr) {
925 return ERR(THREAD_NOT_ALIVE);
926 }
927 if (target != self) {
928 called_method = true;
929 if (!target->RequestSynchronousCheckpoint(&closure)) {
930 return ERR(THREAD_NOT_ALIVE);
931 }
932 }
933 }
934 // Cannot call the closure on the current thread if we have thread_list_lock since we need to call
935 // into the verifier which can cause the current thread to suspend for gc. Suspending would be a
936 // bad thing to do if we hold the ThreadListLock. For other threads since we are running it on a
937 // checkpoint we are fine but if the thread is the current one we need to drop the mutex first.
938 if (!called_method) {
939 closure.Run(self);
940 }
941 return closure.GetError();
942}
943
944jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
945 jthread thread,
946 jint* info_cnt,
947 jvmtiMonitorStackDepthInfo** info_ptr) {
948 if (info_cnt == nullptr || info_ptr == nullptr) {
949 return ERR(NULL_POINTER);
950 }
951 auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
952 REQUIRES_SHARED(art::Locks::mutator_lock_) {
953 auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
954 jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
955 if (err != OK) {
956 return err;
957 }
958 *info_cnt = visitor.monitors.size();
959 for (size_t i = 0; i < visitor.monitors.size(); i++) {
960 (*info_ptr)[i] = {
961 soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
962 visitor.stack_depths[i]
963 };
964 }
965 return OK;
966 };
967 return GetOwnedMonitorInfoCommon(thread, handle_fun);
968}
969
970jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
971 jthread thread,
972 jint* owned_monitor_count_ptr,
973 jobject** owned_monitors_ptr) {
974 if (owned_monitors_ptr == nullptr || owned_monitors_ptr == nullptr) {
975 return ERR(NULL_POINTER);
976 }
977 auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
978 REQUIRES_SHARED(art::Locks::mutator_lock_) {
979 auto nbytes = sizeof(jobject) * visitor.monitors.size();
980 jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
981 if (err != OK) {
982 return err;
983 }
984 *owned_monitor_count_ptr = visitor.monitors.size();
985 for (size_t i = 0; i < visitor.monitors.size(); i++) {
986 (*owned_monitors_ptr)[i] =
987 soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
988 }
989 return OK;
990 };
991 return GetOwnedMonitorInfoCommon(thread, handle_fun);
992}
993
Andreas Gampeb5eb94a2016-10-27 19:23:09 -0700994} // namespace openjdkjvmti